Weaving true-concurrent aspects using constraint solvers
Bowles, Juliana K F; Bordbar, Behzad; Alwanain, Mohammed
DOI:
10.1109/ACSD.2016.19
License:
None: All rights reserved
Document Version
Peer reviewed version
Citation for published version (Harvard):
Link to publication on Research at Birmingham portal
Publisher Rights Statement:
(c) 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other users, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works
Checked for eligibility: 01/07/2016
General rights
Unless a licence is specified above, all rights (including copyright and moral rights) in this document are retained by the authors and/or the copyright holders. The express permission of the copyright holder must be obtained for any use of this material other than for purposes permitted by law.
• Users may freely distribute the URL that is used to identify this publication.
• Users may download and/or print one copy of the publication from the University of Birmingham research portal for the purpose of private study or non-commercial research.
• Users may use extracts from the document in line with the concept of 'fair dealing' under the Copyright, Designs and Patents Act 1988.
• Users may not further distribute the material nor use it for the purposes of commercial gain.
Where a licence is displayed above, please note the terms and conditions of the licence govern your use of this document.
Take down policy
While the University of Birmingham exercises care and attention in making items available there are rare occasions when an item has been uploaded in error or has been deemed to be commercially or otherwise sensitive.
If you believe that this is the case for this document, please contact UBIRA@lists.bham.ac.uk providing details and we will remove access to the work immediately and investigate.
Weaving True-Concurrent Aspects using Constraint Solvers
Juliana K. F. Bowles
School of Computer Science
University of St Andrews
St Andrews KY16 9SX, UK
Email: jkfb@st-andrews.ac.uk
Behzad Bordbar, Mohammed Alwanain
School of Computer Science
University of Birmingham
Edgbaston, Birmingham B15 2TT, UK
Email: {b.bordbar|m.i.alwanain}@cs.bham.ac.uk
Abstract—Large system models usually consist of several simpler models that can be understood more easily. Making changes to the behaviour of a component will likely affect several models and could introduce accidental errors. Aspects address this by modelling new functionality required in several places as an advice, which can be integrated with the original base models by specifying a pointcut. Before checking that the overall outcome is correct, we need to weave the crosscutting advice into the base models, and obtain new augmented models. Although considerable research has been done to weave models, many such approaches are not fully automated. This paper looks at aspect weaving of scenario-based models, where aspects are given a true-concurrent semantics based on event structures. Our contribution is a novel formal automated technique for weaving aspects using the Z3-SMT solver. We compare the performance of Alloy and Z3 to justify our choice.
1. Introduction
Aspect-oriented programming aims to modularise the development of software by separating crosscutting concerns into different modules. Although aspect-oriented programming was originally intended for increasing modularity and separation at the code level, the concepts have been proven very useful for reducing design complexity at the modelling level. Aspect-oriented modelling (AOM) offers mechanisms for separating crosscutting concerns in models through so-called aspects. AOM techniques use the term advice for the action an aspect will take, and pointcut to specify more general rules of where to apply an advice. AOM is particularly useful for dealing with non-functional properties and dependability concerns (including security, reliability, availability, safety, and so on) which usually cut across the system as a whole [1].
To obtain an overall model of the system, we may need to integrate one or more crosscutting concerns (advice models) into the system’s model (base). This process, known as aspect weaving, has received considerable attention in recent years [1], [2], [3], [4]. Providing an overall model is essential, because it contributes to a better understanding of the system behaviour, and makes it possible to analyse the correctness of the model. Here, we are concerned with a weaving method for UML 2.4 sequence diagrams [5].
Composing sequence diagrams manually for large and complex systems is unrealistic. As a result, various automated methods for model composition have been introduced [1], [2], [3], [4], [6], [7], [8], [9], [10]. Most of these methods introduce algorithms to produce a composite model from simpler models obtained from partial specifications. In recent work [9], [10], we presented a fully automated approach for the composition of sequence diagrams making use of the constraint solver Alloy [11]. However, the well-known problem of poor tool performance limiting applicability to large and complex models is discussed, for instance, in [12], [13]. As a result, this paper investigates an alternative composition method in the context of AOM weaving through the use of Z3 [14]. Z3 is a high-performance SMT solver from Microsoft Research, targeted at solving problems arising in software analysis and verification [14]. Furthermore, we present a novel true-concurrent semantics for sequence diagram weaving, and a model-driven transformation of sequence diagrams to Z3, which preserves the semantics of composition and addresses the scalability of models. We carried out a series of experiments to evaluate and compare the suitability of the two solvers (Z3 and Alloy).
The remainder of the paper is structured as follows: Section 2 highlights the key contributions of this paper. Sequence diagrams and their semantics are given in Section 3. We introduce an example in Section 4, and a detailed description of our approach in Section 5. Section 6 shows a comparison study between Z3 and Alloy. Finally, Section 7 discusses related work, and Section 8 concludes the paper.
2. Our Contribution
There are two fundamental problems that need to be considered when composing models, where weaving can be seen as a form of composition: composition must be well defined to be feasible for automation, and the associated algorithm must be efficient.
To address the first problem, we define a formal semantics of composition and encode this semantics as formal transformation rules. These transformation rules generate the logical constraints associated to our source models, which serve as input to the constraint solver. Thereafter, the constraint solver produces a solution for the composition (if one exists) in accordance to our formal semantics.
The second problem, namely the efficiency of the composition algorithm, requires some further analysis by running different experiments and performing a comparison with suitable alternatives. Naturally, the problem arises when the models to be composed increase in size and complexity, but it is also influenced by how the transformation was implemented, the complexity of the composition algorithm and the programming language used.
In recent work [9], [10], we have presented a method for sequence diagram composition based on Alloy. The approach taken does not directly involve an algorithm to compose sequence diagrams, but rather uses Alloy to produce all possible solutions for the composition, where each solution is a possible trace of execution in the composed model. The composed model in Alloy satisfies the conjunction of all logical constraints associated to the sequence diagrams and additional matching constraints. The approach does not, however, explicitly incorporate the semantics of scenarios in the transformation itself. Further, whereas in [9] composition is treated as a simple conjunction of models through syntactic matching of elements of both models, in [10] we allow the matching glue to consist of additional behavioural constraints (for instance imposing an order, disallowing event occurrences, etc). Our approach in this paper is more generic, and covers a more complex form of composition through aspect weaving.
3. Modelling Aspects
UML sequence diagrams capture scenarios of execution as object interactions, and are also commonly used to model aspects and aspect weaving. As such they naturally capture notions such as base model, advice and pointcut [1], [8], [15], [16].
A sequence diagram shows all objects involved in the interaction it describes. Each object has a vertical dashed line called lifeline showing the existence of the object at a particular time. Points along the lifeline are called locations (a terminology borrowed from LSCs [17]) and denote the occurrence of events such as sending/receiving a message. The order of locations along a lifeline is significant denoting, in general, the order in which the corresponding events occur. One example of a sequence diagram showing a base model is given in Figure 1. This example is explained in detail in Section 4. There are three instances involved in the interaction, and we show explicitly the locations along the lifeline of (the arbitrary) instance of class PetrolStation.
A message is a synchronous or asynchronous communication between two objects shown as an arrow connecting the respective lifelines, that is, the underlying send and receive events of the message. Asynchronous communication is shown by an open arrowhead and is the form of communication used in this paper. An interaction between several objects consists of one or more messages, but may be given further structure through so-called interaction fragments. There are several kinds of interaction fragments including seq (sequential behaviour), alt (alternative behaviour), par (parallel behaviour), neg (forbidden behaviour), assert (mandatory behaviour), loop (iterative behaviour), and so on [5]. Depending on the operator used, an interaction fragment consists of one or more operands. In the case of the alt fragment, each operand describes a choice of behaviour. Only one of the alternative operands is executed if the guard expression (if present) evaluates to true. If more than one operand has a guard that evaluates to true, one of the operands is selected nondeterministically for execution. In the case of the par fragment, there is a parallel merge between the behaviours of the operands. The event occurrences of the different operands can be interleaved in any way as long as the ordering imposed by each operand as such is preserved.
The model in Figure 1 contains one alternative fragment with two operands. The events associated to the locations of the instance PetrolStation are ordered (one occurs before or after another) unless they are associated to locations within different operands of the alternative fragment in which case they are mutually exclusive.
When modelling with aspects, we start with one or more scenarios capturing base behaviour. If we then need to add a new piece of functionality or a concern that may cut across many parts of the system (such as a dependability requirement) we model the new piece of behaviour as an advice. In order to integrate the advice into the base behaviour we need to specify how this should be done through a pointcut.
In order to be able to define and implement a technique for weaving aspects given as scenarios, we have to make sure that we understand the meaning of a scenario (base model or advice) and how to compose them under certain conditions (pointcut). In other words, we need to have a formal semantics for the scenario-based language and a formally defined parallel composition with synchronisation (done in accordance with the pointcut).
There is a plethora of papers defining a semantics for sequence diagrams and other UML diagrams. See [18] for an overview. We have given a formal semantics to sequence diagrams in [19] using a true-concurrency model, namely labelled event structures (LES) from [20]. We have defined parallel composition with synchronisation for LES formally in [21]. Our approach in this paper effectively automates a weaving mechanism in accordance to this semantics.
LES are very suitable to describe the traces of execution in sequence diagrams, being able to capture directly the notions available such as sequential, parallel and iterative behaviour (or the unfoldings thereof) as well as nondeterminism. For each of the notions we use one of the (binary) relations available over events: causality, nondeterministic choice and true concurrency.
We keep the presentation of the semantics as simple as possible here. A LES consists of a set of events and binary relations on events which satisfy certain conditions. The binary relations are causality (a partial order) and nondeterministic choice also called conflict (an irreflexive, symmetric relation which propagates over causality). The formal definition is given below, where \( L \) is an alphabet of labels.
**Definition 1.** A labelled event structure (LES) is a tuple \( \mathcal{E} = (Ev, \rightarrow^{*}, \#, \mu) \) where \( Ev \) is a set of events with binary relations \( \rightarrow^{*} \), \( \# \subseteq Ev \times Ev \) for causality and conflict respectively. Causality \( \rightarrow^{*} \) is a partial order, and conflict \( \# \) is irreflexive and symmetric. Conflict propagates over causality, that is, for arbitrary events \( e_1, e_2, e_3 \in Ev \) if \( e_1 \# e_2 \) and \( e_2 \rightarrow^{*} e_3 \) then \( e_1 \# e_3 \). A labelling is given by \( \mu : Ev \rightarrow L \), a (possibly partial) function which maps events to labels in \( L \).
There is a further implicit relation between events in a LES, namely concurrency. Two events \( e_1, e_2 \) are concurrent, written \( e_1 \mathrel{co} e_2 \), iff they are neither related by causality nor by conflict. Since system computations always have a starting point, we only consider discrete event structures here, that is, structures where the set of previous occurrences of an event (also called local configuration) is finite. Formally, the local configuration of an event \( e \), given by \( \downarrow e = \{ e' \mid e' \rightarrow^{*} e \} \), is finite. This allows us to refer to the notion of immediate causality. Formally, two events \( e_1, e_2 \in Ev \), related by causality \( e_1 \rightarrow^{*} e_2 \), are related by immediate causality, written \( e_1 \rightarrow e_2 \), if we cannot find another event \( e_3 \) with \( e_1 \neq e_3 \neq e_2 \), such that \( e_1 \rightarrow^{*} e_3 \) and \( e_3 \rightarrow^{*} e_2 \). Further, a configuration \( C \) in \( \mathcal{E} \) consists of a subset of events that is conflict free and downwards closed (i.e., for an arbitrary event \( e \in C \), if \( e' \rightarrow^{*} e \) then \( e' \in C \)). A trace \( \tau \) in \( \mathcal{E} \) is a maximal configuration. A LES encodes all possible traces of execution. A further useful notion is that of a LES morphism. A LES morphism \( h : \mathcal{E}_1 \rightarrow \mathcal{E}_2 \) is a partial function such that for an arbitrary configuration \( C \) in \( \mathcal{E}_1 \), \( h(C) \) is a configuration in \( \mathcal{E}_2 \), and \( h \) is injective on \( C \).
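As a concrete (and purely illustrative) reading of these definitions, the following short Python sketch checks conflict propagation, the derived concurrency relation and the configuration conditions for a small hand-written event structure; the event names and relations are hypothetical and not taken from the paper's models.

from itertools import product

# A small hand-written labelled event structure (illustrative only).
events = {"e1", "e2", "e3", "e4"}
causality = {(e, e) for e in events} | {("e1", "e2"), ("e1", "e3")}   # e1 ->* e2, e1 ->* e3
conflict = {("e2", "e3"), ("e3", "e2")}                               # e2 # e3 (symmetric)

# Conflict propagates over causality: a # b and b ->* d implies a # d.
def propagate(conflict, causality):
    closed, changed = set(conflict), True
    while changed:
        changed = False
        for (a, b) in list(closed):
            for (c, d) in causality:
                if c == b and a != d and (a, d) not in closed:
                    closed |= {(a, d), (d, a)}
                    changed = True
    return closed

conflict = propagate(conflict, causality)

# Two distinct events are concurrent iff related by neither causality nor conflict.
def concurrent(a, b):
    return a != b and (a, b) not in causality and (b, a) not in causality and (a, b) not in conflict

# A configuration is conflict-free and downwards closed under causality.
def is_configuration(C):
    conflict_free = all((a, b) not in conflict for a, b in product(C, C))
    downwards_closed = all(a in C for (a, b) in causality if b in C)
    return conflict_free and downwards_closed

print(concurrent("e2", "e4"))                 # True: unrelated events
print(is_configuration({"e1", "e2"}))         # True
print(is_configuration({"e2"}))               # False: not downwards closed (e1 ->* e2)
print(is_configuration({"e1", "e2", "e3"}))   # False: e2 # e3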
To give a semantics to sequence diagrams with LES, we need a labelling function that relates events to diagram locations. To keep it simple, one possible label for an event could be \((m, s)\) or \((m, r)\) to denote sending or receiving a message \( m \) respectively. We assume that other events do not have labels. Let \( M \) be the set of message labels, and \( I \) be the set of instances associated to a diagram. We can partition the set of events \( Ev = \bigcup_{i \in I} Ev_i \) such that each subset \( Ev_i \) denotes the events along a lifeline of \( i \in I \). In other words, each event in a LES associated to a sequence diagram corresponds to a unique instance of the diagram.
The LES associated to the example from Figure 1 is illustrated in Figure 2. Minimal events, where a minimal event \( e \) satisfies \( \downarrow e = \{ e \} \), are always associated to a different instance from the sequence diagram. In Figure 2, only immediate causality \( \rightarrow \) is shown. Events belonging to different instances are related by immediate causality iff they are associated to the sending/receiving of the same message (for instance, \( g_{10} \rightarrow e_8 \) on message invalidPin). Note that in Figure 2 we indicate the name of the message on the immediate causality between send and receive events only for convenience. In effect what we have is for instance \( \mu(g_{10}) = (\text{invalidPin}, s) \) and \( \mu(e_8) = (\text{invalidPin}, r) \). The existence of events in conflict in this example reflects the alternative fragment in the sequence diagram, which shows two alternative interactions. Conflict is shown, for instance, between \( g_7 \) and \( g_{10} \) (\( g_7 \,\#\, g_{10} \)), denoting the case of a valid and an invalid pin respectively. Conflict propagates and future events are also in conflict, so for instance \( g_{9} \,\#\, g_{10} \), and so on. This example contains two possible traces of execution.
As seen in [19], LES offer a suitable semantics for sequence diagrams and the various interaction fragments defined. Whereas operators such as \texttt{seq}, \texttt{alt}, and \texttt{par} have a natural correspondence to the relations within a LES, it may be less obvious how to capture other operators. To represent a \texttt{loop} fragment, a LES has to model all possible iterations of the loop as unfoldings (traces in the LES). In an automated approach, we must assume a finite number of possible iterations and hence unfoldings. To be able to describe a **neg** fragment, we must distinguish allowed from **disallowed** traces of execution, which we do not do here. We have dealt with this operator in [10], where the composition glue can specify disallowed behaviour overall. The effect is that we prune all possible (composite) traces that contain disallowed behaviour.
4. Example
Consider a simple example of how the behaviour of a system given as a scenario may have to be extended, and how this can be done through the use of aspects. The example describes a petrol station scenario which was adapted from [22]. Let us consider the base model first as shown in Figure 3(a). In this scenario a user of a petrol station can only fill their car with petrol provided they have a card (and know the pin code for the card).
The scenario starts with the user inserting a payment card (insertCard). The petrol station requests the pin code from the user (requestPin), which the user then enters (pinCode). The petrol station sends a message to the bank to validate the pin code (validate and result), and an alt fragment is used to model the two possible outcomes: (1) the pin code is valid, the user is allowed to start fuelling (startFuel), and when the user has finished he/she stops (stop); (2) the pin code is invalid, the user is informed that the pin code entered is invalid (invalidPin). In both cases, the scenario ends by ejecting the card (cardOut).
Now assume that we want a more refined model where we allow the user to indicate the exact amount of fuel required in advance. This is added by modelling an advice as shown in Figure 3(b).
The advice model starts with a valid pin code scenario. The idea here is that after entering the amount of fuel requested the petrol station forwards a message to the bank to validate whether the request is acceptable (basically the user has enough balance to cover the request). Again two options are possible. If the account balance covers this amount, it will be debited from the account and the petrol station will start fuelling. However, if the account balance cannot cover the amount requested the transaction is cancelled.
To consider the advice within the original base model corresponds to weaving it into the base model and obtain an augmented model. Strictly speaking we can have more than one base model in a system and may want to integrate more than one advice. Without loss of generality we can assume that we can first obtain a composed model for the base behaviour and deal with weaving of an advice one at a time. Note that there may be an interaction (conflict) between two advices and our method would detect that when applying weaving the second time, but we are not explicitly dealing with aspect interaction in this paper. The order in which advices are weaved into a model can produce different results, and the designer needs to take this into account when applying weaving in succession. Our technique can in addition be used to identify these differences automatically.
In order to do the weaving, we specify a pointcut which shows how the elements in the base and advice models match. The pointcut in Figure 3(c) indicates that the lifelines and the messages validPin and startFuel are matched.

the events identified in accordance to $f_b$ and $f_a$, and by definition is such that configurations are preserved and have injective mappings (for full details cf. [21]).
For our example, the LES for the base has been given in Figure 2. Consider the LES for the advice as shown in Figure 4. Assume all events are further indexed by $b$ (base) or $a$ (advice) to avoid confusion.
Figure 4: LES for the advice
The LES for the pointcut $E_p$ (not shown) contains four events $\{e_{1p}, e_{2p}, e_{3p}, e_{4p}\}$ and the following causality relations: $e_{1p} \rightarrow e_{2p}$, $e_{1p} \rightarrow e_{3p}$, $e_{2p} \rightarrow e_{4p}$ and $e_{3p} \rightarrow e_{4p}$. We would have morphisms defined such that $f_b(g_{7b}) = e_{1p} = f_a(g_{1a})$, $f_b(e_{2b}) = e_{2p} = f_a(e_{1a})$, $f_b(g_{3b}) = e_{3p} = f_a(g_{3a})$ and $f_b(e_{4b}) = e_{4p} = f_a(e_{3a})$. The categorical construction is thus applicable and we are able to obtain a solution. This paper automates the process using Z3 [14].
5. Automated Weaving
5.1. Overview
Sequence diagrams capturing the base, advice and pointcut models are transformed into equivalent textual representations of their underlying semantics in LES. The transformation is defined at the metamodel level [23], that is, we have a metamodel representation for sequence diagrams and for LES, and translate elements of one metamodel into elements of the other. We treat a pointcut as a simple sequence diagram, which gives us an indication of the matching constraints. We then transform the LES models into equivalent Z3 [14] models. Since a LES is a formal model (essentially a set and relations on elements of this set, and additional labels) the transformation to Z3 (where we have first-order logical constraints and functions) is fairly straightforward. A unique Z3 model is produced for each LES model. The constraint solver considers the conjunction of all logical constraints associated to the three models, and generates a solution which corresponds to the augmented model in accordance to the semantics of parallel composition as defined in [21]. If the matching cannot be done Z3 returns unsat (unsatisfiable), which means that no solution exists. Recall that formally this means that we cannot find surjective LES morphisms $f_b$ and $f_a$ on which to apply the categorical construction.
In our approach, all models have to be converted into Z3 specifications, and we focus on the LES to Z3 transformation step. Transformation rules define the mapping between a source (LES) and target (Z3) metamodel. A transformation engine executes the transformation rules on a source model to generate its equivalent target model.
Z3 supports many types of declarations, such as Integer, Real and Boolean, as well as allowing users to declare new sorts (types). Functions in Z3 are the basic building blocks of SMT formulas. Moreover, functions have no side effects and are total (i.e., they are defined for any element in the domain). Z3 is based on first-order logic. Constants are functions that take no arguments, and we write $\text{Const}(a,A)$ to declare a constant $a$ of type $A$. In addition, Z3 supports Boolean operators, such as $\text{And}$, $\text{Or}$, $\text{Not}$, $\text{Implies}$ (logical implication), and equality $==$ (used for bi-implication), among others. Universal ($\text{ForAll}$) and existential ($\text{Exists}$) quantifiers are also supported by Z3.
In Z3, it is possible to create a general purpose solver using $\text{Solver()}$ and associate it to a particular variable by declaring $s=\text{Solver()}$. Later we can add constraints to $s$ through the method $\text{add()}$. Finally, we can check (solve) all the constraints associated to a solver by calling the method $\text{check()}$. The result is either $\text{sat}$ (satisfiable, a solution was found), or $\text{unsat}$ (unsatisfiable, no solution exists).
5.2. Model transformation from LES to Z3
Table 1 shows how the main LES concepts are mapped onto Z3. In particular, a LES is understood here as the semantic model for sequence diagrams as discussed in Section 3. All main LES notions including events $Ev$, instances $I$ and messages $M$ have a matching new type of element in Z3. This corresponds to creating new types called $Ev$, $I$ and $M$ using $\text{DeclareSort}$ (rules 1, 3 and 6 in Table 1). Elements of these sets (an event, a message and a lifeline) are mapped onto constants of the corresponding sort (rules 2, 4 and 7). The set of events in a LES used as a semantic model for sequence diagrams defines a partition determined by the set of instances $I$. This is dealt with in Z3 through a cover function. In particular, if an event $e$ belongs to an instance $i_1$ it cannot belong to a different instance $i_2$ (rule 5). A message is captured in a LES as a triple $(e_1, m, e_2)$ such that $\mu(e_1) = (m,s)$ and $\mu(e_2) = (m,r)$, and is captured in Z3 as a function $\text{isMsg}$ that for a triple $(e_1, m, e_2)$ determines whether it corresponds to a valid message tuple or not. A message always relates different events by causality (rule 8).
Each event belongs to exactly one lifeline; the transformation captures this through the cover function (rule 5), as the following snippet shows for one event of each lifeline.
// events of lifeline : PetrolStation
s.add(cover(e72, PetrolStation))
// events of lifeline : Bank
s.add(cover(e1, Bank))
// events of lifeline : User
s.add(cover(e0, User))
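The partition of events by lifelines (rule 5) additionally requires that an event covered by one lifeline cannot be covered by another. A hedged sketch of how such a constraint might be stated in z3py (our own formulation; the tool's generated code may differ) is:

from z3 import DeclareSort, Const, Function, BoolSort, Solver, ForAll, Implies, And, Not

Ev = DeclareSort('Ev')
I = DeclareSort('I')
cover = Function('cover', Ev, I, BoolSort())

s = Solver()
e = Const('e', Ev)
i1, i2 = Const('i1', I), Const('i2', I)
# rule 5: if an event belongs to lifeline i1, it cannot belong to a different lifeline i2
s.add(ForAll([e, i1, i2], Implies(And(cover(e, i1), i1 != i2), Not(cover(e, i2)))))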
Table 1: Mapping LES concepts onto Z3.

| Rule | LES | Z3 |
|---|---|---|
| 1 | Set of events $Ev$ | New sort $Ev$ declared with $\text{DeclareSort}$ |
| 2 | An event $e_1 \in Ev$ | Constant of sort $Ev$ |
| 3 | Set of instances or lifelines $I$ | New sort $I$ declared with $\text{DeclareSort}$ |
| 4 | An instance $i_1 \in I$ | Constant of sort $I$ |
| 5 | Partition $Ev = \bigcup_{i \in I} Ev_i$ | $\text{cover}$ function: each event belongs to exactly one instance |
| 6 | Set of messages $M$ | New sort $M$ declared with $\text{DeclareSort}$ |
| 7 | A message $m \in M$ | Constant of sort $M$ |
| 8 | Message triple $(e_1, m, e_2)$ with $\mu(e_1) = (m, s)$, $\mu(e_2) = (m, r)$ and $e_1 \neq e_2$ | $\text{isMsg}$ function identifying valid message triples; $e_1$ and $e_2$ related by causality |
| 9 | Causality $\rightarrow^{*}$ | Function on pairs of events constrained to be a partial order (reflexive, antisymmetric, transitive) |
| 10 | Conflict $\#$ | Function constrained to be irreflexive, symmetric and to propagate over causality |
| 11 | Concurrency $e_1 \mathrel{co} e_2$ | Derived: events related by neither causality nor conflict are concurrent |
Furthermore, rules 9, 10 and 11 show how the binary relations between events in a LES are captured in Z3 and in accordance to the LES Definition 1. All relations are captured as functions in Z3 with additional constraints. The rules capture directly all the aspects of the formal definition given. For instance rule 9 shows how to define the partial order, that is, the relation is reflexive, antisymmetric and transitive. Rule 10 describes the conflict relation which is irreflexive, symmetric and propagates over causality.
The concurrency relation in an LES (rule 11) represents an additional binary relation between events. Rather than explicitly defining events in concurrency, any two events not related by causality or conflict are concurrent.
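As a hedged illustration of rules 9 to 11 (our own reading; the function names and the exact encoding generated by the tool may differ), the constraints could be stated in z3py roughly as follows.

from z3 import DeclareSort, Const, Function, BoolSort, Solver, ForAll, Implies, And, Not

Ev = DeclareSort('Ev')
Causality = Function('Causality', Ev, Ev, BoolSort())    # rule 9
Conflict = Function('Conflict', Ev, Ev, BoolSort())      # rule 10
Concurrent = Function('Concurrent', Ev, Ev, BoolSort())  # rule 11

s = Solver()
x, y, z = Const('x', Ev), Const('y', Ev), Const('z', Ev)

# rule 9: causality is a partial order (reflexive, antisymmetric, transitive)
s.add(ForAll([x], Causality(x, x)))
s.add(ForAll([x, y], Implies(And(Causality(x, y), Causality(y, x)), x == y)))
s.add(ForAll([x, y, z], Implies(And(Causality(x, y), Causality(y, z)), Causality(x, z))))

# rule 10: conflict is irreflexive, symmetric, and propagates over causality
s.add(ForAll([x], Not(Conflict(x, x))))
s.add(ForAll([x, y], Implies(Conflict(x, y), Conflict(y, x))))
s.add(ForAll([x, y, z], Implies(And(Conflict(x, y), Causality(y, z)), Conflict(x, z))))

# rule 11: distinct events are concurrent iff related by neither causality nor conflict
s.add(ForAll([x, y], Concurrent(x, y) ==
             And(x != y, Not(Causality(x, y)), Not(Causality(y, x)), Not(Conflict(x, y)))))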
To keep it simple we only show the transformation of the advice model of Figure 3(b) and Figure 4. The Z3 model considered is obtained from the LES of Figure 4.
For each lifeline in the sequence diagram, the transformation generates a constant in Z3 as the snippet of code shows below.
I = DeclareSort('I')
// Declaring the lifelines of the advice.
User = Const('User', I)
PetrolStation = Const('PetrolStation', I)
Bank = Const('Bank', I)
Similarly to what was done for lifelines, for each event in the LES the transformation generates a constant in Z3.
For each message in the model, the transformation generates the following constants in Z3.
M = DeclareSort('M')
// Defining the set of messages in the advice model
...
Withdraw = Const('Withdraw', M)
The relationship between messages and associated send/receive events is given by the function isMsg given in Table 1 (rule 8). For our advice model example, this is as follows.
s.add(isMsg(g1, ValidPin, e1))
s.add(isMsg(g2, EnterFuelAmount, e2))
s.add(isMsg(g3, FuelAmount, e3))
s.add(isMsg(g4, CheckAmount, e4))
s.add(isMsg(g5, StartFuel, e5))
s.add(isMsg(g6, PaymentDeclined, e6))
s.add(isMsg(g7, Withdraw, g7))
s.add(isMsg(l3, BalanceOk, g6))
s.add(isMsg(l5, Cancel, g9))
Next, we define the relations between the events, namely causality and conflict. Concurrency is generated automatically by the solver in accordance to rule 11. The following code shows the User events related by causality and conflict. Further causality and conflict relations between other events are obtained automatically through the rules 9 and 10.
s.add(Next(e0, e1))
s.add(Next(e1, e2))
s.add(Next(e2, e3))
s.add(Next(e3, e4))
s.add(Next(e4, e5))
s.add(Next(e5, e6))
s.add(Next(e6, e7))
s.add(Conflict(e5, e6))
5.3. Aspect weaving
After producing the Z3 code for the advice, base, and pointcut, we need to create Z3 code that relates them together. This involves creating a set of constraints which identify how base model elements are matched to advice elements in accordance to the pointcut.
There is a wide range of interpretations of how pointcuts should be used to match model elements of the base and advice. Wimmer et al. [24] survey some of these interpretations. To produce the Z3 code that glues the advice and base, any chosen interpretation must be formalised. For example, Klein et al. [4] introduce and formalise four interpretations. These four interpretations describe the degree of strictness applied when trying to detect a set of model elements which relate to another. For example in Figure 3(c), if we are looking for message validPin followed by startFuel between two lifelines, we can be very strict and assume that the only acceptable match for this is to have the two messages appearing consecutively in a diagram. Alternatively, we can be less restrictive and allow a match provided every occurrence of message validPin happens before startFuel irrespective of the behaviour that may occur in between the messages. Klein et al. refer to the latter as the general interpretation. Our implementation follows the general interpretation since this is what our categorical construction in [21] does. It is possible to replace this and follow any of the other three alternatives, but we would have to redefine the semantics for the composition in that case. Furthermore, choosing for instance the strict interpretation would not allow the models depicted in Figure 3 to be woven.
Formal matching has been described for LES earlier (cf. Section 4) through the surjective morphisms \(f_b\) and \(f_a\) where source and target events have identical labels. We follow the same approach in Z3.
Assume that \(M_1\) and \(M_2\) represent model elements of the base and advice respectively. Model elements of the advice of Figure 3 include validPin and balanceOK. In addition, validPin is also a model element of the base. To distinguish the two we write \(M_1.\mathit{validPin}\) and \(M_2.\mathit{validPin}\) instead. Matching can be represented as a Boolean partial function \(\mathit{match}\) on the Cartesian product of the model elements of \(M_1\) and \(M_2\). For instance, \(\mathit{match}(M_1.\mathit{validPin}, M_2.\mathit{validPin}) = \mathit{true}\). The value of \(\mathit{match}\) can be obtained from the pointcut model which describes which elements can be matched. The following snippet of Z3 describes the code for matching messages, events and lifelines.
MessageMatch = Function('MessageMatch', M, M, BoolSort())
EventMatch = Function('EventMatch', Ev, Ev, BoolSort())
LifelineMatch = Function('LifelineMatch', I, I, BoolSort())
ForAll([ei, ek, ej, en, Mi, Mj], Implies(And(MessageMatch(Mi, Mj), isMsg(ei, Mi, ek), isMsg(ej, Mj, en)), And(EventMatch(ei, ej), EventMatch(ek, en))))
ForAll([ei, ej, Li, Lj], Implies(And(EventMatch(ei, ej), cover(ei, Li), cover(ej, Lj)), LifelineMatch(Li, Lj)))
Above, the first three lines declare Boolean-valued functions for matching of messages, events, and lifelines, respectively. The fourth line states that if two messages are matched then their send and receive events are matched as well. As two matched events must belong to matched lifelines, we have the final line. In addition to the above well-definedness criteria, further Z3 code is required to capture the complete definition of the matching morphisms, including how matching preserves configurations (causality and concurrency), and is injective over configurations. One example: if two events are matched, then any event which follows one of them will follow the other as well. For space reasons, we omit further rules.
ForAll([ei, ej, en], Implies(And(EventMatch(ei, ej), Next(ei, en)), Next(ej, en)))
Finally, it is possible that multiple instances of advice messages are found in the base. For example, consider the scenario that validPin and startFuel appear twice or more in the base. In such cases we may follow the "Per Pointcut Match" strategy introduced in [25] and assume that a new instance of the advice element is introduced for each pointcut match. This is important because if the repeated occurrences of messages in the base are part of the same configuration, then our morphisms (injective on configurations) force the events associated to these messages to be mapped onto different events in the pointcut.
If all the constraints are satisfiable (outcome of \texttt{s.check()}), the transformation generates a new Z3 model which is a solution representing the result of merging the original models. For our example the model obtained corresponds to the diagram shown in Figure 5, as expected.
Figure 5: Woven sequence diagram
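As a small hedged illustration of how such a solution can be inspected programmatically (our own fragment, not code from the paper): assuming the solver s and the declarations EventMatch, Next, e1 and e2 from the snippets above, the satisfying model returned by Z3 can be queried as follows.

from z3 import sat

if s.check() == sat:
    m = s.model()                            # the woven (augmented) model found by Z3
    print(m.evaluate(EventMatch(e1, e2)))    # is this pair of events matched in the solution?
    print(m.evaluate(Next(e1, e2)))          # immediate causality in the woven model
else:
    print("unsat: the pointcut cannot be matched against the base and advice")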
6. Comparing Z3 and Alloy
We want to compare the performance of our approach using Z3 with our earlier solutions that used Alloy for sequence diagram composition. For that purpose, we ran 12 experiments, divided into two phases. In Phase 1, we used sequence diagrams without combined fragments. The first experiment of Phase 1 consisted of composing two sequence diagrams each with 4 messages and 2 lifelines. Then, in the following experiments, the number of messages was increased until the composition time became prohibitively long.
Table 2 shows the Phase 1 experiments in detail. The results illustrate that increasing the number of sequential messages strongly affected Alloy's performance.
Overall, this study showed that the maximum number of clauses Alloy can solve is 1753293, taking approximately 3 hours and 10 minutes to produce a solution. Increasing the number of elements further causes Alloy to run out of memory. This is due to the fact that the number of clauses and the composition time grow exponentially with the number of sequence diagram elements. The Alloy analyzer is SAT solver-based, and SAT-solving time may increase enormously depending on factors such as the number of variables and the average length of the clauses [12]. On the other hand, Z3 showed good performance throughout most of the experiments, and increasing the number of messages did not produce any significant effect on its performance (less than 1 minute on average - see Figure 7).
According to Nijjar and Bultan [13], there are several reasons that explain why Z3 performs better than Alloy. First, Z3 uses many heuristics to eliminate quantifiers in formulas. It uses an E-graph to instantiate quantified variables, code trees, and eager instantiation, which makes it very effective at dealing with quantifiers [26]. Second, Z3 and Alloy use different implementation languages: Z3 was implemented in C++, while Alloy and its SAT solver were implemented in Java. Another reason that might make Z3 more efficient is that SMT solvers operate at a higher level of abstraction than SAT solvers. SMT solvers can use information about the *structure* and *semantics* of a formula to make the satisfiability process faster, whereas a SAT-based approach converts the model to SAT formulas using a Boolean encoding. Due to the increasing size of the Boolean encoding, we then suffer from an exponential increase in composition time. We observed that the number of clauses produced by Z3 is much smaller than that produced by Alloy, which uses the SAT4J solver (see Figure 7).
Figure 6: Composition time in Z3 and Alloy
(a) Composition time in Z3. (b) Composition time in Alloy.
Figure 7: Number of clauses in Z3 and Alloy for phase 1.
In Phase 2, the experiments tested how combined fragments affected the performance of Alloy and Z3. We adopted one of the Phase 1 examples as a test case, namely Example 5, which already had a performance problem, and inserted a combined fragment. The number of nested combined fragments was then increased until one of the solvers ran out of memory. Table 2 shows that when messages are structured further through combined fragments, the performance of Alloy is strongly affected. This study confirms the conclusion of [13], i.e., that Alloy's performance is affected by the number of variable clauses. Indeed, with an increasing number of combined fragments the performance of Alloy becomes very slow. For examples 11 and 12, Alloy runs out of memory. Z3, on the other hand, was not significantly affected and still produced solutions within seconds (see Table 2).
7. Related Work
Klein et al. [4] propose a semantics-based weaver for sequence diagrams. This approach has some similarities to ours, however we use constraint solvers to automatically check consistency of the semantics in the composed model. We have not addressed the order in which aspects are applied, and we do not believe that this will lead to necessarily the same outcome. Whether this can be claimed for a subset of problems remains under investigation.
Reddy et al. [1] use UML sequence diagram templates for describing behaviours of design aspects and use tags for behaviour composition. In their work, an aspect may include position fragments (e.g., begin, end) designating the location to be added in the sequence diagram. Clarke and Walker [27] use UML templates to define aspects. Their approach composes static structural properties of aspects with interaction properties. However, this approach focuses on less complex structures and does not illustrate the methodology of composing interaction models.
Aspects can sometimes be used to model non-functional concerns such as dependability requirements which usually cut across several parts of the system. Regarding the use of AOM for security, [28] presents a method for the analysis of the performance effects of security properties specified as aspects. Moreover, Whittle et al. [7] use sequence diagrams to model and execute misuse case scenarios (desired and attack scenarios) for secure systems development. Mitigation scenarios are then designed as aspect scenarios and woven into the core behaviour to prevent the execution of the attack scenarios.
When looking at the integration of several model views or diagrams, Widl et al. [6] deal with composing concurrently evolved sequence diagrams in accordance to the overall behaviour given in state machine models. They make direct use of SAT solvers for the composition. Liang et al. [29] present a method of integrating sequence diagrams based on the formalisation of sequence diagrams as typed graphs. Both these papers focus on less complex structures. For example, they do not deal with combined fragments, which can potentially cause substantial complexity. Bowles and Bordbar [30] presented a method of mapping a design consisting of class diagrams, OCL constraints and sequence diagrams into a mathematical model for detecting and analysing inconsistencies. It uses the same underlying categorical construction as in [21] but has not been automated. On the other hand, Zhang et al. [31] and Rubin et al. [32] use Alloy for the composition of class diagrams. They transform UML class diagrams into Alloy and compose them automatically. They focus on composing static models and the composition code is produced manually.
### Table 2: Experiments.

| Example | Elements | Fragments | Lifelines | Messages | Events | Alloy time (s) | Alloy clauses | Z3 time (s) | Z3 clauses |
|---|---|---|---|---|---|---|---|---|---|
| Phase 1 | | | | | | | | | |
| 1 | 22 | 0 | 2 | 6 | 14 | | | | |
| 2 | 28 | 0 | 2 | 8 | 18 | | | | |
| 3 | 34 | 0 | 2 | 10 | 22 | | | | |
| 4 | 40 | 0 | 2 | 12 | 26 | | | | |
| 5 | 46 | 0 | 2 | 14 | 30 | | | | |
| 6 | 52 | 0 | 2 | 16 | 34 | | | | |
| 7 | 58 | 0 | 2 | 18 | 38 | | | | |
| 8 | 64 | 0 | 2 | 20 | 42 | | | | |
| Phase 2 | | | | | | | | | |
| 10 | 47 | 1 | 2 | 14 | 30 | 11163.872 | 1753293 | 14.32 | 285163 |
| 11 | 48 | 2 | 2 | 14 | 30 | Time-out | 2281797 | 17.85 | 393111 |
| 12 | 49 | 3 | 2 | 14 | 30 | Time-out | 2348862 | 23.08 | 409395 |
8. Conclusion
This paper presents an automated method for aspect weaving of scenario-based models. Although considerable research has been done to weave models, many such approaches are not fully automated. By contrast, in this paper we showed how aspects can be woven automatically with the help of constraint solvers. Amongst the available constraint solvers we decided to use the Z3-SMT solver. Moreover, in this approach, matching and weaving are done at the semantic level, since we incorporate the semantics of the models into our transformation algorithm to generate Z3 code.
The example shown in the paper focuses on one base model, one advice and one pointcut. Our approach is not restricted to this and works for any number of pairwise composed models. If a solution does not exist, this indicates an inconsistency between the models used. As discussed, our weaving follows the general interpretation of [4], which is in accordance to our composition semantics defined in [21].
Finally, our approach should be applicable to a wide range of modelling notations used for design. Although we focus on sequence diagrams and how to capture aspects and aspect weaving, we can similarly use it to compose many different kinds of large static and behavioural models such as class diagrams and state machines.
References
Java Memory Model Examples: Good, Bad and Ugly
Citation for published version:
Aspinall, D & Ševčík, J 2007, Java Memory Model Examples: Good, Bad and Ugly. In Proceedings of Verification and Analysis of Multi-Threaded Java-Like Programs (VAMP 2007).
Link:
Link to publication record in Edinburgh Research Explorer
Document Version:
Peer reviewed version
Published In:
Proceedings of Verification and Analysis of Multi-Threaded Java-Like Programs (VAMP 2007)
General rights
Copyright for the publications made accessible via the Edinburgh Research Explorer is retained by the author(s) and / or other copyright owners and it is a condition of accessing these publications that users recognise and abide by the legal requirements associated with these rights.
Take down policy
The University of Edinburgh has made every reasonable effort to ensure that Edinburgh Research Explorer content complies with UK legislation. If you believe that the public display of this file breaches copyright please contact openaccess@ed.ac.uk providing details, and we will remove access to the work immediately and investigate your claim.
Java Memory Model Examples:
Good, Bad and Ugly
David Aspinall and Jaroslav Ševčík
August 8, 2007
Abstract
We review a number of illustrative example programs for the Java Memory Model (JMM) [6, 3], relating them to the original design goals and giving intuitive explanations (which can be made precise). We consider good, bad and ugly examples. The good examples are allowed behaviours in the JMM, showing possibilities for non sequentially consistent executions and reordering optimisations. The bad examples are prohibited behaviours, which are clearly ruled out by the JMM. The ugly examples are most interesting: these are tricky cases which illustrate some problem areas for the current formulation of the memory model, where the anticipated design goals are not met. For some of these we mention possible fixes, drawing on knowledge we gained while formalising the memory model in the theorem prover Isabelle [1].
1 Introduction
The Java Memory Model (JMM) [6, 3] is a relaxed memory model which acts as a contract between Java programmers, compiler writers and JVM implementors. It explains possible and impossible behaviours for multi-threaded programs. The JMM is necessary to allow efficient execution and compilation of Java programs, which may result in optimisations that affect the order of memory operations. The case that is usually desirable is sequential consistency (SC) [4], which, roughly speaking, says that the outcome of executing a multi-threaded program should be equivalent to the outcome of executing some sequential ordering of its operations which agrees with the order that statements appear in the program. Sequential consistency has acted as a correctness criterion in the study of relaxed memory models and numerous variations have been explored; we discuss our own precise definition of SC for the JMM later.
Sequential consistency helps make multi-threaded programming comprehensible to the programmer. But for parts of a program which are executing in unrelated threads, sequential consistency may not be required. Moreover, in pursuit of the best possible performance, sophisticated concurrent algorithms have been designed which work in the presence of data races. A data race in a program is a point where the program itself fails to specify an ordering on conflicting actions across threads, so sequential consistency is undefined.
The JMM is one of the first explored memory models which connects a high-level programming language to low-level executions (most other relaxed memory models work at the hardware level). The JMM has been designed to make three guarantees:
1. **A promise for programmers**: sequential consistency must be sacrificed to allow optimisations, but it will still hold for data race free programs. This is the data race free (DRF) guarantee.
2. **A promise for security**: even for programs with data races, values should not appear “out of thin air”, preventing unintended information leakage.
3. **A promise for compilers**: common hardware and software optimisations should be allowed as far as possible without violating the first two requirements.
The key question one asks about a program is whether a certain outcome is possible through some execution. Unfortunately, with the present JMM, it can be quite difficult to tell the answer to this! In part, the memory model has been designed around a set of examples (e.g., the causality tests in [8]) which have helped shape the definitions; but gaps and informality in the present definitions mean that there are still unclear cases.
Points 1 and 2 act to prohibit certain executions, whereas point 3 acts to require certain executions. It seems that only point 1 provides a precise set of behaviours that are disallowed, i.e., the non-SC behaviours for data race free programs. Regarding point 3, exactly which optimisations must be allowed has been a source of some debate and is still in flux [8, 9]. Regarding point 2, the “out of thin air” requirement has yet to be precisely characterised; we only know of forbidden examples which violate causality to allow behaviours that result in arbitrary values.
This paper discusses some illustrative examples for the Java Memory Model, relating them back to these goals and to the JMM definitions, and in particular, the definitions as we have formalised them [1]. Our contribution is to collect together some canonical examples (including tricky cases), and to explain how they are dealt with by our formal definitions, which represent an improvement and clarification of the official definitions. Despite the intricacies of the JMM definitions, we present the examples at an informal level as far as possible. We also give some opinions on future improvements of the JMM.
The rest of this paper is structured as follows. Section 2 explains intuitively how behaviours are justified in the memory model. Sections 3, 4 and 5 then present the examples: the good (allowed), the bad (prohibited) and the ugly (tricky cases where there is disparity between the JMM design aims and the actual definitions). Appendix A recalls some of the definitions of the JMM in a more precise format for reference; it should be studied by those who seek a complete understanding but can be ignored by a casual reader. Section 6 concludes.
## 2 A Bluffer’s Guide to the JMM
**Motivation.** Before we introduce the memory model, let us examine three canonical examples (from [6]), given in Fig. 1, which illustrate the requirements mentioned above. The programs show statements in parallel threads, operating on thread-local registers ($r_1, r_2, \ldots$) and shared memory locations ($x, y, \ldots$).
Initially $x = y = 0$
\[
\begin{array}{l|l}
r1 := x & r2 := y \\
y := 1  & x := 1
\end{array}
\]
Is it possible to get $r1 = r2 = 1$ at the end of an execution?
Figure 1: Examples of legal and illegal executions. (Program A is shown; program B is the correctly synchronised variant and program C the out-of-thin-air example discussed below.)
In an interleaved semantics, program A could not result in $r1 = r2 = 1$, because one of the statements $r1 := x$, $r2 := y$ must be executed first, thus either $r1$ or $r2$ must be 0. However, current hardware can, and often does, execute instructions out of order. Imagine a scenario where the read $r1 := x$ is too slow because of cache management. The processor can realise that the next statement $y := 1$ is independent of the read, and instead of waiting for the read it performs the write. The second thread then might execute both of its instructions, seeing the write $y := 1$ (so $r2 = 1$). Finally, the postponed read of $x$ can see the value 1 written by the second thread, resulting in $r1 = r2 = 1$. Similar non-intuitive behaviours could result from simple compiler optimisations, such as common subexpression elimination. The performance impact of disabling these optimisations on the current architectures would be huge; therefore, we need a memory model that allows these behaviours.
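As concrete (and hedged) Java, program A corresponds to the following sketch; the class and field names are ours, and the interesting outcome is of course not guaranteed to appear in any particular run—the JMM merely permits it.

```java
// Sketch of program A with plain (non-volatile) shared fields, i.e. with data races.
public class ProgramA {
    static int x = 0, y = 0;
    static int r1, r2;

    public static void main(String[] args) throws InterruptedException {
        Thread t1 = new Thread(() -> { r1 = x; y = 1; });
        Thread t2 = new Thread(() -> { r2 = y; x = 1; });
        t1.start(); t2.start();
        t1.join();  t2.join();
        // Under the JMM, r1 == 1 && r2 == 1 is a legal outcome, even though no
        // interleaving of the four statements produces it.
        System.out.println("r1=" + r1 + " r2=" + r2);
    }
}
```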
However, there are limits on the optimisations allowed—if the programmer synchronises properly, e.g., by guarding each access to a field by a synchronized section on a designated monitor, then the program should only have sequentially consistent behaviours. This is why the behaviour $r1 = r2 = 1$ must be prohibited in program B of Fig. 1.
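For contrast, a correctly synchronised variant in the spirit of program B (the exact code of B in Fig. 1 was not recovered, so this is an assumption): every access to x and y is guarded by the same monitor, the program is data race free, and the DRF guarantee then forbids r1 = r2 = 1.

```java
// Sketch of a correctly synchronised variant; only sequentially consistent results are possible.
public class ProgramB {
    static final Object m = new Object();
    static int x = 0, y = 0;
    static int r1, r2;

    public static void main(String[] args) throws InterruptedException {
        Thread t1 = new Thread(() -> { synchronized (m) { r1 = x; y = 1; } });
        Thread t2 = new Thread(() -> { synchronized (m) { r2 = y; x = 1; } });
        t1.start(); t2.start();
        t1.join();  t2.join();
        // r1 = r2 = 1 can never be observed.
        System.out.println("r1=" + r1 + " r2=" + r2);
    }
}
```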
Even if a program contains data races, there must be some security guarantees. Program C in Fig. 1 illustrates an unwanted “out-of-thin-air” behaviour—if a value does not occur anywhere in the program, it should not be read in any execution of the program. The out-of-thin-air behaviours could cause security leaks, because references to objects from possibly confidential parts of the program could suddenly appear as a result of a self-justifying data race. This might let an applet on a web page see possibly sensitive information, or, even worse, get a reference to an object that allows unprotected access to the host computer.
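Program C has the standard out-of-thin-air shape; the sketch below assumes that shape, since the exact code of C in Fig. 1 was not reproduced above. Each thread copies the value it read from one variable into the other, so any non-zero result would have to justify itself.

```java
// Sketch in the spirit of program C: a causality loop through two data races.
public class ProgramC {
    static int x = 0, y = 0;
    static int r1, r2;

    public static void main(String[] args) throws InterruptedException {
        Thread t1 = new Thread(() -> { r1 = x; y = r1; });
        Thread t2 = new Thread(() -> { r2 = y; x = r2; });
        t1.start(); t2.start();
        t1.join();  t2.join();
        // The JMM forbids any outcome other than r1 = r2 = 0: a non-zero value
        // would have to appear "out of thin air".
        System.out.println("r1=" + r1 + " r2=" + r2);
    }
}
```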
**JMM framework.** Now we introduce the key concepts behind the JMM. Unlike interleaved semantics, the Java Memory Model has no explicit global ordering of all actions by time consistent with each thread’s perception of time, and has no global store. Instead, executions are described in terms of memory related actions, partial orders on these actions, and a visibility function that assigns a write action to each read action.
An action is a tuple consisting of a thread identifier, an action kind, and a unique identifier. The action kind can be either a normal read from variable $x$, a normal write to $x$, a volatile read from $v$ or write to $v$, a lock of monitor $m$, or an unlock of monitor $m$. The volatile read/write and lock/unlock actions...
are called \textit{synchronisation actions}. An \textit{execution} consists of a set of actions, a \textit{program order}, a \textit{synchronisation order}, a \textit{write-seen} function, and a \textit{value-written} function. The program order ($\leq_{po}$) is a total order on the actions of each thread, but it does not relate actions of different threads. All synchronisation actions are totally ordered by the synchronisation order ($\leq_{so}$). From these two orders we construct a happens-before order of the execution: action $a$ happens-before action $b$ ($a \leq_{hb} b$) if (1) $a$ synchronises-with $b$, i.e., $a \leq_{so} b$, $a$ is an unlock of $m$ and $b$ is a lock of $m$, or $a$ is a volatile write to $v$ and $b$ is a volatile read from $v$, or (2) $a \leq_{po} b$, or (3) there is an action $c$ such that $a \leq_{hb} c \leq_{hb} b$. The happens-before order is an upper bound on the visibility of writes—a read happening before a write should never see that write, and a read $r$ should not see a write $w$ if there is another write happening “in-between”, i.e., if $w \leq_{hb} w' \leq_{hb} r$ and $w \neq w'$, then $r$ cannot see $w$.\footnote{For details, see Defs 2, 4 and 6 in App. A.}
We say that an execution is \textit{sequentially consistent} if there is a total order consistent with the program order, such that each read sees the most recent write to the same variable in that order. A pair of memory accesses to the same variable is called a \textit{data race} if at least one of the accesses is a write and they are not ordered by the happens-before order. A program is \textit{correctly synchronised} (or \textit{data-race-free}) if no sequentially consistent execution contains a data race.
A tricky issue is initialisation of variables. The JMM says
\begin{quote}
The write of the default value (zero, false, or null) to each variable synchronises-with the first action in every thread [7].
\end{quote}
However, normal writes are not synchronisation actions and synchronises-with only relates synchronisation actions, so normal writes cannot synchronise-with any action. For this paper, we will assume that all default writes are executed in a special initialisation thread and the thread is finished before all other threads start. Even this interpretation has problems; we mention them in Sect. 5.
\textbf{Committing semantics.} The basic building blocks are \textit{well-behaved} executions, in which reads are only allowed to see writes that happen before them. In these executions, reads cannot see writes through data races, and threads can only communicate through synchronisation. For example, programs A and C in Fig. 1 have just one such execution—the one where $r_1 = r_2 = 0$. On the other hand, the behaviours of program B are exactly the behaviours that could be observed by the interleaved semantics, i.e. $r_1 = r_2 = 0$, or $r_1 = 1$ and $r_2 = 0$, or $r_1 = 0$ and $r_2 = 1$. In fact, if a program is correctly synchronised then its execution is well-behaved if and only if it is sequentially consistent. This does not hold for incorrectly synchronised programs, see Sect. 5.
The Java Memory Model starts from a well-behaved execution and \textit{commits} one or more data races from the well-behaved execution. After committing the actions involved in the data races it “restarts” the execution, but this time it must execute the committed races. This means that each read in the execution must be either committed and see the value through the race, or it must see the write that happens-before it. The JMM can repeat the process, i.e., it may choose some non-committed reads involved in a data race, commit the writes involved in these data races if they are not committed already, commit the
chosen reads, and restart the execution. The JMM requires all of the subsequent executions to preserve happens-before ordering of the committed actions.
This committing semantics imposes a causality order on races—the outcome of a race must be explained in terms of previously committed races. This prevents causality loops, where the outcome of a race depends on the outcome of the very same race, e.g., the outcome $r_1 = 1$ in program C in Fig. 1. The DRF guarantee is a simple consequence of this procedure. If there are no data races in the program, there is nothing to commit, and we can only generate well-behaved executions, which are sequentially consistent. In fact, the JMM actually commits all actions in an execution, but committing a read that sees a write that happens before it does not create any opportunities for committing new races, because reads must see writes that happen-before them in any well-behaved execution. Therefore the central issue is committing races, and we explain our examples using this observation.
3 Good executions
The interesting executions are those which are not sequentially consistent, but are legal under the JMM.
Simple reordering. First, we demonstrate the committing semantics on program A in Fig. 1. In the well-behaved execution of this program, illustrated by the first diagram in Fig. 2, the reads of $x$ and $y$ can only see the default writes of 0, which results in $r_1 = r_2 = 0$.
There are two data races in this execution (depicted by the dotted lines, the solid lines represent the happens-before order)—one on $x$ and one on $y$. We can commit either one of the races or both of them. Suppose we commit the race on $y$. In the second diagram we show the only execution that uses this data race; the committed actions are in brackets and the committed read sees the value of (the write in) the data race. The non-committed read sees the write that happens-before it, i.e., the default write. This execution gives the result $r_1 = 0$ and $r_2 = 1$. The JMM can again decide to commit a data race from the execution. There is only one such data race. Committing the data race on $x$ gives the last diagram, and results in $r_1 = r_2 = 1$.
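Written out as commit sets, the sequence just described is summarised schematically below (the diagrams of Fig. 2 are not reproduced here; formally, by rule 7 of legality the write of each race enters the commit set one step before its read, and we elide those intermediate sets):
\[
\begin{array}{lll}
C_0 = \emptyset, & E_1: \text{both reads see the default writes,} & r_1 = r_2 = 0,\\
C_1 = \{\, y := 1,\ r_2 := y \,\}, & E_2: r_2 \text{ sees } y := 1, \text{ the read of } x \text{ sees the default write,} & r_1 = 0,\ r_2 = 1,\\
C_2 = C_1 \cup \{\, x := 1,\ r_1 := x \,\}, & E_3: \text{the final execution,} & r_1 = r_2 = 1.
\end{array}
\]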
Complex optimisation. The program
Initially \( x = y = 0 \)
\[
\begin{array}{l|l}
r1 := x & r2 := y \\
\text{if } (r1 \neq 0) & x := r2 \\
\quad y := r1 & \\
\text{else} & \\
\quad y := 1 &
\end{array}
\]
can result in \( r1 = r2 = 1 \) using a committing sequence that is very similar to the previous program. First we commit the race on \( y \) and then the race on \( x \) with value 1. Note that the actions are not tied to instructions in any way—the write to \( y \) is first committed using the \text{else} branch, but it is executed by the \text{then} branch in the final execution.
Although it might seem counter-intuitive, this behaviour could be the outcome of a compiler optimisation. The compiler may realise that the only values in the program are 0 and 1; that the first thread can never write 0 to \( y \); thus it must write 1. As a result, it could replace the if statement with an assignment \( y := 1 \). Then it could reorder this with the statement \( r1 := x \), giving the program:
\[
\begin{align*}
\quad & y := 1 \\
\quad & r2 := y \\
\quad & r1 := x \\
\quad & x := r2
\end{align*}
\]
After the reordering we can get \( r1 = r2 = 1 \) from an SC execution.
## 4 Bad executions
The JMM aims to prohibit out-of-thin-air behaviours, which are usually demonstrated by program C from Fig. 1.
It contains a causality loop, which potentially could explain any value being written and seen by \( r1 \). But the JMM prohibits the result \( r1 = 1 \), for example, because we can only commit a race with the value 0. Even after restarting, all reads must see the value 0, because the non-committed ones can only see the default writes, and the committed ones were committed with value 0.
Another powerful tool for determining (in)validity of executions is the DRF guarantee. For example, this program (from \[6\]):
Initially \( x = y = 0 \)
\[
\begin{array}{l|l}
r1 := x & r2 := y \\
\text{if } (r1 > 0) & \text{if } (r2 > 0) \\
\quad y := 42 & \quad x := 42
\end{array}
\]
is data race free and thus it cannot be that \( r1 = 42 \), because the writes of 42 do not occur in any sequentially consistent execution.
## 5 Ugly executions
Here we give examples that either show a bug in the JMM, i.e., behaviours that should be allowed but are not, or behaviours that are surprising.
1. **Reordering of independent statements.** The first example (from \[2\]) demonstrates that it is not the case that any independent statements can be reordered in the JMM\(^2\) without changing the program’s behaviour:
\(^2\)In [1], we suggest a weakening of the legality definition that fixes this problem while preserving the DRF guarantee.
Can we get $r_1 = r_2 = r_3 = 1$? This requires that each register read sees a write of 1. The only way to commit a data race on $z$ with value 1 is to commit the races on $x$ and $y$ with values 1, so that the write $z := 1$ is executed. Note that the writes to $x$ and $y$ are committed in the else branch, and the write to $y$ happens-before the write to $x$. However, once we commit the data race on $z$, the first thread must execute the then branch in the restarted execution, which violates the requirement on preservation of ordering of the committed actions. So this outcome is impossible.
If we swap the statements in either branch of the if statement so they are the same, we can justify the result $r_1 = r_2 = r_3 = 1$. This demonstrates that independent instruction reordering introduces a new behaviour of the program, and so is not a legal transformation in general. This falsifies Theorem 1 of [6].
2. Reordering with external actions. Another counterexample to Theorem 1 of [6] shows that normal statements cannot be reordered with “external statements” (i.e. those which generate external actions such as I/O operations).
Here, the result $r_1 = r_2 = 1$ is not possible, because we must commit the printing before committing the data race on $x$.\(^3\) However, after swapping the print with $x := 1$ in the else branch, we can get the value 1 in both the registers by committing the race on $x$ followed by committing the race on $y$. As a result, this reordering is not legal. A fix to this problem has not been proposed yet.\(^4\)
3. Roach motel semantics. A desirable property of the memory model is that adding synchronisation to a program could introduce deadlock, but not any other new behaviour. A special case is “roach motel semantics” ([7]), i.e., moving normal memory accesses inside synchronised regions. Another case is making a variable volatile. We now show examples falsifying the claim that the JMM ensures this property.
First, note that this program
\(^3\)By rule 9 of legality, Def. 7 in the Appendix.
\(^4\)The apparent fix is to drop rule 9; the consequences of this are unknown, although DRF will be preserved.
cannot result in \( r_1 = r_2 = r_3 = 1 \), because the only way to get a write of 1 into \( y \) is to commit the data race on \( x \) with value 2. Once we commit the read of 2 from \( x \), the read of \( x \) must always see value 2 and the register \( r_1 \) must be 2 in any subsequent execution, so we cannot have \( r_1 = 1 \) in the final execution.
However, if we move the assignment \( r_1 := x \) inside the synchronised block, we can construct an execution where \( x := 2 \) happens-before \( r_1 := x \) using the synchronisation on \( m \), and \( r_1 := x \) sees value 2 (see execution A from Fig. 3). From this execution, we can commit the data race on \( y \) with value 1 without committing the read of \( x \). Then we can restart and commit the data race on \( z \) with value 1, and after another restart of the execution, we let the read of \( x \) see value 1 (execution B from Fig. 3). As a result, we introduce a new behaviour \( r_1 = r_2 = r_3 = 1 \) after moving a normal access into a synchronised block.
Using the same reasoning, if \( x \) is volatile, we can also get \( r_1 = r_2 = r_3 = 1 \) even if \( r_1 := x \) is outside the synchronised block. This demonstrates that making a variable volatile can introduce new behaviours other than deadlock.
4. Reads affect behaviours. In the JMM, reads can affect an execution in perhaps unexpected ways. Unlike an interleaved semantics, removing a redundant read from a program, such as replacing \( r := x; r := 1 \) by \( r := 1 \) can decrease observable behaviours, because the read of \( x \) might have been previously committed and it must be used. As a result, introducing redundant reads to a program is not a legal program transformation.
To demonstrate this, consider the program
\[
\begin{array}{c|c|c|c}
\text{lock } m & \text{lock } m & r_1 := x & r_3 := y \\
x := 2 & x := 1 & \text{lock } m & z := r_3 \\
\text{unlock } m & \text{unlock } m & r_2 := z & \\
& & \text{if } (r_1 == 2) & \\
& & y := 1 & \\
& & \text{else} & \\
& & y := r_2 & \\
& & \text{unlock } m & \\
\end{array}
\]
and the outcome \( r_1 = r_2 = 1 \). This is a possible behaviour of the program—we can commit the race on \( x \) between \( r_3 := x \) and \( x := 1 \), then the race on \( y \) between \( y := 1 \) and \( r_2 := y \), and finally the race on \( z \) with value \( 1 \) to get the result. But if we remove the (redundant) read \( r_4 := x \), we cannot keep the committed race on \( x \) after we commit the race on \( z \) with value \( 1 \).
5. **Causality tests.** In [8], the causality tests 17–20 are wrong—they are not allowed in the JMM contrary to the claim. We illustrate the problem on causality test 17 (all tests are an instance of one problem and we suggest a fix in [1]):
\[
x = y = 0
\]
\[
\begin{array}{l|l}
r_3 := x & r_2 := y \\
\text{if } (r_3 \neq 42) & x := r_2 \\
\quad x := 42 & \\
r_1 := x & \\
y := r_1 &
\end{array}
\]
The causality test cases state that \( r_1 = r_2 = r_3 = 42 \) should be allowed, because the compiler might realise that, no matter what the initial value of variable \( x \) in the first thread is, \( r_1 \) will always be 42, and it can replace the assignment by \( r_1 := 42 \). Then we can get the desired outcome by simple reordering of independent statements. However, there is a subtle bug in the memory model and it does not allow this behaviour (see [1] for more details). This is because rule 7 of legality (Def. 7) is too strong—it requires the reads being committed to see already committed writes in the justifying execution.
One of the causality tests also answers an interesting question: can we always commit actions one-by-one, i.e., each commit set having one more element than the previous one? The answer is negative, as illustrated by the causality test 2:
\[
x = y = 0
\]
\[
\begin{array}{l|l}
r_1 := x & r_3 := y \\
r_2 := x & x := r_3 \\
\text{if } (r_1 == r_2) & \\
\quad y := 1 &
\end{array}
\]
To get \( r_1 = r_2 = r_3 = 1 \), we must commit both reads of \( x \) at the same time.
6. **Sequential consistency and lock exclusivity.** Our next example execution is considered sequentially consistent by the JMM but does not reflect an interleaved semantics respecting mutual exclusion of locks. The JMM says that an execution is sequentially consistent if there is a total order consistent with
the execution’s program order such that each read sees the most recent write in that order. In [1], we show that this does not capture interleaved semantics with exclusive locks, demonstrated in this program:
Initially \( x = y = z = 0 \)
\[
\begin{array}{l|l}
\text{lock } m & \text{lock } m \\
r1 := y & y := 1 \\
x := r1 & r3 := z \\
r2 := x & \text{unlock } m \\
z := 1 & \\
\text{unlock } m &
\end{array}
\]
By the JMM, it is possible to get $r1 = r2 = r3 = 1$, using the total order `lock m, lock m, y:=1, r1:=y, x:=r1, r2:=x, z:=1, r3:=z, unlock m, unlock m`. It is not hard to show that this is the only order of reads and writes that is consistent with the program order and in which the reads see only the most recent writes. However, this order does not respect mutual exclusion of locks. We believe, therefore, that sequential consistency ought to require existence of a total order consistent with both the program order and the synchronisation order. In [1], we have proved that the DRF guarantee also holds for this definition of SC.
7. Default writes and infinite executions. The writes of default values to variables introduce inconsistencies and surprising behaviours in the JMM. We noted earlier that the definition of the happens-before relation is flawed when default writes are considered carefully. Possible fixes, either by making default writes into synchronisation actions, or by forcing the initialisation thread to finish before all other threads start, conflict with infinite executions and observable behaviours in a subtle way.
For example, any infinite execution of the program (from [1]):
```java
while(true) { new Object() { public volatile int f; }.f++; }
```
must initialize an infinite number of volatile variables before the start of the thread executing the while loop. Then the synchronisation order is not an omega order, which violates the first well-formedness requirement. So the program above cannot have any well-formed execution in Java.
A related problem arises with the notions of observable behaviour and “hung” program given in [7]: if the above loop is preceded by an external action, the program can only have a finite observable behaviour, yet the criteria for being “hung” are not met. Because of this, we suggest restricting executions to finite ones, and excluding the default write actions from observable behaviours.
8. Well-behaved executions and SC. For justifying legal executions, the JMM uses “well-behaved executions” [6, Section 1.2]. These are executions with certain constraints, in particular, that each read sees a most recent write in the happens-before order. It might be interesting to examine the relationship between the well-behaved executions and the sequentially consistent executions.
Lemma 2 of [6] says that for correctly synchronised programs, an execution is sequentially consistent if and only if it is well-behaved.
However, the following two examples show that these two notions are incomparable for general programs. First, consider the program
Initially \( x = 0 \)
\[
\begin{array}{l|l}
x := 1 & r1 := x
\end{array}
\]
and its execution, where \( r_1 = 1 \). This is sequentially consistent, but not well-behaved.
On the other hand, the program
\[
\begin{array}{ll}
\text{lock } m_1 & \text{lock } m_2 \\
y := 1 & y := 2 \\
x := 1 & x := 2 \\
\text{unlock } m_1 & \text{unlock } m_2 \\
\text{lock } m_2 & \text{lock } m_1 \\
r_1 := x & r_2 := x \\
r_3 := y & r_4 := y \\
\text{unlock } m_2 & \text{unlock } m_1
\end{array}
\]
has a well-behaved execution, where \( r_1 = r_4 = 1 \) and \( r_2 = r_3 = 2 \). This result is not possible in any SC execution.
6 Conclusions
We have collected together and explained a set of typical examples for the Java Memory Model, including those good and bad programs used to motivate the definitions, and a set of ugly programs which cause problems for the present JMM definitions. Our aim was to summarise the status of the current JMM, as we understand it, and at the same time make some of the technicalities more accessible than they are in other accounts.
We have explained the examples at a slightly informal level, giving an intuitive description of the checking process one can use to explain why some example is possible (by explaining the commit sequence), or argue why some example is impossible (by explaining that no commit sequence with the desired outcome is possible). We explained the commit sequences in terms of what happens for data races, which is the essential part of the process; this fact is mentioned in Manson’s thesis [5] but not easily gleaned from the text or definitions in other published accounts [6, 3].
The ugly cases are the most interesting; clearly something must be done about them to gain a consistent account. The first hope is that the JMM definitions can be “tweaked” to fix everything. We have suggested several tweaks so far. But, because the definitions have some ad hoc aspects justified by examples on a case-by-case basis, it is not clear that it is possible to revise the definitions to meet the case-by-case examples of allowed and prohibited behaviours while at the same time satisfying the global property of enabling all easily detectable optimisations of certain kinds. Further examination is required, but it may be that beginning from more uniformly derived semantic explanations (such as the approach of Cenciarelli et al [2]) is a better approach for achieving a more tractable definition.
Although we (naturally!) believe that our own explanations of the examples are accurate, the definitions are quite intricate and other people have made mistakes — perhaps some of the corner cases (such as the causality tests) were valid in previous versions of the model but became broken as it evolved. Therefore it is an obvious desire to have a way to check examples formally and automatically against memory model definitions; in future work we plan to investigate model checking techniques for doing this.
**Related work.** We have cited the source of examples and made some comparisons in the text; the papers in the bibliography also contain ample further pointers into the wider literature on memory models. The key starting point for understanding the present Java Memory Model is the draft journal paper [7]. During the development of the present memory model, many examples have been discussed on the mailing list, as well as in the test cases [8, 9].
**Acknowledgements.** The authors enjoyed discussions on some of the examples in this paper with M. Huisman, G. Petri and P. Cenciarelli. The second author is supported by a PhD studentship awarded by the UK EPSRC, grant EP/C537068. Both authors also acknowledge the support of the EU project MOBIUS (IST-15905).
**References**
A Precise JMM definitions
The following definitions correspond to those in [3, 6], but are mildly reformulated to match the way we have studied them in [1]. We use \( T \) for the set of thread identifiers, ranged over by \( t \); \( M \) for synchronisation monitor identifiers, ranged over by \( m \); \( L \) for variables (i.e., memory locations), ranged over by \( v \) (in examples, \( x, y \), etc.); and \( V \) for values. The starting point is the notion of action.
**Definition 1 (Action)** An action is a memory-related operation: (1) each action belongs to one thread, denoted \( T(a) \); (2) an action has one of the following action kinds:
- volatile read of \( v \in L \),
- volatile write to \( v \in L \),
- lock on monitor \( m \in M \),
- unlock on monitor \( m \in M \),
- normal read from \( v \in L \),
- normal write to \( v \in L \),
- external action.
An action kind includes the associated variable or monitor. The volatile read, write, lock and unlock actions are called synchronisation actions.
**Definition 2 (Execution)** An execution \( E = \langle A, P, \leq_{po}, \leq_{so}, W, V \rangle \), where:
- \( A \subseteq A \) is a set of actions,
- \( P \) is a program, which is represented as a function that decides validity of a given sequence of action kinds with associated values if the action kind is a read or a write,
- the partial order \( \leq_{po} \subseteq A \times A \) is the program order,
- the partial order \( \leq_{so} \subseteq A \times A \) is the synchronisation order,
- \( W \in A \Rightarrow A \) is a write-seen function. It assigns a write to each read action from \( A \); \( W(r) \) denotes the write seen by \( r \), i.e. the value read by \( r \) is \( V(W(r)) \). The value of \( W(a) \) for non-read actions \( a \) is unspecified,
- \( V \in A \Rightarrow V \) is a value-written function that assigns a value to each write from \( A \), \( V(a) \) is unspecified for non-write actions \( a \).
**Definition 3 (Synchronizes-with)** In an execution with synchronisation order \( \leq_{so} \), an action \( a \) synchronises-with an action \( b \) (written \( a <_{sw} b \)) if \( a \leq_{so} b \) and \( a \) and \( b \) satisfy one of the following conditions:
- \( a \) is an unlock on monitor \( m \) and \( b \) is a lock on monitor \( m \),
- \( a \) is a volatile write to \( v \) and \( b \) is a volatile read from \( v \).
**Definition 4 (Happens-before)** The happens-before order of an execution is the transitive closure of the union of its synchronises-with order and its program order, i.e. \( \leq_{hb} = (<_{sw} \cup \leq_{po})^+ \).
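As a concrete illustration of Definition 4 (not taken from the paper), the following sketch computes the happens-before relation of a tiny execution as the transitive closure of the union of the program-order and synchronises-with edges; the action encoding and the toy edge sets are our own.

```java
import java.util.*;

// Actions are just integer ids here; edges are pairs {from, to}.
public class HappensBefore {

    // Warshall-style transitive closure over action ids 0..n-1.
    static boolean[][] transitiveClosure(int n, Set<int[]> edges) {
        boolean[][] hb = new boolean[n][n];
        for (int[] e : edges) hb[e[0]][e[1]] = true;
        for (int k = 0; k < n; k++)
            for (int i = 0; i < n; i++)
                for (int j = 0; j < n; j++)
                    if (hb[i][k] && hb[k][j]) hb[i][j] = true;
        return hb;
    }

    public static void main(String[] args) {
        // Toy execution: 0 = volatile write of v (thread 1), 1 = normal write of x (thread 1),
        // 2 = volatile read of v (thread 2), 3 = normal read of x (thread 2).
        Set<int[]> po = Set.of(new int[]{1, 0}, new int[]{2, 3}); // program order per thread
        Set<int[]> sw = Set.of(new int[]{0, 2});                  // volatile write sw volatile read
        Set<int[]> union = new HashSet<>(po);
        union.addAll(sw);
        boolean[][] hb = transitiveClosure(4, union);
        // The normal write (1) happens-before the normal read (3) via 1 <=po 0 <sw 2 <=po 3.
        System.out.println("write x happens-before read x: " + hb[1][3]);
    }
}
```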
**Definition 5 (Sequential validity)** We say that a sequence \( s \) of action kind-value pairs is sequentially valid with respect to a program \( P \) if \( P(s) \) holds.
Definition 6 (Well-formed execution) We say that an execution \( \langle A, P, \leq_{po}, \leq_{so}, W, V \rangle \) is well-formed if
1. \( \leq_{po} \) restricted on actions of one thread is a total order, \( \leq_{po} \) does not relate actions of different threads.
2. \( \leq_{so} \) is total on synchronisation actions of \( A \).
3. \( \leq_{so} \) is an omega order, i.e. \( \{ y \mid y \leq_{so} x \} \) is finite for all \( x \).
4. \( \leq_{so} \) is consistent with \( \leq_{po} \), i.e. \( a \leq_{so} b \land b \leq_{po} a \Rightarrow a = b \).
5. \( W \) is properly typed: for every non-volatile read \( r \in A, W(r) \) is a non-volatile write; for every volatile read \( r \in A, W(r) \) is a volatile write.
6. Locking is proper: for all lock actions \( l \in A \) on monitors \( m \) and all threads \( t \) different from the thread of \( l \), the number of locks in \( t \) before \( l \) in \( \leq_{so} \) is the same as the number of unlocks in \( t \) before \( l \) in \( \leq_{so} \).
7. Program order is intra-thread consistent: for each thread \( t \), the sequence of action kinds and values\(^5\) of actions performed by \( t \) in the program order \( \leq_{po} \) is sequentially valid with respect to \( P \).
8. \( \leq_{so} \) is consistent with \( W \): for every volatile read \( r \) of a variable \( v \) we have \( W(r) \leq_{so} r \) and for any volatile write \( w \) to \( v \), either \( w \leq_{so} W(r) \) or \( r \leq_{so} w \).
9. \( \leq_{hb} \) is consistent with \( W \): for all reads \( r \) of \( v \) it holds that \( r \not\leq_{hb} W(r) \) and there is no intervening write \( w \) to \( v \), i.e. if \( W(r) \leq_{hb} w \leq_{hb} r \) and \( w \) writes to \( v \) then\(^6\) \( W(r) = w \).
Definition 7 (Legality) A well-formed execution \( \langle A, P, \leq_{po}, \leq_{so}, W, V \rangle \) with happens-before order \( \leq_{hb} \) is legal if there is a “committing” sequence of sets of actions \( C_i \) and well-formed “justifying” executions \( E_i = \langle A_i, P_i, \leq_{po,i}, \leq_{so,i}, W_i, V_i \rangle \) with happens-before order \( \leq_{hb,i} \) and synchronises-with order \( \leq_{sw,i} \), such that \( C_0 = \emptyset, C_{i-1} \subseteq C_i \) for all \( i > 0 \), \( \bigcup C_i = A \), and for each \( i > 0 \) the following rules are satisfied:
1. \( C_i \subseteq A_i \).
2. \( \leq_{hb,i} |_{C_i} = \leq_{hb} |_{C_i} \).
3. \( \leq_{so,i} |_{C_i} = \leq_{so} |_{C_i} \).
4. \( V_i |_{C_i} = V |_{C_i} \).
5. \( W_i |_{C_{i-1}} = W |_{C_{i-1}} \).
6. For all reads \( r \in A_i - C_{i-1} \) we have \( W_i(r) \leq_{hb,i} r \).
7. For all reads \( r \in C_i - C_{i-1} \) we have \( W_i(r) \in C_{i-1} \) and \( W(r) \in C_{i-1} \).
\(^5\)The value of an action \( a \) is \( V(a) \) if \( a \) is a write, \( V(W(a)) \) if \( a \) is a read, or an arbitrary value otherwise.
\(^6\)The Java Specification omits the part “\( W(r) = w \)”, which is clearly wrong since happens-before is reflexive.
8. Let \( <_{ssw,i} \) denote the edges of the transitive reduction of \( \leq_{hb,i} \) that are not in \( \leq_{po,i} \). We require that if \( x <_{ssw,i} y \leq_{hb,i} z \) and \( z \in C_i - C_{i-1} \), then \( x <_{sw,j} y \) for all \( j \geq i \).
9. If \( x \) is an external action, \( x \leq_{hb} y \), and \( y \in C_i \), then \( x \in C_i \).
Note that although the definition of legality does not mention the term “well-behaved execution” directly, rule 6 of legality ensures that the first justifying execution \( E_1 \) is well-behaved.
**Definition 8 (Sequential consistency)** An execution is sequentially consistent if there is a total order consistent with the execution’s program order and synchronisation order such that every read in the execution sees the most recent write in the total order.
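A small sketch (our own encoding, not from the paper) of the reads-see-most-recent-write condition in Definition 8: given the actions of an execution laid out in a candidate total order, every read must return the value of the latest preceding write to its variable, with the default 0 if there is none. Consistency of the order with program order is not checked here.

```java
import java.util.*;

public class ScCheck {

    record Action(boolean isWrite, String var, int value) {}

    static boolean seesMostRecentWrites(List<Action> totalOrder) {
        Map<String, Integer> last = new HashMap<>();   // last value written per variable
        for (Action a : totalOrder) {
            if (a.isWrite()) last.put(a.var(), a.value());
            else if (last.getOrDefault(a.var(), 0) != a.value()) return false;
        }
        return true;
    }

    public static void main(String[] args) {
        // r1 := x reads 1 after x := 1: accepted; reading 0 there would fail the check.
        List<Action> order = List.of(new Action(true, "x", 1), new Action(false, "x", 1));
        System.out.println(seesMostRecentWrites(order));   // true
    }
}
```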
**Definition 9 (Conflict)** An execution has a conflicting pair of actions \( a \) and \( b \) if both access the same variable and either \( a \) and \( b \) are writes, or one of them is a read and the other one is a write.
**Definition 10 (DRF)** A program is data race free if in each sequentially consistent execution of the program, for each conflicting pair of actions \( a \) and \( b \) in the execution we have either \( a \leq_{hb} b \) or \( b \leq_{hb} a \).
Towards Practical, Precise and Parametric Energy Analysis of IT Controlled Systems
Bernard van Gastel Marko van Eekelen
Faculty of Management, Science and Technology, Open University of the Netherlands, Heerlen, The Netherlands
Institute for Computing and Information Sciences, Radboud University, Nijmegen, The Netherlands
{Bernard.vanGastel,Marko.vanEekelen}@ou.nl
Energy consumption analysis of IT-controlled systems can play a major role in minimising the overall energy consumption of such IT systems, during the development phase, or for optimisation in the field. Recently, a precise energy analysis was developed, with the property of being parametric in the hardware. In principle, this creates the opportunity to analyse which is the best software implementation for given hardware, or the other way around: choose the best hardware for a given algorithm.
The precise analysis was introduced for a very limited language: ECA. In this paper, several important steps are taken towards practical energy analysis. The ECA language is extended with common programming language features. The application domain is further explored, and threats to the validity are identified and discussed. Altogether, this constitutes an important step towards analysing energy consumption of IT-controlled systems in practice.
1 Introduction
Energy analysis of IT systems is an important emerging field. Its focus is on analysing the software that controls the IT system using models of the components of the system under analysis. Components can vary from small components such as a sensor in the Internet of Things to large subsystems as present in self-driving cars.
As traditionally many savings did occur on the hardware side of a computer, energy consumption is almost a blind spot when developing software. Each next hardware generation consumed less energy to perform the same amount of work. However, recently this development has lost its pace. At the same time, it becomes more and more clear that software has a huge impact on the behaviour and the properties of devices it runs on. A recent example of software influencing the working of a device is the Volkswagen scandal. The car manufacturer used software to detect if the car was being tested. If this was found to be the case, the diesel motor was programmed to operate in such a way that it exhausted less toxic gases and fumes. In [13] it is calculated that 44,000 years of human life are lost in Europe because of the fraud, which lasted at least six years. Another example is fridges from Panasonic, which could detect if a test was going on and suppressed energy intensive defrost cycles during this test. These are negative examples, but they do make clear that the software is in control of the device and its (energy) behaviour.
Although the software is evidently in control of the devices, there is almost no time dedicated in most computer science curricula to the energy efficiency of software. This is peculiar since energy is of vital importance to the modern (software) industry. For years, data centres have been located at places where the energy is cheap, and since the rise of the smartphone more software engineers recognise that to get good user reviews, their software should not rapidly deplete the battery charge of the user’s phone. Due to this lack of educational attention to energy-aware programming, most aspiring programmers never learn to produce energy efficient code. Software engineers have trouble assessing how much energy will be consumed by their software on a target device, especially when the software is run on a multitude of different systems. With the advent of the internet of things, where software is increasingly embedded in our daily life, the software industry should become aware of their energy footprint, and methods must be developed to assist in reducing this footprint.
Furthermore, the combination of many individual negative effects can also affect our society at large. Although this effect is less direct, it is no less essential. If devices that are present in large quantities in our society all exhibit the same negative behaviour, such as incurring needlessly a too high energy consumption, they can impact public utilities and our economy and will consume the finite resources of Earth even faster. Governments increasingly recognise this societal effect, as indicated by the new laws in the European Union issuing ecodesign requirements for many kinds of devices. One of the aims of these requirements is to make devices more energy efficient. Examples of product categories with ecodesign requirements include vacuum cleaners, electrical motors, lighting, heaters, cooking appliances, televisions and coffee machines. Even requirements leading to relative small improvements in energy efficiency can yield large results at scale, even in the case of devices of which one would expect no significant electricity savings to be possible.
Modern devices and appliances are controlled by software, which makes analysing the energy consumption of these devices challenging, as the behaviour of the software is difficult to predict. To analyse the consumption of hardware, the software controlling the hardware needs to be analysed together with the hardware.
**Our approach** To this end, we proposed in [5] a hybrid approach, joining energy behaviour models of the hardware with the energy-aware semantics of software and a program transformation. The interface between hardware and software is made explicit and has to be well defined, allowing for exchanging of hardware or software components. Using this parametric approach, multiple implementations can be analysed. Such an approach can be used on design level or for optimisation. One can e.g. choose the best software implementation for given hardware, or the other way around: choose the best hardware for a given algorithm.
The described approach derives energy consumption functions. These energy functions signify the exact energy behaviour when the software is executing and controlling the modelled hardware. Hardware is modelled as a finite state machine, with both the states and transitions labelled with energy consumption. The programs that can be analysed are written in the software language ECA, which is an imperative language inspired by C and Java. Currently, only a limited set of program constructs is supported.
The most important contributions of this article are:
- extended support for common language features in the software to be analysed: adding types, data structures, global variables, and recursion;
- description of the application domain of the ECA energy analysis method;
- identified threats to the validity of the proposed approach and a discussion of how to deal with these threats.
Overview Section 2 introduces the ECA energy analysis. In section 3 extensions of the ECA language are defined including the new derivation rules that are needed for the analysis. Section 4 explores the application domain of ECA. The validity of the results of the analysis is discussed in section 5. Finally, we conclude with related work, future work and conclusions in sections 6 and 7.
2 Introduction to energy analysis with ECA
Energy analysis combines hardware modelling with the energy-aware semantics of software. To this end the language ECA is specified, on which our analysis is targeted. Based on this, a semantics of this language can be defined, which includes energy consumption. Using this energy-aware semantics, a program transformation is given. This transformation generates an executable model, using a parametric function. If both a concrete input and one or multiple hardware models are specified, the parametric function will result in the energy consumption occurring when running the software on the given hardware.
To illustrate this process, we start with the hardware modelling and continue with describing for some program constructs the semantic rules, the program transformations and the effect on hardware. This is an introductory overview. For further details, the reader is referred to [5, 6, 10].
The hardware conceptually consists of a component state and a set of component functions which operate on the component state. We use finite state models to model these hardware components, with the transitions constituting function calls on the hardware. Energy usage is expressed by labelling both the vertices and edges with energy consumption, which can be in any unit. Labels on vertices constitute time-bound energy consumption, i.e. power draw. Edges are labelled with the consumption of a certain amount of energy, not time-bound but corresponding to the transition. Depending on your needs, you can model energy consumption in one way or the other. Besides the requirements described above, there are no additional ones. We use the power draw function \( \phi \) which translates a component state to a power draw. The result of this function is used to calculate energy consumption for the time spent in a specific state.
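As an illustration of this modelling style (names and numbers are ours, not from the paper), the following sketch represents a component as a finite state machine whose states carry a power draw and whose transitions carry a one-off energy cost; total consumption is the time-weighted power draw plus the transition costs.

```java
import java.util.*;

public class ComponentModel {

    record Transition(String from, String call, String to, double energyCost) {}

    final Map<String, Double> powerDraw = new HashMap<>();   // phi: state -> power draw
    final List<Transition> transitions = new ArrayList<>();
    String state;

    ComponentModel(String initial) { state = initial; }

    // Apply a component call C::f; returns the transition energy and updates the state.
    double call(String f) {
        for (Transition t : transitions)
            if (t.from().equals(state) && t.call().equals(f)) {
                state = t.to();
                return t.energyCost();
            }
        throw new IllegalStateException("no transition for " + f + " in state " + state);
    }

    public static void main(String[] args) {
        ComponentModel radio = new ComponentModel("off");
        radio.powerDraw.put("off", 0.1);
        radio.powerDraw.put("on", 2.0);
        radio.transitions.add(new Transition("off", "enable", "on", 5.0));
        radio.transitions.add(new Transition("on", "disable", "off", 1.0));

        // Spend 3 time units in "off", enable, 10 units in "on", then disable.
        double e = radio.powerDraw.get(radio.state) * 3;
        e += radio.call("enable");
        e += radio.powerDraw.get(radio.state) * 10;
        e += radio.call("disable");
        System.out.println("total energy = " + e);   // 0.1*3 + 5 + 2.0*10 + 1 = 26.3 (approximately)
    }
}
```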
All transitions in a component model are explicit in the ECA source code. We use the notation \( C::f \) to refer to a function \( f() \) operating on a component \( C \). Multiple different hardware components can be used simultaneously from the same program, the components are differentiated by a unique name (substituted in the rules for \( C \)). Besides this addition, the ECA language is a fairly default imperative language sporting functions, a single signed integer type, conditionals, looping constructs and expressions that can be used as statements.
Next, we move on to the semantics, for now without energy added. Besides the function environment \( \Delta \) and the program state \( \sigma \), we have the (hardware) component states \( \Gamma \). This makes the effect on the hardware explicit. The effect of the component function \( C::f \) is split into two: the effect function \( \delta_{C::f} \) and a function \( \text{rv}_{C::f} \) calculating the return value of the component call. Both are working on the component
state retrieved from $\Gamma$ (by $\Gamma(C)$). The straightforward semantic definition of the component function $s\text{Cmp}$, not taking energy into account, is given below.
$$
\frac{\Delta \vdash \langle e_1, \sigma, \Gamma \rangle \Rightarrow \langle a, \sigma', \Gamma' \rangle}
     {\Delta, C::f = (\delta_{C::f}, \text{rv}_{C::f}) \vdash \langle C::f(e_1), \sigma, \Gamma \rangle \Rightarrow \langle \text{rv}_{C::f}(\Gamma'(C), a), \sigma', \Gamma'[C \leftarrow \delta_{C::f}(\Gamma'(C), a)] \rangle}
$$
Adding to it, the energy cost of a component function call consists of including the time taken to execute this function and the explicit energy cost attributed to this call resulting in rule $es\text{Cmp}$.
$$
\Delta, \Phi \vdash (e_1, \sigma, \Gamma) \Rightarrow (a, \sigma', \Gamma', E')
$$
The approach works by transforming these semantic rules into higher order expressions. When executed on a concrete program state $P\text{State}$ and component state $C\text{State}$, this expression yields the energy consumption (and new states and possible value of an ECA expression). Expressions from the ECA language are transformed into rules that result in a tuple of three elements: a value function $V$, a state update function $\Sigma$ (for both program state and the hardware state), and an energy consumption function $E$. Statements from the ECA language are only transformed into the latter two.
These compositional expressions are composed with higher order combinators. One of these combinators is the composition operator $>>>$, which first applies the left-hand side, and on the resulting state applies the right-hand side. Another is the $\mp$ operator, which is a higher order addition operator: when executed, it calculates the energy consumptions of the two operands based on the input states and adds them together.
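A minimal sketch, assuming nothing beyond the description above, of how such combinators can be written as ordinary higher-order functions; the names seq, after and plus (and the use of immutable maps as program states) are ours, not the paper's.

```java
import java.util.Map;
import java.util.function.Function;
import java.util.function.UnaryOperator;

public class Combinators {

    // Sigma >>> Sigma': thread the state through two updates.
    static UnaryOperator<Map<String, Integer>> seq(UnaryOperator<Map<String, Integer>> f,
                                                   UnaryOperator<Map<String, Integer>> g) {
        return s -> g.apply(f.apply(s));
    }

    // Sigma >>> E: evaluate an energy function on the state produced by an update.
    static Function<Map<String, Integer>, Double> after(UnaryOperator<Map<String, Integer>> sigma,
                                                        Function<Map<String, Integer>, Double> e) {
        return s -> e.apply(sigma.apply(s));
    }

    // The higher-order addition: evaluate both energy functions and sum the results.
    static Function<Map<String, Integer>, Double> plus(Function<Map<String, Integer>, Double> e1,
                                                       Function<Map<String, Integer>, Double> e2) {
        return s -> e1.apply(s) + e2.apply(s);
    }

    public static void main(String[] args) {
        UnaryOperator<Map<String, Integer>> assignX = s -> Map.of("x", 42);   // a state update
        Function<Map<String, Integer>, Double> costEval = s -> 1.0;           // energy of evaluating the rhs
        Function<Map<String, Integer>, Double> costAssign = s -> 2.0;         // energy of the assignment itself
        // Energy of "x := e": cost of e plus, after the update, the assignment cost.
        Function<Map<String, Integer>, Double> total = plus(costEval, after(assignX, costAssign));
        System.out.println(total.apply(Map.of("x", 0)));   // 3.0
    }
}
```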
To explain the component call rule $bt\text{Cmp}$ and function call rule $bt\text{Call}$, we need an operator for higher order scoping. This operator creates a new program environment but retains the component state. It can even update the component state given a $\Sigma$ function, which is needed because this $\Sigma$ needs to be evaluated.
using the original program state. The definition is as follows:
\[
[x \mapsto V, \Sigma] : \text{Var} \times (\text{PState} \times \text{CState} \to \text{Value}) \\
\times (\text{PState} \times \text{CState} \to \text{PState} \times \text{CState}) \\
\to (\text{PState} \times \text{CState} \to \text{PState} \times \text{CState})
\]
\[
[x \mapsto V, \Sigma](ps, cs) = ([x \mapsto V(ps, cs)], cs') \quad \text{where} \quad (\_, cs') = \Sigma(ps, cs)
\]
We also need an additional operator split, because the program state is isolated, but the component state is not. The split function forks the evaluation into two state update functions and joins the results together. The first argument defines the resulting program state; the second defines the resulting component state.
\[
\Delta \vdash e : \langle V_{ex}, \Sigma_{ex}, \ldots \rangle \\
\Delta, C::f = (x_f, V_f, \Sigma_f, \ldots, \ldots) \vdash C::f(e) : \langle [x_f \mapsto V_{ex}, \Sigma_{ex}] >>> V_f, \text{split}(\Sigma_{ex}, [x_f \mapsto V_{ex}, \Sigma_{ex}] >>> \Sigma_f), \ldots \rangle
\]
The environment $\Delta$ is extended for each component function $C::f$ with two elements: an energy judgment $E_f$ and a run-time $t_f$. Time independent energy usage can be encoded into this $E_f$ function. For functions defined in the language, the derived energy judgement is inserted into the environment. Using the patterns described above the component function call is expressed as:
\[
\Delta \vdash e : \langle V_{ex}, \Sigma_{ex}, E_{ex} \rangle \\
\Delta, C::f = (x_f, \ldots, E_f, t_f) \vdash C::f(e) : \langle \ldots, \ldots, E_{ex} \mp (\Sigma_{ex} >>> (t_{def}(t_f) \mp E_f)) \rangle
\]
This concludes the short introduction to energy analysis with ECA. For a more thorough coverage, see [5, 6, 10].
3 Increasing the expressivity of ECA
To bridge the gap between practical programming languages and ECA, several extensions to ECA are introduced in this section: adding data structures and types, global variables and recursion.
3.1 Adding data structures and types
The only supported type in the ECA language was a signed integer. There were no explicit Booleans, floating point numbers or data structures. To add those, types of variables need to be supported. We need multiple modifications for this change: modifying the grammar, and adding type distinction to both the semantic environments and program transformations.
We consider variables to be passed \textit{by-value}. Functions can have side effects on the components and, as introduced in section 3.2, on the global variables. Functions are statically scoped. \textit{Recursion is now supported}, and the changes needed to the semantic rules and program transformations are discussed in section 3.3.
The extended BNF grammar for the ECA language is defined in listing 2. We presume there is a way to differentiate between identifiers that represent variables ⟨var⟩, function names ⟨fun-name⟩, components ⟨component⟩, and constants ⟨const⟩.
Functions on components can now have a variable number of arguments and optionally return a value (if not, the type void should be used). A constructor for data structures is included (with a syntax like a function call, using the type of the data structure as the name), as well as a construct to access fields of a data structure (with the . operator). A type checking phase is now needed to detect typing errors, like using a data structure as a condition in the if construct. Only correctly typed programs are considered. The language retains an explicit construct for operations on hardware components (e.g. memory, storage or network devices). The notation C::f refers to a function f () operating on a component C. This allows us to reason about components in a straightforward manner.
```
⟨program⟩ ::= ⟨struct-def⟩ ⟨program⟩ | ⟨fun-def⟩ ⟨program⟩ | ⟨type⟩ ⟨var⟩ '=' ⟨expr⟩ | ε
⟨struct-def⟩ ::= 'struct' ⟨struct-name⟩ 'begin' ⟨struct-fields⟩ 'end'
⟨struct-fields⟩ ::= ⟨type⟩ ⟨field-name⟩ ';' ⟨struct-fields⟩ | ε
⟨type⟩ ::= 'void' | 'bool' | 'int' | 'float' | ⟨struct-name⟩
⟨fun-def⟩ ::= ⟨type⟩ ⟨fun-name⟩ '(' ⟨[fun-args]⟩ ')' 'begin' ⟨expr⟩ 'end'
⟨fun-args⟩ ::= ⟨type⟩ ⟨name⟩ ',' ⟨fun-args⟩ | ⟨type⟩ ⟨name⟩
⟨bin-op⟩ ::= '+' | '-' | '*' | '>' | '>=' | '=' | '!=' | '<=' | '<' | 'and' | 'or'
⟨expr⟩ ::= ⟨const⟩ | ⟨var⟩ | ⟨expr⟩ ⟨bin-op⟩ ⟨expr⟩
| ⟨struct-name⟩ '(' ⟨args⟩ ')' | ⟨expr⟩ '.' ⟨field-name⟩ | ⟨type⟩ ⟨var⟩ '=' ⟨expr⟩ | ⟨var⟩ '=' ⟨expr⟩ | ⟨component⟩ ':' ⟨fun-name⟩ '(' ⟨[args]⟩ ')' | ⟨fun-name⟩ '(' ⟨[args]⟩ ')' | ⟨stmt⟩ '.' ',' ⟨expr⟩
⟨args⟩ ::= ⟨expr⟩ ',' ⟨args⟩ | ⟨expr⟩
⟨stmt⟩ ::= 'skip' | ⟨stmt⟩ ';' ⟨stmt⟩ | ⟨expr⟩
| 'if' ⟨expr⟩ 'then' ⟨stmt⟩ ['else' ⟨stmt⟩] 'end'
| 'repeat' ⟨expr⟩ 'begin' ⟨stmt⟩ 'end'
| 'while' ⟨expr⟩ 'begin' ⟨stmt⟩ 'end'
```
Listing 2: Extended BNF grammar for the ECA language, with types and data structures added, as well as one construct in ⟨program⟩ for global variable support (see next section).
A typical (predictive recursive descent) parser of this extended language is in the LL(2) class of parsers, with a small second pass. This second pass is needed to avoid the potentially infinite lookahead that would otherwise be required to differentiate between expressions and statements. During the first phase expressions and statements are combined into one construct. The small post-processing step differentiates between the two. In this way, the language can still be efficiently parsed in a simple manner.
Next are the adjustments to the semantic rules. Because a type checking phase was added, no typing error can occur when applying the semantic rules. Although the meaning differs, the syntax of the rules remains largely the same. Likewise, we adjust the program transformation rules. To support the new grammar rules, we add additional rules to the existing body of rules. The rule for field access on a variable is listed below.
\[
\Delta; \Phi \vdash \langle e, \sigma, \Gamma \rangle \xrightarrow{\text{esField}} \langle v, \sigma', \Gamma', E' \rangle
\]
### 3.2 Global variables
Control software often works with global variables. To support analysis of this control software we consequently need support in our ECA language for global variables. Hardware components are already handled as global state. To also support global variables, we need to introduce an additional global program state environment in addition to the local program state environment as it is currently used.
In the semantics, an additional global program state \( G \) is added to all tuples in every rule. Lookups are first performed in the local scope, the already existing \( \sigma \). Although for scoping a layered program state can be preferred, or one based on indirections, we use a different approach. Because of the special handling of the, by definition global, hardware components, the global program state is handled in the same manner as the global hardware component states. We add rules in the semantics for global variable definitions. The assignment rule is split depending on whether a global or a local variable is assigned. The variable lookup rule is adjusted to first look in the local scope and, if nothing is found, continue in the global scope. We start with introducing the global assignment rule.
\[
\frac{\Delta; \Phi \vdash \langle e_1, \sigma, G, \Gamma \rangle \rightarrow \langle n, \sigma', G', \Gamma', E' \rangle}
     {\Delta; \Phi \vdash \langle x := e_1, \sigma, G, \Gamma \rangle \xrightarrow{\text{esGlobAssign}} \langle n, \sigma', G' \cup [x \leftarrow n], \Gamma', E' + \Phi(\Gamma') \cdot t_{\text{assign}} \rangle}
\]
Next, we adjust the variable lookup rule, using a \( \cup \) defined on two program environments. This \( \cup \) creates one environment, according to the scoping rules: if a variable is defined in both, the left-hand argument of the \( \cup \) is normative.
\[
\Delta; \Phi \vdash \langle x, \sigma, G, \Gamma \rangle \xrightarrow{\text{esVar}} \langle (\sigma \cup G)(x), \sigma, G, \Gamma, E \rangle
\]
The higher-order functions used in the program transformation are extended with the global state while retaining their properties. As an example, the higher-order scoping rule of Section 2 is redefined below.
\[
[x \mapsto V, \Sigma] : \text{Var} \times (\text{PState} \times \text{GState} \to \text{Value}) \\
\times (\text{PState} \times \text{GState} \to \text{PState} \times \text{GState}) \\
\to (\text{PState} \times \text{GState} \to \text{PState} \times \text{GState})
\]
\[
[x \mapsto V, \Sigma](ps, gs) = ([x \mapsto V(ps, gs)], gs') \text{ where } (\_, gs') = \Sigma(ps, gs)
\]
The lookup function that the variable lookup rule depends on is redefined as follows:
\[
\text{lookup}_x : \text{PState} \times \text{GState} \to \text{Value}
\]
\[
\text{lookup}_x(ps, gs) =
\begin{cases}
ps(x) & \text{if } x \text{ exists in the local program scope } ps \\
\text{Variables}(gs)(x) & \text{if } x \text{ exists in the global variables part of } gs
\end{cases}
\]
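For concreteness, a minimal Python sketch of this lookup behaviour follows; the dictionary-based state representation and the key name `variables` are assumptions made for illustration only.

```python
def lookup(ps: dict, gs: dict, x: str):
    """Variable lookup with local-over-global precedence: the local program
    state ps is consulted first (the left-hand side of the union is
    normative), then the variables part of the global state gs."""
    if x in ps:
        return ps[x]
    return gs["variables"][x]

# Example: a local x shadows the global one, y is only defined globally.
print(lookup({"x": 1}, {"variables": {"x": 2, "y": 3}}, "x"))  # 1
print(lookup({"x": 1}, {"variables": {"x": 2, "y": 3}}, "y"))  # 3
```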
### 3.3 Adding recursion
We can define the function call in a similar way as the component call, which was introduced in section 2. However, to support recursion, a special subst higher order function is introduced to unfold the function definition once, just before it is executed on a concrete environment. The subst is defined as follows, with PState signifying a program state, GState signifying the global state (extended in section 3.2 to be both the component states and the global variables) and \(T\) a type variable (depending on whether a state update or a value function is substituted):
\[
\text{subst} : (\text{PState} \times \text{GState} \to T) \\
\times (\text{PState} \times \text{GState} \to T) \\
\to (\text{PState} \times \text{GState} \to T)
\]
\[
\text{subst}(T, R)(ps, gs) = (T[R \leftarrow \text{subst}(T, R)])(ps, gs)
\]
A recursive call is represented by the abstract higher-order function \(\text{rec}\), which acts as a placeholder on which substitution is applied. There are multiple variants, depending on the resulting type: \(\text{rec}_V\) for a resulting value function, and \(\text{rec}_\Sigma\) for a resulting state update function.
\[
\text{rec}_V : \text{PState} \times \text{GState} \to \text{Value}
\]
\[
\text{rec}_\Sigma : \text{PState} \times \text{GState} \to \text{PState} \times \text{GState}
\]
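As an illustration, the following is a minimal Python sketch of the idea behind subst and the rec placeholder; the dictionary-based states and the example function are assumptions made for illustration only. The placeholder is modelled as a callback argument, and subst ties the knot by unfolding the body one step at a time, so the unfolding terminates whenever the original function does.

```python
def subst(body):
    """One-step lazy unfolding of a recursive body.

    `body` takes a callable standing in for the rec placeholder plus the
    local and global state; calling that placeholder unfolds the definition
    once more, only when it is actually reached."""
    def unfolded(ps, gs):
        return body(unfolded, ps, gs)
    return unfolded

# Hypothetical recursive value function:
# fac(x) = if x <= 1 then 1 else x * fac(x - 1)
def fac_body(rec, ps, gs):
    x = ps["x"]
    return 1 if x <= 1 else x * rec({"x": x - 1}, gs)

fac = subst(fac_body)      # plays the role of subst(V_f, rec_V(f))
print(fac({"x": 5}, {}))   # 120
```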
If there is a function body \(B\) computing a Value, with, for example, \(\text{rec}_V\) in it, the value of the recursive function can be computed by executing \(\text{subst}(B, \text{rec}_V)\). As long as the original function terminates on the given input environment, this analysis will terminate on the same input. This is the essential difference from the \(\text{btCmp}\) rule, as can be seen in the definition of \(\text{btCall}\) below:
\[
\frac{\Delta^v, f = (x_f, V_f, \Sigma_f) \vdash e : (V_{ex}, \Sigma_{ex})}
     {\Delta^v, f = (x_f, V_f, \Sigma_f) \vdash f(e) : \big( [x_f \mapsto V_{ex}, \Sigma_{ex}] \lll \text{subst}(V_f, \text{rec}_V(f)),\ \text{split}(\Sigma_{ex}, [x_f \mapsto V_{ex}, \Sigma_{ex}] \lll \text{subst}(\Sigma_f, \text{rec}_\Sigma(f))) \big)}
\]
For each language function, a definition is placed in $\Delta^v$ using the $btFuncDef$ rule. The body of the function is analysed, and recursive calls to the function are replaced with rec placeholders using the $btRec$ rule. To support this, the function definition rule inserts a special definition in the function environment $\Delta^v$, on which the $btRec$ rule works. This leads to the following definition of $btFuncDef$, with $P$ the remaining program definition:
\[
\frac{\Delta^v, f = (x) \vdash e : \langle V_{ex}, \Sigma_{ex} \rangle}
     {\Delta^v \vdash \text{function } f(x)\ \text{begin}\ e\ \text{end}\ P : \Sigma_{st}}
\]
The placeholders are inserted using the $btRec$ rule. This rule analyses the expression used as the argument, like the component and function call rules do. The definition is in fact very similar to those definitions:
\[
\frac{\Delta^v, f = (x_f) \vdash e : \langle V_{ex}, \Sigma_{ex} \rangle}
     {\Delta^v \vdash f(e) : \big\langle [x_f \mapsto V_{ex}, \Sigma_{ex}] \lll \text{rec}_V(f),\ \text{split}(\Sigma_{ex}, [x_f \mapsto V_{ex}, \Sigma_{ex}] \lll \text{rec}_\Sigma(f)) \big\rangle}
\]
## 4 Exploring the application domain of ECA
The foreseen application area of the proposed analysis is in predicting the energy consumption of control systems, where software controls peripherals. This includes control systems in factories, cars, aeroplanes, smart-home applications, etc. Examples of hardware components range from heaters to engines, motors and urban lighting. Depending on the target device, the consumed resource can be electricity, gas, water, or any other resource whose consumption increases monotonically. The proposed analysis can predict the energy consumption of multiple algorithms and different hardware configurations. The choice of algorithm or configuration may depend on the expected workload. This makes the proposed technique useful for both programmers and operators. Below, we discuss the application domain of ECA in a way which is partly and informally published in the lecture notes for the TACLe PhD summer school in 2016 in Yspertal, Austria [7].
The possibility of abstracting from the actual hardware specification keeps the proposed approach applicable even when no final hardware component is available on which to base the hardware model, or when such a model has not yet been created. We observe that many decisions are based on relative properties between systems. Abstract hardware models can be used to focus, for example, on the relative differences between component methods and component states.
Compared to the Hoare logic in [10], many restrictions are not present in ECA. Foremost, this type system lifts the limitations that a state change cannot depend on the argument of a component function and that the return value of a component function cannot depend on the state of the component. More realistic models can, therefore, be used. This widens the range of applications, as hardware behaviour can now be modelled that previously could not be expressed.
However, there are still certain properties hardware models must satisfy for ECA to be applicable. Foremost, the models have to be discrete. Energy consumption that gradually increases or decreases over time can therefore not be modelled directly. However, discrete approximations may be used. Secondly, every state change has to be the consequence of an explicit application of a component function. So, implicit state changes by hardware components cannot be expressed.
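For illustration, the following is a minimal Python sketch of a component model satisfying these constraints: a finite set of states, each with a constant power draw, and state changes only through explicit component function calls. The radio component, its states, and all numbers are purely hypothetical.

```python
class ComponentModel:
    def __init__(self, states, initial):
        self.states = states      # state name -> constant power draw (W)
        self.state = initial
        self.energy = 0.0         # accumulated consumption (J)

    def idle(self, dt):
        # Time-dependent consumption while remaining in the current state.
        self.energy += self.states[self.state] * dt

    def call(self, target_state, incidental=0.0):
        # A component function: an explicit, discrete state change, with an
        # optional incidental energy cost for the transition itself.
        self.energy += incidental
        self.state = target_state

# Hypothetical radio component: 0.01 W asleep, 0.3 W while transmitting.
radio = ComponentModel({"sleep": 0.01, "tx": 0.3}, "sleep")
radio.call("tx", incidental=0.05)
radio.idle(2.0)        # transmit for two seconds
radio.call("sleep")
print(radio.energy)    # ~0.65 J (0.05 J switching + 0.3 W * 2 s)
```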
The quality of the derived energy expressions is directly related to the quality of the used hardware models. Depending on the goal, it is possible to use multiple models for one and the same hardware component. For instance, if the hardware model is constructed as a worst-case model, this approach will produce worst-case information. Similarly one can use average-case models to derive average case information.
It can be difficult to obtain detailed hardware models, sometimes for the simple reason that the hardware is yet to be developed and not ready. We expect that, in cases where multiple software implementations are to be compared, the relative consumption information will be sufficient to support design decisions. This allows for constructing abstract models, with not much detail but including the relevant information which is needed to make a proper comparison. However, this abstraction could impact the validity of the results (as a realistic model could yield different results).
A class of applications where the approach described in this article could be useful is a company that produces many variations of the same device. Variations occur based on local requirements, or on regional differences in the electric grid, or on different requirements set by integrators or consumers, or any combination thereof. The ECA approach allows for quickly designing those variations, and having a clear view on how changes will impact all those variations.
## 5 Validity
The analysis is sound and complete, as can be proven by induction on the syntactic structure of the program, in a similar way as for an earlier version of our analysis described in [14], in which the proof is more complex due to the presence of approximations. An analysis method may in itself be sound and complete, but the validity of applying the method in practice cannot automatically be inferred from that.
There are several validity constraints on the technique as discussed in this article. The quality of the results depends directly on the quality of the component models used. There are severe restrictions on component models, e.g. the power draw is assumed to be constant in every state of the component. In practice this is not true for most devices, e.g. the power draw can be a function of time. This has to be modelled in an abstract, discretised component model. It remains to be seen whether, with discrete approximations, real-world hardware component models can be created with a level of precision suited to making accurate energy estimates. It is hard to construct and validate such component models on the right level of abstraction, as there are several real-world practical issues. If the component model is based on specifications from hardware vendors, all kinds of errors in the specification are transferred to the component model. Production errors in the hardware, and eventually the degradation of hardware, can induce erratic energy consumption behaviour that does not conform to the specification/component model.
Validating a component model with a test setup is hard, as energy is hard to measure. Small differences in energy consumption are hard to measure correctly, and outside conditions like temperature can influence the results greatly. Energy differs significantly from other kinds of resources (e.g. memory and time), which are measurable with great precision within a computer by the computer itself. Introducing a standard energy consumption measurement interval might help in making measurements more uniform. Validating whether the number of states of a component model is the same as the actual number of states of an actual hardware component is a hard problem by itself. With powerful models, the actual validation process with real hardware might just take too much time, forcing the user to settle for a feasible but not fully validated model.
There is another potential source of deviations from the actual energy consumption in a realistic situation. Compiler errors and optimisations can greatly impact the (energy) behaviour of a source program. The compiler has influence on the timing of high-level language constructs. The timing constants used for these language constructs should match the time it takes to execute those language constructs. Such a match could be guaranteed by creating a resource consumption certified compiler in a similar way as was done in the CompCert certified compiler project [11]. Of course this would require the availability of energy-aware semantics both on the source and the target level. An even more complicating matter may be the complex design of modern processors executing the software. Even relatively small embedded microprocessors have features (e.g. register bypassing) which impact the execution timing of statements significantly. Proper documentation of such features may be hard to find, since e.g. the inner details of the pipeline of modern CPUs can often not be found in the documentation.
These constraints on modelling hardware components and their validity implications should be lifted and further investigated to make the technique discussed applicable to general, real-world problems. However, depending on the context and the precision needed, the current technique can already be applied. If the hardware component is relatively simple, a suited component model can be constructed. Another valid area for the techniques discussed is giving feedback to a prospective programmer, such that during construction of the software the developer can optimise the energy consumption for various hardware configurations.
## 6 Related work and future work
A few options are available to a programmer who wants to write energy-efficient code. The programmer can look for programming guidelines and design patterns, which in most cases produce more energy-efficient programs, e.g. [15, 3]. Then, he/she might make use of a compiler that optimises for energy-efficiency, e.g. [20]. If the programmer is lucky, there is an energy analysis available for the specific platform at hand, such as [9] in which the energy consumption of a processor is modelled in SimpleScalar.
However, for most platforms, this is not a viable option. In that case, the programmer might use dynamic analysis with a measurement set-up. This, however, is not a trivial task and requires a complex set-up [8, 4]. Moreover, it only yields information for a specific benchmark [12]. Nevertheless, these approaches are always applicable. A programmer might, however, prefer an approach that yields additional insight in a more predictive manner.
In future work, we aim to fully implement this precise analysis to evaluate its suitability for larger systems and further explore practical applicability. We intend to experiment with additional implementations of various derived approximating analyses to evaluate which techniques/approximations work best in which context.
A current limitation of the analysis is that it allows only one control process (processor). Actual systems often consist of a network of interacting systems. Therefore, incorporating interacting systems would increase the applicability of the approach. Such systems can be seen as hybrid automata. Theoretical and practical results in modelling hybrid automata [1, 2] might be a useful starting point for further research.
To make the presentation more concise, it might be useful to use as a subject language a first-order strict-evaluation functional programming language. One can expect that this will alleviate the need to have a separate basic dependent type system which transforms all variables into expressions over input variables. However, expressions would still need to be expressed in terms over input variables. To support the analysis of data types, a size analysis of data types might be useful to enable iteration over data structures, e.g. using techniques similar to [16, 17].
On the language level, the type system is precise. However, it does not take into account optimisations and transformations below the language level. This can be achieved by analysing the software on a lower level, for example, the intermediate representation of a modern compiler, or even the binary level. For increased accuracy, this may certainly be worthwhile. Another motivation to use such an intermediate representation as the language that is analysed is the ability to support (combinations of) many higher level languages. In this way, programs written in and consisting of multiple languages can be analysed. It can also account for optimisations (such as common subexpression elimination, inlining, statically evaluating expressions), which in general reduce the execution time of the program and therefore impact the time-dependent energy usage (calls with side effects like component function calls are not optimised).
Compared with [10], the ECA energy consumption in this paper is precise instead of over-approximated. Future research can show whether the expressions derived by this type system can be transformed in such a way that upper bound expressions are also derived. To support this, recursion and loops should be transformed into a Cost Relation System (CRS), a special case of recurrence relations. By solving this CRS, one can acquire a direct formula expressing the energy consumption of the recursive function or loop.
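As a small illustrative example (not taken from the paper), a loop whose body has a constant energy cost \(c\) per iteration, plus a constant overhead \(c_0\), gives rise to the cost relations

\[
E(0) = c_0, \qquad E(n) = c + E(n-1) \quad (n > 0),
\]

whose closed-form solution \(E(n) = c_0 + n \cdot c\) expresses the energy consumption directly as a function of the number of iterations \(n\).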
Another approach to providing more precise estimates is described in [19]. In this approach suitable program inputs are identified through which, by measurement, more precise results can be achieved in combination with an auxiliary energy model taking into account the energy consumption of instructions in relation to each other.
Finally, a systematic approach to constructing component models should be looked into. One can create a model from the specifications given by the vendor. Another way is using model learning [18] techniques, which create a finite state model from black box testing and measuring. All the states should have a time dependent energy consumption assigned to them, and all the transitions should be assigned incidental energy consumption. Such a model can then be used as a component model.
## 7 Conclusion
Energy analysis is a hybrid approach that analyses hardware and software together, to derive the energy consumption of executing the software on the hardware. This can be used during the development phase, or for optimisation. One can e.g. choose the best software implementation for given hardware, or the other way around: choose the best hardware for a given algorithm. The key to analysing larger systems is compositionality. Many programs encountered in the real world feature language constructs such as global variables and recursion. To analyse these programs, support in the ECA language and program transformations is required. This article extends ECA with language support for multiple types, recursive functions, and global variables. Important properties are retained: it remains a composable, precise, and parametric energy analysis.
Furthermore, to gain additional insight into the feasibility of the approach, the article explores the (im)possibilities of using the approach in practice by discussing validity and applications.
All in all, this constitutes a practical step towards the application of the proposed energy analysis on real world problems.
## References
Team AtlanMod
Modeling Technologies for Software Production, Operation, and Evolution
Rennes - Bretagne-Atlantique
Theme: Distributed Systems and Services
# Table of contents
1. **Team**
2. **Overall Objectives**
2.1. Presentation
2.2. Previous Achievements
2.3. Highlights
3. **Scientific Foundations**
4. **Application Domains**
4.1. Introduction
4.2. Reverse Engineering
4.3. Model-driven System Interoperability
4.4. Model-Driven Cartography
5. **Software**
5.1. Introduction
5.2. The ATL Model Transformation Language
5.3. AMW (AtlanMod Model Weaver)
5.4. TCS (Textual Concrete Syntax)
5.5. MoDisco (Model Discovery)
5.6. AM3 (AtlanMod MegaModel Management)
5.7. Portolan (Model-Driven Cartography)
5.8. The AmmA ToolBox
6. **New Results**
6.1. Model Transformation
6.2. Model Weaving and Model Matching
6.3. Global Model Management
6.4. Model-driven interoperability
6.5. Reverse Engineering
6.6. Modeling and the web
6.7. Social aspects of MDE
6.8. Quality in MDE
7. **Contracts and Grants with Industry**
7.1. IP Modelplex (2006-2010)
7.2. EDONA, Paris Competitiveness Cluster "System@tic" (2007-2010)
7.3. IdM++, ANR (2008-2011)
7.4. Lambda, Paris Competitiveness Cluster "System@tic" (2008-2010)
7.5. CONICYT Chile-INRIA HOT MaTE Project (2008-2010)
7.6. CESAR, ARTEMIS JOINT UNDERTAKING (2009-2012)
7.7. GALAXY, ANR (2010-2013)
7.9. BNP Paribas collaboration, Action 1 "Continuity between the process modeling and software design" (2010-2011)
7.10. MDE Expertise - Exchanging knowledge, techniques and experiences around Model Driven Engineering education (2010-2012)
8. **Other Grants and Activities**
9. **Dissemination**
9.1. Animation of the scientific community
9.2. Editorial Boards and Program Committees
9.3. PhD and Habilitation Juries
9.4. Involvement in the Eclipse Community
9.5. Invitations and Participations to Seminars
9.6. Teaching
10. **Bibliography**
1. Team
Faculty Members
Jean Bézivin [Team Leader, until June 2010, Professor, HdR]
Jordi Cabot [Team Leader, since July 2010, Associate Professor, École des mines de Nantes]
Frédéric Jouault [Associate Professor, École des mines de Nantes]
Massimo Tisi [Associate Professor, since September 2010, École des mines de Nantes, INRIA PostDoc before]
Technical Staff
Hugo Brunelière [Engineer, École des mines de Nantes since March 2010, INRIA Modelplex until February 2010]
Guillaume Doux [Engineer, IDM++, Université de Nantes]
Vincent Mahé [Engineer, IDM++, since May 2010, Université de Nantes]
Jean-Sébastien Sottet [Engineer, Lambda, until April 2010]
Cauê Avila Clasen [Engineer, CESAR]
Salvador Martinez Perez [Engineer, since April 2010, OPEES]
Wolfgang Kling [Engineer since February 2010, ARMINES, GALAXY, Internship before February 2010]
Carlos Gonzalez [Engineer, CESAR, since November 2010]
PhD Student
Kelly Garcés [until September 2010, Collaboration with Ascola, ARMINES]
Post-Doctoral Fellow
Robert Tairas [since November 2010]
Visiting Scientists
Javier Canovas [from April to July 2010]
Soichiro Hidaka [May 2010]
Administrative Assistant
Hanane Maaroufi [Part-time]
Other
Francisco Murcia [Internship since September 2010]
2. Overall Objectives
2.1. Presentation
Model Driven Engineering (MDE) is a software engineering paradigm that advocates for the rigorous use of (software) models as the main artifacts in all software engineering activities. This comes from an industrial need to have a regular and homogeneous organization where different facets of a software system may be easily separated or combined. The basic assumption of MDE is that the classical programming code is often not at the right representation level for managing all these facets even if, at some point of the process, executable code will usually be generated from some abstract representation level. It has been shown that adoption of MDE increases productivity and quality of the software systems under development.
In this sense, AtlanMod is developing pioneering solutions to solve core research challenges in MDE and to ensure its successful application on relevant industrial problems.
2.2. Previous Achievements
AtlanMod has significantly contributed to the evolution of MDE and to the progressive emergence of a scientific community in this field. The team has progressively developed a complete modeling framework [45] [9] that is now well accepted in the scientific community. This framework provides core MDE components (described in several research papers e.g., [4] [5] [47] [49] [3]) for (meta)model definition and manipulation. The iterative definition of this conceptual framework has been validated by the construction of several experimental concrete toolboxes. The first generation (sNets, based on semantic networks) was built in Smalltalk in the 90’s [43]. The second one is the current AmmA (for AtlanMod Model Management Architecture) toolbox available in Eclipse. This is at the same time a research platform and an industrial toolbox.
The AmmA platform is based on the conclusion that MDE is in fact a branch of language engineering (this was one of the outcomes of a Dagstuhl school that we co-organized in 2004 [44]). More precisely, a metamodel is now considered as the definition of the abstract syntax of a language, usually a Domain Specific Language (DSL). This idea is central to the AmmA toolbox, which may be viewed as a DSL building framework itself composed of a number of primitive DSLs. The four best-known AmmA DSLs are KM3 [4] (a DSL for metamodel specification), TCS [5] (a DSL for textual syntaxes), ATL [7] [6] (a DSL for model transformation), and AMW [49] (a DSL for representing model correspondences). All these mutually dependent tools are available under Eclipse.org (projects or components: M2M, ATL, TMF, MoDisco, AM3, AMW). They are currently in use in research, teaching, and industry and they have a broad user community.
2.3. Highlights
The following item list describes the main highlights of the year for the AtlanMod team:
- Opening of new research lines (Modeling as a Service, Quality of models, process and people aspects of MDE, and Model-Driven Cartography focused on the Business-IT alignment industrial challenge).
- Changes on the team structure and leadership. Jordi Cabot replaces Jean Bézivin as team leader in July. Two more permanent members join the team (a research engineer and an associate professor) plus an INRIA postdoc and several research engineers linked to the team’s projects.
- First edition of the MDE diploma: a Post-Master’s Specialization in Model Driven Engineering officially recognized by the French Ministry of Industry
3. Scientific Foundations
3.1. MDE Foundations
MDE can be seen as a generalization and abstraction of object technology, allowing more abstract organizations to be mapped onto class-based implementations. In MDE, (software) models are considered as the unifying concept [45].
Traditionally, models were often used as initial design sketches mainly aimed for communicating ideas among developers. On the contrary, MDE promotes models as the primary artifacts that drive all software engineering activities. Therefore, techniques for model definition and manipulation are the basis of any MDE framework.
The MDE community distinguishes three levels of models: (terminal) model, metamodel, and metametamodel. A terminal model is a (partial) representation of a system/domain that captures some of its characteristics (different models can provide different knowledge views on the domain and be combined later on to provide a global view). In MDE we are interested in terminal models expressed in precise modeling languages. The abstract syntax of a language, when expressed itself as a model, is called a metamodel. A complete language definition is given by an abstract syntax (a metamodel), one or more concrete syntaxes (the graphical or textual syntaxes that designers use to express models in that language) plus one or more definition of its semantics. The
relation between a model expressed in a language and the metamodel of that language is called conformsTo. Metamodels are in turn expressed in a modeling language called a metamodeling language. Similar to the model/metamodel relationship, the abstract syntax of a metamodeling language is called a metametamodel, and metamodels defined using a given metamodeling language must conform to its metametamodel. Terminal models, metamodels, and the metametamodel form a three-level architecture with levels respectively named M1, M2, and M3. A formal definition of these concepts is provided in [4] and [46]. MDE promotes unification by models, just as object technology in the eighties proposed unification by objects [42]. These MDE principles may be implemented in several standards. For example, OMG proposes a standard metametamodel called Meta Object Facility (MOF), while the most popular example of a metamodel in the context of OMG standards is the UML metamodel.
In our view the main way to automate MDE is by providing model manipulation facilities in the form of model transformation operations that taking one or more models as input generate one or more models as output (where input and output models are not necessarily conforming to the same metamodel). More specifically, a model transformation \( Mt \) defines the production of a model \( Mb \) from a model \( Ma \). When the source and target metamodels are identical (\( MMa = MMb \)), we say that the transformation is endogenous. When this is not the case (\( MMa \neq MMb \)) we say the transformation is exogenous. An example of an endogenous transformation is a UML refactoring that transforms public class attributes into private attributes while adding accessor methods for each transformed attribute. One of the first papers to discuss metamodel-based transformation was [10].
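As a toy illustration (not AtlanMod code), the following Python sketch shows an endogenous transformation in the spirit of the UML refactoring example above; the model encoding as nested dictionaries, the attribute names, and the accessor naming are assumptions made for illustration only.

```python
def public_to_private(model):
    """Endogenous transformation (MMa = MMb): public class attributes become
    private, and an accessor operation is added for each transformed attribute."""
    target = {"classes": []}
    for cls in model["classes"]:
        new_cls = {"name": cls["name"],
                   "attributes": [],
                   "operations": list(cls.get("operations", []))}
        for attr in cls["attributes"]:
            if attr["visibility"] == "public":
                new_cls["attributes"].append({**attr, "visibility": "private"})
                new_cls["operations"].append({"name": "get" + attr["name"].capitalize()})
            else:
                new_cls["attributes"].append(dict(attr))
        target["classes"].append(new_cls)
    return target

# Mt : Ma -> Mb
ma = {"classes": [{"name": "Account",
                   "attributes": [{"name": "balance", "visibility": "public"}]}]}
mb = public_to_private(ma)
print(mb["classes"][0]["operations"])  # [{'name': 'getBalance'}]
```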
Many other operations may be considered as transformations as well. For example verifications or measurements on a model can be expressed as transformations [47]. One can see then why large libraries of reusable modeling artifacts (mainly metamodels and transformations) will be needed. Another important idea is that a model transformation is itself a model [1]. This means that the transformation program \( Mt \) can be expressed as a model and as such conforms to a metamodel \( MMt \). This allows a homogeneous treatment of all kinds of terminal models, including transformations. \( Mt \) can be manipulated using the same existing MDE techniques already developed for other kinds of models. For instance, it is possible to apply a model transformation \( Mt' \) to manipulate \( Mt \) models. In that case, we say that \( Mt' \) is a higher order transformation (HOT), i.e. a transformation taking other transformations (expressed as transformation models) as input or/and producing other transformations as output.
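Continuing the toy illustration, a higher-order transformation can be sketched as a function that takes a transformation and produces a new one; here it merely adds a simple trace, but it shows that a transformation is itself an artifact that other transformations can take as input or produce as output (the naming and the trace structure are assumptions made for illustration only).

```python
def upper_names(model):
    """A tiny example transformation: upper-cases every class name."""
    return {"classes": [{**c, "name": c["name"].upper()} for c in model["classes"]]}

def with_trace(mt):
    """Higher-order transformation (HOT): takes a transformation as input and
    returns a derived transformation that also records a simple trace."""
    def traced(ma):
        mb = mt(ma)
        mb["trace"] = {"transformation": mt.__name__,
                       "source_classes": [c["name"] for c in ma["classes"]]}
        return mb
    return traced

ma = {"classes": [{"name": "Account"}]}
mb = with_trace(upper_names)(ma)
print(mb["classes"][0]["name"], mb["trace"]["transformation"])  # ACCOUNT upper_names
```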
As MDE developed, it became apparent that this was a branch of language engineering [44]. In particular, MDE offers an improved way to develop DSLs (Domain-Specific Languages). DSLs are programming or modeling languages that are tailored to solve specific kinds of problems in contrast with General Purpose Languages (GPLs) that aim to handle any kind of problem. Java is an example of a programming GPL and UML an example of a modeling GPL. DSLs are already widely used for certain kinds of programming; probably the best-known example is SQL, a language specifically designed for the manipulation of relational data in databases. The main benefit of DSLs is that they allow everybody to write programs/models using the concepts that actually make sense to their domain or to the problem they are trying to solve (for instance Matlab has matrices and lets the user express operations on them, Excel has cells, relations between cells, and formulas and allows the expression of simple computations in a visual declarative style, etc.). As well as making domain code programmers more productive, DSLs also tend to offer greater optimization opportunities. Programs written with these DSLs may be independent of the specific hardware they will eventually run on. Similar benefits are obtained when using modeling DSLs. In MDE, new DSLs can be easily specified by using the metamodel concept to define their abstract syntax (with KM3 for example [4]). Models specified with those DSLs can then be manipulated by means of model transformations (with ATL for example [3]).
When following the previously described principles, one may take advantage of the uniformity of the MDE organization. Considering similarly models of the static architecture and models of the dynamic behavior of a system allows at the same time economy of concepts and economy of implementation. Considering models of products (e.g., software artifacts like UML) and models of processes (e.g., software processes like SPEM) may lead to a dual process/product organization. Considering transformation models, weaving models, and
traceability models as special cases of correspondence models may also lead to simplicity and efficiency of implementations. These are some of the use cases that are being explored in the team.
4. Application Domains
4.1. Introduction
It is difficult to define a precise applicability domain for MDE because, by definition, the scope is the largest. Generic tools developed by the AtlanMod team may apply as well to information systems and to embedded systems. MDE is not even restricted to software engineering, but also applies to data engineering [48] and to system engineering [41]. There are a lot of problems in these application domains that may be addressed by model transformation techniques.
The AmmA tools are currently being used in projects related to embedded systems, like Lambda, Edona, OPEES, etc. The domains addressed by these projects are mainly automotive, aeronautics, and transportation. Since our tools are offered as open-source, we may notice that several groups have used them in quite different areas like: critical software, real time, formal methods, Web engineering, ontology engineering, Web semantics, etc.
In the next sections we focus on three specially interesting domains on which we have applied our MDE techniques.
4.2. Reverse Engineering
One important and original domain that is being investigated by the AtlanMod team is the reverse engineering of legacy code. Here again this spans through a spectrum of application domains, the legacy being coded in such languages as ADA, Java, COBOL, C, C++ or even FORTRAN. We have shown how the reverse engineering practices may be advantageously revisited with the help of MDE and open-source tooling. The team has set up the MoDisco project [21][38][40] under Eclipse.org to investigate this and to federate the international research efforts in this domain. The main idea is that a metamodel may precisely express what we want to extract from a low-level legacy code. For example we can define a rational process to extract business rules from COBOL legacy programs. The rise in abstraction allowed by MDE may bring new hopes that reverse engineering can now move beyond ad-hoc practices. The MoDisco Eclipse project is being referenced by the OMG ADM (Architecture-Driven Modernization) normalization initiative.
4.3. Model-driven System Interoperability
Historically the first application area of MDE was code generation from abstract models. A typical example is generation of Java code from UML models or of platform-dependent from platform-independent software artifacts. As discussed above, the subject of reverse engineering was the second major historical application field for MDE, with the discovery of structured precise models from heterogeneous and often unstructured systems.
In the recent period however, MDE has been used as a solution for system interoperability. In this new context, transformations are performed while systems are in execution. For example, a set of transformations may keep two different tools synchronized, by exchange of structured data or even by interpretation of complex events. This approach revisits tool interoperability by explicitly representing the associated metamodels (if needed, deduced from the tool API or storage format), defining mappings between them and using those mappings to automatically generate transformations between them. Our proposal to the CESAR (see 7.6) reference technology platform uses these ideas for defining the notion of virtual tool. The establishment of correspondences between technical spaces (e.g., Eclipse Modeling and Microsoft DSL tools or OSLO [20]) follows a similar schema.
4.4. Model-Driven Cartography
A new important topic AtlanMod started investigating on this year is how to use Model-Driven Engineering concepts and techniques to solve cartography problems. In both the business and IT worlds, companies are now using many different software tools covering multiple inter-connected domains and related features. Because of the high complexity and heterogeneity of these organizations, it is more and more important to be able to have a real clear vision of the actual situation in terms of considered tools and various relationships (either already existing or potential) between them. The representation and use of this knowledge, called Cartography, is fundamental in the process of evaluating/measuring deployed architectures, but also for specifying their future evolutions or even elaborating on brand new solutions. Having a clear and comprehensible cartography of a given situation is also required in many other contexts such as reverse engineering or tool interoperability in general, which are domains we are also actively working on. The main challenge raised by this general problem is about the effective management and understanding of a possible huge amount of metadata on all the considered software artifacts. These metadata can be of various and varied kinds, depending on the targeted goal(s) and the corresponding chosen viewpoint(s). Related to this problem, AtlanMod has notably worked on a Model-Driven Cartography prototype named Portolan, and also experimented on cartography visualizations [31].
5. Software
5.1. Introduction
The scheme followed until now by the team to develop high quality software that may attract a large user base (with the benefits this provides: visibility, feedback, ...) consists of: 1) identifying relevant research problems for which we develop an initial proof of concept, 2) releasing the proof of concept as an open source contribution to the community and integrating its feedback, 3) promoting it towards a normalization status (de jure or de facto), and 4) if the proof of concept becomes successful and starts to attract a large user base, partnering with a technology provider to create a commercial-quality level (but still open source) version of the tool. This last step is necessary because technical quality is just one of the many factors that companies analyze when deciding whether to take the risk of adopting a new tool. Examples of other factors that influence this decision are: user support, good documentation or usability aspects. Research groups cannot invest resources on these non-core research aspects. The technology provider can help the research group develop these aspects in exchange for visibility and the possibility of selling services around the tool [37].
The most ancient tool developed in the team is the ATL model transformation language. In this case the transfer state has been recently reached and we are presently working on the last phase of the process. MoDisco is approaching the same status and other tools of the team are following the same path.
We name AmmA (see 5.8) the set of software tools developed by the AtlanMod team. Until now we have used the Eclipse foundation as the underlying platform for all our tools, but we may consider alternative platforms in the future (see as an example our new Modeling as a Service initiative [36]).
For each tool we just indicate the main contact since most team members contribute at one point or the other in the development of several tools.
5.2. The ATL Model Transformation Language
Participant: Frédéric Jouault [contact].
URL: http://www.eclipse.org/m2m/atl/
With an eye on the normative work of the OMG (MOF, OCL, QVT, etc.), a new conceptual framework has been developed based on a second generation model transformation language called ATL. Although ATL influenced the OMG standard, the approach is more general as discussed in [8].
Due to the previous iterations, the architecture of the ATL model transformation language is highly modular and based on a generic model engineering virtual machine and on a bootstrapped compiler itself running on this virtual machine. The central idea in Frédéric Jouault’s Ph.D. thesis [50] of using MDE tools to build other MDE tools (or using DSLs to build other DSLs) has been quite productive. Seeking conceptual simplicity also led to implementation efficiency since ATL currently provides one of the most efficient solutions for model transformation.
In 2004 IBM gave an Eclipse innovation award to the ATL project. In 2007 Eclipse recognized ATL as one central solution for model transformation and promoted it to the M2M project (see Eclipse.org/m2m). There are more than 200 industrial and academic sites using ATL today, and several Ph.D. thesis in the world are based on this work.
5.3. AMW (AtlanMod Model Weaver)
**Participant:** Frédéric Jouault [contact].
**URL:** http://www.eclipse.org/gmt/amw/
AMW is a component-based platform for model weaving that can be used to establish and manage abstract correspondences between models. The platform is generic and based on the Eclipse contribution mechanism: components are defined in separate plugins. The plugins are further interconnected to create the model weaving workbench. Components for user interface, matching algorithms and serialization of models may be plugged as necessary. We extended the Eclipse EMF architecture for model manipulation to coordinate the weaving actions. We use the EMF reflective API to obtain a standard weaving editor which adapts its interface according to metamodels modifications. The ATL transformation engine is plugged as the standard transformation platform. AMW is released as open-source software under the Eclipse Public License and available as an Eclipse plugin. AMW is being used by more than 40 user sites, including research labs and major companies (NASA, BAE, Versata, Obeo, etc.).
5.4. TCS (Textual Concrete Syntax)
**Participant:** Frédéric Jouault [contact].
**URL:** http://www.eclipse.org/gmt/tcs/
It is often necessary to define concrete syntaxes for metamodels, which only define abstract syntaxes. TCS is a language in which context-free concrete syntaxes can be defined by specifying how each concept of a metamodel is represented textually. Once such a definition has been defined in the form of a TCS model, it is possible to: 1) parse programs into models, 2) pretty-print models into programs, and 3) edit programs with a rich text editor supporting syntax highlighting, code completion, outline, text hovers, hyperlinks, etc. Many textual languages have already been specified with TCS: 1) within AmmA (see 5.8) with ATL, KM3, TCS itself, etc. 2) in other contexts with SQL, LOTOS, EBNF, etc. TCS currently generates ANTLR v3 grammars that are LL(*).
5.5. MoDisco (Model Discovery)
**Participant:** Hugo Brunelière [contact].
**URL:** http://www.eclipse.org/modisco/
MoDisco (for Model Discovery) is an Eclipse component that gathers contribution from several academic and industrial partners in the field of model-driven reverse engineering. The goal of the project is to federate common efforts in the transformation of legacy systems into models. The extraction process is metamodel driven, i.e. (1) all extracted models conform to a given metamodel and (2) the discoverer itself is generated from the metamodel, usually in a semi-automatic way. In some cases the legacy system is structured which greatly facilitates model extraction. For example if the legacy is composed of code (e.g., ADA, COBOL, Java, Visual Basic, etc.), the grammar and the target metamodel may be jointly used in order to generate the discoverer. Once the model has been extracted from the legacy, it can be measured, understood or manipulated by way of model transformations in languages like ATL. MoDisco has close relations with the OMG Architecture Driven Modernization (ADM) Task Force, for which the project provides reference implementations of its main standards like the Knowledge Discovery Metamodel (KDM), Software Measurement Metamodel (SMM) and others to come. Moreover, the Eclipse EMFT EMF Facet project has been very recently initiated as a MoDisco spin-off in order to externalize some features which are not actually specific to reverse engineering problems, and thus may be reused in many different contexts. The goal of EMF Facet (http://www.eclipse.org/modeling/emft/facet/) is to work on providing a generic and dynamic (i.e. the original model is actually not altered) model extension mechanism, based on the runtime execution of queries (possibly in ATL, OCL, Java, etc) on the extended model(s).
5.6. AM3 (AtlanMod MegaModel Management)
Participant: Hugo Brunelière [contact].
URL: http://wiki.eclipse.org/index.php/AM3
The AtlanMod Megamodel Management tool offers several functionalities for modeling in the large [2], i.e. for handling several related models (either terminal models, metamodels or transformation models) that can be reused. The main component in AM3 is a generic megamodel manager that allows the user to browse and manipulate a set of related models. This manager knows the semantic relations between all these models. These relations are often associated to a given weaving model allowing not only navigating the traces between models, but also the traces between model elements. Since the links are stored externally as weaving models, the participating models do not get polluted and may be used as they are. Furthermore it is possible to handle multiple traceability chains going through similar models. The generic tool for megamodel management has been used by different partners for several use cases like operationalization of chains of transformations.
5.7. Portolan (Model-Driven Cartography)
Participant: Hugo Brunelière [contact].
URL: http://www.emn.fr/z-info/atlanmod/index.php/Model-Driven_Cartography
Model-Driven Engineering (MDE), with its simple core principles and set of base generic techniques (metamodeling, model transformation, model weaving, etc), provides the relevant support for designing and implementing Cartography solutions. The proposed Portolan prototype is a concrete illustration of both a model-based and model-driven Cartography platform. Thus, the objective of Portolan is to facilitate the identification of interoperability solutions between tools by: 1) discovering (at least semi-automatically) maps of given situations in terms of deployed tools and relationships between them; 2) easily navigating and editing these maps; 3) augmenting or specializing them with both manually-entered and computed information; 4) visualizing them, using different customizable ways, in order to facilitate their understanding. This recently developed generic tooling for cartography has already been used during the first action of our collaboration with BNP Paribas, as well as in the context of the IDM++ project.
5.8. The AmmA ToolBox
ATL, AMW, TCS, MoDisco, and AM3 are among the most important Eclipse.org components produced by the AtlanMod team. However there are also other components and a lot of functionalities, examples, and use cases made available and necessary to express solutions to many problems. The whole set of contributions composes the AmmA platform.
6. New Results
6.1. Model Transformation
Model transformation and in particular our ATL model transformation language continues playing a key role in our MDE strategy. During 2010 the new results on this area have been:
- The development of an execution algorithm for live incrementality in ATL, with the implementation of a prototype, to immediately propagate to the target model any change performed to the source model (avoiding the complete recomputation of the target model from scratch) [28].
- An improvement in the productivity of HOT development in ATL, obtained by analyzing the set of public transformations from our user base, detecting recurrent and time-expensive patterns, and proposing an alternative for them in the form of a HOT library and some extensions to the ATL language [34].
- Formalization of our industrialization strategy for ATL [37]. The resources freed in this process are invested in pursuing research on model transformation.
Additionally, we applied model transformation to two new areas:
- Performance engineering [35], with complex transformation chains.
6.2. Model Weaving and Model Matching
During 2010, we improved our results in model matching. The AtlanMod Matching Language [11] (AML) was notably compared to other model migration tools [33]. Additionally, we leveraged Global Model Management techniques (see 6.3) in order to automatically evaluate the quality of AML output [27].
6.3. Global Model Management
In order to represent metadata about model transformation more accurately, we have worked on a functional typing system for megamodeling [16]. The basic idea is to consider transformations as functions, and to give them functional types. With this approach, Higher-Order Transformations can notably be represented correctly: their output is not simply a transformation model, but a model transformation as well [1].
We have also used model weaving and Global Model Management (GMM) jointly in order to better support inter-DSL coordination [29].
Finally, we have applied GMM to a problem outside of MDE: the management of Eclipse plugins [39]. This work notably demonstrates that megamodeling may be used to represent non-MDE systems, and to operate on them.
6.4. Model-driven interoperability
In 2010, we have made some progress in the understanding of how MDE may be used to represent tools in order to enable interoperability between them [22] and applied this knowledge to the specific scenario of bridging the Eclipse and Microsoft modeling tools [20]. This scenario is specially challenging since the interoperability must be achieved both at the model and metamodel levels.
We have also experimented with reuse and interoperability of two graphical tools [14]: Graphviz\(^1\), and GMF\(^2\) (Graphical Modeling Framework).
\(^1\)http://www.graphviz.org/
\(^2\)http://www.eclipse.org/modeling/gmp/
6.5. Reverse Engineering
With the end of the ModelPlex project, we have consolidated and promoted the results achieved in our MoDisco model-driven reverse engineering project [21][38][40].
6.6. Modeling and the web
During this year, we have launched the MaaS (Modeling as a Service) initiative [36] where we explore the possibility of providing modeling and model-driven engineering services from the cloud. Some topics that would fit in this area would be: collaborative and distributed modeling tools, model transformation engines in the cloud, modeling mash-ups (combining model-driven engineering services from different providers), global model management and scalable model-based services in the cloud to deal with very large models and model transformations.
Apart from the previous topic (focused on using the web and the cloud for modeling), we have also worked on applying MDE to solve web engineering problems:
- Development of safe web interface interactions [18].
- Management and synchronization of tests written at the different abstraction levels (computation-independent, platform-independent and platform-specific) of a model-driven Web application [32].
- Definition of a search engine on heterogeneous sources (following the paradigm of Search Computing), using a model-driven perspective [19].
6.7. Social aspects of MDE
All the technical work on MDE must be complemented with a better understanding of how users apply MDE in practice. Otherwise we may end up designing techniques that do not fit their needs. This is even more important for companies willing to adopt MDE. We must carefully analyze the social and organizational changes that companies must undergo in order to successfully introduce MDE techniques as an important part of their development processes. This part has been largely ignored and causes the failure of many MDE projects. In particular, during this year we have been working on the following aspects:
- Integrating non-functional aspects in model-driven development processes to ensure that the quality properties of the system at run-time satisfy the user expectations [17].
- Proposing a new approach for modeling software processes that helps companies to visualize and understand the (social) requirements imposed by a software process [25] and to adapt the process to their own reality [26].
- Increasing the expressivity of modeling languages to facilitate the task of designers. In particular, we have targeted the design of data warehouses [24].
6.8. Quality in MDE
In MDE, the quality of modeling artifacts (models, metamodels, ...) has a direct effect on the quality of the running system (especially in forward engineering, where the system is automatically generated from the models, and thus defects in the models generate defects in the code). During this year we have published some results on the validation and verification of model transformations [13][23] and on the generation of executable operations [12].
7. Contracts and Grants with Industry
7.1. IP Modelplex (2006-2010)
Participants: Hugo Brunelière, Jean Bézivin, Frédéric Jouault.
The MODELPLEX project (http://www.modelplex.org), with Thales, IBM, Sodifrance, SAP, etc., aims at defining a coherent infrastructure for the development of complex systems, where complexity corresponds to several factors such as size, heterogeneity, dynamic evolution, distribution and subsystem autonomy. Examples of highly heterogeneous systems are legacy systems that have been built and adapted over a long period of time, using different technologies. Model-driven reverse engineering, global model management (megamodeling) and model transformation for interoperability are also important problems which we addressed more particularly in this project. The project successfully ended with the official final review in May 2010 at the European Commission.
7.2. EDONA, Paris Competitivity Cluster "System@tic" (2007-2010)
Participants: Frédéric Jouault, Jean Bézivin.
The EDONA project (http://www.edona.fr), which stands (in French) for Environments of Development Open to the Standards of the Car, is a project of the System@tic Paris-Region competitiveness cluster. Its objective is the construction of an open platform facilitating the realization of modular, interoperable development tool chains that are adaptable to the various needs of the actors and trades of the automotive industry. The project is directed by Renault, and the form chosen for EDONA is the creation of a reference technological platform followed by its specialization into applicative products for the sector. In this project, the AtlanMod team collaborates with Obeo on the industrialization of ATL.
7.3. IdM++, ANR (2008-2011)
Participants: Jean Bézivin, Hugo Brunelière, Jordi Cabot, Guillaume Doux, Frédéric Jouault, Vincent Mahé.
IdM++ (http://www.emn.fr/x-info/idmpp/index.php/Accueil) is a project involving ILOG, CEA, Mia-Software, Prima Solution and AtlanMod. The main goal is to investigate advanced issues in model engineering. The IDM++ consortium proposes the combination of Global Model Management and Model Configuration techniques. This approach is promoted according to the partners' background in Model-Driven Engineering, constraint-based programming and optimization techniques. The team is particularly in charge of WP 2, on global model management. In this context, the interoperability between various DSLs will be studied. The total allocated budget amounts to 810 kEUR, of which the team's share is 250 kEUR over three years.
7.4. Lambda, Paris Competitivity Cluster "System@tic" (2008-2010)
Participants: Jean-Sébastien Sottet, Jean Bézivin, Jordi Cabot.
In the context of embedded software deployed on "off-the-shelf" execution platforms, the LAMBDA project (http://www.usine-logicielle.org/lambda/index_EN.html, System@tic Paris-Region) has two major goals: 1) to demonstrate the technical feasibility and the interest of model libraries by formalizing the key properties of execution platforms, and 2) to reconcile appropriate standards (SysML, MARTE, AADL, IP-XACT) with de facto standards (already implemented by widespread analysis and simulation tools). Lambda is a three-year project gathering 14 partners with an overall budget of 5.30 MEUR. LAMBDA means Libraries for Applying Model Based Development Approaches. The project started on June 1, 2008. AtlanMod is involved in Task T2.2 (analysis of requirements for scalability) and, as part of this task, has been working on developing performance tests to prove the scalability of MDE technologies.
7.5. CONICYT Chili-INRIA HOT MaTE Project (2008-2010)
Participants: Jean Bézivin, Frédéric Jouault, Hugo Brunelière.
HOT MaTE stands for Higher-Order Transformation Model and Transformation Engineering. This CONICYT-INRIA project is a collaboration with the MaTE research group from the University of Chile. The objective of this project is to advance the state of the art in model transformation and global model management. In particular, we study how Higher-Order Transformations can be represented in a megamodel, as well as megamodel evolution. Prototypes are developed on the AmmA platform, more specifically using ATL for transformations and AM3 for megamodeling.
7.6. CESAR, ARTEMIS JOINT UNDERTAKING (2009-2012)
Participants: Jean Bézivin, Frédéric Jouault, Cauê Avila Clasen, Carlos Gonzalez.
CESAR (http://cesarproject.eu) stands for Cost-Efficient methods and processes for SAfety Relevant embedded systems, and is a European funded project from ARTEMIS JOINT UNDERTAKING.
The three transportation domains, automotive, aerospace, and rail, as well as the automation domain share the need to develop ultra-reliable embedded systems to meet social demands for increased mobility and safety in a highly competitive global market. To maintain the European leading edge position in the transportation as well as automation market, CESAR aims to boost cost efficiency of embedded systems development and safety and certification processes by an order of magnitude.
CESAR pursues a multi-domain approach integrating large enterprises, suppliers, SMEs and vendors from cross-sectoral domains, and cooperating with leading research organizations and innovative SMEs. In particular, we work on the Reference Technology Platform, which aims at tool integration. We propose to achieve tool integration by means of metamodeling and model transformations [20].
7.7. GALAXY, ANR (2010-2013)
Participants: Jean Bézivin, Frédéric Jouault, Wolfgang Kling.
GALAXY (http://galaxy.lip6.fr) proposes to deal with the model-driven collaborative development of complex systems. It is a French national project funded by ANR (ARPEGE Program), which is carried out by an industrial consortium whose partners are Industry (Airbus), Research and University (Armines -AtlanMod-, IRIT, LIP6) and Vendors and service providers (AKKA, Softeam). Galaxy aims at defining an open and flexible architecture particularly designed to be scalable. One of the key points is related to the fragmentation and distribution of huge models, their synchronization, and their relationship with the communication means classically used by development teams. The work is being driven by use cases provided by Airbus, which describe scalability issues they face during systems development. Our work in this project is composed of two main parts: 1) the conception of efficient mechanisms for multiple views of complex (large) models; 2) the definition of a solution for the automation of modeling tasks on large model repositories, such as the execution of large amounts of transformations, the orchestration of their execution, and the effective browsing of repositories for finding specific models.
7.8. OPEES, ITEA2
Participants: Jean Bézivin, Jordi Cabot, Salvador Martinez Perez, Massimo Tisi.
OPEES (http://www.opees.org) stands for Open Platform for the Engineering of Embedded Systems, and is a European funded project from ITEA2. Its mission statement is "to settle a community and build the necessary means and enablers to ensure long-term availability of innovative engineering technologies in the domain of dependable or critical software-intensive embedded systems". In particular, within OPEES, our schema of open source industrial collaboration [37] (e.g. around ATL) will be tested and developed as a team contribution to this project. AtlanMod is also responsible for providing a model-driven interoperability solution for the integration of the ecosystem of OPEES components, based on metamodeling the domain data of each component and bridging, by model transformation, the specific data representations.
7.9. BNP Paribas collaboration, Action 1 "Continuity between the process modeling and software design" (2010-2011)
Participants: Jean Bézivin, Hugo Brunelière, Jordi Cabot, Vincent Mahé.
This collaboration with the BNP Paribas company started this year with a first collaborative action studying business process modeling, and more specifically its continuity between the domain (business) and technological (IT) spaces. Our work in this action is composed of two main parts: 1) a study of the state of the art (in both the industry and research worlds, including for instance European projects), also presenting proposals of concrete solutions; 2) the implementation of an extensible Business-IT prototype, following the recommendations of the previously mentioned study. This action is planned to be followed by a second one, during the coming year, targeting other topics of interest where MDE and our technologies are relevant and applicable.
7.10. MDE Expertise - Exchanging knowledge, techniques and experiences around Model Driven Engineering education (2010-2012)
Participants: Jordi Cabot, Massimo Tisi.
MDE Expertise (http://www.learnMDE.org) is a European Leonardo da Vinci project (Lifelong Learning Programme) focused on the development of common educational materials for the Model Driven Engineering (MDE) area. The main aim of the project is to transfer and adapt education in Model Driven Engineering concepts to the local IT education societies of the partners' countries, thus improving the partners' knowledge about current software development methods. This results in better preparation for professionals competing on the IT market. Direct results include: development of common MDE teaching methods, suited to the partners' local needs and market requirements; creation of teaching materials (with an online version) localized for the partners' languages; and definition of tools for e-learning and knowledge exchange. Indirect effects include improving the capability of local SMEs to solve complex software design problems through modeling, and evolving the software development job market.
8. Other Grants and Activities
8.1. Regional Initiatives
In 2009, the AtlanMod team initiated a regular series of events called Les Jeudis des Modèles. This series has successfully continued during 2010. Approximately every two months, an international expert from the model engineering community is invited to present a subject of research or innovation of interest to the scientific and industrial community. These events typically attract between 60 and 80 researchers, students and industry participants. In fact, the Les Jeudis des Modèles event has become a regional rendez-vous of the model engineering community, attracting people from Rennes, Vannes, La Rochelle, and many other places beyond Nantes. Among others, we have invited Patrick Albert (IBM), Nicolas Rouquette (NASA/JPL), Ed Merks (Macro Modeling), Ivar Jacobson (IJ company), Sridhar Iyengar (IBM) and Jon Whittle (Professor at Lancaster University). These visits also allow us to organize meetings between the researchers of the AtlanMod team and the various industry participants attending the main presentation.
As part of our commitment to the Eclipse community, we co-organized (together with the Obeo company) an Eclipse DemoCamp event to celebrate the Eclipse Helios version release. The Eclipse DemoCamps are an opportunity to showcase all of the technology being built by the Eclipse community and, for the team, an excellent opportunity to meet other Eclipse enthusiasts in the region and show them the Eclipse tools developed by the team.
9. Dissemination
9.1. Animation of the scientific community
Participants: Jordi Cabot, Frédéric Jouault, Massimo Tisi.
http://www.emn.fr/z-info/jmodeles/
In 2010, the AtlanMod team has coorganized the following events:
- 2nd International Workshop on Model Transformation with ATL\footnote{http://www.emn.fr/z-info/atlanmod/index.php/MtATL2010} (MtATL 2010) co-located with the ICMT and Tools conferences in Málaga.
- OCL and Textual Modeling Workshop (OCL’10) co-located with the MoDELS conference in Oslo.
- Model-driven Interoperability (MDI’10) co-located with the MoDELS conference in Oslo.
- Desarrollo de software dirigido por modelos (DSDLM’10), Spanish national workshop on MDD
9.2. Editorial Boards and Program Committees
Participation in editorial boards of scientific journals:
- Jean Bézivin: SoSym, IBIS, JOT.
- Jean Bézivin: AITO (ECOOP), TOOLS, ICMT.
Participation in conference program committees:
- Jordi Cabot: WWW’10, MoDELS’10, SLE’10, ECMFA’10, ICECCS’10, ICMT’10, ICWE’10, WEBIST’10, Models and Evolution - MoDELS workshop, Doct. Symp at SLE’10, Effectiveness of MDE - ECMFA workshop, Workshop on Modeling Social Media, TTC’10, ADBIS workshop on MDA, Ed. Symp at MoDELS’10
9.3. PhD and Habilitation Juries
Participants: Jean Bézivin, Jordi Cabot.
Jean Bézivin was a member of the following PhD juries:
- Rick Salay, PhD committee, University of Toronto, Toronto, Canada
- Bert Vanhooft, PhD committee, K.U. Leuven, Heverlee, Belgium
- Vadim Zaytsev, PhD committee, University of Koblenz, Koblenz, Germany
Jordi Cabot was a member of the following PhD juries:
- Pau Giner. PhD committee, Technical University of Valencia, Valencia, Spain
- Marta Ruiz. PhD committee, Technical University of Valencia, Valencia, Spain
- José Eduardo Rivera. PhD committee, University of Málaga, Málaga, Spain
9.4. Involvement in the Eclipse Community
Participants: Jean Bézivin, Frédéric Jouault, Hugo Brunelière, Guillaume Doux, Kelly Garcés.
The AmmA platform components are made available on Eclipse.org by AtlanMod, which is involved in the Eclipse community as follows:
- Jean Bézivin is project lead of the GMT project.
- Frédéric Jouault is project lead of the M2M (Model-to-Model transformation) and TMF (Textual Modeling Framework) projects as well as lead on M2M/ATL (collaboration with Obeo), and TMF/TCS. He is also committer on GMT/AM3 and GMT/AMW.
- Hugo Brunelière is project lead of MDT/MoDisco (collaboration with Mia-Software), now including the former GMT/AM3. He is also committer on EMFT/EMF Facet (collaboration with Mia-Software) and GMT (responsible for the ongoing GMT project termination’s process). He co-organized the Eclipse DemoCamp Helios in Nantes on the 9th of July 2010 (collaborative work with Obeo), and participated as a presenter to the two main community events of the year: EclipseCon and Eclipse Summit Europe.
- Guillaume Doux is committer on GMT/AM3.
- Kelly Garcés is an M2M/ATL and GMT/AMW contributor.
9.5. Invitations and Participations to Seminars
Participants: Jean Bézivin, Jordi Cabot, Frédéric Jouault, Massimo Tisi.
The AtlanMod team gave three seminars at the National Institute of Informatics (NII) in Tokyo on several aspects of Model-Driven Engineering:
- Jean Bézivin gave a seminar entitled *Issues in Domain Specific Languages (DSLs) and Model Driven Interoperability (MDI).*
- Frédéric Jouault gave a seminar entitled *Management of Transformations.*
- Massimo Tisi gave a seminar entitled *Transforming Model Transformations.*
Jordi Cabot gave:
- A seminar entitled *Educating in MDE* at the University of the Basque Country
- A seminar entitled *New Advances in MDE* at the University of La Rochelle
- A talk entitled *Agile and MDE: friends or foes* at the Agile Tour event in Nantes
9.6. Teaching
Participants: Jean Bézivin, Hugo Brunelière, Jordi Cabot, Kelly Garcés, Frédéric Jouault, Massimo Tisi.
The members of the AtlanMod team have taught Model-Driven Engineering to several types of students:
- **MDE Diploma.** The AtlanMod team is in charge in 2010 of a 360-hour diploma on MDE\(^5\).
- **EMN GSI option.** Frédéric Jouault is in charge of a 24-hour course on MDE in this EMN program (final year).
Some members of the team also have teaching duties in other fields (e.g., operating system, object-oriented programming).
10. Bibliography
**Major publications by the team in recent years**
\(^5\)http://www.mines-nantes.fr/fr/Formations/Formation-specialisee/MDE
Publications of the year
Doctoral Dissertations and Habilitation Theses
Articles in International Peer-Reviewed Journal
International Peer-Reviewed Conference/Proceedings
National Peer-Reviewed Conference/Proceedings
Workshops without Proceedings
Scientific Books (or Scientific Book chapters)
References in notes
Miniphases: Compilation using Modular and Efficient Tree Transformations
Dmitry Petrashko
EPFL, Switzerland
dmitry.petrashko@gmail.com
Ondřej Lhoták
University of Waterloo, Canada
olhotak@uwaterloo.ca
Martin Odersky
EPFL, Switzerland
martin.odersky@epfl.ch
Abstract
Production compilers commonly perform dozens of transformations on an intermediate representation. Running those transformations in separate passes harms performance. One approach to recover performance is to combine transformations by hand in order to reduce the number of passes. Such an approach harms modularity, and thus makes it hard to maintain and evolve a compiler over the long term, and makes reasoning about performance harder. This paper describes a methodology that allows a compiler writer to define multiple transformations separately, but fuse them into a single traversal of the intermediate representation when the compiler runs. This approach has been implemented in the Dotty compiler for the Scala language. Our performance evaluation indicates that this approach reduces the running time of tree transformations by 35% and shows that this is due to improved cache friendliness. At the same time, the approach improves total memory consumption by reducing the object tenuring rate by 50%. This approach enables compiler writers to write transformations that are both modular and fast at the same time.
CCS Concepts
• Software and its engineering → Compilers
Keywords
compiler performance, tree traversal fusion, cache locality
1. Introduction
Contemporary compilers are complicated, consisting of thousands to millions of lines of code. The design of a compiler is constrained by multiple competing requirements, and it is challenging to satisfy all of them simultaneously. A compiler needs to be correct, and therefore easy to test. A compiler needs to be maintainable and easy to debug. To serve both of these needs, the design of the compiler should be modular. But a compiler also needs to be fast. Compiling a complicated programming language is computationally expensive, but software developers run their compilers many times during development, and waiting for the compiler hinders their productivity. A good compiler design provides both modularity and performance at the same time.
Balancing modularity and performance has been a difficult and long-running challenge in the compiler for the Scala programming language. Compilation times have been a frequent complaint from users. On many occasions, compiler developers had to make difficult trade-offs between modularity, maintainability, and performance.
Most compilers are composed of a sequence of transformations of some intermediate representation of the program being compiled. Often, a core part of the intermediate representation is an abstract syntax tree.
In this paper, we propose a new design for tree transformations that is both modular and efficient at the same time. This design is adopted in the Dotty compiler for Scala. We present the design to demonstrate its modularity and we empirically evaluate its performance in the Dotty compiler.
For modularity, each transformation of the intermediate representation should be expressed as an independent traversal of the abstract syntax tree. However, the tree is too large to fit in cache, so each traversal of the whole tree is expensive. Our solution enables the compiler developer to implement, test, and reason about transformations as separate traversals. However, our approach fuses the transformations performed at individual tree nodes so that multiple logical transformation passes (“Miniphases”) are performed in a single traversal of the abstract syntax tree.
The remainder of this paper is organized as follows:
• Section 2 shows the conflict between modularity and performance based on experience with Scala 2.x compilers;
• Section 3 presents performance characteristics that we targeted when designing the Miniphases framework;
• Section 4 introduces proposed design abstractions and describes the implementation inside the Dotty compiler;
• Section 5 presents results of experiments that evaluate the impact of the Miniphases framework on GC object promotion rate and CPU cache misses;
• Section 6 covers limitations of the framework and soundness of fusion;
• Section 7 discusses real-world experience with the framework, such as maintenance cost and the on-boarding process for new contributors;
• Section 8 presents related work;
• Section 9 concludes.
2. Background: Scala Compilers
The current Scala compiler has been the production compiler since version 2.0 of Scala in 2006. The Miniphase approach that we study in this paper is being implemented in Dotty, a next-generation compiler for experimenting with new language features and compiler designs for Scala.
Both compilers share the following common structure. The major internal data structures are trees, which describe the syntax of the program being compiled, and types and symbols, which describe semantic information and the relationships between program entities. The program being compiled is represented as a sequence of compilation units. Every compilation unit is a single source-file which may define multiple top-level classes.
The tree nodes in both compilers are logically immutable and do not have a link to their parent node. This allows trees to be reused in multiple locations and simplifies debugging, as no mutation of trees is possible. When trees are modified, they are rebuilt using copiers. An optimization avoids the copying in the (quite common) case where a transform returns a tree with the same fields as its input.
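As an illustration of that copy-avoidance optimization, the following sketch shows a copier for a hypothetical two-field node type; the names are illustrative and not taken from the Dotty code base.

```
// Sketch only: a "copier" that rebuilds a node, but returns the original
// object when every field is unchanged, avoiding an allocation.
final case class Pair(left: AnyRef, right: AnyRef)

def copyPair(tree: Pair, left: AnyRef, right: AnyRef): Pair =
  if ((left eq tree.left) && (right eq tree.right)) tree // reuse: nothing changed
  else Pair(left, right)                                 // rebuild otherwise
```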
Symbols are unique identifiers for definitions, including members and local variables, coming both from sources currently being compiled as well as their binary dependencies. Types are used not only to describe the type of an entity, but can also serve as references to program definitions such as methods or variables. In the Dotty compiler, this has been generalized to a point where all references to other program parts are embodied in types. This is possible, and convenient, because the Scala type system includes singleton types [18], which guarantee that an expression has the same value as some entity such as a field or variable, and are thus equivalent to references to those fields and variables. Types also encode constants [13] and higher-kinded types.
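As a small, self-contained illustration of a singleton type (not code from the compiler): the type `config.type` below is inhabited only by the object referred to by `config`, so a value of that type is effectively a reference to that specific entity.

```
object SingletonTypeExample {
  val config: AnyRef = new AnyRef
  // `alias` can only ever hold the same object as `config`.
  val alias: config.type = config
}
```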
An execution of the compiler is broadly divided into the front-end, the tree transformation pipeline, and the code generator. The front-end parses and type-checks source code, and generates trees annotated with type information. The tree transformations gradually desugar and lower the Scala-like code to a simpler form that is close to Java bytecode. The code generator emits Java bytecode from the lowered trees.
In this paper, our focus is on the middle phases which constitute the tree transformation pipeline.
2.1 Experience with the Scala Compiler
In this section, we review experience from the past ten years of developing the Scala compiler, focusing especially on modularity and performance.
The compiler that has been used for Scala versions 2.0 to 2.12 is organized as a sequence of phases. Each phase is a function that takes the tree of a compilation unit as input and returns a transformed tree as output. The implementation of each phase can be arbitrary Scala code, and there are no restrictions on how it, for example, traverses the tree. This Megaphase approach is illustrated in Figure 1. In the compiler for Scala version 2.12.0, there are 24 such phases, listed in Table 1.
The Megaphase approach was originally intended to be modular in that each phase is an independent transformation of the tree.
A drawback is that each phase that implements a specific language feature must traverse the entire tree to find uses of that feature. When a use of the feature is found, the phase transforms the relevant tree node. All ancestor nodes are also rebuilt because the tree is immutable. For example, the program in Listing 1 uses pattern matching, lazy vals, and mixins. To compile this program, at least five transformations are needed to implement the three language features, to create a constructor for the class Increment, and to normalize the method interfaceMethod to take an empty list of arguments. When implemented as independent Megaphases, each of these transformations must traverse the entire tree.
| phase name | id | description |
|---|---|---|
| parser | 1 | parse source into ASTs, perform simple desugaring |
| namer | 2 | resolve names, attach symbols to named trees |
| packageobjects | 3 | load package objects |
| typer | 4 | the meat and potatoes: type the trees |
| patmat | 5 | translate match expressions |
| superaccessors | 6 | add super accessors in traits and nested classes |
| extmethods | 7 | add extension methods for inline classes |
| pickler | 8 | serialize symbol tables |
| refchecks | 9 | reference/override checking, translate nested objects |
| uncurry | 10 | uncurry, translate function values to anonymous classes |
| fields | 11 | synthesize accessors and fields, including bitmaps for lazy vals |
| tailcalls | 12 | replace tail calls by jumps |
| specialize | 13 | @specialized-driven class and method specialization |
| explicitouter | 14 | this refs to outer pointers |
| erasure | 15 | erase types, add interfaces for traits |
| posterasure | 16 | clean up erased inline classes |
| lambdalift | 17 | move nested functions to top level |
| constructors | 18 | move field definitions into constructors |
| flatten | 19 | eliminate inner classes |
| mixin | 20 | mixin composition |
| cleanup | 21 | platform-specific cleanups, generate reflective calls |
| delambdafy | 22 | remove lambdas |
| jvm | 23 | generate JVM bytecode |
| terminal | 24 | the last phase during a compilation run |
Table 1: Phases in Scala 2.12.0
For example, Scala supports method definitions with multiple argument lists. The phase called uncurry was originally written to flatten the argument lists in such definitions into a single list of arguments. For the sake of performance, several unrelated transformations were added to this phase. In particular, this phase also finds try blocks used as subexpressions of some expression and lifts them into separate methods. This transformation is necessary because Java try blocks are statements, not expressions, so the JVM implementation of exception handlers does not provide a way to communicate an expression context from the try block to the exception handler. This transformation is completely unrelated to the original purpose of the uncurry phase. In the Dotty compiler, this transformation is done in its own Miniphase called LiftTry.
As another example, the Scala compiler contains a phase called refchecks, originally written to check that overriding methods conform to the types of the superclass methods that they override. Originally, the phase was intended to only inspect but not modify the tree. However, the current implementation of this phase performs multiple transformations of the tree. In particular, it replaces local (singleton) object definitions by local variables containing the object, it replaces calls to factory methods with calls to class constructors, and it eliminates conditional branches when their condition is statically known. None of these transformations are related to the original purpose of the refchecks phase, or to each other.
```
trait Interface {
  def interfaceMethod = 1
  lazy val interfaceField = 2
}

class Increment(by: Int) extends Interface {
  def incOrZero(b: Any) = b match {
    case b: Int => b + by
    case _      => 0
  }
}
```
Listing 1: Sample Scala program
In this example, each of the phases changes only a single node in the tree, yet five traversals are needed to change five nodes.
To improve performance, consecutive phases have been joined at the source level by hand, making the resulting phase contain code to perform multiple transformations at once. Even though the Megaphase design was intended to be modular, performance considerations pressured the developers to mix unrelated transformations in individual phases. This reduction in the number of phases makes the compiler faster, at a cost of hard-to-predict interactions between different transformations. Over the years, this has led to a code-base that is hard to maintain and evolve.
| phase name | id | description |
|---|---|---|
| FrontEnd | 1 | Compiler frontend: scanner, parser, namer, typer |
| sbt.ExtractDependencies | 2 | Sends information on classes' dependencies to sbt via callbacks |
| PostTyper | 3 | Additional checks and cleanups after type checking |
| sbt.ExtractAPI | 4 | Sends a representation of the API of classes to sbt via callbacks |
| Pickler | 5 | Generate TASTY info |
| FirstTransform | 6 | Some transformations to put trees into a canonical form |
| CheckReentrant | 7 | Internal use only: Check that compiled program has no data races involving global vars |
| RefChecks* | 8 | Various checks mostly related to abstract members and overriding |
| CheckStatic* | 9 | Check restrictions that apply to @static members |
| ElimRepeated* | 10 | Rewrite vararg parameters and arguments |
| NormalizeFlags* | 11 | Rewrite some definition flags |
| ExtensionMethods* | 12 | Expand methods of value classes with extension methods |
| ExpandSAMs* | 13 | Expand single abstract method closures to anonymous classes |
| TailRec* | 14 | Rewrite tail recursion to loops |
| LiftTry* | 15 | Put try expressions that might execute on non-empty stacks into their own methods |
| ClassOf* | 16 | Expand 'Predef.classOf' calls |
| TryCatchPatterns* | 17 | Compile cases in try/catch |
| PatternMatcher* | 18 | Compile pattern matches |
| ExplicitOuter* | 19 | Add accessors to outer classes from nested ones |
| ExplicitSelf* | 20 | Make references to non-trivial self types explicit as casts |
| CrossCastAnd* | 21 | Normalize selections involving intersection types |
| Splitter* | 22 | Expand selections involving union types into conditionals |
| VCInlineMethods* | 23 | Inlines calls to value class methods |
| IsInstanceOfEvaluator* | 24 | Issues warnings when unreachable statements are present in match/if expressions |
| SeqLiterals* | 25 | Express vararg arguments as arrays |
| InterceptedMethods* | 26 | Special handling of '==', '!=', 'getClass' methods |
| Getters* | 27 | Replace non-private vals and vars with getter defs (fields are added later) |
| ElimByName* | 28 | Expand by-name parameters and arguments |
| AugmentScala2Traits* | 29 | Expand traits defined in Scala 2.11 to simulate old-style rewritings |
| ResolveSuper* | 30 | Implement super accessors and add forwards to trait methods |
| ArrayConstructors* | 31 | Intercept creation of (non-generic) arrays and intrinsify |
| Erasure | 32 | Rewrite types to JVM model, erasing all type parameters, abstract types and refinements |
| ElimErasedValueTypes* | 33 | Expand erased value types to their underlying implementation types |
| VCElideAllocations* | 34 | Peephole optimization to eliminate unnecessary value class allocations |
| Mixin* | 35 | Expand trait fields and trait initializers |
| LazyVals* | 36 | Expand lazy vals |
| Memoize* | 37 | Add private fields to getters and setters |
| LinkScala2ImplClasses* | 38 | Forward calls to the implementation classes of traits defined by Scala 2.11 |
| NonLocalReturns* | 38 | Expand non-local returns |
| CapturedVars* | 39 | Represent vars captured by closures as heap objects |
| Constructor* | 40 | Collect initialization code in primary constructors |
| FunctionalInterfaces* | 41 | Rewrites closures to implement @specialized types of Functions |
| GetClass* | 42 | Rewrites getClass calls on primitive types |
| LambdaLift* | 43 | Lifts out nested functions to class scope, storing free variables in environments |
| ElimStaticThis* | 44 | Replace 'this' references to static objects by global identifiers |
| Flatten* | 45 | Lift all inner classes to package scope |
| RestoreScopes* | 46 | Repair scopes rendered invalid by moving definitions in prior phases of the group |
| ExpandPrivate* | 47 | Widen private definitions accessed from nested classes |
| SelectStatic* | 48 | Get rid of selects that would be compiled into GetStatic |
| CollectEntryPoints* | 49 | Find classes with main methods |
| CollectSuperCalls* | 50 | Find classes that are called with super |
| DropInlined* | 51 | Drop Inlined nodes, since backend has no use for them |
| MoveStatic* | 52 | Move static methods to companion classes |
| LabelDefs* | 53 | Converts calls to labels to jumps |
| GenBCode | 54 | Generate JVM bytecode |
Table 2: Phases in Dotty compiler. The horizontal lines indicate blocks of Miniphases(*) that constitute a single transformation.
Figure 2: Pipelining of a leaf-node through Miniphases
In this paper, we propose a framework that removes the need to make this trade-off. The proposed framework allows separate transformations to be defined in separate phases, yet applies the transformations in a common traversal of the tree for performance. Thus, it frees compiler developers from the pressure to combine unrelated transformations in the same phase.
Currently, the code of the Dotty compiler is modularized into 54 phases, listed in Table 2. We expect that the number of phases could increase to around 100 once the compiler is finished.
3. Target Performance Characteristics
While designing the framework, we had approximate performance characteristics in mind.
Based on user feedback on existing versions of the Scala compiler, we would like to be able to compile about 4000 lines per second (on a MacBook Pro 14", 2014). The current scalac compiler can compile 1000–2000 lines per second on such a machine, depending on the application being compiled.
The tree transformation pipeline uses about one-third of the compilation time. The rest of the time is spent in the typechecker and the code generator, which are independent of the tree transformation pipeline. Thus, the tree transformations should process 12000 lines of code per second. A typical line of code corresponds to about 12 tree nodes. We estimate that the compiler performs about 100 distinct transformations, each of which justifies a separate phase. We would like the framework to spend no more than 20% of the time traversing the tree, leaving 80% of the time for useful transformations. Thus, a Megaphase approach would need to visit each node in about 14 nanoseconds, or 28 CPU cycles. If we can perform the 100 transformations in only 10 traversals, we can use 140 nanoseconds, or 280 CPU cycles per tree node visit.
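These figures follow from a simple back-of-the-envelope calculation, reconstructed here; the 2 GHz clock rate used to convert nanoseconds to cycles is an assumption implied by the quoted cycle counts.

$$
\begin{aligned}
12\,000~\text{lines/s} \times 12~\text{nodes/line} &= 144\,000~\text{nodes/s}\\
\text{time budget per node} &= 1 / 144\,000~\text{s} \approx 6.9~\mu\text{s}\\
\text{traversal share (20\%)} &\approx 1.4~\mu\text{s per node}\\
\text{per visit with 100 traversals} &\approx 14~\text{ns} \approx 28~\text{cycles at 2 GHz}\\
\text{per visit with 10 traversals} &\approx 140~\text{ns} \approx 280~\text{cycles at 2 GHz}
\end{aligned}
$$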
4. Design
Listing 2 presents a simplified structure of the tree nodes used in the Dotty compiler. Each tree node has a withNewChildren method that creates a new node with a modified list of children.
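Listing 2 itself is not reproduced in this extract. A minimal sketch of such a node structure, assuming an abstract Tree with a list of children and a copy-on-write rebuild method, could look as follows; the concrete node types shown are illustrative, not necessarily those of the Dotty code base.

```
// Illustrative sketch; the real Dotty tree hierarchy is far richer.
abstract class Tree {
  def children: List[Tree]
  // Rebuild this node with new children, reusing `this` when nothing changed.
  def withNewChildren(newChildren: List[Tree]): Tree
}

case class Ident(name: String) extends Tree {
  def children = Nil
  def withNewChildren(newChildren: List[Tree]) = this
}

case class Select(qualifier: Tree, name: String) extends Tree {
  def children = List(qualifier)
  def withNewChildren(newChildren: List[Tree]) =
    if (newChildren.head eq qualifier) this
    else copy(qualifier = newChildren.head)
}
```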
The tree transformation pipeline has the overall structure given in Listing 3. For each phase, and for each compilation unit, the compiler applies the phase to the compilation unit. In the Miniphase approach, this high-level structure remains the same. However, multiple Miniphase transformations are fused together and performed in a single phase.
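Listing 3 is likewise missing from this extract. Its overall shape, as described above, is roughly the nested loop sketched below; `CompilationUnit` is a placeholder type, and `Phase` refers to the class shown in Listing 4.

```
// Sketch of the pipeline driver: apply every phase, in order,
// to the tree of every compilation unit.
case class CompilationUnit(tree: Tree)

def runPipeline(phases: List[Phase],
                units: List[CompilationUnit]): List[CompilationUnit] =
  phases.foldLeft(units) { (currentUnits, phase) =>
    currentUnits.map(unit => unit.copy(tree = phase.runPhase(unit.tree)))
  }
```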
To support this fusion, all Miniphases must traverse the tree in a consistent order. A Miniphase is therefore implemented as a phase whose runPhase does a postorder traversal over the tree, as shown in Listing 4. When visiting each node, it calls the transform method, which dispatches to a specific node transformation function depending on the type of the tree node. By default, the node transformations are all identity methods. An implementation of a specific transformation is expected to override the transformation methods of the types of node relevant to the transformation.
The advantage of imposing a uniform postorder traversal is that multiple Miniphases can now be fused together, after being combined by the functions presented in Listing 5.
abstract class Phase {
  def runPhase(t: Tree): Tree
  val runsAfter: Set[MiniPhase] = Set.empty
  def checkPostCondition(t: Tree): Boolean = true
}

class MiniPhase extends Phase {
  val valDefTransform: ValDef => Tree = id
  val defDefTransform: DefDef => Tree = id
  val identTransform: Ident => Tree = id
  ...
  val selectTransform: Select => Tree = id

  final def transform(t: Tree) = t match {
    case a: ValDef => valDefTransform(a)
    case a: DefDef => defDefTransform(a)
    ...
    case a: Select => selectTransform(a)
  }

  final override def runPhase(t: Tree): Tree = {
    val newChildren = t.children.map(sub => runPhase(sub))
    val reconstructed = t.withNewChildren(newChildren)
    transform(reconstructed)
  }
}
Listing 4: Definition of a Miniphase
private def chainMiniPhases(first: MiniPhase, second: MiniPhase) = {
  new MiniPhase {
    override val valDefTransform = { x: ValDef =>
      val newTree = first.valDefTransform(x)
      second.transform(newTree)
    }
    ...
    // similar to valDefTransform for all node kinds

    override val runsAfter: Set[MiniPhase] =
      second.runsAfter - first ++ first.runsAfter

    override def checkPostCondition(t: Tree) =
      first.checkPostCondition(t) &&
      second.checkPostCondition(t)
  }
}

def combine(a: Array[MiniPhase]): MiniPhase =
  a.reduceRight((phase, acc) =>
    chainMiniPhases(phase, acc)
  )
Listing 5: Fusion algorithm for Miniphases
The fused Miniphase traverses the tree only once. While visiting each tree node, it applies the transformations implemented by all of its constituent Miniphases. The valDefTransform method applies the valDefTransform method of the first Miniphase (and similarly for other node types), but for subsequent Miniphases, it must call the general transform method, because the first Miniphase might have changed the type of the node. This is illustrated in Figures 2 and 3. In Figure 2, the blue leaf node is transformed by three Miniphases (yellow, green, orange), yielding an orange node, before any of the other blue nodes are processed. In the next step, in Figure 3, the parent of the now orange node is processed by the same three Miniphases.
A set of fused Miniphases has the following properties, which must be taken into account by implementors:
- The transform method is called on all nodes of the compilation unit in a post-order traversal order.
- When the transform method of Miniphase $m$ is called on a tree node $t$, $t$ has already been transformed by all Miniphases that come before $m$, and the children of $t$ have been transformed by all Miniphases that have been fused with $m$, including ones that come both before and after $m$. In Figure 3, the yellow and green Miniphases process a node whose child is already orange, even though the orange Miniphase comes after the green one. Though it is surprising that Miniphase $m$ “sees the future” in its child subtrees, we have found that this rarely creates any problems, since most phases simplify the trees and introduce new invariants and rarely break existing ones.
We will discuss in Section 6 the criteria that developers of transformation phases must consider in deciding whether a phase can be fused with other phases.
Two important optimizations can be applied to the basic fusion technique. Both these optimizations are shown in the modified version of the Miniphase fusion implementation given in Listing 6.
First, since most Miniphases transform only a small subset of the types of tree nodes, the fusion code explicitly checks (see Listing 6) whether the transformation in one of the Miniphases is the identity, and if so, the transformation in that Miniphase is skipped.
Second, since most transformations do not change the type of the tree node, a fast path that explicitly checks for this case was added that avoids the dispatch in the transform method, and instead calls the node transformation method for the relevant node type directly.
4.1 Prepares
The Miniphase framework presented so far is sufficiently general to implement all but 4 Miniphases present in the Dotty compiler. The remaining 4 phases, however, perform transformations that depend on the ancestors of the current tree node, so it may seem that a post-order traversal is not ideal.
private def chainMiniPhases(first: MiniPhase, second: MiniPhase) = {
  new MiniPhase {
    override val valDefTransform =
      if (first.valDefTransform == id)
        second.valDefTransform
      else if (second.valDefTransform == id)
        first.valDefTransform
      else { x: ValDef =>
        val newX = first.valDefTransform(x)
        newX match {
          case newX: ValDef =>
            second.valDefTransform(newX)
          case other: Tree =>
            second.transform(other)
        }
      }
    ...
    // similar to valDefTransform for all node kinds
  }
}
Listing 6: Optimization for identity transforms and for nodes that keep the same node type
class MiniPhase extends Phase {
  ...
  val valDefPrepare: ValDef => Unit = empty
  val defDefPrepare: DefDef => Unit = empty
  val identPrepare: Ident => Unit = empty
  ...
  val selectPrepare: Select => Unit = empty
}
Listing 7: MiniPhase extended with prepares
One example is the LiftTry transformation which was described in Section 2.1. This transformation lifts try blocks within an expression into independent methods. When it encounters a try block, this phase needs to know whether the block is part of a larger expression, and thus it needs information about its ancestors in the tree.
In order to accommodate such phases without abandoning the consistent post-order traversal that enables phase fusion, prepare methods have been added to the framework that mutate the internal state of a phase when entering a given type of subtree. Specifically, the LiftTry phase maintains a boolean state which is an over-approximation of whether the current subtree is inside an expression that requires try blocks to be lifted into methods. Before processing a tree node using the transform method, the runPhase method first calls the corresponding prepare method to update the state of the MiniPhase.
The chainMiniPhases method now also needs to chain prepares, as shown in Listing 8.
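Listing 8 is not included in this extract. Chaining prepares presumably just runs the prepare hooks of both constituent phases in phase order before the fused transform visits the node, roughly as in this fragment of the combined MiniPhase (a sketch following the shape of Listing 5, not the actual Dotty code).

```
// Inside the `new MiniPhase { ... }` built by chainMiniPhases:
override val valDefPrepare: ValDef => Unit = { x =>
  first.valDefPrepare(x)   // update the internal state of the earlier phase
  second.valDefPrepare(x)  // then the internal state of the later phase
}
// ... and similarly for the prepare hooks of all other node kinds.
```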
In the current implementation, there is a separate prepare method for each type of tree node, just as there are node-specific transform methods. Only very few phases have non-empty prepare methods, and those that do need to prepare for most kinds of tree node types. Therefore, it may have been sufficient (and simpler) to only have a single prepare method that is executed for every node regardless of its type.
4.2 Initialization and Finalization of Phases
Later, during development, we found it helpful to extend Miniphases with the ability to prepare for a compilation unit and to transform a compilation unit. compilationUnitPrepare is the proper place to initialize the internal state of the phase, such as populating global references used by the phase, while compilationUnitTransform is a natural place to clean up the internal state to avoid a high memory footprint and memory leaks.
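The exact signatures of these hooks are not given in the text; a plausible sketch, with assumed types, is the following addition to MiniPhase.

```
// Sketch: per-compilation-unit hooks on MiniPhase (signatures are assumptions).
val compilationUnitPrepare: CompilationUnit => Unit = { unit =>
  // Initialize per-unit internal state, e.g. resolve global references
  // that the phase will need while transforming this unit.
  ()
}
val compilationUnitTransform: Tree => Tree = { tree =>
  // Runs once after the unit has been traversed; also the natural place to
  // clear internal state so the phase does not retain memory between units.
  tree
}
```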
5. Evaluation
We have performed an empirical evaluation of the performance benefits of the Miniphase approach. We compare the current version of the Dotty compiler, which uses Miniphases, with a modified version in which the groups of Miniphases were split up, so that each MiniPhase performed a separate tree traversal, as in the Megaphase approach. We ran both versions of the compiler on two significant input programs: the Scala standard library (34 000 LOC) and the Dotty compiler itself (50 000 LOC). In addition to the overall running time, we compared data from the JVM garbage collector, specifically the number of objects allocated and promoted to the old generation, and data collected using low-level CPU counters to explain cache behavior.
Figure 4: Execution time of tree transformation passes, typechecker, and code generation backend in Miniphase and Megaphase versions of the Dotty compiler.
The benchmarks were executed on a server with two Intel(R) Xeon(R) CPU E5-2680 v2 @ 2.80GHz CPUs, running at a fixed frequency of 2.4 GHz with HyperThreading disabled. This CPU has a 25 MB L3 cache. Each of the 10 cores in this CPU additionally has a 256 KB L2 cache and 32 KB L1-icache and L1-dcache. In this architecture, the L2 cache is not inclusive and the L3 cache is inclusive of all levels above it: data contained in the core caches must also reside in the last-level cache [6].
This server has 64 GB of 4-channel memory and runs 64-bit Ubuntu Linux with kernel version 4.4.0-45-generic. We used the Oracle Hotspot Java VM version 1.8.0_111, build 25.111-b14. In order to ensure consistency between runs and reduce variance due to disk seeks, all data needed for compilation is stored in tmpfs, a Linux filesystem that is an in-memory store.
5.1 Overall Time
Figure 4 shows the overall running time of the frontend, tree transformation pipeline, and backend. The tree transformations use a significant amount of the overall compilation time: in the Megaphase approach, they take more time than either the frontend or the backend. The graph also shows that Miniphases decrease the time taken by the tree transformations by 37% when compiling the standard library and 34% when compiling the Dotty compiler. Overall, the total compilation time (including the frontend and backend) decreases by 15% and 16%, respectively. In the following sections, we look in more detail at the likely reasons for this improvement.
5.2 GC Object Allocation and Promotion
In this section, we investigate the performance of the garbage collector. The reported values were obtained by parsing the GC logs obtained by passing -XX:+PrintGCDetails -XX:+PrintGCTimeStamps to the Oracle Hotspot Java VM. The entire compiler pipeline was executed 50 times from a cold start, which represents a common setup of batch compilation in a big project.
We measured how many managed objects are allocated and then promoted to the old generation by garbage collection. We performed our measurements during the compilation of the compiler itself and the standard library.
Figure 5 shows the total size of the objects allocated in the tree transformation pipeline. Miniphases reduce the amount of memory allocated by 5% during compilation of the Dotty compiler itself and 9% during compilation of the Scala standard library. This is explained by the fact that we need to recreate a path from the modified part of the tree to the root less frequently. It is important to note that the absolute amount of memory allocated is high, between 7 and 9 GB, so even a decrease of 9% is a lot of memory. Note that this is the total size of objects allocated during the entire execution of the compiler, not the total consumed amount of memory at any particular point in time.
The decrease in the objects promoted to the old generation is much more significant, even in a relative sense, as shown in Figure 6. The reduction thanks to Miniphases is a
full 49% and 55% for the standard library and Dotty compiler, respectively. In absolute terms, Miniphases reduce the promoted objects by over 1 GB in both cases. Many tree nodes that are created in a Miniphase are replaced by subsequent Miniphases in the same traversal, so they die young. In contrast, in the Megaphase approach, a node created in one phase is not replaced until the next traversal of the whole tree, and by that time, the node may already have been promoted to the old generation.
5.3 CPU Performance Counters
Focusing now on CPU behaviour, we used the perf utility that is shipped with Ubuntu Linux 16.04 with Linux kernel 4.4.0-45-generic to measure low-level CPU counters. This measurement approach is less intrusive than tracing or sampling profiling and allows us to explain details of how the code was executed by the CPU.
To isolate the tree transformation pipeline from the front end and the code generator, we made two modified versions of the Dotty compiler: one stops execution after the front end, and the other stops execution after the tree transformations. The data collected during 50 executions of each of these versions was very consistent, with a variability less than 0.5% across runs. We subtracted the counts of the two versions to approximate the effect of the tree transformations on the performance counters.
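As an illustration of this subtraction (a sketch of ours, not the authors' scripts; the counter names are examples), the per-counter attribution can be computed as follows:

```scala
// Sketch: attribute perf-counter totals to the tree transformations by
// subtracting the counters of a run that stops after the frontend from
// those of a run that stops after the tree transformations.
object IsolateTransformCounters {
  def transformsOnly(afterFrontend: Map[String, Long],
                     afterTransforms: Map[String, Long]): Map[String, Long] =
    afterTransforms.map { case (counter, value) =>
      counter -> (value - afterFrontend.getOrElse(counter, 0L))
    }

  def main(args: Array[String]): Unit = {
    val frontend   = Map("instructions" -> 100L, "cycles" -> 150L)
    val transforms = Map("instructions" -> 180L, "cycles" -> 290L)
    println(transformsOnly(frontend, transforms))
    // Map(instructions -> 80, cycles -> 140)
  }
}
```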
Figure 7 shows the number of instructions executed, the number of clock cycles taken, and the number of stalled cycles during the execution of the tree transformations. The total number of instructions decreased by 10%, but the number of cycles used to execute those instructions decreased by a much larger 35%.
This is explained by Figure 8a, which shows that Miniphases decreased the cache miss rate by 47%, 17% and 40% for L1 cache loads, L1 cache stores and last level cache loads, respectively. Figure 8b indicates that the total number of cache accesses decreased by only 10%. Figure 8c shows that the total number of accesses that miss all on-chip caches and access main memory decreased by 47%, from 512 million to 278 million accesses.
Figure 8d presents the L1-instruction cache miss count, which decreased by 24%. We believe that this is explained by the fact that CPU caches are inclusive and eviction from last level cache would also trigger eviction from lower level caches. By improving the hit rate in data caches, Miniphases also indirectly reduce evictions from the L1-instruction cache.
We conclude that the main reason for the performance improvements of the Miniphase approach compared to the Megaphase approach is that the Miniphase approach makes more effective use of the CPU caches.
5.4 Comparison with Existing Production Compiler
To put the running times of the Dotty compiler with Miniphases in perspective, Figure 9 compares its performance to the existing Scala production compiler, scalac, which implements the Megaphase approach. It must be noted that they are different compilers, so confounding factors other than Miniphases also influence differences in their performance. Nevertheless, we observe that Dotty spends only 42% and 39% as much time in tree transformations as scalac when compiling the standard library and Dotty, respectively. Dotty’s type checker is also faster than that of scalac, though this is unrelated to Miniphases, and the performance of the backends is about the same. Overall, Dotty compiles the standard library and itself in only 51% and 58% of the time taken by scalac.
6. Soundness and Limitations of Phase Fusion
6.1 Fusion Criteria
We do not formally define criteria that would give soundness guarantees that fusing phases does not change their behaviour. To be sound, any such formal criteria would have to be conservative. They can give guarantees for simple programs in which tree traversals affect a small number of well-behaved data structures, but they would be too conservative to apply to the setting of a complex production compiler in which the tree traversals indirectly interact with files, tools external to the compiler itself and other kinds of global mutable state.
Instead, we provide high-level criteria that must be interpreted with an understanding of the overall design of the compiler and the high-level relationships between the major global data structures. The following requirements are sufficient for a Miniphase to be fusible into a block:
1. A phase does not break invariants registered by previous phases in the same block.
2. A phase can successfully transform trees whose children have already been transformed by future phases in the same block.
3. A phase does not require that previous phases in the same block have finished transforming the entire compilation unit. Usually, when this is required, it is due to global data structures outside of the tree being transformed, such as the symbol table.
We have built a system for expressing phase invariants and postconditions that are enforced by dynamic checkers during testing. In our experience, these checkers are able to catch cases when these three requirements for phase fusion are violated. We will discuss these checkers in Section 6.3, but first, we examine examples of phases that are not fused because they violate the fusion criteria.
6.2 Example Violations of Fusion Criteria
Ideally, all the Miniphases in the compiler would be fused into a single traversal of the tree. In practice, our compiler has 6 separate blocks of Miniphases, marked with (*) in Table 2. Miniphases in the same block are fused together, but each block requires a separate traversal of the tree. Here, we describe some of the reasons that prevented us from fusing all Miniphases.
We have found that phases that violate rule 1 are uncommon. While we did have phases that relax some invariants of previous phases, we were able to implement them in a more maintainable way following rule 1.
6.2.2 Rules 2 and 3 Example: Erasure
Since Java bytecode does not have generic types, a Scala compiler needs to erase type arguments from generic types. The phase that performs type erasure modifies the types of many trees. Since types are the main carriers of semantic information, it would be difficult to write other transformation phases that work on trees with both unerased and erased versions of types, violating rule 2. The erasure phase also makes assumptions about trees that it sees; in particular, it assumes the absence of union types. Union types are eliminated by the splitter phase, which is required to transform the entire compilation unit to eliminate all of them. Therefore, the type erasure phase introduces a split between groups of Miniphases because it violates both rules 2 and 3.
Since the criteria from Section 6.1 are not verified statically, the Miniphase framework uses a system of dynamic assertions exercised by a large test suite to ensure correctness, and to localize a bug to a specific phase.
Each Miniphase defines postconditions that must hold about the tree nodes after the phase has transformed them. Runtime tests of the postconditions are implemented in the checkPostcondition method (Listing 4) of the Miniphase. The intended meaning of the postconditions is that if one Miniphase establishes a postcondition, all later Miniphases must also preserve it.
During testing, a checker pass is inserted between phases. A simplified version of its implementation is shown in Listing 9. The pass first checks various global invariants that are expected to hold between any two phases. For example, the checker removes all types from the tree, reconstructs them bottom-up, and checks that the reconstructed types are the same as the types that were associated with the tree. After checking global invariants, the checker pass runs the postcondition checks of not only the last executed Miniphase, but also of all the Miniphases that executed before it. This ensures not only that each Miniphase has established its postconditions, but also that no other Miniphases have invalidated them. In practice, we have found this mechanism to be very effective in localizing bugs to a specific Miniphase. In particular, bugs that involve interactions between different Miniphases would be difficult to track down without these checks. If a postcondition of phase X fails after executing phase Y, we know immediately that phase Y breaks the invariant that phase X is intended to establish. For example, if a phase reintroduces a tree that contains pattern matching after the phase that eliminates pattern matching, we know immediately which phase to blame.
Miniphases also define preconditions by reference to the postconditions of other Miniphases. That is, a Miniphase specifies which other Miniphases must execute before it. For example, the phase that removes pattern matching requires the tail recursion elimination phase to finish processing all the trees before it can run. Any preconditions specific to a Miniphase are usually the postconditions of some earlier Miniphase. To specify preconditions, a Miniphase defines two methods. The runsAfter method returns a set of Miniphases that must precede the current Miniphase. The runsAfterGroupsOf method returns a set of Miniphases that must strictly precede the fused Megaphase containing the current Miniphase. In other words, a Miniphase in runsAfterGroupsOf must completely finish transforming the tree before the current Miniphase can run. These two methods are used to specify the ordering criteria between Miniphases, in particular rule 2 from Section 6.1. If Miniphase X requires the postcondition of Miniphase Y to hold for only the node that X is immediately processing, X includes Y in runsAfter. If X requires the postcondition of Y to hold for all nodes of the tree, in particular for the children of the node that X is immediately processing, X includes Y in runsAfterGroupsOf. The phase ordering requirements specified by these two methods are checked when the Dotty compiler runs, not when it is compiled, but they are checked as soon as the compiler starts up, so any violations are caught immediately, independent of any test input.
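As a concrete illustration, the following hypothetical Miniphase shows how these hooks fit together. It is a sketch of ours, not code from the Dotty sources: the class name, the transformMatch hook, and the referenced phases elimRepeated and tailRec are assumptions made for the example, while checkPostCondition, runsAfter, and runsAfterGroupsOf are the methods described above.

```scala
// Hypothetical Miniphase (illustrative names, not from the Dotty sources).
class PatternMatcherElimination extends Miniphase {
  // runsAfter: the current node must already satisfy ElimRepeated's
  // postcondition, but the whole compilation unit need not be done (rule 2).
  override def runsAfter: Set[Miniphase] = Set(elimRepeated)

  // runsAfterGroupsOf: tail recursion elimination must have finished
  // transforming the entire compilation unit before this phase runs (rule 3).
  override def runsAfterGroupsOf: Set[Miniphase] = Set(tailRec)

  // Transformation hook: rewrite pattern matches into lower-level trees.
  override def transformMatch(tree: Match): Tree =
    translateMatch(tree)

  // Postcondition enforced by TreeChecker (Listing 9) for this phase and
  // every phase that runs after it: no Match nodes remain in the tree.
  override def checkPostCondition(tree: Tree): Boolean =
    !tree.isInstanceOf[Match]
}
```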
The runtime overhead of the dynamic checks depends significantly on the specific code being compiled, but the slowdown in the running time of the compiler is roughly 1.5x. The dynamic checks are enabled on every run of the test suite. The Dotty compiler has an extensive test suite that includes the tests from the test suite of the current production scalac compiler.
A similar dynamic invariant checking pass was initially implemented in the current production scalac compiler. However, in practice, it has not been maintained in a passing state: some Megaphases invalidate the postconditions of other Megaphases. For example, the pattern matching elimination phase creates references to symbols that are created only later, by a later phase. In general, because each Megaphase does multiple unrelated things, and because related transformations need to be split into different Megaphases, it has proven infeasible in practice to allocate to specific Megaphases the postconditions that should logically belong to the individual transformations.
7. Discussion
In this section, we discuss further experience with the Miniphase framework, including the onboarding process, code readability and maintenance, and common patterns that work well together with Miniphases.
Listing 9: Simplified version of TreeChecker
```scala
class TreeChecker(previousPhases: List[Phase], typer: Typer) extends Phase {
  def runPhase(t: Tree): Tree = {
    t.forAllSubtrees { subt =>
      // Global invariants expected to hold between any two phases:
      // stripping the types and re-typing the subtree bottom-up must
      // reproduce the types stored in the tree.
      val reTyped = typer.typeCheck(subt.stripTypes)
      assert(reTyped.hasSameTypes(subt))
      assert(checkNoDoubleDefinitions(subt))
      assert(checkValidJVMNames(subt))
      assert(checkNoOrphanTypes(subt))
      /* other non-phase-specific sanity checks */

      // Postconditions of every previously executed Miniphase must still hold.
      previousPhases.foreach { phase =>
        assert(phase.checkPostCondition(subt))
      }
    }
    t
  }
  ...
  // implementations of helper methods such as
  // checkNoDoubleDefinitions
}
```
7.1 Readability
The Scala and Dotty compilers are developed by several disconnected teams and open-source contributors. Most open-source contributors contribute their time voluntarily and wish to start contributing quickly, without a long ramp-up just to get started. Most contributors want to solve the specific problem that bothers them. With the Miniphase framework, contributors find the phases easier to understand for two reasons:
First, each Miniphase is smaller and does a single transformation. A new developer needs to initially understand only one small phase, rather than a large Megaphase in which multiple different transformations are interleaved. This leads to less coupling and easier understanding.
Second, the Miniphase framework insists on a specific uniform structure of phases. While this makes it harder to write the initial implementation in this framework, it helps over the long term by making phases have similar structure and be easier to understand and maintain.
This is a very substantial improvement over the situation in the Scala 2.0-2.12 compiler, where fusing multiple complex phases together by hand made it very hard to keep track of what every phase does and how.
7.2 Predictable Performance Characteristics
The Miniphase approach imposes a specific structure that makes it easy for external contributors to join and reason about performance of a Miniphase. In most cases, the obvious solution that is suggested by the framework is the most efficient. This is very helpful in the presence of open-source contributors, since it reduces the number of iterations needed to polish the performance of contributed code.
7.3 Onboarding Process
Open-source contributors frequently ask how they can get involved and learn about internals of the compiler. A good way for new contributors to start working on the compiler is by extending either the tree checkers or phase postconditions. The new contributor learns which properties can be relied on in which phases, and can check her assumptions in test executions of the compiler. At the same time, the contributor improves the compiler with stronger checkers that make it possible to catch bugs earlier and simplify development and debugging. Moreover, the added postcondition checkers can serve as documentation of invariants for other new contributors.
7.4 Experience with Contributors
When a new phase is being developed, we need to decide where the phase should be run in the pipeline. Deciding whether two phases should be fused is a complex question that depends on how much high-level information the phase needs and whether it can co-exist in the same phase block. The former is commonly trivial while the latter is covered by the rules presented in Section 6.
Based on our experience, most people who contribute to the compiler are at the extremes: either they are experts who have been working on the compiler for a long time and know the entire pipeline, or they come to make a small contribution once in a while. While the first group doesn't need any guidance on where to place a phase, the second group commonly starts by discussing the idea of a phase on a mailing list, in an online chat, or in personal communication. In this discussion, experts suggest how the phase should be written and where it should be in the pipeline.
After an initial implementation is written, it is contributed as a pull request to a GitHub repository and goes through review by experts maintaining the repository. At the same time, continuous integration systems run tests that verify that pre- and post-conditions hold for the entire test suite, which includes the compiler itself, the standard library, and several thousand programs contributed by the community.
8. Related Work
8.1 Deforestation and Stream Fusion
The original inspiration for the Miniphase approach was prior work on “deforestation” [3, 5, 24]. These approaches compose multiple functions that transform lists or trees without explicitly constructing the intermediate data structures between the composed functions. A limitation of these general approaches is that the functions to be composed must be in so-called treeless form. In the specific case of a Scala compiler, this condition is violated because the tree transformations inspect nodes nested inside subtrees and construct new subtrees consumed by subsequent phases. Thus, the general deforestation technique cannot be applied because it would change the semantics of the transformations.
8.2 Sound Fusion in Tree Traversal Languages
In this section, we describe several domain-specific tree traversal languages and frameworks that are more general than the functions that can be fused by deforestation, but still sufficiently restricted to enable static analysis of the patterns of data accesses in a traversal. This enables automatic sound reordering of the node visits in multiple traversals.
Attribute Grammar Scheduling
Attribute grammars [12] are a formalism that defines computation on trees as evaluation of a set of pure functions for each node that may depend on the attribute values computed for other nodes. The formalism has been applied in many practical compiler implementations over the decades. As an example, JastAdd [4] is a recent attribute grammar framework that continues to be actively maintained, developed, and extended. A key problem is to find an order in which to evaluate the attributes of tree nodes that respects the dependencies between the attribute functions. For a particular parse tree, it suffices to topologically sort the pairs of tree nodes and their attributes, since the dependencies are explicit in the attribute evaluation functions. Various restricted classes of attribute grammars have been defined for which an evaluation order can be pre-computed ahead of time, independently of a particular parse tree. Some of these classes can be evaluated in a single pass over the parse tree, with a single visit of each node [10, 11, 15]. More general classes of attribute grammars require multiple passes, and algorithms have been proposed for finding evaluation orders that minimize the number of passes [1, 22]. These techniques have been extended to evaluation of attributes of multiple tree nodes in parallel [9]. Meyerovich et al. [16] combine parallel attribute scheduling techniques with programmer input in the form of sketches to synthesize GPU and multicore CPU implementations of tree-manipulating programs.
Locality in Tree Traversals
Techniques have been proposed to rewrite recursive programs that traverse trees to enhance data locality [7, 8, 25]. Jo and Kulkarni [7] proposed point blocking, a transformation similar to loop interchange, in which an outer loop of multiple tree traversals is interchanged with the traversal of the tree nodes, yielding a single traversal that executes the previously outer loop at each node that it visits. The transformation is applicable when the outer loop is parallelizable. Jo and Kulkarni [8] extended the idea of point blocking into a similar but more sophisticated technique, traversal splicing, that improves locality of irregular tree traversals that traverse only a subset of the nodes of the tree. Weijiang et al. [25] defined a static dependence test for a domain specific language for tree traversals. The dependence test analyzes tree access path expressions in the code that visits each tree node to determine which visits of which nodes can be reordered. The dependence test makes it possible to soundly apply point blocking, traversal splicing, and parallelization to a larger set of tree traversal algorithms.
MADNESS Passes
Rajbhandari et al. [20, 21] propose and prove correct a technique that is able to compose recursive operators that are implemented using a set of primitive recursive operators. They demonstrate significant speedup obtained by fusion. Their approach is able to find an optimal schedule for fusion, while in our case the schedule is predefined. Compared to the dependence test of Weijiang et al. [25], the MADNESS system is more general in that it applies to both pre-order and post-order traversals.
The main benefit of the techniques described in this section is that they identify cases when soundness of fusion can be proven automatically. There are two reasons why they cannot be applied in the Dotty compiler. First, Dotty transformations modify the tree and construct new subtrees. Second, the implementations of Miniphase transformations are not purely functional: they manipulate non-local mutable data structures such as symbol tables, and they even cause additional files to be parsed and type-checked and transformed when they are referenced.
8.3 Other Pass Fusion Approaches
ASM [2] is a Java bytecode instrumentation and emission library based on the visitor design pattern. A visitor transforms instructions in a sequence of bytecode instructions. ASM allows multiple visitors to be fused, so that part of the bytecode sequence is processed by all of them before continuing with the rest of the sequence. The obvious difference is that ASM transforms sequences, while Miniphases transform trees. For sequences, there is one obvious traversal order, while for trees, various traversal orders are possible. Miniphases impose a post-order traversal but provide the mechanism of prepares, discussed in Section 4.1, to implement transformations that would otherwise require different traversal orders. Another difference is that in Dotty, the meaning of a tree often depends significantly on its subtrees, so the issue of a phase observing children that have already been transformed by other phases is more important. In contrast, the meaning of a bytecode instruction usually does not depend on preceding instructions, at least not directly. Instead, it depends strongly on context, such as the state of the JVM operand stack, which ASM transformers usually maintain in additional data structures, not as part of the instructions themselves. In contrast, in the tree-based representation of Dotty, information about the operands of an expression node is associated with its child nodes. In general, both the input and the output of an ASM pass is JVM bytecode. In contrast, the purpose of the transformations in Dotty is to translate an intermediate representation similar to Scala source code to one similar to Java bytecode, so the types of nodes that appear in the tree gradually change as the tree passes through the sequence of transformations.
Lepper [14] proposes to optimize a sequence of traversals of trees by multiple visitors by detecting which visitors are interested in processing which nodes of the tree. This is done by using reflection to identify visitors that do not override the default visit methods for certain types of tree nodes. The optimized traversal can then skip traversing entire subtrees whose types ensure that none of the visitors are interested in visiting any of their nodes. A key difference is that these optimized visitors only traverse the tree, but do not generate different trees to pass from one visitor phase to the next.
8.4 Compilers Based on Tree Transformation Passes
The Nanopass Framework [23] is a compiler framework intended for teaching courses on compiler construction. In the framework, each individual transformation is done in a separate pass. Fusing the phases is suggested as possible future work. Due to practical considerations when compiling a complex language such as Scala, we need to have additional prepare passes, which the Nanopass Framework does not have.
Like Dotty, the Polyglot compiler [17] is structured as a sequence of passes that successively transform trees, in this case from various extensions of Java to Java itself. Like in Dotty, tree nodes are immutable, so each pass that replaces a tree node with a new one rebuilds the spine of the tree up to the root. The Miniphase approach of fusing tree transformations could also be used to improve the performance of Polyglot.
9. Conclusion and Future Work
The Miniphase approach removes the need to choose between modularity and efficiency in the implementation of tree transformations in a compiler. The resulting compiler is both more modular and more efficient than one using the Megaphase approach. This methodology simplifies both development and maintenance. Our evaluation on real code bases with a real-world Scala compiler indicates that fused Miniphases speed up tree transformations by up to 1.6x. Our detailed evaluation shows that the biggest contributing factor is improved cache friendliness, which leads to better CPU utilization.
Our approach is applicable not only to trees, but can be extended to directed acyclic graphs. We are also interested in using Miniphase-based approaches for executing independent compiler phases in parallel.
While our work was primarily focused on a compiler for Scala, we believe that the approach is general enough to be used in other compilers which share the same internal representation for considerable parts of their pipelines.
Acknowledgments
We want to thank Iulian Dragos for sharing his experience based on 12 years of work on Scala compilers, starting before the time of Scala 2.0, even before the Scala compiler had bootstrapped itself. His knowledge was very helpful in understanding the evolution of the Scala 2.0-2.12 codebase.
This research was supported by the Natural Sciences and Engineering Research Council of Canada.
We are grateful to the other researchers and students who use and base their work on the Dotty compiler, and to the anonymous reviewers for their valuable feedback and helpful comments.
References
Software-specific Named Entity Recognition in Software Engineering Social Content
Deheng Ye, Zhenchang Xing, Chee Yong Foo, Zi Qun Ang, Jing Li, and Nachiket Kapre
School of Computer Engineering
Nanyang Technological University, Singapore
Email: ye0014ng@e.ntu.edu.sg, {zxing, fooc0029, zang004, jli030, nachiket}@ntu.edu.sg
Abstract—Software engineering social content, such as Q&A discussions on Stack Overflow, has become a wealth of information on software engineering. This textual content is centered around software-specific entities, and their usage patterns, issues-solutions, and alternatives. However, existing approaches to analyzing software engineering texts treat software-specific entities in the same way as other content, and thus cannot support the recent advance of entity-centric applications, such as direct answers and knowledge graph. The first step towards enabling these entity-centric applications for software engineering is to recognize and classify software-specific entities, which is referred to as Named Entity Recognition (NER) in the literature. Existing NER methods are designed for recognizing person, location and organization in formal and social texts, which are not applicable to NER in software engineering. Existing information extraction methods for software engineering are limited to API identification and linking of a particular programming language. In this paper, we formulate the research problem of NER in software engineering. We identify the challenges in designing a software-specific NER system and propose a machine learning based approach applied on software engineering social content. Our NER system, called S-NER, is general for software engineering in that it can recognize a broad category of software entities for a wide range of popular programming languages, platform, and library. We conduct systematic experiments to evaluate our machine learning based S-NER against a well-designed rule-based baseline system, and to study the effectiveness of widely-adopted NER techniques and features in the face of the unique characteristics of software engineering social content.
I. INTRODUCTION
Social online communities, such as Stack Overflow and Quora, play a significant role in knowledge sharing and acquisition for software developers [1]. The user-generated content in these websites has grown into an important information resource on the Web that complements traditional technical documents [2]. A fundamental task for reusing content in these websites is searching for discussions of a specific software entity (e.g., a library, a tool, an API), to find good usage patterns, bug solutions, or alternatives. Existing approaches treat software engineering social content as textual documents, and use vector space model (e.g., TF-IDF), topic model (e.g., LDA [3]), or neural network language model (e.g., word embedding [4]) to index the content.
Existing approaches have an important limitation: uniform importance assumption. That is, mentions of software-specific entities in the content are treated in the same way as other regular textual content. This assumption may result in less desirable indexing of the content, because traditional information retrieval concepts such as term frequency do not apply naively to recognize essential domain-specific entities [5, 6, 4].
The social-technical nature of software engineering social content available on social information sharing websites calls for innovative forms of information extraction, organization, and search. A very desirable goal would be to organize the information in a knowledge base about different software-specific entities and relationships between entities. Such a knowledge base can be represented as a graph, also known as a knowledge graph [7]. Search systems can exploit a knowledge graph not only to find the content that actually discusses a particular software-specific entity, but also to display additional facts and direct information about the central entity in a query [8]. As the first step towards knowledge graphs and entity-centric search systems for the software engineering domain, we must be able to recognize mentions of software-specific entities in software engineering social content and classify them into pre-defined categories. This task is referred to as named entity recognition, or NER for short.
NER has been extensively studied on formal text (such as news articles [9]), informal text (such as emails [10], [11]), and social content (such as Tweets [12], [13]). The goal is to recognize real-world objects in texts, such as person, location, and organization. Some NER work exists in other domains for recognizing domain-specific entities, such as biomedical NER [14], [15] and NER in clinical notes [16]. In contrast, our study focuses on designing domain-specific NER methods for software engineering social content, a new genre of social-technical texts. Proposed solutions to NER fall into three categories: rule-based, machine learning based, and hybrid methods. Existing studies show that machine learning based methods usually outperform rule-based methods [9], [12], [16]. However, for software engineering texts, existing approaches are limited to dictionary look-up and rule-based methods based on code or text parsing techniques [17], [18], [19], [20]. Furthermore, the only entity category considered is API.
In this work, we aim to design and evaluate a machine learning based method for general NER in software engineering social content. By general NER for software engineering, we mean that we would like to recognize not only APIs but also other categories of software-specific entities (such as programming languages, platforms, tools, libraries, frameworks, software standards). We have the following research questions:
- What are the challenges in NER for software engineering social content, compared with formal text, other social content like Tweets, or other domain-specific texts like clinical notes?
- How can we adapt a state-of-the-art machine learning based NER pipeline for NER in software engineering social content?
- How well will the machine learning based NER method work in the face of the unique challenges of software engineering social content?
To answer these research questions, we make the following contributions:
- We perform a formative study of a diverse set of Stack Overflow posts covering major programming languages, platforms, and libraries. Through this formative study, we identify design challenges and key design decisions to be made in NER for software engineering social content.
- We develop a software-specific, machine-learning based NER method, called S-NER, including software-specific entity categories, a software-specific tokenizer, Conditional Random Fields (CRF) based learning, and a rich and effective set of features for model training.
- We select and annotate a corpus of Stack Overflow posts, and use this corpus to train and test our S-NER method. We demonstrate the effectiveness of S-NER, and show that the machine learning based method can significantly outperform a well-designed rule-based baseline method.
- We discuss some design lessons learned in our study for researchers and designers of similar software-specific NER systems.
- We provide our annotated corpus, collected gazetteers, unsupervised word clusters, and trained CRF models to the software engineering community for further research and validation.
II. DESIGN CHALLENGES IN NER IN SOFTWARE ENGINEERING SOCIAL CONTENT
Existing NER methods recognize real-world objects, such as person, location, organization, time and date in formal or informal texts. However, these entities are not what developers are concerned with in software engineering texts. To recognize software-specific entities (such as programming languages, platforms, frameworks, tools, libraries, APIs and software standards) that developers care about, we must develop software-specific NER methods. Little work has been done along this line, except some API extraction and linking work in software engineering texts [17, 18, 20].
In this section, we report our formative study of Stack Overflow posts to understand the challenges in designing a general NER method for software engineering social content. Understanding these design challenges helps us choose and customize state-of-the-art NER techniques for designing our software-specific NER method.
We randomly sample a diverse set of 150 Stack Overflow posts covering 6 popular programming languages (JavaScript, Java, C#, Python, PHP, HTML), 1 popular platform (Android) and 1 popular library (jQuery). We then manually identify software-specific entities mentioned in these sampled posts. Through this formative study, we summarize the following challenges in NER in software engineering social content:
1) Stack Overflow discussions are characterized by not following strict linguistic rules and by more spelling mistakes compared to formal texts. For example, capitalizations are used extensively in question titles and discussions for emphasis. It happens that "JavaScript" is misspelled as "javasript" (missing the character "c").
2) Many software-specific entity names are common words. For example, “String” is a class name, “Application” is an Android class name, “config” can be a Python library name. However, “String”, “Application” and “config” are also very common words mentioned in the discussions, for example, “this method returns string”, “I am writing a Web Application”, “how to config it”.
3) Stack Overflow users often define code entities (e.g., classes, methods) for illustration purpose. These user-defined code elements have the same lexical and syntactic formats as library/framework APIs. However, they are not code entities that developers are concerned with in general.
4) Different software-specific entities often have the same name. For example, the term “memcached” can be a PHP class, and can also be a memory management tool. “Mac” can be a platform name, or a class name of Android, or an acronym of the software standard “message authentication code”. This causes ambiguity in determining appropriate entity category.
5) The informal nature of Stack Overflow posts introduces many name variations for the same software-specific entity. For example, in addition to the official programming language name JavaScript, users also refer to the language as Javascript, js, javascript, or JS.
6) Different programming languages usually have different naming conventions for API entities. For example, the naming of official Java methods follows lowerCamelCase, e.g., "getProperty", PHP method names often contain underscores, while .NET APIs follow UpperCamelCase.
7) Stack Overflow posts contain a plethora of distinctive named entities. Most of these entities (except for popular programming languages and platforms) are relatively infrequent.
Challenges 1-5 indicate that dictionary look-up or rule-based methods would not produce reliable NER results on software engineering social content. Furthermore, Challenges 5-7 indicate that it would be impractical or extremely expensive to define a comprehensive set of NER rules. Thus, building a machine learning based NER for software engineering social content is necessary. To tackle the above challenges, the machine learning based NER should not examine only local features of individual words. Instead, it should take into account the surrounding context of the word to recognize entities and determine entity categories. Conditional Random Fields (CRFs) [21], the state-of-the-art statistical modeling methods for solving such sequential data labeling problems, have been widely used in NLP problems, such as POS tagging [22], shallow parsing [13], and NER [23], [12], [16].
Building a machine learning based NER requires a lot of annotated data or rules for model training. Manually creating annotated data is tedious and prohibitively expensive. Furthermore, Challenges 5-6 indicate that Stack Overflow discussions contain many more Out-of-Vocabulary (OOV) words (i.e., entities that have not been seen in the training data) than formal texts, due to name variations and naming-convention differences. Challenge 7 indicates that even a large sample of manually annotated posts will still contain few training examples. A proposed solution to alleviate this issue is semi-supervised learning, which uses unsupervised word representations (e.g., Brown clusters [24]) learned from the abundant unlabeled data as extra features to improve the accuracy of a supervised NER model learned from a small amount of annotated data [25], [13].
Due to the OOV words, it is likely to encounter an entity which is difficult to identify using local contextual cues alone because the entity has not been seen before. In these cases, a gazetteer or dictionary of known entity identifiers is often useful [26], [9], [27], [12]. The gazetteer will not be used for simple dictionary look-up. Instead, using gazetteers one may define additional features in the CRF model that represent the dependencies between a word’s NER label and its presence in a particular gazetteer. Such gazetteer features are often highly informative, and including them in the model should in principle result in better model performance. Thus, we should build software-specific gazetteers for NER in software engineering social content.
III. PROBLEM DEFINITION
Based on our formative study, we define the problem of NER in software engineering social content as follows. Let \( T \) be a discussion thread, i.e., a question and its answers, in Stack Overflow. A question or answer is referred to as a post in Stack Overflow. Let \( S \in T \) be a sentence from a Stack Overflow post. The NER task is to recognize from the sentence \( S \) a span of words \( s = \langle w_1, w_2, \ldots, w_n \rangle \) \((n \geq 1)\) that refers to a software-specific named entity and classify \( s \) into the entity category it belongs to.
For any NER task, an intuitive and informative inventory of entity categories must be clearly defined. In traditional NER task, entity categories usually include person, location, organization, time and date. For NER in software engineering texts, our first task is to develop a domain-specific inventory of named entity categories which should achieve a good coverage of different aspects of software engineering knowledge that developers care about in Stack Overflow discussions.
To that end, the authors form a focus group and collaboratively review the software entities identified in our formative study. After an iterative development process, we finalize an inventory of software-specific entity categories in Table I.
We define 5 categories of software entities: Programming Language (PL), Platform (Plat), API, Tool-library-framework (Fram), and Software Standard (Stan). In particular, the Programming Language category covers different types of known programming languages, such as Object-oriented, Procedural, Scripting, Markup and Declarative. The Platform category refers to hardware or software platforms, such as CPU instruction sets (e.g., x86, POWER, ARMv9, Sparc), hardware architecture (e.g., CHRP, Mac), operating system and system kernel (e.g., Android, iOS). The API category refers to API elements of libraries and frameworks that developers can program with, such as packages, classes, interfaces, methods, functions, events and modules. The Tool-library-framework category broadly refers to software tools, libraries and frameworks that developers use. The Software Standard category refers to data formats (e.g., pdf, JSON), design patterns (e.g., Abstract Factory, Observer), protocols (e.g., HTTP, FTP), technology acronyms (e.g., Ajax), and so on.
In Table II we illustrate our software-specific NER task using some example Stack Overflow posts. Our task is to recognize and classify those software entities highlighted in boldface. Specifically, "Maven", "Mac OS X", "append", "extend", "JSON" and "Java" should be recognized as framework, platform, API (both "append" and "extend"), software standard, and programming language, respectively. Note that if an entity comprises more than one word, e.g., "Mac OS X", it is recognized correctly if and only if all its words \( w_1, w_2, \ldots, w_n \) are recognized as part of the entity. Furthermore, we do not consider code elements that Stack Overflow users define to explain their questions or answers as named entities, because these code elements are not public APIs that a community of developers are concerned with. Finally, in our NER system, we do not consider domain terminologies and concepts as named entities. For example, in the phrase "java plugin", "java" will be recognized as a programming language entity, while the domain term "plugin" is considered as a common noun, not a named entity. Similarly, we do not consider terms like "database", "sorting", "machine learning" as named entities, as they refer to general concepts, not specific entities.
Table II: Software-Entity Examples in Stack Overflow Posts
<table>
<thead>
<tr>
<th>Post ID</th>
<th>Extracted Texts</th>
</tr>
</thead>
<tbody>
<tr>
<td>8825881</td>
<td>Maven Install on Mac OS X</td>
</tr>
<tr>
<td>252703</td>
<td>What’s the difference between the list methods append and extend?</td>
</tr>
<tr>
<td>2591098</td>
<td>How to parse JSON in Java</td>
</tr>
</tbody>
</table>
IV. THE SOFTWARE-SPECIFIC NER SYSTEM
To address the design challenges in NER in software engineering social content, we design a semi-supervised domain-specific NER system (called S-NER) that integrates state-of-the-art supervised sequence modeling and unsupervised NLP techniques. Figure 1 shows an overview of our S-NER system. S-NER is based on Conditional Random Fields (CRF) [21] for supervised model training. S-NER utilizes a rich set of features extracted from heterogeneous data resources, including a small-sized human labeled dataset from Stack Overflow, a large-sized unlabeled dataset from Stack Overflow, and various external knowledge resources. In this section, we discuss data preparation steps, customized tokenization, human entity annotation, unsupervised word clustering, the CRF model, and our feature design for training a CRF model.
A. Data Preparation
1) Labeled Data Preparation: Supervised learning requires annotated (or labeled) data. Unlike previous studies [17], [18] that are limited to one or two programming languages, we do not restrict our NER data to be under the same programming language or platform. From Stack Overflow’s official data dump released on March 16th, 2015, we randomly select posts under a diverse set of Stack Overflow tags, representing popular object-oriented and procedural languages (java, c#), Web and scripting languages (javascript, php, python), markup language (html), platform (android), and library (jquery). In fact, these 8 tags are the most frequently-used tags among all the Stack Overflow tags. Specifically, we select 1,520 Stack Overflow posts from 300 Stack Overflow discussion threads. The number of Stack Overflow posts we select for a particular Stack Overflow tag is proportional to that tag’s usage frequency on Stack Overflow. We refer to this dataset as labeled data, as it will be labeled by human annotators and used for supervised learning and model testing.
We pre-process the collected posts as follows. In the official data dump of Stack Overflow, standalone code snippets are surrounded with HTML tags ⟨pre⟩ and ⟨/pre⟩. We remove these code snippets, because 1) the usage of the official APIs in such code snippets usually follows programming syntax and can be identified using rule and grammar-parser based approaches as shown in previous work [17], [18]; 2) many code elements in such code snippets are defined by question askers or answerers for illustration purpose, and these code elements do not refer to software-specific entities that other developers are concerned with. However, we keep small code elements embedded in the post texts that are surrounded with ⟨code⟩ and ⟨/code⟩ tags. These small code elements often refer to APIs, programming operators and simple user-defined code elements for explanation purpose. Removing them from the texts will impair the sentence's completeness and meaning. Finally, we strip all other HTML tags from the post texts.
2) Unlabeled Data Preparation: As mentioned in Section II, we use unlabeled Stack Overflow data to compensate for the small-sized human-labeled data. In particular, we randomly select a huge dataset consisting of more than 7 million Stack Overflow posts from 1.8 million Stack Overflow discussion threads tagged with the 8 most frequently used tags (java, c#, javascript, php, python, html, android, and jquery). Again, the number of posts selected for a particular Stack Overflow tag is proportional to the tag's usage frequency. We refer to this dataset as unlabeled data, as it will be fed into unsupervised word clustering [24] to learn word representations (i.e., word bitstrings). The word representations will in turn be used as features for training a CRF model. The pre-processing steps for this huge unlabeled Stack Overflow text are the same as the steps described above for the labeled data.
3) External Knowledge Resources: As mentioned in Section II, gazetteers are collections of authentic named entities for a particular domain. By authentic, it means that every phrase in the gazetteer should be an entity. We can use gazetteers as features for training a CRF model. While there are many gazetteers publicly available for common person names, locations, organizations, products, temporal expressions [9], there are no gazetteers that can help to recognize software-specific entities in software engineering texts.
We contribute a set of software-specific gazetteers, including a comprehensive list of programming languages, a list of platforms, a variety of API names covering popular programming languages, a list of community-recognized software tools, libraries and frameworks, and software standards. For programming languages, we derive notable languages in existence from the Wikipedia list of programming languages (https://en.wikipedia.org/wiki/List_of_programming_languages). For platforms, we obtain the gazetteer from several Wikipedia lists, including computing platform, list of operating systems, list of instruction sets, and list of mobile platforms. We crawl the API names as defined in Table II from the official websites of the studied programming languages (Java, JavaScript, PHP, C#, Python, HTML), platform (Android), and library (jQuery). In particular, we crawl the latest versions of the APIs for Java 8, PHP 5, and Android API level 23. For C#, we collect its API names for .NET 4.5 and 4.6. For Python, we crawl its modules and methods for versions 2.7 and 3.4. For HTML, we collect both HTML tags and DOM methods. For jQuery, we collect methods for the jQuery library and the jQuery UI library. For software tools, libraries and frameworks of different programming languages and platforms, we obtain the list from GitHub Awesome Lists, a popular software knowledge source on GitHub curated by developers. For every mainstream programming language or platform, there exists a curated list of well-known software tools, libraries and frameworks, as well as other types of information for that language or platform (see more at: https://github.com/sindresorhus/awesome). For software standards, we obtain design pattern and protocol names from Wikipedia lists; we cannot show all the data sources here due to space limitations. We collect a list of data formats and technology acronyms from Stack Overflow tags.
TABLE III: An Example of Our S-NER's Tokenization
<table>
<thead>
<tr>
<th>Input Sentence</th>
<th>Stanford Tokenizer</th>
<th>S-NER Tokenization</th>
</tr>
</thead>
<tbody>
<tr>
<td>What’s the equivalent of Java’s Thread.sleep() in C#?</td>
<td>What ’s the equivalent of Java ’s Thread . sleep ( ) in C # ?</td>
<td>What’s the equivalent of Java’s Thread.sleep() in C# ?</td>
</tr>
</tbody>
</table>
B. Customized Tokenization
Tokenizers designed for general English texts cannot properly handle software engineering social content which is both social and technical. We develop a domain-specific tokenizer for handling texts with software-specific entities. The tokenizer uses regular expressions to match valid URLs, at-mentions, and emoticons (e.g., :), :)). The tokenizer does not split the name of a software entity, e.g., the name of an API. It does not split valid programming operators, such as “==” and “!=". It considers separate parentheses, i.e., ( and ), as punctuations. However, parentheses, as well as dot, #, and $, that appear in an API are considered as part of the API itself. In Table III, we show an example of the S-NER’s tokenization results. Line 1 is the input sentence. Line 2 is the tokenization done by Stanford Tokenizer, which is designed for general English texts. Line 3 is tokenized by S-NER. As we can see, S-NER is able to tokenize the Java API Thread.sleep() as a whole, while the tokenizer for general texts splits the API name into 5 tokens.
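To make the tokenization behaviour concrete, here is a minimal regular-expression sketch in the spirit of the description above; it is our illustration, not the S-NER implementation, and the object name and exact patterns are assumptions.

```scala
// Illustrative software-aware tokenizer sketch (not the S-NER source code):
// URLs, at-mentions, emoticons, API-like tokens such as Thread.sleep() or C#,
// and programming operators are kept as single tokens; ordinary punctuation
// is split off.
object SoftwareTokenizer {
  private val token = List(
    raw"https?://\S+",                 // URLs
    raw"@\w+",                         // at-mentions
    raw"[:;]-?[)(DPp]",                // simple emoticons such as :) or ;-(
    raw"[A-Za-z_$$][\w.$$#]*\(\)",     // APIs ending in parentheses, e.g. Thread.sleep()
    raw"[A-Za-z_$$][\w.$$#]*[\w#]",    // dotted or # names, e.g. java.util, C#
    raw"==|!=|<=|>=|&&|\|\|",          // programming operators kept whole
    raw"\w+",                          // ordinary words
    raw"\S"                            // any remaining single character (punctuation)
  ).mkString("|").r

  def tokenize(sentence: String): List[String] =
    token.findAllIn(sentence).toList
}
// SoftwareTokenizer.tokenize("What's the equivalent of Java's Thread.sleep() in C#?")
// keeps "Thread.sleep()" and "C#" as single tokens and splits off the final "?".
```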
C. Human Entity Annotation
For annotation, we use Brat [28], a web-based annotation tool. We adopt the widely used BIO representation of text chunks. In our context, BIO means the Begin, Inside and Outside of an entity. Take the 5-token sentence “Apache ant is a tool” as an example. The correct annotation is “B-Fram I-Fram O O O”, where “Fram” is the annotation tag as shown in Table I, and B and I indicate the Begin and Inside of the text chunk. This means that the phrase “apache ant” refers to a framework, while the last three words are not entities.
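The following small sketch (ours, for illustration; the object and method names are made up) shows how such a labeled span maps onto BIO tags:

```scala
// Illustrative BIO encoding: "Apache ant is a tool" with the entity span
// "Apache ant" of category Fram becomes  B-Fram I-Fram O O O.
object BioEncoder {
  def encode(tokens: List[String], span: List[String], cat: String): List[String] = {
    val start = tokens.indexOfSlice(span)  // first occurrence of the entity span
    tokens.zipWithIndex.map {
      case (_, i) if start >= 0 && i == start                         => s"B-$cat"
      case (_, i) if start >= 0 && i > start && i < start + span.size => s"I-$cat"
      case _                                                          => "O"
    }
  }

  def main(args: Array[String]): Unit =
    println(encode("Apache ant is a tool".split(" ").toList,
                   List("Apache", "ant"), "Fram"))
    // List(B-Fram, I-Fram, O, O, O)
}
```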
The annotation process involves 3 stages and is performed by 9 annotators, all of whom have a computer science background and 5+ years of programming experience. Before annotation, we give all annotators a 1-hour tutorial covering the tool usage, annotation methods, and entity categories. We provide some annotation examples for the annotators to practice on. The purpose is to let them reach a consensus on what kinds of entities to annotate and how.
In Stage 1, each annotator is assigned some Stack Overflow posts. During this manual annotation process, we ask them to report to us when there are tokenization errors or deficiencies, and when certain tokens are hard to label using the software-specific entity categories we developed. After this stage, we use the feedback from our annotators to improve the tokenization, refine our software entity categories, and clean up the annotated data. In Stage 2, we let annotators cross-validate the data, i.e., the same set of tokens from Stage 1 is examined by a different annotator in Stage 2. In Stage 3, a final sweep over all the annotated data is made by the first, third and fourth authors of this paper to improve the consistency of our annotation.
D. Unsupervised Word Clustering
To alleviate the problem of out-of-vocabulary (OOV) and lexical word variations, we rely on unsupervised word clustering to group together words that are distributionally similar. Specifically, we apply Brown Clustering [24], [29] on the unlabeled Stack Overflow posts (see Section IV-A2). Brown Clustering assigns words that appear in similar contexts into the same cluster. Words in the cluster are represented as a string. We use Liang’s implementation of Brown Clustering. We configure the number of clusters to 1000 and we only cluster words that appear no less than 10 times. It takes 15 hours to finish the word clustering of the unlabeled dataset on a 4-core Intel i5-4570 processor. Table IV lists some resulting word clusters and their corresponding bitstrings. We can see that word clusters can represent semantically similar words in Stack Overflow posts.
TABLE IV: Example Word Clustering Results
<table>
<thead>
<tr>
<th>Bitstring</th>
<th>Top words (by frequency)</th>
</tr>
</thead>
<tbody>
<tr>
<td>11111011111</td>
<td>NET Spring ASP.NET Django HTML5 asp.net bootstrap django</td>
</tr>
<tr>
<td></td>
<td>.net wordpress Wordpress Angular AngularJS JPA</td>
</tr>
<tr>
<td>1111111110</td>
<td>foreach settimeout setInterval eval files json encode</td>
</tr>
<tr>
<td></td>
<td>explode exec var_dump print_x await document.write</td>
</tr>
<tr>
<td>111111111111</td>
<td>hover touch mouseover blur keyup keypress keydown</td>
</tr>
<tr>
<td></td>
<td>mouseover fadeln mouseenter mouseleave mousekeydown delegated</td>
</tr>
<tr>
<td></td>
<td>show() fadeOut mousemove hide()</td>
</tr>
</tbody>
</table>
We also build an HTML viewer for interested readers to browse and check our detailed word clustering results. We host the HTML viewer at this web service: http://cyong-oneinfinityloop.com/clusters/cluster_viewer.html
E. Supervised Learning based on CRF
S-NER is based on linear chain Conditional Random Fields (CRF). We describe the CRF model here and the features we use to train the CRF model.
1) Model: Given a sequence of observations (tokens in this work) \( \vec{x} = x_1, x_2, ..., x_n \), we want to assign each observation a label (from our annotation tags shown in Table I and the BIO representation of text chunks), e.g., B-API, I-API, B-Plat, I-Plat, O, and so on. This label sequence is denoted as \( \vec{y} = y_1, y_2, ..., y_n \). In linear chain CRF, the probability of assigning \( \vec{y} \) given \( \vec{x} \) is:
\[
p_{\lambda}(\vec{y}|\vec{x}) \propto \exp\left(\sum_{j=1}^{n} \sum_{i=1}^{m} \lambda_i f_i(y_{j-1}, y_j, \vec{x}, j)\right) \tag{1}
\]
where \( j \) specifies the position in the input sequence \( \vec{x} \), \( f_i(y_{j-1}, y_j, \vec{x}, j) \) denotes the features designed for training and testing, and \( \lambda_i \) represents the weight of feature \( f_i \).
2) Feature Design: We extract a rich set of features from annotated corpus, unlabeled Stack Overflow texts, and external knowledge resources.
Orthographic features: We design the following orthographic features based on our observations of Stack Overflow texts. We first use regular expressions to detect URLs, at-mentions and emoticons. We consider as features whether a token is initial-capitalized, whether all characters in a token are capitalized, whether a token is alphanumeric, and whether it contains digits, underscores, or dots. We further examine whether a token has parentheses at the end, whether a token contains both digits and dots, and whether a token has capitalization in the middle. If a token has a dot, we also check whether its suffix matches a data format name in our collected “software standard” gazetteer. We do so because many file names, e.g., “MyCode.java”, contain a dot but are not named entities. Note that many URLs have the above-mentioned features; therefore, we normalize a URL into “@u@” once it is detected using regular expressions.
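The sketch below illustrates this kind of surface-form feature extraction. The feature names and the exact checks are assumptions made for illustration; they are not S-NER’s released feature templates.

```python
import re

URL_RE = re.compile(r"https?://\S+")

def orthographic_features(token: str) -> dict:
    """Illustrative surface-form features in the spirit of the description above."""
    if URL_RE.fullmatch(token):
        token = "@u@"                       # normalize URLs as described in the text
    return {
        "init_cap": token[:1].isupper(),
        "all_caps": token.isupper(),
        "is_alnum": token.isalnum(),
        "has_digit": any(c.isdigit() for c in token),
        "has_underscore": "_" in token,
        "has_dot": "." in token,
        "ends_with_parens": token.endswith("()"),
        "mixed_case": any(c.isupper() for c in token[1:]),
        "digit_and_dot": bool(re.fullmatch(r"[\d.]+", token)) and "." in token,
    }

print(orthographic_features("Thread.sleep()"))
```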
Lexical and contextual features: We consider every token in our annotated corpus as a feature. We also consider the uppercase form and the lowercase form of every token as features. To utilize context information, a window of size [-2, 2] is used to add the previous and the next two tokens as features. We experiment with other window-size settings, but find that they do not improve performance.
Word bitstring features: As mentioned in Section IV-D, a word is represented as a bitstring after word clustering. We use the prefixes of the bitstrings as features. Based on our word clustering results, the prefix lengths we use are 5, 6, 7, 8, 9, ..., 15. Take the bitstring at line 1 of Table IV as an example. The length of this bitstring is 11. For prefix lengths 5-11, the prefixes used as features are “11111”, “111110”, ..., “11111011111”. For prefix lengths 12-15, the whole bitstring is used as the feature.
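A minimal sketch of turning a word’s Brown-cluster bitstring into prefix features, following the description above; the feature-naming scheme is an assumption.

```python
def bitstring_prefix_features(bitstring: str, lengths=range(5, 16)) -> dict:
    """For prefix lengths longer than the bitstring, the whole bitstring is used."""
    feats = {}
    for n in lengths:
        feats[f"brown_prefix_{n}"] = bitstring[:n]   # slicing past the end keeps the whole string
    return feats

print(bitstring_prefix_features("11111011111"))
# brown_prefix_5 = '11111', brown_prefix_6 = '111110', ..., brown_prefix_11..15 = '11111011111'
```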
Gazetteer features: We store the gazetteers for different entity categories in different files. For each programming language, we store the names of packages, classes, modules, methods, events, etc., in separate files. We use the string matching results against gazetteer entries in different gazetteer files as features. Specifically, we perform exact string matching for class names and module names. We perform lowercase string matching for entries that only have one word. We perform fuzzy string matching using the fuzzywuzzy tool for entries that consist of a span of words. If the fuzzy ratio is above 0.8, we consider it a match.
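A sketch of this matching policy using the fuzzywuzzy library. Note that fuzzywuzzy’s ratio is on a 0–100 scale, so the 0.8 threshold above corresponds to 80 here; the gazetteer contents and the simplified one-word/multi-word split are illustrative assumptions.

```python
from fuzzywuzzy import fuzz

def gazetteer_match(span: str, gazetteer: set) -> bool:
    """Return True if `span` matches some gazetteer entry under the policy above."""
    if " " not in span:
        # one-word entries: case-insensitive exact match
        return span.lower() in {e.lower() for e in gazetteer if " " not in e}
    # multi-word spans: fuzzy matching against multi-word entries
    return any(fuzz.ratio(span.lower(), e.lower()) >= 80
               for e in gazetteer if " " in e)

gaz = {"ListView", "Mac OS X", "apache ant"}
print(gazetteer_match("mac os x", gaz))   # True
```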
V. Evaluation
Our experiments are designed to demonstrate the need for a machine-learning-based software-specific NER system, and to test the efficacy of the software-specific feature set we develop, given a small annotated software engineering corpus.
A. Experimental Setup
The annotated corpus consists of 4,646 sentences derived from 1,520 Stack Overflow posts. The total number of tokens after tokenization is 70,570. The number of software-specific named entities is 2,404. Stack Overflow discussions are entity-rich, as evidenced by the fact that there are on average 1.58 (2404/1520) software entities per Stack Overflow post, according to our annotation results.
https://github.com/percyliang/brown-cluster
https://github.com/seatgeek/fuzzywuzzy
In Figure 2, we further show the proportions of different categories of software-specific entities according to our predefined entity categories in Table I. We can see that API is the most discussed entity category among developers, which accounts for 41% of all software-specific entities in our corpus.
For model training and testing, we use 10-fold cross validation. We randomly divide our annotated corpus into 10 equal-sized subsets. Of the 10 subsets, one single subset is retained as the testing data, and the remaining 9 subsets are used as training data. We repeat this process 10 times and produce a single estimation by averaging the 10 results obtained.
It is very likely that a question and its answers discuss the same set of software entities. Therefore, to avoid model overfitting, we make sure that the answers to a particular question will not be put in the testing data if the corresponding question is in the training data.
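One way to enforce this constraint is grouped cross-validation, where every sentence carries the id of the question thread it came from. The sketch below uses scikit-learn’s GroupKFold for illustration; it is an assumed implementation, not necessarily the splitting script used in this work.

```python
from sklearn.model_selection import GroupKFold

def question_grouped_folds(sentences, labels, question_ids, n_splits=10):
    """Yield (train_idx, test_idx) so that a question and its answers
    always fall on the same side of the split."""
    gkf = GroupKFold(n_splits=n_splits)
    yield from gkf.split(sentences, labels, groups=question_ids)
```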
For the implementation of linear chain CRF, we use CRF++ \(^8\), a popular CRF toolkit that has been widely used for sequential tagging tasks like NER.
### B. Evaluation Metrics
We use standard NER evaluation metrics, i.e., precision, recall, and F1. For each category of named entity, precision measures what percentage of the output labels are correct. Recall measures what percentage of the named entities in the gold-standard dataset are labeled correctly. The F1-score is the harmonic mean of precision and recall.
Using 10-fold cross validation produces 10 sets of testing results. We calculate the average precision, recall and F1-score as the overall performance of our S-NER system. We report phrase-level precision, recall and F1. This means that if an entity consists of a span of tokens, it is considered correctly labeled if and only if all its tokens are labeled correctly (see the example of “Mac OS X” in Table II in Section III).
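The sketch below shows one way to compute phrase-level precision, recall, and F1 from sets of (sentence id, span, category) tuples. It is an illustrative implementation of the metric described above, not the evaluation script used in this work.

```python
def phrase_level_scores(gold_entities, predicted_entities):
    """Both arguments are sets of (sentence_id, start, end, category) tuples;
    an entity counts as correct only if its whole span and category match."""
    tp = len(gold_entities & predicted_entities)
    precision = tp / len(predicted_entities) if predicted_entities else 0.0
    recall = tp / len(gold_entities) if gold_entities else 0.0
    f1 = (2 * precision * recall / (precision + recall)) if precision + recall else 0.0
    return precision, recall, f1
```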
### C. Baseline System
Our baseline system is implemented using a mixture of empirical lexical rules and dictionary look-ups. The dictionary look-up is based on the gazetteers we collect from external knowledge resources (see Section IV-A3). The testing data used to evaluate the baseline system and the CRF model are always the same.
Since our gazetteers cover a very broad range of software-specific entities of different categories, we initially use these gazetteers directly for string matching. If a span of tokens matches an entry in the gazetteer, we label it as an entity of the corresponding category. However, this simple string-matching approach performs poorly. For example, the F1 scores for recognizing Programming Language entities and Tool-library-framework entities are as low as 45% and 17%, respectively. Our error analysis indicates that:
- Many entries in the gazetteers are common words, or have only a single character. To name a few, “B”, “D”, “Go”, “GOAL”, “Logo” are programming language names. “Application” is an Android class. “Moment” is a JavaScript date library, “click” is a Python library, “Task” is a tool for running PHP tasks. Such common-word or single-character entity names can significantly impair system performance when using string matching. This is not a common phenomenon for real-world entities such as persons, locations, and organizations.
- Direct string matching is unable to handle name variations. For example, some users write the Android class “ListView” as “List View”, “listview”, etc.
- For API names, users sometimes follow the standard format package.class.method or class.method, but sometimes write the method names directly.
To improve the performance of the baseline system, we further analyze the gazetteers of different entity categories and design some lexical rules as follows.
- For the gazetteer of programming language entities, we choose not to use the comprehensive list of programming languages which consists of 419 programming languages. Rather, we manually identify programming languages from Stack Overflow tags with tag frequency greater than 10,000. We compile a short list of 30 popular languages, and we add some of the commonly seen lexical variations, e.g., “js” for “JavaScript”.
- We identify class and method names that are not compound words and remove them from the gazetteer. Here compound words refer to words like “ListView”, which is made of “List” and “View”. Some examples of the removed APIs include the “Application” class in Android, the “Array” class in Java, etc. We store packages, classes, and methods for a certain language as separate entries in the gazetteers, and use them separately or combine them as necessary to match API mentions. Notice that formally written APIs have distinguishable orthographic features, e.g., formal PHP methods contain underscores in the middle and parentheses at the end, and some Python methods follow the syntax `Module.Method()`. Therefore, we add regular expressions to detect these formally written APIs.
- For the tool-library-framework gazetteers from GitHub Awesome Lists, we manually identify those that cannot be differentiated from common words, such as the above-mentioned library and tool names “Moment”, “click”, “Task”. We remove a list of such tool-library-frameworks from the gazetteers (we do not list all of them here due to space limitations).
\[^{8}\]https://taku910.github.io/crfpp/
\[^{9}\]http://momentjs.com/
\[^{10}\]http://taskphp.github.io/
\[^{11}\]http://click.pocoo.org/5/
- If a token begins with a dot followed by an entry in the software standard gazetteer, we label it as a software standard entity. For example, “.jar” and “.pdf” are labeled as software standard entities.
- The string matching against gazetteer entries is similar to what we do in the gazetteer feature design (see Section IV-E2).
- We further add some empirical rules. If the current word matches a one-word entry in the programming language, platform, or tool-library-framework gazetteer, we check whether its next word is made of digits and dots, and whether its previous word is a software company or organization name (we manually identify a list of software organization names, such as microsoft, apache, etc.). We do so to enhance the detection of software entities that consist of a span of words, such as “python 2.7”, “apache ant”, “microsoft excel”, etc. A sketch of these rules is given after this list.
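To make the flavor of these rules concrete, the sketch below implements two of them in Python: regular expressions for formally written APIs, and the look-ahead/look-behind rule for multi-word entities. The patterns, the returned labels, and the organization list are illustrative assumptions; the actual rule set of the baseline is more extensive.

```python
import re

PHP_METHOD = re.compile(r"^[a-z]+(?:_[a-z0-9]+)+\(\)$")     # e.g. array_merge()
DOTTED_API = re.compile(r"^\w+(?:\.\w+)+\(\)$")             # e.g. Module.Method()
VERSION    = re.compile(r"^\d+(?:\.\d+)*$")                 # e.g. 2.7
ORG_NAMES  = {"microsoft", "apache", "google"}              # manually curated (example)

def baseline_label(prev_tok, tok, next_tok, one_word_gazetteer):
    """Return a coarse label for `tok`, or None if no rule fires."""
    if PHP_METHOD.match(tok) or DOTTED_API.match(tok):
        return "API"
    if tok.lower() in one_word_gazetteer:
        # extend one-word gazetteer hits into multi-word entities
        # such as "python 2.7" or "apache ant"
        if (next_tok and VERSION.match(next_tok)) or \
           (prev_tok and prev_tok.lower() in ORG_NAMES):
            return "Entity"
    return None

print(baseline_label("apache", "ant", None, {"ant", "python"}))   # Entity
print(baseline_label(None, "Thread.sleep()", None, set()))        # API
```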
Our experience with the baseline system suggests that it is not an easy task to design a robust dictionary and rule based NER system for software engineering social content. We invest a significant effort to improve the baseline so as to make a fair comparison with the machine learning based NER system.
D. Overall Comparison Results of All Entity Categories
In Table V, we show the overall results when using S-NER and the baseline system to recognize all 5 categories of software-specific entities defined in Table I. We see that the overall F1-score of S-NER is 78.176%, which outperforms that of the baseline system by 30.3% (relative). The relative improvements in precision and recall of S-NER over the baseline system are 47% and 14.4%, respectively.
<table>
<thead>
<tr>
<th>System</th>
<th>Precision(%)</th>
<th>Recall(%)</th>
<th>F1(%)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Baseline</td>
<td>55.849</td>
<td>65.293</td>
<td>60.018</td>
</tr>
<tr>
<td>S-NER</td>
<td>82.093</td>
<td>74.706</td>
<td>78.176</td>
</tr>
<tr>
<td>Improvement</td>
<td>47.0% ↑</td>
<td>14.4% ↑</td>
<td>30.3% ↑</td>
</tr>
</tbody>
</table>
TABLE V: Overall Experimental Results
Fig. 3: Comparison of S-NER and the Baseline System for the 10 Testing Sets
Recall that we use 10-fold cross validation. In Figure 3 we show the detailed comparisons between S-NER and the baseline on the 10 testing sets. We observe that the performance of S-NER consistently exceeds that of the baseline system on all the testing sets. The largest improvement occurs on test3, where the F1-score of S-NER is slightly more than 81% while the F1 of the baseline system is around 55%. 81% is also the highest F1 obtained by S-NER (test3), while the lowest F1 of S-NER is about 73% (test8). The highest and lowest F1 of the baseline system are 68% (test1) and 55% (test3).
E. Comparison Results of Individual Entity Category
We report the experimental results of each individual entity category in Table VI. As we can see, the NER performances of both S-NER and the baseline system on different categories of entities are quite different.
TABLE VI: NER Results of Individual Entity Category
<table>
<thead>
<tr>
<th>Sub-table</th>
<th>Entity Category</th>
<th>Improvement of S-NER over Baseline</th>
</tr>
</thead>
<tbody>
<tr><td>(a)</td><td>Programming Language</td><td>4.8% ↑</td></tr>
<tr><td>(b)</td><td>Platform</td><td>6.0% ↑</td></tr>
<tr><td>(c)</td><td>API</td><td>71.1% ↑</td></tr>
<tr><td>(d)</td><td>Tool-library-framework</td><td>57.1% ↑</td></tr>
<tr><td>(e)</td><td>Software Standard</td><td>62.2% ↑</td></tr>
</tbody>
</table>
As expected, the NER of programming languages achieves very high precision, recall and F1 for both S-NER and the baseline, as shown in Table VIa. The rule and dictionary look-up based baseline is able to perform well, with an F1 of 87.457%. One example error made by the baseline system is that it excessively labels the word “C” as a programming language, even though “C” is often just a common character. By comparison, the machine learning based S-NER considers the contextual environment of the current word, as covered in Section IV-E2, and is more robust (93.358% F1).
In Table VIb, we show the results for the NER of platforms. We can see that the F1 of the baseline system is very close to that of S-NER. S-NER has the higher precision, but the baseline system has the higher recall. We can understand this result from two aspects. First, the naming of platforms is not as ambiguous as the naming of other categories of entities: common words are not frequently used as platform names. Second, the number of known platforms is much smaller than the number of APIs, tools, frameworks, etc. These factors reduce the difficulty of platform NER.
From Tables VIc, VId, and VIe, we see that the machine learning based S-NER outperforms the baseline significantly when recognizing APIs, tool-library-frameworks, and software standards. The F1 improvements observed for these three entity categories are all higher than the overall improvement shown in Table V. These results suggest that it can be very difficult to develop a robust rule based approach to recognize the informally written API names, tool-library-frameworks, and software standards in software engineering social content like Stack Overflow, because it is impossible to know in advance what word variations and ambiguities there will be.
### F. Feature Ablation
We also perform independent feature ablation experiments to study the effect of individual feature(s) on the NER performance. In Table VII, we ablate one kind of feature(s) at a time from our full feature set and test the resulting F1-score.
We find that using unsupervised word bitstrings as features is very helpful; without them the overall F1 drops sharply from 78.176% to 72.642%. The drop in F1 without word clustering features is most significant for the API category and the tool-library-framework category.
The use of gazetteers as features has a small impact on the final system performance. Removing gazetteer features only decreases the F1-score by about 0.5 percentage points. This is noteworthy: gazetteer features are considered critical in other NER work [9, 12], especially for NER in social texts such as Tweets [12]. In these studies, gazetteers boost NER performance tremendously, e.g., by an F1 increase of 19% as reported in [12]. We discuss the design of software-specific gazetteers further in Section VI.
Our results show that the initial-capitalization feature is not particularly useful for software-specific NER, as we discussed in Section II. The F1 without initial capitalization is slightly lower at 77.577%. Without prefix and suffix features, F1 decreases slightly to 76.719%. We also ablate the other orthographic features one by one, and find that the absence of any single orthographic feature does not significantly impair S-NER’s F1 score. Overall, S-NER’s performance results from the combined effect of all its features.
### G. Varying Labeled Data Size
We want to further understand how S-NER’s performance changes with the size of the labeled data, so as to know how much data we should label to reach a reasonable F1-score. In this set of experiments, we use our full feature set during training and testing. We randomly select one-tenth, two-tenths, three-tenths, ..., nine-tenths and all of the original labeled dataset, and use these datasets for model training and testing. For each dataset, we use 10-fold cross validation. We report the corresponding averaged F1 of S-NER in Figure 4.
As we increase the size of labeled data, we see F1 increases monotonically. The smaller the size of the labeled data, the larger the increase rate. The F1-score becomes relatively stable after we use about 80% of all the labeled data.
### VI. WHY DOES GAZETTEER NOT WORK?
Two important decisions in the design of our NER system are to include unsupervised word representations and gazetteers as features for training the CRF model. As reported in Table VII, unsupervised word clustering boosts the NER performance as expected. However, gazetteers have only a marginal effect on the performance of S-NER. This contradicts the results of many NER studies showing that inserting gazetteers as features in machine learning based NER can significantly boost NER performance [26, 9, 27, 12].
The principle behind gazetteer features is as follows: if a span of tokens (n ≥ 1) in the training dataset matches a gazetteer entry and is labeled as an entity, the CRF model learns that a string match in that gazetteer is an indicator of an entity appearance. Accordingly, the weight λ (see Eq. 1) of that specific gazetteer feature will increase. However, software-specific gazetteers contain highly common words. Take the Android class-name gazetteer as an example. Words like “Application”, “Path”, “Array” are Android classes. In most situations, these words are labeled as “O”, i.e., they are not entities in Stack Overflow texts. Therefore, the weight λ of such gazetteer features will decrease during CRF model training. As a result, even when these common words do refer to software entities, it can be difficult for S-NER to recognize them.
A common practice to alleviate the issue of common words in NER tasks is to remove these common words from the gazetteer [26, 27], similar to what we do for the baseline system, as covered in Section V-C. However, this practice proves not very helpful for software-specific NER. We only observe a small performance improvement for the recognition of tool-library-frameworks after we remove common words from gazetteers.
TABLE VII: The Effects of Individual Feature(s)
<table>
<thead>
<tr>
<th rowspan="2">Feature set</th>
<th colspan="5">F1-score for each entity category (%)</th>
<th rowspan="2">Overall F1(%)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Full-feature</td>
<td>91.358</td><td>77.204</td><td>91.015</td><td>71.202</td><td>79.995</td>
<td>78.176</td>
</tr>
<tr>
<td>w/o word clustering</td>
<td>91.327</td><td>77.492</td><td>66.507</td><td>57.581</td><td>75.823</td>
<td>72.642</td>
</tr>
<tr>
<td>w/o gazetteer</td>
<td>92.395</td><td>75.092</td><td>70.372</td><td>72.197</td><td>78.476</td>
<td>77.691</td>
</tr>
<tr>
<td>w/o affix features</td>
<td>92.268</td><td>76.642</td><td>69.282</td><td>71.34</td><td>76.901</td>
<td>76.719</td>
</tr>
<tr>
<td>w/o initial capitalization</td>
<td>91.862</td><td>79.982</td><td>69.779</td><td>72.801</td><td>79.417</td>
<td>77.577</td>
</tr>
</tbody>
</table>
The recognition of APIs, which occupy the largest proportion of all entities, shows almost no improvement. Again, we use the Android class-name gazetteer to understand this observation. After removing common words, the entries left in the Android class gazetteer are mostly compound words, such as “AbsListView”, “AbstractList” and “AbsListView.LayoutParams”. These words have distinct orthographic features, e.g., their suffixes, prefixes and capitalization patterns. Words with similar orthographic features appear frequently in the training dataset. Consequently, even without the insertion of gazetteers, the CRF model can recognize these words accurately. As such, gazetteers have little impact on the NER performance.
Our analysis indicates that how to design a high-quality gazetteer for software-specific NER remains an open question for the software engineering research community.
VII. RELATED WORK
A. Software Engineering Information Extraction and Linking
In the software engineering community, software information extraction and linking have been extensively studied.
A large body of work focuses on code element extraction and linking [17], [18], [19], [20], [21], [22], [23], [24], [25], [26]. In this line of research, the named entities being recognized are restricted to code elements (APIs) of certain programming languages, and the task is usually formulated as a traceability recovery problem. For example, Rigby and Robillard [17] develop a code element extraction and linking tool and propose the notion of code salience as an indicator of the importance of a particular code element. Subramanian et al. [18] build a browser extension that links an API in code snippets of Stack Overflow to its corresponding API reference documentation.
Some work recognizes important words or concepts in software artifacts to facilitate content comprehension [20], [26], [27]. For example, Shokripour et al. [26] use part-of-speech information to find software noun terms in bug reports. Hauff et al. [27] utilize the DBPedia Ontology to extract software concepts from GitHub developer profiles.
Other related work includes the following. Witte et al. [28] build ontology representations for software artifacts. In [29], natural language parsing is used to classify the content of development emails. Bagheri and Ensan [30] mine Wikipedia contents and recommend Wikipedia entries as potential tags for Stack Overflow posts. Sharma et al. [31] identify Tweets that contain software engineering knowledge using language models. Tian et al. [32], Yang and Tan [33], [34] and Howard et al. [35] study software-specific word similarity in software texts.
Compared with these studies, our work aims to recognize a wide range of software-specific entities (not limited to code elements of a particular programming language), and to classify each recognized entity into the entity category it belongs to. In particular, we formulate the research problem of NER in software engineering and present a working solution.
B. Named Entity Recognition in Other Domains
One representative work on traditional NER is by Ratinov and Roth [9]. They systematically study the design challenges of NER in formal English texts and point out the design decisions that need to be made in traditional NER. Apart from formal English texts, NER has also been widely studied on other genres of texts.
NER in social media. The informal nature of social texts introduces new challenges to NER. Liu et al. [12] recognize entities from Tweets, and report an average F1 of 80% when recognizing persons, locations and organizations. Ritter et al. [13] also investigate NER in Tweets. They recognize a wider range of named entities and achieve an overall F1 of 66%.
NER in bioinformatics. Biomedical-specific named entity recognition (Bio-NER) is another active research field. Bio-NER recognizes bio-specific entities such as protein, DNA, RNA and cell. Bio-NER has been raised as a community task [14]. Machine learning based systems are commonly used and are found to outperform rule based systems [15], [16].
NER of commercial products. Yao and Sun [23] perform mobile phone name recognition and normalization in Internet forums. Wu et al. [46] recognize mentions of consumer products in user-generated comments on the Web.
VIII. CONCLUSION AND FUTURE WORK
In this paper, we formulate the research problem of NER in software engineering. To design a software-specific NER system, we show that one must first understand the unique characteristics of domain-specific texts that bring unique design challenges. Then, based on the understanding of these design challenges, we show how we combine state-of-the-art supervised and unsupervised machine learning and NLP techniques to design an effective software-specific NER solution, which reduces the demand for labeled data while maintaining the generality and robustness of the NER system.
We build S-NER, a semi-supervised machine learning method for NER in software engineering social content, and demonstrate that S-NER significantly outperforms a well-designed rule-based NER system when applied to Stack Overflow posts. In the process of building this NER system, we contribute an inventory of software-specific entity categories, a corpus of labeled Stack Overflow posts, a software-specific tokenizer, a collection of software-specific gazetteers, unsupervised word clusters, and a rich and effective set of features for NER in software engineering texts. We release our annotated dataset and trained CRF models [47] for community validation and further research.
The method presented in this paper can be extended to more software engineering texts. We are continuing our efforts to extract software-specific entities from different types of software engineering texts (e.g., API documentation, bug reports, Tweets), and to develop entity-centric search systems for the software engineering domain.
Acknowledgments. This work was partially supported by Singapore MOE AcRF Tier-1 grant M4011165.020.
https://drive.google.com/open?id=0ByoLWPpAxGVFdERT09EMCO2Uzg
Faster and Lighter LLMs: A Survey on Current Challenges and Way Forward
Arnav Chavan\(^1,2\), Raghav Magazine\(^1\), Shubham Kushwaha\(^1\), Mérouane Debbah\(^3\) and Deepak Gupta\(^2\)
\(^1\)Nyun AI, India
\(^2\)Transmute AI Lab (Texmin Hub), IIT (ISM) Dhanbad, India
\(^3\)KU 6G Research Center, Khalifa University of Science and Technology, Abu Dhabi, UAE
arnav.chavan@nyunai.com, guptadeepak2806@gmail.com
Abstract
Despite the impressive performance of LLMs, their widespread adoption faces challenges due to substantial computational and memory requirements during inference. Recent advancements in model compression and system-level optimization methods aim to enhance LLM inference. This survey offers an overview of these methods, emphasizing recent developments. Through experiments on LLaMA(/2)-7B, we evaluate various compression techniques, providing practical insights for efficient LLM deployment in a unified setting. The empirical analysis on LLaMA(/2)-7B highlights the effectiveness of these methods. Drawing from survey insights, we identify current limitations and discuss potential future directions to improve LLM inference efficiency. We release the codebase to reproduce the results presented in this paper at https://github.com/nyunAI/Faster-LLM-Survey.
1 Introduction
The advent of LLMs, marked prominently by models such as the GPT (Brown et al., 2020) and LLaMA (Touvron et al., 2023a; Touvron et al., 2023b) series, has sparked a new revolution in language-related tasks, ranging from text comprehension and summarization to language translation and generation. These models, often consisting of billions of parameters, have shown remarkable performance in capturing intricate patterns, fine-grained contexts, and semantic representations in natural language. As a consequence, they have become indispensable tools in various applications, leading to advancements in various domains, including artificial intelligence, information retrieval, and human-computer interaction.
Despite their unparalleled performance, the widespread adoption of LLMs is hindered by their substantial computational and memory requirements, which pose challenges for deployment in resource-constrained environments. For example, merely loading a LLaMA-70B model requires 140GB of VRAM, excluding the memory required for inference. The need for efficient deployment has led to recent research into model compression as well as system-level modification techniques tailored specifically for LLMs. These early works have identified potential ways to improve the inference efficiency of LLMs. However, the current improvements are often accompanied by significant drops in model performance, and novel research directions need to be identified to find the desired solutions to this problem.
A recent survey study has provided a concise overview of the recently proposed LLM compression methods, as well as the evaluation metrics and the data used to benchmark them [Zhu et al., 2023]. However, to further push the frontiers of research towards practical inference improvement for LLMs, a comprehensive study is still missing. In this survey paper, we explore existing methods that aim at making LLMs efficient through model compression as well as through system-level optimizations. To fairly compare various methods, we provide empirical observations using different compression techniques applied to LLaMa(/2)-7B. Our evaluation includes methods that provide a practical advantage and include structured pruning, quantization, and system-level optimizations provided by different inference engines from the existing literature. We share valuable insights drawn from these experiments to present a useful and practical understanding of efficient LLMs. Additionally, we make the code and benchmarks associated with the experiments publicly available. We also examine the difficulties linked to current compression methods in both general deep learning and those specifically suggested for LLMs, and we discuss potential directions of research to overcome these problems.
Overall, the contributions of this paper are as follows.
- We offer a brief overview of the model compression domain, emphasizing essential methodologies that have made notable contributions to the field of lighter and faster LLMs.
- Complementary to model compression, system-level modifications have played an important role in speeding up the LLM inference, and we discuss these approaches as well.
- To provide a practical perspective, we present an empirical analysis of well-known compression methods for LLMs under a standardized setup. The insights derived can help make informed decisions about the selection of LLM compression methods based on the deployment environment.
- Drawing upon insights derived from our survey and empirical analysis, we identify current limitations and discuss potential future directions to improve LLM inference efficiency.
2 Model Compression: An Overview
Model compression techniques have emerged as a crucial area of research, offering promising solutions to enhance the efficiency of resource-intensive deep learning models. The domain of developing efficient Large Language Models (LLMs) can significantly benefit from insights and methodologies used in this field. Before diving into the topic of building efficient LLMs and the existing works around it, we provide an overview of some of the popular approaches employed in deep learning model compression. Below, we first introduce the traditional approaches of model compression and briefly discuss the development related to the traditional deep learning models. Following this, we provide an overview of the works related to compression of LLMs in the existing literature.
2.1 Compression of Deep Models
Architecture pruning refers to the process of systematically reducing the complexity of a neural network by eliminating redundant or less impactful connections, neurons, or entire layers [Janowsky, 1989]. This technique aims to enhance model efficiency, reduce computational costs, and mitigate overfitting without significantly compromising performance. Pruning involves identifying and removing connections or units based on various criteria, such as weight magnitudes [Li et al., 2016], activation patterns [Molchanov et al., 2016], or sensitivity analysis [Sanh et al., 2020]. The pruned model retains its critical features while achieving a more compact representation, which is particularly valuable in scenarios with limited computational resources, such as edge devices or mobile applications.
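As a concrete illustration of magnitude-based pruning (not tied to any specific method above), the sketch below uses PyTorch’s built-in pruning utilities to zero out the smallest-magnitude weights of a single linear layer; the 50% sparsity level is an arbitrary example.

```python
import torch
import torch.nn as nn
import torch.nn.utils.prune as prune

layer = nn.Linear(512, 512)
# zero out the 50% of weights with the smallest L1 magnitude
prune.l1_unstructured(layer, name="weight", amount=0.5)

sparsity = (layer.weight == 0).float().mean().item()
print(f"weight sparsity: {sparsity:.2%}")   # ~50.00%
```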
Among the widely studied pruning methodologies, the lottery ticket hypothesis [Frankle and Carbin, 2019] provided fundamental insights into the impact of weight initialization and pruned network structure on neural network pruning. Network Slimming [Liu et al., 2017; Chavan et al., 2022] introduced a method to prune channels in CNNs and reduce the size of weight dimensions in Transformers by imposing sparsity regularization on the channel scaling factors. Movement pruning demonstrated large-scale pruning of BERT [Kenton and Toutanova, 2019] models by leveraging first-order information, i.e., retaining weights that move away from zero, as compared to zeroth-order methods which retain weights with larger magnitudes. [Lagunas et al., 2021] introduced block structures in the weight matrices of transformer layers and employed movement pruning on them for practical speedups. More recently, [Jiang et al., 2023a] argued that fine-tuning is redundant for first-order pruning and proposed Static Model Pruning (SMP), a fine-tuning-free pruning method for language models.
Quantization reduces the precision of numerical values in a neural network, typically from 32-bit floating-point numbers to lower bit-width representations, such as 8-bit integers thus shrinking the memory footprint of the model, accelerating inference speed, and enabling more efficient deployment on hardware with limited computational resources. During quantization, weights and/or activations are rounded off to a discrete set of values, introducing a trade-off between computational efficiency and model accuracy. Even with this reduction in precision, state-of-the-art quantization methods are capable of minimizing the impact on performance.
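To make the idea concrete, the following is a minimal sketch of symmetric per-tensor 8-bit weight quantization; production schemes discussed below are considerably more sophisticated (per-channel scales, outlier handling, calibration).

```python
import torch

def quantize_int8(w: torch.Tensor):
    """Map a float tensor to int8 with a single symmetric scale."""
    scale = w.abs().max() / 127.0
    q = torch.clamp((w / scale).round(), -127, 127).to(torch.int8)
    return q, scale

def dequantize(q: torch.Tensor, scale: torch.Tensor) -> torch.Tensor:
    return q.float() * scale

w = torch.randn(4096, 4096)
q, scale = quantize_int8(w)
print((w - dequantize(q, scale)).abs().max())   # worst-case rounding error
```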
Quantization-Aware Training (QAT) [Ni et al., 2020] involves the quantization of model parameters throughout the training process, encompassing both the forward pass and backward propagation. LSQ [Esser et al., 2019] proposed learning the quantization step size jointly with the other network parameters. [Tailor et al., 2021] introduced an architecture-agnostic method for quantizing graph neural networks. On the other hand, Post-Training Quantization (PTQ) [Banner et al., 2019] determines the optimal clipping range and channel bit-width settings for weights and activations. OSME [Choukroun et al., 2019] proposed a PTQ method in which the \(\ell_2\)-distance between the quantized tensor and the corresponding floating-point tensor is minimized.
Knowledge distillation aims at training a computationally efficient model, often referred to as the student model, to mimic the predictions of a larger and more complex model known as the teacher model. This process involves transferring the knowledge embedded in the teacher model, typically characterized by its soft probabilities or intermediate representations, to the student model. Distillation is particularly useful when deploying models in scenarios with limited computational resources, as it enables the creation of smaller models that retain the performance of their larger counterparts. Additionally, distillation helps combat issues such as overfitting, improves generalization, and facilitates the transfer of knowledge learned by deep models to simpler ones. Knowledge distillation techniques can be divided into three classes, i.e., response-based, feature-based, and relation-based. Response-based distillation [Hinton et al., 2015] trains the student model to mimic the final outputs of the teacher, while feature-based distillation [Tian et al., 2022] trains the student to mimic intermediate feature maps of the teacher. Relation-based distillation goes one step further by using an objective that models the correlation between the similarities of various feature maps of the student and teacher networks. More recently, [Chen et al., 2023b] used knowledge distillation during the pre-training stage and reduced the size of BERT by 40%, making it 60% faster while retaining 97% of its language understanding abilities.
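A minimal sketch of the response-based variant: the student is trained to match the teacher’s temperature-softened output distribution, in the spirit of Hinton et al. [2015]; feature- and relation-based variants add losses over intermediate representations.

```python
import torch
import torch.nn.functional as F

def distillation_loss(student_logits, teacher_logits, T=2.0):
    """KL divergence between temperature-softened distributions, scaled by T^2."""
    log_p_student = F.log_softmax(student_logits / T, dim=-1)
    p_teacher = F.softmax(teacher_logits / T, dim=-1)
    return F.kl_div(log_p_student, p_teacher, reduction="batchmean") * (T * T)

student_logits = torch.randn(8, 1000)
teacher_logits = torch.randn(8, 1000)
print(distillation_loss(student_logits, teacher_logits))
```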
Low-rank decomposition reduces the computational complexity of models by decomposing weight matrices into smaller ones with fewer dimensions which in turn approximate the initial full-rank matrix. This also reduces the number of parameters in the model and speeds up matrix multiplications hence reducing memory and latency requirements.
[Jaderberg et al., 2014] proposed an architecture-agnostic method of accelerating convolutional layers using tensor decomposition and discriminative fine-tuning, whereas [Denton et al., 2014] proposed clustering methods with low-rank factorization for faster CNNs. [Sainath et al., 2013] examined low-rank matrix factorization in acoustic models, where the factorization was applied on the final layer of the network. [Lebedev et al., 2015] introduced canonical polyadic decomposition, calculated using non-linear least squares, for speeding up CNNs. [Tai et al., 2016] proposed a global decomposition optimization algorithm and thus performed better than iterative methods.
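The sketch below illustrates the basic idea on a single linear layer: a truncated SVD replaces one dense weight matrix with two thin ones; the rank r and the layerwise application strategy are the knobs the methods above differ on.

```python
import torch
import torch.nn as nn

def low_rank_factorize(layer: nn.Linear, r: int) -> nn.Sequential:
    """Replace a dense linear layer with a rank-r two-matrix factorization."""
    W = layer.weight.data                        # shape (out_features, in_features)
    U, S, Vh = torch.linalg.svd(W, full_matrices=False)
    A = Vh[:r, :]                                # (r, in_features)
    B = U[:, :r] * S[:r]                         # (out_features, r), so W ~= B @ A
    first = nn.Linear(layer.in_features, r, bias=False)
    second = nn.Linear(r, layer.out_features, bias=layer.bias is not None)
    first.weight.data = A
    second.weight.data = B
    if layer.bias is not None:
        second.bias.data = layer.bias.data
    return nn.Sequential(first, second)

layer = nn.Linear(1024, 1024)
compressed = low_rank_factorize(layer, r=64)
x = torch.randn(2, 1024)
print((layer(x) - compressed(x)).abs().mean())   # approximation error for this rank
```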
2.2 Compression of LLMs
The compression of LLMs represents a distinctive challenge compared to traditional deep learning models, primarily due to the substantial scale of the former. Many established compression methods rely on the paradigm of executing fine-tuning steps to regain lost performance during the compression stage. However, this approach encounters significant limitations when applied to LLMs owing to their considerable size, necessitating a paradigm shift in the treatment of LLM compression as an independent and new research domain.
**Architecture pruning.** LLM-Pruner [Ma et al., 2023] used Taylor series expansion by leveraging a single gradient step to estimate important parts of a pre-trained LLM. LoRA Prune [Zhang et al., 2023] outperformed LLM-Pruner by using gradients of LoRA [Hu et al., 2021] weights, offering computational efficiency. LoRA Shear [Chen et al., 2023a] identified dependencies in LLMs, separated trainable variables into groups, and achieved compression through pruning and fine-tuning. Sheared LLaMA [Xia et al., 2023] introduced targeted structured pruning and dynamic batch loading for end-to-end component removal. FLAP [An et al., 2023] is a fine-tuning free structured pruning method which used a fluctuation-based metric to determine the importance score of various weight columns.
Unstructured pruning methods, such as SparseGPT [Frantar et al., 2023], adopted a one-shot technique without the need for fine-tuning. WANDA [Sun et al., 2023] pruned weights based on the product of weight values and activation inputs, eliminating the need for fine-tuning. Another recent work suggested fusing the OBS [Hassibi et al., 1993] and OBD [LeCun et al., 1989] criteria for weight selection and determining layer sparsity based on sensitivities derived from Hessian matrices [S. et al., 2023]. While the structured and unstructured methods mentioned above show promise, the observed performance drop at the achieved compression levels remains relatively high. Further efforts are required to develop pruning methods that can lead to efficient LLMs.
**Quantization.** This class of methods has been relatively more successful in the compression of LLMs. LLM.int8() [Dettmers et al., 2022] made it possible to convert higher-bit LLM weights into 8-bit post-training without deterioration in performance. They proposed a two-stage quantization scheme with vector-wise quantization and mixed-precision decomposition for outliers. SmoothQuant [Xiao et al., 2023], a training-free PTQ method, reduces both the weights and activations of LLMs to 8 bits. QLoRA [Dettmers et al., 2023] introduced the 4-bit NormalFloat (NF4) data type and double quantization to save memory without losing model performance. OmniQuant [Shao et al., 2023] introduced Learnable Weight Clipping (LWC) and Learnable Equivalent Transformation (LET). LWC prevents weights from attaining extreme values by optimizing the clipping threshold, while LET deals with activation outliers by shifting the quantization difficulty from activations to weights through a learnable transformation. SqueezeLLM [Kim et al., 2023] enabled compression down to 3 bits by using a sensitivity-based non-uniform quantization scheme, where second-order information is used to find the optimal bit precision. GPTQ [Frantar et al., 2023] used second-order information to compress models with up to 175 billion parameters to as low as 3 bits per weight with minimal loss in accuracy, pushing beyond the previously proposed 8-bit methods. [Lin et al., 2023] observed that retaining 1% of crucial weights can help reduce the degradation in quantization performance. They proposed Activation-aware Weight Quantization (AWQ), which finds the best channel-wise scaling and outperforms existing techniques in general language modeling and domain-specific tasks. ZeroQuant-FP [Wu et al., 2023] focused on floating-point quantization and found that FP8 outperforms INT8 for activations and FP4 is comparable to INT4 for weights; they also incorporated low-rank compensation into their approach for further gains. EXL2\(^1\) proposed a mixed-precision quantization algorithm in which different precision types are evaluated for each layer while measuring quantization errors. The algorithm saves all the tried settings and associated error rates in a measurement pass and, given a target precision, quantizes the model by choosing, for each layer’s module, a precision setting with the lowest error rate. GGUF/GGML\(^2\) proposed a mixed set of quantizations known as K-Quants; for example, the 4-bit K-Quant uses 6 bits for a few of the attention and MLP layers and the usual 4 bits for the others.
LLM-QAT [Liu et al., 2023] proposed a data-free distillation method in which a pre-trained model is queried to generate data, which is then used to train a quantized student model using a distillation loss. By quantizing the KV-cache as well, in addition to weights and activations, they can quantize the 7B, 13B, and 30B LLaMA models down to 4 bits. BitNet [Wang et al., 2023a] introduced a 1-bit LLM transformer architecture. It mainly replaces the standard nn.Linear in PyTorch with BitLinear to train 1-bit weights. As the size of the models increases, it comprehensively outperforms counterparts trained in FP16. [Tao et al., 2022] proposed token-level contrastive distillation and used dynamic scaling to make quantizers adaptive to different modules.
**Knowledge distillation.** Among the knowledge distillation methods, both white-box as well as black-box methods have been used to compress large open-source language models. Instead of solely relying on a fixed set of output sequences, Generalized KD [Agarwal et al., 2023] trains the student on its self-generated output sequences by leveraging feedback from the teacher on such sequences. TED [Li et al., 2023] employs a dual-stage training process. In the first stage, task-specific loss trains filters in both student and teacher models. In the second stage, the student and its filters undergo training with a task-aware layer-wise distillation loss, alongside student-teacher and task-specific losses. In another work [Jha et al., 2023], the student model is initialized with a subset of layers of the teacher and trained on the same corpus and objective as the teacher. This helps to achieve task-agnostic compression without using any distillation loss.
---
1. https://github.com/turboderp/exllama
2. https://github.com/ggerganov/ggml
Other distillation methods include black-box techniques such as Lion [Jiang et al., 2023b], where the student network is trained using a three-stage adversarial loop consisting of an imitation, a discrimination, and a generation stage. In the discrimination stage, a proprietary LLM is used to find hard instructions, i.e., instructions for which the student’s outputs significantly differ from the teacher’s outputs. As a final step, the proprietary LLM generates more samples similar to the hard instructions, on which the student is trained to complete the loop. DISCO [Chen et al., 2023b] is a counterfactual knowledge distillation approach in which a proprietary LLM is given a prompt and made to generate counterfactual augmentations of it. A task-specific teacher model then filters these augmentations, and the student model is trained on them. SCOTT [Wang et al., 2023b] used contrastive decoding to generate rationales from the teacher along with the usual question-answer pairs to train the student model.
**Low-rank approximations.** TensorGPT [Xu et al., 2023] compressed the embedding layer of LLMs through Tensor-Train Decomposition and stored it as a reduced Matrix Product State, which can be computed in a distributed fashion. LoSparse [Li et al., 2023] approximated each weight matrix in LLMs as the sum of a sparse matrix and a low-rank matrix. The low-rank matrices, obtained via Singular Value Decomposition, capture the expressive features shared among neurons, while the remaining features are captured by the sparse matrix. [Kaushal et al., 2023] show that a simple decomposition of the matrices in LLMs into a product of two sparse low-rank matrices can offer noticeable compression and speedup at a small cost in perplexity.
Overall, the research direction of using low-rank approximations to compress LLMs is new but exhibits the potential to improve inference efficiency. Two recent works have shown that low-rank approximations can often improve reasoning abilities and undergo compression through layerwise rank reduction in the weight space [Sharma et al., 2023] and/or in the latent feature space [Chavan et al., 2023]. These methods offer the advantage of requiring minimal computational resources for the compression process due to their layerwise approach to matrices involved. However, it should be noted that the level of lossless compression achieved using these techniques remains modest, and further improvements are needed from a practical point of view.
**System-level approaches.** Here we highlight those methods that improve the complementary infrastructure and runtime architecture of LLMs.
Paged Attention [Kwon et al., 2023] - inspired by the classical virtual memory and paging techniques in operating systems, it allows storage of continuous keys and values cached in non-contiguous memory.
Tensor Parallelism - entails dividing a tensor into shards distributed across various GPUs, processing each shard independently and in parallel, and subsequently synchronizing the results at the end of the step.
Pipeline Parallelism - allows a model to be vertically split across multiple GPUs at the layer level, where each GPU handles one or several layers, enabling parallel processing of distinct stages in the pipeline.
CPU/GPU Offloading [Song et al., 2023] - involves transferring specific weight layers to GPU devices for matrix multiplication, subsequently transmitting the computed results back to the secondary device (RAM), thus optimizing parallel processing capabilities while allowing the secondary device to handle the remaining memory intensive computations.
Flash Attention (v2) [Dao et al., 2022; Dao, 2023] - optimizes attention computation by employing incremental softmax reduction through input block tiling, avoiding the need for whole-input access, and expedites the backward pass by storing the softmax normalization factor from the forward pass, eliminating the requirement to read the large attention matrix from high bandwidth memory (HBM). FlashAttention-2 minimizes non-matrix multiplication FLOPs, optimizing the online softmax technique, introducing parallelism over sequence length, and refining workload partitioning among warps within each thread block to reduce synchronization.
Fused Operations - involves consolidating multiple computational tasks, such as combining existing kernels or creating new ones, to minimize the overhead associated with multiple kernel API invocations.
Speculative Decoding [Leviathan et al., 2023] - efficiently generates multiple future tokens from a chosen smaller model and verifies them in parallel using the larger model, enabling the simultaneous decoding of multiple tokens per step.
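As an illustration of the speculative decoding idea just described, the following is a highly simplified greedy sketch assuming Hugging-Face-style causal LMs (models called on token ids and returning an object with a `logits` field); real implementations additionally handle sampling, KV caches, and batching.

```python
import torch

@torch.no_grad()
def speculative_step(target, draft, input_ids, k=4):
    """One greedy speculative-decoding step (simplified sketch)."""
    draft_ids = input_ids
    for _ in range(k):                                    # draft proposes k tokens
        nxt = draft(draft_ids).logits[:, -1].argmax(-1, keepdim=True)
        draft_ids = torch.cat([draft_ids, nxt], dim=-1)

    logits = target(draft_ids).logits                     # single verification pass
    n = input_ids.shape[1]
    verified = logits[:, n - 1:].argmax(-1)               # target's k+1 greedy predictions
    proposed = draft_ids[:, n:]                           # the draft's k proposals

    n_accept = 0                                          # longest agreeing prefix
    while n_accept < k and bool(verified[0, n_accept] == proposed[0, n_accept]):
        n_accept += 1
    # accepted draft tokens plus one token from the target: a correction if the
    # draft diverged, or a bonus token if all k proposals were accepted
    new_tokens = torch.cat(
        [proposed[:, :n_accept], verified[:, n_accept:n_accept + 1]], dim=-1)
    return torch.cat([input_ids, new_tokens], dim=-1)
```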
Notable implementations in this category include vLLM\(^4\) [Kwon et al., 2023], Llama.cpp\(^5\), ExLlama(/v2), TensorRT-LLM\(^6\), MLC-LLM\(^7\), and PowerInfer\(^8\) [Song et al., 2023], among others. vLLM employs paged attention through a KV-cache manager that separates logical and physical KV blocks, enabling dynamic growth of the KV cache. ExLlama(/v2) implements fused kernels to minimize launch overheads and API invocation overheads when operating on discontinuous blocks. Llama.cpp is a low-level C/C++ implementation of the LLaMA architecture with support for multiple BLAS backends for fast processing. It operates on the GGUF quantization scheme with CPU and GPU offloading. MLC-LLM focuses on compiler accelerations and runtime optimizations for native deployment across platforms. It encapsulates model execution logic in a container, the Intermediate Representation Module (IRModule), which captures the hierarchical structure of computations for optimization and code generation; it employs Paged Attention, fused operators, and automatic generation of optimized kernel code for multiple hardware platforms. TensorRT-LLM implements masked multi-head attention with on-the-fly pre-processing of QKV elements. It supports Paged Attention, INT8/FP8 caches, in-flight batching, and tensor/pipeline parallelism for speedups. An additional improvement is attained due to fused in-flight batching with operation fusion.
---
4https://github.com/vllm-project/vllm
5https://github.com/ggerganov/llama.cpp
6https://github.com/NVIDIA/TensorRT-LLM
7https://github.com/mlc-ai/mlc-llm
8https://github.com/SJTU-IPADS/PowerInfer
PowerInfer adopts a GPU-CPU hybrid approach: it pre-loads consistently activated hot neurons onto the GPU for fast access, computes the variable cold neurons on the CPU, and integrates adaptive predictors and neuron-aware sparse operators to optimize efficiency.
Overall, these methods are complementary to model compression and improve the runtime efficiency of large language models. These engines demonstrate the feasibility and benefits of optimizing the software architecture and infrastructure in addition to compressing the models.
3 Experimental Analysis
As discussed above, there exist several approaches for model compression, and there is no clear consensus on which method to use when, or on which method is superior to the others. Thus, we present here an experimental analysis of the different LLM compression methods and present important insights. For all the experiments, we provide practical inference metrics, including model weight memory (WM), runtime memory consumption (RM), inference token rate, and WikiText2 perplexity, computed on an Nvidia A100 40GB GPU.
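The sketch below shows how the token rate and runtime memory can be measured for a Hugging-Face-style causal LM on a single GPU; the model/tokenizer loading is assumed to have happened already, and the exact measurement protocol of this paper may differ.

```python
import time
import torch

@torch.no_grad()
def measure(model, tokenizer, prompt, max_new_tokens=256):
    """Return generation throughput (tokens/s) and peak runtime memory (GB)."""
    inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
    torch.cuda.reset_peak_memory_stats()
    torch.cuda.synchronize()
    start = time.time()
    out = model.generate(**inputs, max_new_tokens=max_new_tokens)
    torch.cuda.synchronize()
    elapsed = time.time() - start
    new_tokens = out.shape[1] - inputs["input_ids"].shape[1]
    return {
        "tokens_per_s": new_tokens / elapsed,
        "runtime_memory_gb": torch.cuda.max_memory_allocated() / 1e9,
    }
```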
| Method | Sparsity | RM (GB) | WM (GB) | Tokens/s | Perplexity |
|---|---|---|---|---|---|
| Baseline | - | 26.16 | 12.55 | 30.90 | 12.62 |
| Wanda-SP | 20% | - | - | - | 22.12 |
| | 50% | - | - | - | 366.43 |
| LLM-Pruner | 20% | 10.38 | 10.09 | 32.57 | 19.77 |
| | 50% | 6.54 | 6.23 | 40.95 | 112.44 |
| LLM-Pruner* | 20% | 10.38 | 10.09 | 32.57 | 17.37 |
| | 50% | 6.54 | 6.23 | 40.95 | 38.12 |
| FLaP | 20% | 9.72 | 9.44 | 33.90 | 14.62 |
| | 50% | 6.26 | 6.07 | 42.88 | 31.80 |
Table 1: Performance measures for various compressed variants of the LLaMA-7B model obtained using the following structured pruning methods: Wanda-SP, LLM-Pruner, and FLaP. Here, * refers to a fine-tuned variant of LLM-Pruner.
Pruning of LLaMA-7B. In this analysis, we examine the structured pruning of the LLaMA-7B model using three recent LLM pruning methods. Table 1 shows the performance scores for these methods at sparsity levels of 20% and 50%. Notably, all compression methods perform well in terms of perplexity at the lower sparsity level. Wanda-SP denotes Wanda adapted to structured pruning, as reported in [An et al., 2023]. Noticeably, Wanda-SP and LLM-Pruner impact the model's performance and yield suboptimal results at 50% sparsity. On the other hand, both FLaP and the fine-tuned variant of LLM-Pruner perform well at this level. Comparing RM, WM, and perplexity, these two methods demonstrate similar performance, with FLaP slightly outperforming the fine-tuning-based LLM-Pruner. It is important to note that, beyond superior performance, FLaP is also training-free, which makes it a preferred choice for LLM pruning.
Quantized LLaMA2-7B. Table 2 presents a comparative study demonstrating the efficacy of different quantization methods for improving LLM inference. For each quantization method, we default to PyTorch as the inference engine and use proprietary engines when PyTorch support is not available. As can be seen, the perplexity of all the models is largely intact, with only marginal degradation. As expected, lower precision leads to lower weight and running memory consumption. Importantly, we see that at 4-bit, OmniQuant best maintains performance; however, GPTQ and AWQ enjoy wider support across engines. Another interesting observation is that even though sub-4-bit quantization leads to a drop in model performance, the resulting models are still better than those obtained from pruning at similar compression levels.
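To make concrete what weight-only quantization stores, here is a generic group-wise symmetric 4-bit quantize/dequantize round trip in PyTorch; it is a simplified illustration of the idea, not the exact GPTQ, AWQ, or OmniQuant procedure, and the group size is an arbitrary choice.

```python
import torch

def quantize_4bit_groupwise(w, group_size=128):
    """Symmetric per-group 4-bit quantization of a weight matrix (rows = output channels)."""
    out_f, in_f = w.shape
    w = w.reshape(out_f, in_f // group_size, group_size)
    scale = w.abs().amax(dim=-1, keepdim=True) / 7.0            # int4 range: [-8, 7]
    q = torch.clamp(torch.round(w / scale), -8, 7).to(torch.int8)
    return q, scale

def dequantize(q, scale):
    return (q.float() * scale).reshape(q.shape[0], -1)

w = torch.randn(4096, 4096)
q, s = quantize_4bit_groupwise(w)
err = (dequantize(q, s) - w).abs().mean()
print(f"mean absolute quantization error: {err:.4f}")
```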
System-level optimizations for LLaMA2-7B. We also consider system-level optimization methods that improve LLM inference by employing the various inference engines proposed in the existing literature. Related results are presented in Table 3. As can be seen, different methods have advantages on different performance metrics. TensorRT-LLM stands out with impressive performance across all metrics, particularly on NVIDIA GPUs. It provides the best token rate with GPTQ 4-bit quantization; however, efficient 4-bit support is only available on newer hardware*. It can also be consistently seen that GPTQ is faster than AWQ at the same precision, although its perplexity is slightly worse. MLC-LLM demonstrates slightly lower performance than TensorRT-LLM, but its compatibility with a wide range of hardware makes it a favourable choice in specific scenarios.

*Ampere and newer series of GPUs support 4-bit runtime.
4 Challenges and Way Forward
Large-scale pruning/distillation is computationally intensive. The strategies of architecture pruning and knowledge distillation have gained widespread popularity for compressing deep learning models. However, these techniques require several fine-tuning steps, the computational demands of which can rival or even surpass the intensity of the initial training steps. In the context of LLMs, this renders them impractical, given their already substantial computational requirements. While some efforts have been made to address this challenge, they often result in significant accuracy drops even for marginal compression gains. Possible ways to circumvent the issue could include:
- Revisiting the training-free pruning methods to explore their potential in the context of LLMs. For example, knowledge-preserving pruning, which focuses on reducing the unwanted knowledge context in a network rather than eliminating weights, can be improved and adapted for LLMs. Since such methods are mostly training-free, they could offer efficient LLMs at only a small additional computational budget.
- Exploring layerwise pruning of LLMs. A straightforward implementation of layerwise pruning would define localized loss functions in the form of a regression loss and compress each sub-network while ensuring that its local output is reproduced (a minimal sketch of this local reconstruction objective appears after this list). However, in such an approach, even small errors in the early layers can propagate to the later layers, leading to poor performance of the compressed network.
- Localized distillation of LLMs. A potential solution to overcome the cost of distillation could be to develop localized distillation methods. Instead of condensing the entire teacher LLM into a single smaller student, knowledge is transferred in a limited scope to multiple smaller student networks: each student sub-network learns a localized part of the teacher network. A strategy can then be devised to combine these sub-networks into a fully compressed student LLM. This approach holds promise as a potential solution to the computational challenges associated with LLM distillation.
- Growing smaller LLMs to reach the desired performance. The primary obstacle in compressing Large Language Models (LLMs) lies in the computational challenges during fine-tuning, attributed to the models’ substantial size. An alternative and ambitious research direction involves growing smaller language models (SLMs) into LLMs using well-defined neural network growing strategies. This approach avoids the need to train a full-scale LLM, and the maximum computational burden is determined by the final compressed LLM obtained through the growth of the SLM.
- Using PEFT methods to fine-tune during pruning. To address the challenge of full-scale fine-tuning during pruning, an alternative approach is to employ PEFT methods. Unlike traditional fine-tuning, PEFT does not update the model weights; only the added masks and PEFT parameters are updated [Zhang et al., 2023]. This significantly reduces the computational intensity of the fine-tuning process. However, PEFT methods currently face limitations in achieving large-scale compression of LLMs, indicating a need for further research into PEFT methods tailored specifically for compressing LLMs.
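The layerwise local reconstruction idea mentioned in the list above can be made concrete with a toy example: the sketch below prunes one linear layer by magnitude and then refits the remaining weights against the dense layer's outputs on calibration data. This is an illustrative experiment under simplified assumptions, not a published pruning method.

```python
import torch

def prune_layer_with_local_reconstruction(w, x_calib, sparsity=0.5, steps=200):
    """w: [out, in] dense weights; x_calib: [n, in] calibration activations.
    Keeps the largest-magnitude weights and refits them so that the pruned layer
    reproduces the dense layer's local outputs (a least-squares regression loss)."""
    y_target = x_calib @ w.t()                                   # dense layer outputs [n, out]
    k = int(w.numel() * (1 - sparsity))                          # number of weights to keep
    thresh = w.abs().flatten().kthvalue(w.numel() - k).values    # magnitude threshold
    mask = (w.abs() > thresh).float()

    w_pruned = (w * mask).clone().requires_grad_(True)
    opt = torch.optim.Adam([w_pruned], lr=1e-3)
    for _ in range(steps):                                       # local reconstruction loss
        loss = ((x_calib @ (w_pruned * mask).t() - y_target) ** 2).mean()
        opt.zero_grad()
        loss.backward()
        opt.step()
    return (w_pruned * mask).detach(), loss.item()

w = torch.randn(256, 512)
x = torch.randn(1024, 512)
w_sparse, err = prune_layer_with_local_reconstruction(w, x)
print(f"local reconstruction MSE after pruning: {err:.4f}")
```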
**On-the-fly Quant-Dequant makes inference slow.** The utilization of lower-precision floating-point formats such as FP4 poses a dual challenge regarding memory efficiency and computational speed during inference. While contemporary hardware typically supports formats like FP16 and INT8, which enable substantial memory reduction, lower-precision representations typically require Quantization (Quant) and Dequantization (Dequant) operations at runtime. These operations induce computational overhead, slowing down inference compared to using higher-precision formats like FP16 directly. Therefore, while the adoption of lower-precision formats offers memory-efficiency gains, it can adversely affect inference speed, and the right balance between the two needs to be struck. A potential solution involves the development of streamlined Quant-Dequant kernels that alleviate this overhead. Another strategy is to tailor the choice of precision format to the specifications of the hardware in use. Concurrently, advancements on the hardware front are essential, with native support for lower-precision formats needed across a broader range of popular hardware.
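The overhead can be seen directly by timing a matrix multiply that dequantizes low-precision weights on the fly versus one that keeps FP16 weights resident. The snippet below is a naive illustration (it needs a CUDA-capable GPU, and an int8 tensor stands in for packed 4-bit storage); real numbers depend heavily on kernel quality and hardware.

```python
import time
import torch

def bench(fn, iters=50):
    torch.cuda.synchronize()
    t0 = time.perf_counter()
    for _ in range(iters):
        fn()
    torch.cuda.synchronize()
    return (time.perf_counter() - t0) / iters

x = torch.randn(32, 4096, device="cuda", dtype=torch.float16)
w_fp16 = torch.randn(4096, 4096, device="cuda", dtype=torch.float16)
w_int8 = torch.randint(-8, 8, (4096, 4096), device="cuda", dtype=torch.int8)  # stand-in for packed 4-bit
scale = torch.full((4096, 1), 0.01, device="cuda", dtype=torch.float16)

t_fp16 = bench(lambda: x @ w_fp16.t())
# Naive on-the-fly dequantization: cast to FP16, rescale, then matmul.
t_deq = bench(lambda: x @ (w_int8.to(torch.float16) * scale).t())
print(f"FP16 matmul: {t_fp16 * 1e3:.3f} ms | dequant + matmul: {t_deq * 1e3:.3f} ms")
```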
**Rank selection in layerwise low-rank approximation is hard.** While low-rank approximation exhibits enormous potential for LLM compression, this approach is accompanied by a set of challenges, particularly in determining the hyperparameters that govern the rank-reduction process. There is no clear consensus on a low-rank approximation strategy that generalizes across different models. Moreover, the computational infeasibility of solving the decomposition at the system level adds a further layer of complexity, making it challenging to achieve an optimal reduction in model size while preserving performance.
It is crucial to recognize that determining the optimal rank to retain across the various layers is not easily framed as a simple hyperparameter search: such searches are computationally expensive, particularly in the context of Large Language Models (LLMs). There is a need to explore and develop effective strategies for selecting the right rank when employing low-rank approximations.
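One common heuristic is to pick, per layer, the smallest rank that preserves a fixed fraction of the spectral energy. The sketch below applies this to a single weight matrix; it only illustrates the selection problem discussed above, and the energy threshold is an arbitrary assumption.

```python
import torch

def low_rank_approximate(w, energy=0.95):
    """Return a rank-r factorization (A, B) of w with r chosen so that the kept
    singular values account for `energy` of the total squared spectrum."""
    u, s, vh = torch.linalg.svd(w, full_matrices=False)
    cum = torch.cumsum(s ** 2, dim=0) / torch.sum(s ** 2)
    r = int(torch.searchsorted(cum, energy).item()) + 1
    a = u[:, :r] * s[:r]          # [out, r]
    b = vh[:r, :]                 # [r, in]
    return a, b, r

w = torch.randn(1024, 4096)
a, b, r = low_rank_approximate(w, energy=0.9)
rel_err = torch.linalg.norm(w - a @ b) / torch.linalg.norm(w)
print(f"selected rank: {r}, relative error: {rel_err:.3f}, "
      f"parameters kept: {(a.numel() + b.numel()) / w.numel():.2f}x")
```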
**Existing evaluation metrics may not comply well.** Compressing LLMs while preserving their ability to handle extensive contextual information is a challenge, and appropriate evaluation metrics need to be developed to tackle this issue. Another factor is the loss of fidelity. Aggressive compression may lead to a significant loss of model fidelity, impacting the language model’s ability to generate accurate and contextually relevant outputs. Several such characteristics of LLMs need to be captured in their compressed variants, and this can only be identified by the right choice of metrics.
**Python, an interpreted language, leads to slower execution times.** The Global Interpreter Lock (GIL) in CPython, the default Python interpreter, further restricts the concurrent execution of threads.
| Method | Inference Engine | WM (GB) | RM (GB) | Tokens/s | Perplexity |
|---|---|---|---|---|---|
| Baseline FP16 | PyTorch | 12.55 | 26.16 | 30.90 | 5.85 |
| GPTQ 2bit | PyTorch | 2.11 | 2.98 | 20.91 | NaN |
| GPTQ 3bit | PyTorch | 2.87 | 3.86 | 21.24 | 7.36 |
| GPTQ 4bit | PyTorch | 3.63 | 4.65 | 21.63 | 6.08 |
| GPTQ 8bit | PyTorch | 6.67 | 7.62 | 21.36 | 5.86 |
| AWQ 4bit GEMM | PyTorch | 3.68 | 4.64 | 28.51 | 6.02 |
| AWQ 4bit GEMV | PyTorch | 3.68 | 4.64 | 31.81 | 6.02 |
| QLoRa (NF4) | PyTorch | 3.56 | 4.84 | 19.70 | 6.02 |
| LLM.int8() | PyTorch | 6.58 | 7.71 | 5.24 | 5.89 |
| K-Quants 4bit | Llama.cpp | 3.80 | 7.38 | 104.45 | 5.96 |
| OmniQuant 3bit | MLC-LLM | 3.20 | 5.10 | 83.4 | 6.65 |
| OmniQuant 4bit | MLC-LLM | 3.80 | 5.70 | 134.2 | 5.97 |
Table 3: Performance comparison of compressed variants of LLaMA2-7B using various inference engines, quantized to different precisions and run across different hardware. Here, WM and RM denote weight memory and running memory consumption, respectively.
| Method | Hardware Support | Quantization Type | WM (GB) | RM (GB) | Tokens/sec | Perplexity |
|---|---|---|---|---|---|---|
| Llama.cpp | NVIDIA GPU | GGUF K-Quant 2bit | 2.36 | 3.69 | 102.15 | 6.96 |
| | AMD GPU | GGUF 4bit | 3.56 | 4.88 | 128.97 | 5.96 |
| | Apple Silicon | GGUF AWQ 4bit | 3.56 | 4.88 | 129.25 | 5.91 |
| | CPU | GGUF K-Quant 4bit | 3.59 | 4.90 | 109.72 | 5.87 |
| | | GGUF 8bit | 6.67 | 7.78 | 93.39 | 5.79 |
| | | GGUF FP16 | 12.55 | 13.22 | 66.81 | 5.79 |
| ExLlama | NVIDIA GPU | GPTQ 4bit | 3.63 | 5.35 | 77.10 | 6.08 |
| | AMD GPU | | | | | |
| ExLlamav2 | NVIDIA GPU | EXL2 2bit | 2.01 | 5.21 | 153.75 | 20.21 |
| | AMD GPU | EXL2 4bit | 3.36 | 6.61 | 131.68 | 6.12 |
| | | GPTQ 4bit | 3.63 | 6.93 | 151.30 | 6.03 |
| | | EXL2 8bit | 6.37 | 9.47 | 115.81 | 5.76 |
| | | FP16 | 12.55 | 15.09 | 67.70 | 5.73 |
| vLLM | NVIDIA GPU | AWQ GEMM 4bit | 3.62 | 34.55 | 114.43 | 6.02 |
| | AMD GPU | GPTQ 4bit | 3.63 | 36.51 | 172.88 | 6.08 |
| | | FP16 | 12.55 | 35.92 | 79.74 | 5.85 |
| TensorRT-LLM | NVIDIA GPU | AWQ GEMM 4bit | 3.42 | 5.69 | 194.86 | 6.02 |
| | | GPTQ 4bit | 3.60 | 5.88 | 202.16 | 6.08 |
| | | INT8 | 6.53 | 8.55 | 143.57 | 5.89 |
| | | FP16 | 12.55 | 14.61 | 83.43 | 5.85 |
| TGI | NVIDIA GPU | AWQ GEMM 4bit | 3.62 | 36.67 | 106.84 | 6.02 |
| | AMD GPU | GPTQ 4bit | 3.69 | 37.85 | 163.22 | 6.08 |
| | | FP4 | 12.55 | 37.21 | 36.91 | 6.15 |
| | | NF4 | 12.55 | 37.21 | 36.32 | 6.02 |
| | | FP16 | 12.55 | 38.03 | 74.19 | 5.85 |
| MLC-LLM | NVIDIA GPU | OmniQuant 3bit | 3.2 | 5.1 | 83.4 | 6.65 |
| | AMD GPU | OmniQuant 4bit | 3.8 | 5.7 | 134.2 | 5.97 |
| | CPU, WebGPU, | AWQ GEMM 4bit | 3.62 | 6.50 | 23.62 | 6.02 |
| | Apple Silicon, | Q4F16 | 3.53 | 6.50 | 189.07 | - |
| | Intel GPU | Q3F16 | 2.84 | 5.98 | 185.47 | - |
| | WASM, Adreno Mali | FP16 | 12.55 | 15.38 | 87.37 | 5.85 |
A notable example is Llama.cpp, where the transition to a C++ implementation of LLaMA-7B resulted in significantly improved speed. This shift exemplifies the impact of choosing a language optimized for performance in the context of deep learning models. Moreover, the emergence of Rust-based implementations has attracted attention for their superior speed. Rust, with its emphasis on both memory safety and performance, has demonstrated effectiveness in accelerating computations, particularly in scenarios where speed is of paramount importance. Thus, for optimizing inference speed, moving away from Python to C++, Rust, or other similar languages might be a future direction to pursue.
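The GIL point can be demonstrated directly: a CPU-bound Python workload gains little from threads, which is part of why compiled-language runtimes fare better. The timing harness below is only an illustrative measurement; absolute numbers depend on the machine.

```python
import time
from concurrent.futures import ThreadPoolExecutor

def cpu_bound(n=2_000_000):
    s = 0
    for i in range(n):
        s += i * i
    return s

t0 = time.perf_counter()
for _ in range(4):
    cpu_bound()
serial = time.perf_counter() - t0

t0 = time.perf_counter()
with ThreadPoolExecutor(max_workers=4) as pool:
    list(pool.map(lambda _: cpu_bound(), range(4)))
threaded = time.perf_counter() - t0

# Under CPython's GIL the threaded version shows little or no speedup for CPU-bound work.
print(f"serial: {serial:.2f}s, 4 threads: {threaded:.2f}s")
```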
5 Conclusions
In conclusion, our survey extensively explores LLM compression, covering both model-level and system-level efficiency enhancements. We discuss various compression methodologies and provide practical insights from experiments conducted on LLaMA(2)-7B, offering valuable information for optimizing LLMs. Analysis of the survey and experimental results highlights the existing bottlenecks in enhancing LLM inference, indicating the necessity for further developments to achieve efficiency. We envision this survey as a stepping stone towards advancing the field and achieving the goal of efficient LLM inference.
References
Enhancing Web Browsing Security on Public Terminals Using Mobile Composition
Richard Sharp & Anil Madhavapeddy
Citrix Systems Inc.
Castle Park
Cambridge, CB3 0AR. UK
richard.sharp@eu.citrix.com,
anil.madhavapeddy@eu.citrix.com
Roy Want & Trevor Pering
Intel Research
2200 Mission College
Santa Clara, CA 95052
roy.want@intel.com,
trevor.pering@intel.com
ABSTRACT
This paper presents an architecture that affords mobile users greater trust and security when browsing the internet (e.g., when making personal/financial transactions) from public terminals at Internet Cafes or other unfamiliar locations. This is achieved by enabling web applications to split their client-side pages across a pair of browsers: one untrusted browser running on a public PC and one trusted browser running on the user's personal mobile device, composed into a single logical interface through a local connection, wired or wireless. Information entered via the personal device's keypad cannot be read by the PC, thwarting PC-based key-loggers. Similarly, information displayed on the personal device's screen is also hidden from the PC, preserving the confidentiality of security-critical data even in the presence of screen grabbing attacks and compromised PC browsers. We present a security policy model for split-trust web applications that defends against a range of crimeware-based attacks, including those based on active-injection (e.g., inserting malicious packets into the network or spoofing user-input events). Performance results of a prototype split-trust implementation are presented, using a commercially available cell phone as a trusted personal device.
Categories and Subject Descriptors
D.2.11 [Software Architectures]: Data abstraction, Domain-specific architectures, Information hiding, Patterns
General Terms
Keywords
Split-trust, trusted personal device, crimeware, phishing, user interface design
1. INTRODUCTION
As people are increasingly relying on the web for security critical tasks, crimeware, malicious software designed expressly to facilitate illegal activity, is being used to steal identities and commit fraud. The Anti-Phishing Working Group (APWG), a global consortium of companies and financial institutions focused on eliminating Internet fraud, report that the use of crimeware has "surged markedly" with the number of new crimeware applications discovered doubling from April to June 2005 [4] and this trend continues into 2008. The increase is so marked that the APWG believe that ultimately "conventional phishing via social engineering schemes will be eclipsed by advanced, automated crimeware" [5].
To date, the most prevalent form of crimeware is the keylogger: a program that secretly records users' key-presses, transmitting sensitive information (e.g. credit card numbers, usernames and passwords) back to criminals. Other examples of crimeware include applications that record the contents of users' screens, silently redirect web browsers to attackers' websites and maliciously spoof user-input to control web applications (e.g. trigger a money transfer in an on-line bank) [30, 18].
Technically savvy individuals have always been wary of the threat of crimeware on public terminals (e.g. Internet cafes). Worryingly, however, the recent wave of crimeware attacks has involved malicious applications installing themselves on users' personal PCs, either as Trojans [19] or by exploiting OS-level vulnerabilities [18].
The threat of crimeware poses fundamental challenges to the web's security model. In particular, although HTTPS/SSL protects data as it is transmitted between client and server, it cannot protect data from compromised end-points. For example, as soon as the contents of an HTTPS URL have been decrypted by the Secure Socket Layer (SSL) it can be snooped by Trojan browser-extensions, screen-grabbers and other forms of crimeware. Similarly, HTTPS/SSL does not preserve the privacy or integrity of user input; malicious applications running on the PC can, for example, record key presses and even fake user input (e.g. generate a spoofed click event on a hyperlink).
Split-Trust Browsing addresses the threat of crimeware by allowing people to browse the web using a combination of a general-purpose networked PC and a personal, more trusted device, linked together as a device composition. For the most part, a user browses the web via the PC as normal. However, security-critical operations are performed in conjunction with their personal device, using its display and keypad for I/O. Information entered via the personal device's keypad cannot be read by the PC, thwarting PC-based key-loggers. Similarly, information displayed on the personal device's screen is also hidden from the PC, preserving the confidentiality of security-critical data even in the presence of screen-grabbing attacks and compromised PC browsers. We believe that the composition of general-purpose PCs with trusted mobile devices gives users the best of both worlds: they can enjoy the rich browsing capabilities of their PC, with its large display and full-sized keyboard, and the greater degree of trust associated with viewing/entering security-sensitive data via their personal device.
The technical contribution of this paper is an architecture for split-trust web browsing through mobile composition: a technique that enables web applications to split their HTML across a pair of browsers—one untrusted browser running on a PC and one trusted browser running on a user's personal device. A key feature of our architecture is that it requires only a local (wired or wireless) connection between the personal device and PC; this provides a better user-experience, since a low-latency direct connection means that the two devices can be kept in tight synchronization with each other. As well as splitting content across the PC and personal device, our architecture also allows HTML Forms to be split. In this way secure fields (e.g. credit card details) can be filled in on the trusted personal device, while fields that do not contain sensitive information (e.g. delivery dates or product selections) can be filled in on the PC. In addition to exploring the systems issues surrounding split-trust web-browsing, we also present a Security Policy Model for split-trust web-applications and consider a range of attacks against split-trust systems in general.
The concept of a trusted personal device is an interesting one, and one which is currently topical within the mobile computing industry [1, 11]. One could imagine manufacturing a small, locked-down device with the specific purpose of augmenting a user's web browsing to provide enhanced security. Alternatively, one may argue that some existing cell phones or PDAs already provide a more secure computing platform than general-purpose PCs, and can thus be used as trusted personal devices providing increased security [6, 23]. Security is, in fact, a relative concept: we can raise the bar to prevent a particular level of attack, but no system is without some weakness. However, we believe our work provides a practical improvement over the level of security available to mobile users at this time. A fuller discussion of what constitutes a trusted personal device is presented in Subsection 7.1.
1.1 Structure of the Paper
We begin by presenting a system overview that takes advantage of the potential interplay between untrusted fixed infrastructure and more trustworthy personal mobile devices (Section 2). We classify the mechanisms used by crimeware-based attacks and present a set of general design principles that enable split-trust web applications to address these attacks (Section 3). Technical details of our split-trust browsing implementation are then presented (Section 4). Various attacks against split-trust web applications are considered, with discussion of how well our architecture defends against each of them (Section 5). Finally, after describing related work (Section 6) and discussing general design and system issues (Section 7), we conclude and present directions for future work (Section 8).
2. SPLIT-TRUST SYSTEM OVERVIEW
Figure 1 shows a high-level overview of our architecture for split-trust browsing. The (untrusted) PC connects to the web server over the Internet, using HTTP to request web pages in the usual manner [33]. The trusted personal device connects directly to the PC using a suitable data-link technology (e.g. USB, Bluetooth, WiFi).

The HTML fetched from the web server contains both regular content, which is rendered in the PC's browser in the usual way, and encrypted messages destined for the trusted personal device. The Remote Device Communication (RDC) Agent, which runs on the PC, is responsible for forwarding such messages between the web server and the personal device. When a message is received by the personal device it is decrypted and displayed on its screen. Similarly, messages generated by the personal device (as a result of user input) are encrypted before being sent back to the web server via the RDC Agent. The session key used to encrypt these messages is known only to the trusted personal device and the web server; crimeware running on the untrusted PC is thus unable to read the encrypted web content. So, although the RDC itself may be compromised, this does not compromise the underlying secure exchanges.
A critical feature of our architecture is that it does not require the personal device to establish a separate Internet connection to the web server. Instead we tunnel data sent between the web server and the personal device over the PC's existing Internet connection, relying on the RDC Agent to demultiplex these two logical channels. This model offers a number of benefits over the “two separate Internet connections” approach: (i) it provides a better user-experience, since the low-latency direct connection between the personal device and PC means that the two devices can be kept in tight synchronization with each other; (ii) it does not require the user to incur the extra cost of a separate Internet connection for their personal device—e.g. over GPRS or 3G; (iii) the architecture is applicable to personal devices that do not support Internet connectivity but still provide direct, point-to-point data connections, e.g. a PDA with a USB link; and (iv) it enables tight integration with client-side functionality such as tabbed browsing: when the user clicks on a different browser tab on the PC, the RDC Agent traps this event and updates the screen of the user's personal device accordingly.
Figure 2 shows our system being used to make a secure e-commerce transaction, using a Motorola E680 cell phone as a trusted personal device. The PC browser is used for non-security-critical tasks: browsing the product catalogue, making selections, etc. However, when the user starts to purchase the goods, the form requesting credit card details automatically appears on their cell phone. The user fills in these private details via their cell phone's keypad and selects "submit" from their phone to make the purchase. Crimeware running on the PC is not able to read the content displayed on the phone; nor is it able to snoop the user's key-presses to steal their credit card details.

Although, for the sake of simplicity, this paper assumes that web applications have been written explicitly to support split-trust browsing, the architecture described could be layered on top of existing applications via HTML-rewriting proxies. The design of such proxies and mechanisms for specifying the required transformations is a topic of future work.

3. SECURITY MODEL

In order to explain the motivation behind our trusted browsing architecture we first present our threat model and security policy model [3].

3.1 Threat Model

Attackers' motivation is to steal private and confidential information, often with a view to committing identity theft and fraud. We assume that attackers are capable of using crimeware to mount both passive monitoring attacks and active injection attacks against the PC. Passive monitoring attacks include recording everything shown on the PC's display, typed on the PC's keyboard and transmitted over the network. Active injection attacks include injecting malicious data packets into the network, injecting malicious data packets into the direct connection to the personal device and also injecting fake User Interface (UI) events into the PC (e.g. spoofing a click on a hyperlink, or spoofing key-presses to fill in and submit an HTML form). Further, we assume that the PC-based browser is untrustworthy. For example, crimeware running on the PC may cause the browser to silently redirect the user to an attacker's web site, or to maliciously generate/rewrite HTML (e.g. modify link/form targets, add/remove content).

In contrast, we assume that the user's personal device is free of crimeware and that attackers therefore have no means of either recording the contents of its screen or data entered via its keypad. HTML received via the PC is rendered faithfully in the personal device's browser, and user input performed via the personal device's keypad is relayed correctly back to the PC.

3.2 Security Policy Model

As outlined in subsection 1.2 we address the threat model presented above by migrating security-sensitive parts of the interface to a trusted personal device. However, in order to benefit from the security provided by this browsing model, a split-trust web application must satisfy the following five properties:

1. The end-to-end communication channel between the web server and the trusted personal device must be authenticated and encrypted. This prevents an attacker from snooping traffic between the web server and the phone. It also prevents an attacker from maliciously injecting fresh data into this channel.

2. All security-sensitive form fields must be filled in via the trusted personal device. Combined with Property 1, this prevents the untrusted PC from snooping any security-sensitive data entered by the user.

3. All security-sensitive information must be displayed only on the trusted personal device. Combined with Property 1, this prevents the untrusted PC from snooping any security-sensitive information served by the web application.

4. The web application must not allow form submissions from the trusted device to be replayed. This prevents an attacker from maliciously re-using previous security-sensitive form data entered on the personal device in subsequent transactions.

5. All security-critical operations must be initiated (or confirmed) via a form on the trusted personal device. Further, there must be sufficient information displayed on the personal device's screen to specify fully the action being initiated. Combined with Properties 1 and 4, this ensures that crimeware on the untrusted PC cannot subversively initiate an unauthorized security-critical operation (e.g., a money transfer in an on-line bank) without alerting the user.

Properties 1 to 3 are self-explanatory; however, Properties 4 and 5 require further elaboration. We will consider these properties in reverse order, starting with Property 5.

3.3 Property 5

The first part of Property 5 is straightforward: security-critical operations must be initiated or confirmed via the trusted personal device. The motivation for this is clear: by forcing security-critical operations to be confirmed on the trusted personal device, the untrusted PC cannot subversively initiate such operations without alerting the user.

The second part of Property 5 is more subtle and protects against a class of attacks highlighted by Balfanz et al. [6]. To understand its purpose, it is first helpful to consider the following analogy. Unscrupulous Charlie arrives at Bob's office and says "please sign the following authorization to transfer $100 from your bank account to Alice's bank account." However, while saying this, he hands Bob a piece of paper which says only "I authorize the money transfer." Bob signs the paper and Charlie takes it to the bank. As he passes it to the cashier he says "here's the authorization to transfer all funds from Bob's bank account to my bank account." The cashier checks Bob's signature and performs this transfer. The security flaw here is obvious: the authorization slip is not specific enough; as a result Charlie is able to fool Bob into believing it means one thing, whilst fooling the bank that it means something else.

Unless web applications specify confirmation dialogues for security-critical operations carefully, there is a direct analog of this attack that can be played out in a split-trust browsing scenario. Consider the following example. An on-line bank's web server generates an HTML page which is rendered in the untrusted PC's browser and contains two links: one with text "click here to transfer $100 to Alice's bank account", and one with text "click here to transfer all funds to Charlie's bank account". The browser on the untrusted PC has been subverted so that it maliciously swaps the link targets over: the link with text "transfer $100 to Alice's bank account" now points to the action of transferring all funds to Charlie's bank account and vice-versa. The user clicks on one of the links and, in accordance with the first part of Property 5, a confirmation form appears on the screen of their trusted personal device asking them to authorize the money transfer. It is now clear why the text of the confirmation must "specify fully the action being initiated". If the confirmation is under-specified (e.g., if the text reads only "please confirm money transfer"), then the user is not alerted to the attacker's ploy of swapping the link targets. However, if the confirmation is specified fully (e.g., the text reads "please confirm the transfer of all funds from your account to Charlie's account"), then the user is immediately alerted to the fact that the action currently being performed is not the action they thought they had initiated. The user thus decides not to confirm the action and no money is transferred.
3.4 Property 4
We now turn our attention to Property 4, which specifies that a web application must not allow form submission messages from the trusted personal device to be replayed (i.e., a web application must not accept data arising from the same form submission action more than once). To see why this is important, consider the following attack. An on-line banking system sends a form to a user's trusted personal device asking them to confirm a money transfer to Alice's account. When the user submits the form (via their trusted personal device), the (untrusted) PC records the resulting submit message. Although an attacker cannot read the contents of this message (since Property 1 requires that it is encrypted with a key known only to the personal device and the web server), they can nonetheless replay it in response to a subsequent transaction. Thus, an attacker may maliciously initiate another money transfer to Alice's account (e.g. by spoofing a click-event on the “transfer money” link in the untrusted PC's browser) and then replay the user's previous confirmation message in order to complete the transfer.
Without Property 4 an attacker could thus circumvent our requirement that users explicitly confirm every security-critical operation. This is why the explanatory (non-italic) text of Property 5 observes that it is only when “combined with Property 4” that it ensures “crimeware on the untrusted PC [is prevented from] initiating any unauthorized security-critical operations”.
4. TECHNICAL DETAILS
We have built a prototype split-trust browsing framework using a commercially available cell phone (Motorola E680) as a trusted personal device (TPD). Our prototype uses Bluetooth [8] for the wireless connection between the TPD and the untrusted PC, relying on the Bluetooth PAN profile to provide IP connectivity over the Bluetooth link; the TPD components are designed for embedded Linux (the operating system running on the Motorola E680). In this section we present the technical details of our implementation. Figure 3 shows the main components of the system. The Firefox browser runs on the untrusted PC with the RDC Agent implemented as a Firefox Browser Extension [9]. The cell phone runs a simple cHTML [17] browser which has been implemented as a Java MIDlet.
On initiating a split-trust browsing session, a user connects their cell phone to the PC using a local communication technology of choice: e.g. USB, Bluetooth, or WiFi. They execute our extended Firefox browser on the PC and start surfing. As usual, regular (non-split-trust) web sites appear entirely on the PC. However, if the user visits a web-application that supports split-trust, then security-sensitive parts of its interface automatically appear on their cell phone.
The HTML fetched from a split-trust web application contains (i) regular content, rendered on the PC as usual; and (ii) a number of AES-encrypted [10], Base64-encoded [16] embedded messages. Each of these messages contains cHTML content that may ultimately appear on the personal device's screen. The RDC Agent, running inside Firefox, extracts embedded messages from the received HTML and forwards them to the phone over HTTP (see Figure 3).
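A server-side sketch of producing such an embedded message is shown below, using the third-party `pycryptodome` package for AES. The field naming follows the paper's rdc-* convention, but the key size, CBC mode with PKCS#7 padding, and IV handling are illustrative assumptions rather than the exact construction used in the prototype.

```python
import base64
from Crypto.Cipher import AES
from Crypto.Random import get_random_bytes
from Crypto.Util.Padding import pad

session_key = get_random_bytes(16)     # negotiated with the trusted device (see Section 4.2.1)

def make_embedded_message(chtml: str, key: bytes) -> str:
    """AES-encrypt a cHTML fragment for the personal device and Base64-encode it
    so it can sit in a hidden form field of the PC-side HTML."""
    iv = get_random_bytes(16)
    ciphertext = AES.new(key, AES.MODE_CBC, iv).encrypt(pad(chtml.encode(), AES.block_size))
    return base64.b64encode(iv + ciphertext).decode()

msg = make_embedded_message("<p>Enter credit card number:</p><input name='cc'/>", session_key)
hidden_field = f'<input type="hidden" name="rdc-onClick-0-msg" value="{msg}"/>'
print(hidden_field)
```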
The cell phone runs a local HTTP Daemon that receives an HTTP Request from the RDC Agent and, via CGI scripts, passes the embedded message contained within it to the Crypto Layer. There it is decrypted before being rendered in the phone's browser. The Crypto Layer is also responsible for encrypting the contents of form fields filled in on the cell phone before this data is sent back to the RDC Agent on the PC. To simplify user-interface issues, the phone's browser does not allow hyperlinks; instead, all hyperlinks reside on the PC-side interface.
In the remainder of this section we describe the architectural components outlined above in more detail. We start by showing how messages for the personal device are embedded into regular HTML pages (Section 4.1); we then describe the implementation of the RDC Agent (Section 4.2) and briefly outline the design of the components running on the cell phone (Section 4.3). For simplicity, our initial description of the system does not consider the splitting of HTML forms. The details of how form fields can be split between the PC and personal device are described separately (Section 4.4). Finally, we present a performance evaluation of our implementation (Section 4.5).
4.1 Embedding Split-Trust in HTML
Figure 4 shows an example HTML page that may be served by a split-trust-enabled web application. A single meta tag with attribute name="split-trust-browsing" specifies that this page contains embedded messages destined for a trusted personal device. By examining the contents of the form rdc-data one can see that the page contains 3 such embedded messages, each stored in the value attribute of a hidden field. On loading the page, Firefox renders the HTML in the usual way, displaying the two visible tags on the PC's screen. (Since the messages for the personal device are embedded in hidden form fields they do not affect the page layout.)
The name attribute of a message's enclosing form field specifies the event that the message is associated with. For example, in the page shown in Figure 4, the message contained within the field entitled rdc-onLoad-msg is forwarded to the personal device as soon as the browser has finished loading the HTML. Names prefixed “rdc-onClick” are reserved for messages triggered by click events.
In Figure 4 the message contained in the field entitled rdc-onClick-0-msg is associated with the link defined by the tag with name rdc-onClick-0. Similarly, message rdc-onClick-1-msg is associated with link rdc-onClick-1. When the user clicks on a link, the RDC agent checks if there is an associated message and, if there is, forwards it to the trusted personal device. Although not shown in Figure 4, other names refer to different types of events. For example, we could have named a link rdc-onMouseOver-3 and provided a corresponding message entitled rdc-onMouseOver-3-msg.
4.2 RDC Agent
We implemented the RDC Agent as a Firefox Browser extension, writing it in a combination of JavaScript [13] and XML [33]. Whenever a page is loaded the RDC Agent first checks for the presence of the split-trust-browsing meta tag (see above). If this is not found the RDC Agent stops processing immediately, ensuring that the extension does not degrade the performance of non-split-trust sites. If the meta tag is present, the Browser extension uses the DOM API [13] to check if there are any tags prefixed rdc-. For each of these tags an event listener is added with a callback function that forwards its associated message to the personal device. Finally, if there is a form field named rdc-onLoad-msg then the message it contains is forwarded to the personal device immediately.
4.2.1 Authentication and Key Exchange
A prerequisite to transmitting encrypted messages between the web server and the personal device is the negotiation of a session key between these two parties. Several existing Internet standards define secure key-exchange mechanisms, such as SSHv2 (RFC 4253) [32], IKE (RFC 2409) [15] and SSL/TLS (RFC 2246) [12]. Our current implementation uses SSHv2 authentication/key-exchange, specifically diffie-hellman-group1-sha1 with RSA host keys. We did not use the SSHv2 Diffie-Hellman Group Exchange mechanism due to the additional round-trip of packets required, but this can easily be added for increased security if desired. The RDC Agent acts as a coordinator for the authentication/key-exchange process.
A split-trust web application initiates key-exchange and authentication by serving an HTML page containing a meta tag with name="kex-init". The RDC agent detects the presence of this tag and sends an HTTP Request (R1 in Figure 5) to the personal device requesting its first key exchange message. The RDC Agent receives M1, contained in the body of the HTTP Response, and forwards it along as a new HTTP Request M1' which is sent to the web server. The web server responds with its key exchange reply M2, which the RDC Agent forwards as M2' to the personal device via another HTTP Request. The response is sent back to the PC via R2, and the processes continues. Thus, by making alternate HTTP Requests between the personal device and the web server, the RDC agent co-ordinates the flow of cryptographic messages necessary for key exchange (the dotted lines of Figure 5). Note that the full diffie-hellman-group1-sha1 protocol requires a third message that, due to space constraints, is not shown in Figure 5.
When the phone has authenticated the web server (verifying the host-key by means of a certificate) it displays a confirmation dialogue on its screen informing the user of the web server's identity and asking if they want to proceed. Thus, if crimeware on the PC has silently redirected the browser to an attacker's site, this fact will be revealed to the user via their trusted personal device. (Redirection attacks will be considered more deeply in Section 5.1).
The value attribute of the kex-init meta-tag contains a continuation URL akin to a form's action attribute. When the key exchange/authentication protocol has been completed, the RDC Agent redirects the browser to this URL. In this way a web application can request a key exchange and then, once a session key, SK, has been established, redirect the browser to show a new split-trust page in which embedded messages are encrypted with SK. Note that key exchange is not limited to the start of a split-trust browsing session: the web server can request a new session key at any time by means of a kex-init meta-tag.
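The underlying Diffie-Hellman computation that the RDC Agent shuttles back and forth can be sketched as follows, using the 1024-bit modulus of Oakley Group 2 (RFC 2409) and generator 2. Host-key authentication, the SSH message framing, and the exact key-derivation function are omitted; the SHA-1 truncation to a 128-bit AES key is an illustrative assumption.

```python
import hashlib
import secrets

# 1024-bit MODP prime from RFC 2409 (Oakley Group 2), generator g = 2.
P = int(
    "FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74"
    "020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F1437"
    "4FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7ED"
    "EE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF",
    16,
)
G = 2

# Each side keeps a private exponent and exchanges only its public value.
phone_priv = secrets.randbelow(P - 2) + 1
server_priv = secrets.randbelow(P - 2) + 1
phone_pub = pow(G, phone_priv, P)        # M1: phone -> server (relayed by the RDC Agent)
server_pub = pow(G, server_priv, P)      # M2: server -> phone (relayed by the RDC Agent)

# Both ends derive the same shared secret; hashing it yields the AES session key.
phone_secret = pow(server_pub, phone_priv, P)
server_secret = pow(phone_pub, server_priv, P)
assert phone_secret == server_secret
session_key = hashlib.sha1(phone_secret.to_bytes(128, "big")).digest()[:16]
print("negotiated 128-bit session key:", session_key.hex())
```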
4.3 Components on the Cell Phone
We implemented a prototype Crypto Layer for the cell phone (see Figure 3). The multi-precision Modular Exponentiation required for the key exchange/authentication protocol relies on the open source GNU Multi-Precision Arithmetic library (libGMP), which we cross-compiled for the phone. An open-source AES reference implementation was also cross-compiled for the phone in order to decrypt messages received from the RDC Agent and the encrypted phone-based user input.
For technical reasons we were unable to interface our system with the phone's built-in browser; instead, we implemented a simple cHTML browser as a Java MIDP Application in order to display content on the cell phone. The Java browser interfaces with the (native) Crypto Layer via a loopback TCP connection. The implementation of the phone's browser is made considerably easier by the fact that hyperlinks are not permitted on the personal device (see Section 3).
4.4 Dealing With Forms
So far we have seen how a split-trust web application can embed encrypted content in HTML pages, and how the RDC Agent running on the PC can forward this content to be displayed on the cell phone when specific events occur. Here we show how this framework can be extended to deal with split HTML forms in which some fields are displayed on the PC while others appear (and are filled in) on the cell phone.
When the user clicks on the <a> tag named <rdc-onClick-0> (on their PC) the RDC Agent forwards rdc-onClick-0-msg to the personal device in the usual manner. This message can contain a mix of cHTML content and form fields which are rendered in the phone's browser. If, after decrypting a message, the phone finds that it contains form fields, it relays this information back to the RDC-Agent in its HTTP Response (see Figure 3). This triggers the RDC-Agent to poll the phone for the user's response (via repeated HTTP Requests).
The user fills in the form fields via their phone's keypad and selects "Submit" in their phone's browser. The Crypto Layer, running on the phone, encrypts this user input and returns it to the RDC Agent in an HTTP Response. When an encrypted response is received, the RDC Agent inserts it into the value attribute of the field rdc-onClick-*-response (see above). Thus, when myForm is submitted, the web application receives the data entered on the cell phone via the contents of this field.

Of course, the untrusted PC may maliciously swap the encrypted messages in the rdc-onClick-*-response fields before submission. To protect against this attack, the encrypted message generated by the personal device actually contains a set of (<fieldname>, <user-input>) pairs. On receipt of a form input message from the trusted personal device, the web application parses both the fieldname and the corresponding user input, ensuring that, even if messages are swapped by the untrusted PC, the right user input is bound to the right field.
A single form can contain fields displayed on both the PC and the phone. In the above example, myForm could contain regular (i.e. not hidden) fields which would be rendered by the Firefox Browser in the usual way. On submitting the form, the web application thus receives the values of those fields entered on the PC, as well as encrypted form response messages from the personal device.
4.4.1 Form Submission
There are two alternative mechanisms of submitting split-trust form data back to the web server. First, an application can specify that a form should be submitted by means of a “submit” button displayed on the PC's browser. This is achieved by simply adding a regular submit button to the HTML above.
Second, an application can instruct the RDC Agent to submit a form automatically as soon as a response is received from the phone. In the above example the web application can request this behavior by including:
<input type="hidden" name="rdc-onClick-0-submittype" value="automatic">
Automatic submission is ideal for scenarios such as phone-based login: as soon as a username and password are entered and confirmed on the phone's keypad the web-application proceeds to the next page. In contrast, manual submission (via a button on the PC's browser) is often suitable for pages that contain multiple phone-based forms. In this case users can fill in each of the forms on their cell phone before finally clicking submit in the PC's browser to transmit all this data back to the web application.
For each phone form, a web application can also include a corresponding status element, displayed on the PC (e.g. <p name="rdc-onClick-0-status">). When the RDC Agent forwards a form specification to the phone, it simultaneously updates the innerHTML property [13] of the corresponding status element (rendered on the PC) to inform the user that the form is "currently being edited on the phone". Similarly, when a user response is received, the status element is updated to notify the user that a "form submission has been received from the phone".
4.4.2 Avoiding Replay Attacks
Recall that Property 4 of our Security Policy Model (Section 3.2) requires that form data entered via the phone must not be subject to replay attacks. To enforce this property we require that each encrypted form specification served by the web application contains a fresh nonce [28] and a timestamp. The phone's browser automatically copies this information into its encrypted form response message. On receiving a form response message the web application decrypts it and then checks (i) that it has not seen the nonce before; and (ii) that the response is timely.
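A server-side freshness check of this kind might look like the following minimal Java sketch; the class name, the five-minute acceptance window, and the in-memory nonce set are assumptions made purely for illustration.

```java
import java.util.Collections;
import java.util.HashSet;
import java.util.Set;

// Minimal sketch (hypothetical names and window size): a form response is
// accepted only if its nonce has never been seen before and its timestamp
// is recent, so a replayed submission is rejected.
public class ReplayGuard {
    private static final long MAX_AGE_MILLIS = 5 * 60 * 1000; // assumed freshness window
    private final Set<String> seenNonces = Collections.synchronizedSet(new HashSet<String>());

    public boolean accept(String nonce, long timestampMillis) {
        boolean timely = System.currentTimeMillis() - timestampMillis <= MAX_AGE_MILLIS;
        boolean unseen = seenNonces.add(nonce); // add() is false if the nonce was already used
        return timely && unseen;
    }
}
```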
4.5 Performance Evaluation
To assess the performance of our implementation we measured the latency incurred between a user performing an action (e.g. clicking on a link) and an associated 850-byte message appearing on the phone's screen. The message is encrypted using AES with a 1024-bit key and Base64 encoded; our choice of 850 bytes is very much a worst case, since we expect most messages sent to the phone to be significantly smaller than this.
Our PC was a 2.5GHz Pentium 4 with 512MB RAM; our trusted personal device was a Motorola E680 smart phone, which has a 400MHz Intel XScale (Bulverde) Processor and 32MB RAM / 32MB Flash. Each measurement was averaged over 20 trials. As shown in Figure 7, the latency of each of the components of the system is as follows:
1. The time taken between the RDC Agent receiving a UI-event and initiating an HTTP Request containing the message to be forwarded is negligible (invariably less than 1 ms).
2. With the phone connected to the PC via USB, the time taken to send the HTTP Request containing the encrypted 850 byte message to the phone is 0.1s (s.d. 0.01s).
3. The time taken to Base64 decode the message on the phone is 0.2s (s.d. 0.02s).
4. The time taken to AES-decrypt the message on the phone, with a 1024-bit key, is 0.38s (s.d. 0.01s).
5. The time taken to send the decrypted message to the Java Browser (over a loopback TCP connection) and to render the content on the phone's screen is 0.2s (s.d. 0.05s).
Thus the average end-to-end latency between the user generating an event on the PC (e.g. clicking on a link) and the corresponding 850 bytes of content being rendered on the phone's screen is 0.88s. Even for this worst-case message size we believe that 0.88s falls within the limits of acceptable latency for web usage models (since it is comparable to the time taken to fetch a page from a web server over the Internet). Since the time complexity of Base64 decoding and AES decryption is \(O(n)\), the latency would reduce linearly with message size.
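As an illustration of that linear scaling, the following back-of-the-envelope Java sketch scales only the size-dependent components of the measured 850-byte latency (transfer, Base64 decoding, and AES decryption) and treats rendering as fixed; the assumption that transfer time also scales linearly is ours, not a claim taken from the measurements above.

```java
// Back-of-the-envelope sketch: only the size-dependent components of the
// measured 850-byte latency are scaled linearly; rendering is treated as a
// fixed cost. The linear-transfer assumption is for illustration only.
public class LatencyEstimate {
    public static double estimateSeconds(int messageBytes) {
        double sizeDependentAt850 = 0.1 + 0.2 + 0.38; // transfer + Base64 + AES at 850 bytes
        double fixed = 0.2;                            // rendering on the phone
        return fixed + sizeDependentAt850 * messageBytes / 850.0;
    }

    public static void main(String[] args) {
        // A 200-byte confirmation message would take roughly 0.36 s under this model.
        System.out.printf("%.2f s%n", estimateSeconds(200));
    }
}
```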
Furthermore, higher-performance processors are filtering into the design of modern smart phones, which will further decrease the latency of all cryptographic functions. (Note that our AES component only performs decryption; it does not check message integrity. Verifying message integrity on the mobile device would incur extra latency, adding a factor of at most 2 to the measurement reported here.)

Regarding the performance of key exchange, using libGMP the Motorola E680 is able to generate a 1024-bit random number and compute a modular exponentiation using Oakley Group 2 Diffie-Hellman Parameters [15] in an average of 0.06s (s.d. 0.004s). Thus the time taken to perform key exchange and authentication is most likely to be dominated by the round-trip-times of the HTTP messages initiated by the RDC-Agent (see Figure 3).
5. ATTACKS AGAINST SPLIT-TRUST BROWSING
In this section we consider a number of attacks against split-trust browsing and consider how well we can defend against them.
5.1 Phishing
Crimeware attacks are different from conventional phishing attacks: whereas the former rely on malicious software running on users' machines (e.g. key-loggers), the latter rely entirely on social engineering, attempting to fool users into unwittingly entering security-sensitive information into attackers' websites. This paper has motivated split-trust browsing primarily as a technique for addressing PC-based crimeware attacks. However, the general split-trust browsing technique can also be leveraged to address conventional phishing. For example, the server may validate the identity of the user by means of challenge/response authentication with their personal device (cf. one-time passwords). Alternatively, we may combine split-trust browsing with a password hashing [26] scheme. In this case, a password entered on the personal trusted device is hashed with some known properties of the website (including its domain name) before being sent back to the server. Both these techniques would make it harder for phishers to obtain reusable credentials.
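As a rough illustration of the second idea, the sketch below hashes the password together with the site's domain name on the trusted device; the use of HMAC-SHA-256 and the class name are illustrative choices, not the specific password-hashing scheme of [26].

```java
import javax.crypto.Mac;
import javax.crypto.spec.SecretKeySpec;
import java.nio.charset.StandardCharsets;
import java.util.Base64;

// Minimal sketch: the password never leaves the trusted device in reusable
// form; it is keyed-hashed with the site's domain name, so a look-alike site
// at a different domain receives a credential it cannot replay elsewhere.
// HMAC-SHA-256 is an illustrative choice, not the specific scheme of [26].
public class DomainHashedPassword {
    public static String hash(String password, String domain) throws Exception {
        Mac mac = Mac.getInstance("HmacSHA256");
        mac.init(new SecretKeySpec(password.getBytes(StandardCharsets.UTF_8), "HmacSHA256"));
        byte[] digest = mac.doFinal(domain.toLowerCase().getBytes(StandardCharsets.UTF_8));
        return Base64.getEncoder().encodeToString(digest);
    }
}
```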
Another possible phishing-style attack involves redirecting the untrusted PC to a similar-looking domain name and then presenting a valid certificate for the fake domain. Although, at session-initiation time, a message would appear on the trusted personal device asking if the connection should proceed, the user may not spot that the company/domain name is incorrect. They may therefore click continue and unwittingly connect to the attacker's server.
This is a general problem with certificate-based authentication that we do not claim to have solved. However, as a side note, we observe that we can leverage users' mobile devices to make physical certificate exchange practical. For example, we may forbid the trusted personal device from accepting any certificates over the network. Instead, users may present their trusted personal devices at trusted retail outlets and high-street banks in order for the companies' certificates to be physically uploaded. Although this makes the system more cumbersome to use, it does give users reason to believe that the certificates on their device are only from reputable companies, addressing the redirection problem.
5.2 Active Injection Attacks
Since we assume that the PC may be entirely compromised, crimeware has the capability to rewrite the HTML in the PC's browser, e.g. swapping link targets around, adding new links, or modifying text. We address this issue with reference to our Security Policy Model (Section 3.2). From points 4 and 5 of the Security Policy Model we know that even if the user is fooled into initiating a security-sensitive operation due to an HTML-rewriting attack, all that will happen is that a fully-specified confirmation dialogue appears on their trusted personal device. If the user does not confirm the action via the trusted personal device the web-application will not carry it out. Similarly, since points 1-3 of the Security Policy Model require the web application to encrypt all security-sensitive content, an HTML rewriting attack cannot cause this information to be revealed.
The problem of active-injection (see Section 3.1) is dealt with in the same way. If crimeware on the untrusted PC maliciously attempts to initiate a security-sensitive operation (say, by spoofing a click on a hyperlink) then our Security Policy Model dictates (i) that no security-sensitive operations will be performed without first requesting confirmation via the trusted personal device; and (ii) that, since security sensitive information is always encrypted, it will not be revealed.
Another form of HTML rewriting attack relates to form submission. In this case the untrusted PC may maliciously put an encrypted user-input message received from the personal device into the wrong form field before completing a form submission (see Section 4.4.1). The aim of this attack may be to fool the web application into binding the wrong piece of user-input to the wrong form field. Recall (from Section 4.4.1) that we deal with this attack by ensuring that encrypted user-input messages generated by the trusted personal device contain (<fieldname>, <user-input>) pairs, which are parsed by the web application. Since crimeware on the untrusted PC cannot change the content of the encrypted messages, it cannot cause the wrong piece of user-input to be associated with the wrong form field. Also, in accordance with point 4 of our Security Policy Model, we ensure that crimeware cannot replay form submissions (see Section 4.4.2).

2 Although password-hashing can be implemented directly on the untrusted PC [26], this does not protect against OS-level key-logging attacks. Thus we would implement password-hashing on the trusted device.
It is worth observing that attacks against the RDC Agent directly are really just special cases of HTML-rewriting/active injection attacks.
5.3 Message Re-Ordering Attacks
A major difference between our architecture and conventional secure transport protocols (such as SSH [32]) is that we do not embed sequence numbers in encrypted messages. A man-in-the-middle (including, of course, crimeware on the untrusted PC) is thus able to re-order the messages in transit between the web-application and the trusted personal device.
Our omission of a sequence number is quite deliberate; it would provide no additional security in the context of our architecture. The reason is that crimeware on the untrusted PC is already capable of mounting active-injection attacks. Why bother to preserve the order in which packets were sent by the web-application when the order in which they were requested can be spoofed so easily? Instead, we observe that message re-ordering attacks are just a subset of HTML rewriting and active-injection attacks, and address them in the same manner: not by preventing them from happening, but by designing web-applications in such a way that it does not matter if they do happen, i.e. with reference to our Security Policy Model.
As a brief aside, note that one may propose an alternative split-trust web-browsing framework in which all clicks on hyperlinks are initiated (or somehow confirmed) on the personal device. In this context, SSH-style sequence numbering would provide some value, since the order in which the web-application sends its messages is worth preserving. However, the downside of this scheme is that the requirement to initiate/confirm all clicks via the personal device would make the system cumbersome to use. Thus, we argue that our Security Policy Model finds a sweet-spot on the security-usability spectrum for split-trust applications.
5.4 Social Engineering Attacks
Split-trust browsing requires users to understand a simple principle: trust your personal device, not the PC. However, attackers may conspire to make users doubt this principle causing them (say) to unwittingly confirm a security-sensitive operation via their trusted personal device.
For example, the untrusted PC may perform an HTML-rewriting attack, maliciously adding the text “you will now see a confirmation dialogue appearing on your personal device; please click confirm”. At the same time, it may use an active-injection attack to initiate a security-sensitive operation. The question is, when the confirmation dialogue appears on their personal device, will users remember the “trust your personal device, not the PC” principle, or will they be fooled into clicking on confirm?
We believe that this type of attack is dangerous; the success of phishers suggests that some users will always be duped by this kind of ploy. However, although split-trust browsing is not foolproof against attacks of this nature, it still raises the bar. Without split-trust browsing, an active-injection attack perpetrated by crimeware running on the PC would simply result in a security-sensitive operation being performed; the user would not have any chance to prevent it. With split-trust browsing, crimeware has to simultaneously initiate the security-sensitive operation and successfully fool the user into confirming the fully-specified confirmation dialogue on their phone.
Extensive user testing is required to determine how users of split-trust web applications may respond to this type of attack.
6. RELATED WORK
The idea of simultaneously using multiple devices to access applications and data has been explored extensively by the research community [21, 25]. Our work adopts these ideas, using them to protect against PC-based crimeware attacks. We are also influenced by the Situated Mobility [24, 31] and Parasitic Computing [22] models of ubiquitous computing, in which small mobile devices (e.g. cell phones) co-opt computing resources already present in the environment (e.g. public screens) to facilitate interaction with their users.
In the first author’s previous work [29], split-trust is applied at the framebuffer level of a thin-client/server system. In that framework it is possible for the user to censor information on the public terminal, using the (smaller) display of the trusted device as a “lens” to “reveal” parts of the censored display. This paper explores a different level of abstraction at which the user interface can be split: namely, the HTML level. The advantage of the framebuffer approach is that users can run unmodified desktop applications; the benefit of the approach described in this paper is that we can exploit the additional structure of HTML (as opposed to pixels) to provide a more natural user-interface split between trusted and untrusted devices.
Balfanz and Felten demonstrated the idea of splitting an application between a trusted PDA and untrusted PC in the context of email signing [6]. In this paper we extend their idea, presenting a general architecture for split-trust web applications.
Ross et al. developed a web-proxy which detects security-sensitive words and phrases in HTML content, replacing them with code-words. Users can simultaneously connect their PDA to the proxy in order to download a mapping from code-words back to their original text [27]. Ross’ work does not allow HTML to be split generally and, most critically, does not allow data-entry to be performed via the PDA; as a result his system does not protect against key-logging and active injection attacks. We believe our architecture for splitting HTML generally, our ability to migrate user-input to the trusted personal device to avoid PC-based key-logging attacks, and our Security Policy Model for generalized split-trust web-applications are a significant advance on Ross’ work.
Ross‘ web-proxy [27], and other previous work on split-trust architectures [23] require the personal device to open a dedicated Internet connection to a trusted server. In contrast, one of the interesting aspects of our split-trust framework for web applications is that we are able to embed encrypted messages in the untrusted PC’s HTML, relying on the RDC Agent to de-multiplex these two logical channels. Although it does not affect the security properties of the system, for the
reasons stated in Section 3.2, we believe that this approach leads to significant usability benefits.
Recently researchers have considered an alternative to split-trust applications in which a trusted mobile device is used to establish the trustworthiness of a public terminal [14]. This provides a usage model whereby, once the trustworthiness of the public terminal has been established, one proceeds to use it exclusively, without looking at the trusted personal device again. Whilst this may present some usability advantages, the disadvantages of this approach are (i) the public terminal requires a Trusted Platform Module (thus exposing the architecture to the general problems surrounding TPMs [2]); and (ii) since only the integrity of the software is verified, these systems do not protect against hardware attacks (e.g. keyloggers in compromised keyboards). A hybrid approach is presented in [20] in which a mobile device is used to both verify the integrity of software running on an untrusted terminal, and to facilitate secure input.
Previous work on split-trust systems [6, 23, 27] has not considered how applications may be written to minimize trust in the client PC. We believe that our Security Policy Model is an important contribution in this respect. Whereas Oprea et al. admit that they are forced to "trust the client PC to a certain extent" [23], our Security Policy Model demonstrates that it is possible to design split-trust applications that put no trust whatsoever in the client PC.
Recent advances in mobile device technology make it possible for users to browse the web conveniently and effectively using solely their phone or PDA. This fact does not undermine the utility of the split-trust model, however, since there will always be many situations where one would prefer to browse the web on a full-size desktop PC as opposed to on a mobile device (e.g. whilst at home or at work)–the split-trust model applies to these scenarios. Also, as mobile devices become increasingly complex they necessarily become less trusted, until ultimately one requires a separate, simpler TPD to use in conjunction with their phone or PDA! This scenario is actually less ridiculous than it first appears since the simpler TPD could be embedded within the form-factor of the mobile device itself. In this model special purpose hardware could even share the screen and keypad between the TPD and (untrusted) Application Processor (AP) in such a way that (i) the keypad/display is only accessible to either the TPD or the AP at any given time; and (ii) the user is given clear, unspoofable indication of this modality (e.g. an LED connected directly to the display/keypad switching circuitry). A hybrid embedded in the same casing as an otherwise untrusted personal device may either be used in conjunction with desktop PCs (as described in this paper), or in conjunction with the regular web browser on the personal device itself.
7. DISCUSSION
This section considers several aspects of the split-trust browsing model, clarifying the premises and considerations that shaped the planning of this project.
7.1 What Makes a Personal Device Trusted?
Ideally, one could imagine designing and manufacturing trusted personal devices specifically for split-trust browsing. Such devices could be technically very simple, supporting only basic I/O capability, a data-link technology that enables direct connection to a PC (e.g. USB or Bluetooth), cryptographic functionality, and a stripped-down cHTML browser. A security-focused design from the outset, combined with its technical simplicity, could make such a product a significantly more trusted platform than a modern general-purpose PC.
From a more pragmatic perspective, some security researchers claim that some existing cell phones and PDAs already provide a more trusted computing platform than general purpose PCs [6, 23]. In particular: (i) users only rarely install privileged applications on their phones reducing the risk of Trojan-based crimeware; and (ii) whereas it is often easy for attackers to gain physical access to PCs in order to install crimeware, it is much harder to gain physical access to a user's cell phone.
Thus, whilst the best trusted personal devices would be designed specifically for that purpose from the outset, we believe that, in the short term, users could still benefit from split-trust browsing with their existing PDAs or cell phones. (We note that the architecture presented in this paper is applicable regardless of the implementation details of trusted personal devices.)
A number of manufacturers are starting to incorporate hardware into cell phones specifically to provide strict process isolation and to manage encryption keys/private data [1, 11]. We see this as a promising sign, suggesting that security is increasingly being seen as an important aspect of mobile computing devices. Such technology has the potential to isolate trusted mobile applications (such as application-support required for split-trust browsing) from the effects of mobile phone viruses [7] and malicious code.
7.2 Generalizing Our Architecture
The architecture presented in Section 4 is just one of a number of possible implementation alternatives, each with their own advantages and shortcomings. For example, we may have chosen to implement the RDC Agent as an HTTP proxy that runs as a native process on the PC. This has the benefit of enabling one RDC Agent to work with multiple browsers; however, it makes it more difficult for the RDC Agent to respond to user-interface events occurring within the browser.
Similarly, we may have chosen a different embedding strategy for messages destined for the trusted personal device, or a different mechanism for coordinating key exchange. The purpose of Section 4 is thus not to present the definitive architecture for split-trust browsing, but instead to demonstrate that such an architecture can be built on top of existing infrastructure whilst achieving acceptable performance.
3 Many phone applications that users install are sandboxed Java MIDP applets that are not capable of general key-logging or screen-grabbing.
4 For example, the RDC Agent presented in Section 4.2 works well with Firefox's tabbed browsing: when the user clicks on a different tab, the RDC Agent traps this event and forwards the new page's rdc-onLoad-msg to the user's personal device.
There are a number of ways that the architecture presented in this paper could be generalized. For example, in its current form, the trusted personal device only stores one session key at a time; thus, when a new split-trust session starts, the previous one is automatically closed. To avoid this we could borrow from SSL client design, enabling the trusted personal device to maintain a table of active session keys indexed by the domain of the current URL.
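Such a table could be as simple as the following Java sketch; the class and method names are hypothetical, and the sketch ignores key expiry and persistence.

```java
import java.util.HashMap;
import java.util.Map;
import javax.crypto.SecretKey;

// Minimal sketch (hypothetical names): the trusted device keeps one session
// key per domain instead of a single global key, so opening a split-trust
// session with one site no longer closes the session with another.
public class SessionKeyTable {
    private final Map<String, SecretKey> keysByDomain = new HashMap<>();

    public void store(String domain, SecretKey sessionKey) {
        keysByDomain.put(domain, sessionKey);
    }

    public SecretKey lookup(String domain) {
        return keysByDomain.get(domain); // null if no active session with this domain
    }

    public void close(String domain) {
        keysByDomain.remove(domain);
    }
}
```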
There are also a number of places where the mechanism for splitting content between PC and personal device could be generalized. For example, our current implementation does not allow images to appear on the personal device. This functionality could be added (say) by allowing image data to be embedded directly in the cHTML forwarded to the personal device. Similarly, one may wish to allow hyperlinks to appear on the trusted personal device (a feature which our current architecture does not allow). Of course, it is unclear whether these generalizations would have a positive or a negative effect on the overall usability of the system. Further research is required to answer such questions.
7.3 Usability Issues
Although this is primarily a mobile systems-security paper, there were some usability issues that came to light during our implementation work which we choose to document here.
On the PC screen there is a clear need to visually differentiate between links that cause new content to appear on the PC and links that cause new content to appear on the personal device. To address this issue we used a style-sheet that defined a class of “personal device link”, rendering them with a highlighted background. A web application uses the class attribute to mark these links (see Figure 4).
The factor that we found made the most significant difference to usability is at first a seemingly trivial concern: the ability to stick the personal device on the side of the PC monitor. This enables both hands to be free for mouse/keyboard input; furthermore, the proximity between the PC display and the phone display enables the user to keep them both in their peripheral vision simultaneously. As a result, the user experiences virtually no overhead in managing the two displays: instead, they are able to treat the two logical displays as one.
8. CONCLUSIONS
Crimeware is becoming a serious problem, threatening to take over from phishing as the dominant form of cyber-crime in the not too distant future [5]. The web's security model (HTTPS/SSL) protects data as it is transmitted between client and server, but does not prevent crimeware attacks in which the end-points themselves are compromised. In this paper we have proposed an architecture for split-trust browsing through mobile composition that allows users to combine their PC with a trusted personal device to fight crimeware (Section 1).
Our architecture requires web services, public terminals and mobile devices to run special software to facilitate split-trust browsing. Installing the required application on the mobile device is unlikely to pose a problem; neither is installing the software on the untrusted terminal (we have shown this can be packaged and distributed in the form of a browser plug-in). However, the fact that service providers also have to modify their applications is a likely barrier to adoption. To address this issue, an interesting topic of future work would be to implement HTML-rewriting proxies that impose a split-trust policy over unmodified web applications. A simple and generic example of such a proxy would be one that sent all password fields in HTML forms to the mobile device while leaving the rest of the HTML unmodified. More complicated split-trust policies could be written programmatically for particular applications, perhaps in a policy-language designed specifically to express split-trust transformations. Of course, such proxies would have to be trusted.
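As a rough illustration of the simple generic proxy policy, the sketch below rewrites every HTML password field into a hidden placeholder of the kind the RDC Agent forwards to the phone; the field-naming convention, the omitted encryption step, and the regular-expression approach are all simplifying assumptions, and a real proxy would use a proper HTML parser and would itself have to hold the session key.

```java
import java.util.regex.Matcher;
import java.util.regex.Pattern;

// Minimal sketch of the generic proxy policy: every password field is
// replaced by a hidden placeholder destined for the phone, and the rest of
// the page is left untouched. The field name "rdc-password-N" and the regex
// approach are illustrative assumptions only.
public class PasswordFieldRewriter {
    private static final Pattern PASSWORD_FIELD =
            Pattern.compile("<input[^>]*type=[\"']password[\"'][^>]*>", Pattern.CASE_INSENSITIVE);

    public static String rewrite(String html) {
        Matcher m = PASSWORD_FIELD.matcher(html);
        StringBuffer out = new StringBuffer();
        int i = 0;
        while (m.find()) {
            String placeholder = "<input type=\"hidden\" name=\"rdc-password-" + i
                    + "\" value=\"(encrypted form specification)\">";
            m.appendReplacement(out, Matcher.quoteReplacement(placeholder));
            i++;
        }
        m.appendTail(out);
        return out.toString();
    }
}
```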
In future work we would like to perform usability testing around the split-trust model. We believe it would be particularly interesting to evaluate the impact of some of the social-engineering attacks against split-trust browsing (see Section 5.4).
As we have discussed in Section 7, split-trust web browsing is not a panacea. However, we do believe that it has the potential to provide consumers with a significantly greater degree of security in the face of ever-increasing crimeware and phishing attacks. Of course, our system delivers value proportional to the security of the trusted personal devices employed. It is our hope, therefore, that by presenting application scenarios for secure mobile computing, split-trust research motivates vendors to incorporate security-enhancing technologies (e.g. ARM's TrustZone [1] and Intel's Mobile IA [11]) into personal devices.
REFERENCES
Design, Implementation, and Evaluation of an Approach for Determining When Programmers are Having Difficulty
Jason Carter
Computer Science Department
University of North Carolina at Chapel Hill
Chapel Hill, NC, USA
carterjl@cs.unc.edu
Prasun Dewan
Computer Science Department
University of North Carolina at Chapel Hill
Chapel Hill, NC, USA
dewan@cs.unc.edu
ABSTRACT
Previous research has motivated the idea of automatically determining when programmers are having difficulty, provided an initial algorithm (unimplemented in an actual system), and performed a small student-based evaluation to justify the viability of this concept. We have taken the next step in this line of research by designing and developing two-different systems that incorporate variations of the algorithm, implementing a tool that allows independent observers to code recorded sessions, and performing studies involving both student and industrial programmers. Our work shows that (a) it is possible to develop an efficient and reusable architecture for predicting programmer status, (b) the previous technique can be improved through aggregation of predicted status, (c) the improved technique correlates more with programmers’ perception of whether they are stuck than that of observers manually watching the programmers, (d) the observers are quicker than the developers to conclude that programmers are stuck, (e) with appropriate training, the tool can be used to predict even the observers’ perceptions, and (f) a group training model offers more accuracy than an individual one when the training and test exercises are the same and carried over a small time frame.
Categories and Subject Descriptors
H.5.3 Group and Organization Interfaces: Computer-supported cooperative work.
General Terms
Human Factors
Keywords
Machine learning, data mining, architecture, software development, semantic awareness
1. INTRODUCTION
Often programmers get “stuck” while coding, unable to make much progress despite all efforts to address some issue. It would be useful if an interested remote party could become aware of this situation through, for instance, a notification and/or status change in a buddy list. This idea extends the notion of continuous coordination [1] to continuous help, and provides a new kind of contextualized information in collaborative software development [2]. An educational setting provides particularly compelling applications of this idea because an important goal is to help students and monitor their progress. In fact, based on the results of several previous studies, mentioned later, the true benefits of this idea could actually occur in industry.
One way to support this idea is to allow programmers to manually change a status field displayed to potential helpers. However, there are several apparent problems with this approach. First, studies show students and new programmers are late to use help [3], and programmers often exhaust other forms of help before contacting a teammate [4]. Even those who are willing to manually change their status are likely to not set it back, just as people forget to change their busy status in an IM tool or turn off the “call steward” light in a plane.
Another approach is to allow a pair of developers to monitor the progress of each other, using local [5] or distributed [6] side-by-side programming. However, this approach does not scale to beyond a pair of programmers, requires continuous monitoring of the partner’s display, and is not guaranteed to succeed as an observer may not know if the actor is, in fact, stuck.
Therefore, a superior approach is to develop a mechanism that automatically determines if a programmer is stuck (Figure 2) by mining logs of their interaction with the programming environment. Such an approach is bound to be iterative, consisting of the following steps:
1. Develop an initial naïve algorithm for predicting the (stuck/not stuck) status.
2. Implement the algorithm in one or more programming environments.
3. Ask selected developers in lab and/or field experiments to correct the predictions made by the current algorithm.
4. Analyze the logs to refine the set of features.
5. Input these features to existing selected log-mining algorithms.
6. If none of these algorithms makes a significant improvement, stop.
7. Make the algorithm that gives the best results the current algorithm.
8. Go to 2.
Our previous work [7] carried out the first iteration of the process, and evaluated the resulting algorithm (not implemented in any programming environment) in a study involving six student programmers, whose logs were used both in the training and evaluation phases. It leaves, however, several important questions unanswered.
1. Is it possible to develop a common set of extensible prediction modules for different programming environments?
2. Is it possible for the modules to have no impact on the response times perceived by the developers?
3. How well does the previous algorithm work when it is used by industrial programmers?
4. Is it better to train the modules using logs of the individual developer whose status is predicted, or some group of programmers that excludes him/her?
5. What is the correlation between the perceptions of the developers and their observers regarding whether the developers are having difficulty?
6. If these perceptions differ, how well can the predictions made by a tool correlate with the perceptions of human observers?
In the rest of the paper, we address these questions. In Section 2, we survey related work providing the inspiration for and techniques used in the paper. In Section 3, we describe the results of a small field study involving a naïve implementation of the previous algorithm, and adaptations to its semantics and implementation to overcome some of the problems exposed by this effort. In Section 4, we describe a lab study involving nine student and five industrial programmers, and a coding study in which two coders and the first author classified recordings made during the lab study using a special tool we built for this work. In Section 5, we describe the results of the study using an existing model for determining if programmers are stuck. In Section 6, we consider what happens when group and individual data from the lab and coding study are used to train the tool. In Section 7, we consider privacy issues raised by this work, and present preliminary solutions to them. In Section 8, we discuss our findings and provide conclusions and directions for future work.
2. RELATED WORK
The motivation for encouraging programmers to help each other is provided by a variety of previous research efforts, which have explored various degrees of couplings among developers: distributed, co-located, radially co-located, and pair programming.
Herbsleb et al. [8] found that the productivity of distributed teams was lower than that of co-located teams. A more recent study by Cataldo [9] has similar conclusions based on software quality rather than productivity. It found that the number of errors in a project was positively correlated with the number of locations involved in the project. Teasley et al. [10] studied a higher degree of physical coupling, called radical co-location, in which all team members work in a single war-room or bull-pen. They found that the productivity of radically co-located teams was higher than that of co-located ones. In radical co-location, even though the members of the team work in one room, they (can) use different workstations. Higher physical coupling is achieved in pair programming, wherein two programmers sit next to each other, sharing a workstation, and working on a single task, with one programmer, called the driver, providing input, and the other programmer, called the navigator, offering advice. Some studies of pair programming have found that it offers faster task completion times, and more importantly, after taking into account the cost of fixing bugs, much better productivity [11, 12].
The reason higher coupling offers more productivity may lie in how much developers help each other. Pair programming is centered on the idea of the two programmers helping each other with every aspect of the task. Williams and Cockburn report that “pairs often find that seemingly “impossible” problems become easy or even quick, or at least possible, to solve when they work together [11].” Teasley et al. [10] found that in a war-room, if someone was having difficulty with some aspect of code, another developer in the war-room “walking by and seeing the activity over their shoulders, would stop to provide help.” The study by Herbsleb et al. [8] also showed the importance of help in software development. It found that in distributed team development, several forms of communication were more difficult: it was harder to find people, get work-related information through casual conversation, get access to information shared with co-located co-workers, get timely information about plan changes, have clearly formed plans, agree about plans, be clear about assigned tasks, and have co-workers provide help (beyond the call of duty). The study found that the perception of received help was the only factor that correlated with productivity. A related study by Herbsleb and Grinter [13] found that developers are less comfortable asking remote rather than co-located software developers for help. A study by Cataldo [9] found that the number of errors correlated with uneven distribution of engineers across locations, which, together with the other studies, seems to suggest that the team would benefit if a location with more engineers (which is likely to have more expertise, and perhaps, more time) helped the one with fewer engineers.
Together, these studies seem to conclude that (1) developers often hesitate to explicitly ask for help, even when they could use it, and (2) the greater the distance between them and potential helpers, the more their hesitation, and the more difficult it is for the latter to determine if the former need help.
One approach to address the second problem, described in [14], makes distributed team members aware of each other’s interactions with the programming environment. For example, [14] gives a scenario in which Bob, on seeing Alice stuck on debugging a particular class, deduces she could use help, and offers it. This distributed scenario directly mimics the war-room scenario quoted above.
Providing virtual channels that give distributed users the feeling of “being there” in a single location is an important goal of CSCW. However, Hollan and Stornetta have argued that if
CSCW is to be truly successful, it should go “beyond being there” by providing capabilities not available in face-to-face interaction [15].
One approach to support this goal is to automatically infer when people are frustrated, using cameras, posture-sensing chairs, a pressure-sensitive mouse, and a wireless Bluetooth skin-conductance sensor to collect data [16]. A problem with this approach is the overhead, in both time and cost, of the extra equipment required.
An alternative approach is to determine this information by logging developers’ interaction with the system. An important step in this direction is made in [17], which describes a logging-based tool for monitoring student progress. Student teams use a wiki to interact with several tools including CVS, newsgroups, and a metrics module that analyzes students’ data. The wiki allows students to track their development tasks, and analyzes tasks such as file modifications to measure the workload of teams.
A problem with this approach is that the rate of student progress is determined after the fact, when a project is checked-in, rather than incrementally, when the student could use help. This limitation can be addressed by logging interactions with the programming environment. The authors of [17] said they did not take this alternative because “many students have a preferred programming environment and establishing a common one would be a challenge.”
It is possible to overcome this problem by creating such a logger for as many mainstream programming environments as possible. Before this step can be taken, it is important to determine if such an approach is feasible.
There is reason to believe it can work. Previous work by Begole et al. [18] logged email interaction, calendar appointments, and the locations of users to show that there are rhythms or patterns in user activities. Work by Fogarty et al. [19] is even closer to the topic of this paper. Developers are randomly interrupted by a notification and their interactions with the programming environment are logged. Interruptibility is measured from the time the notification appears to the time the notification is acknowledged. The specific actions developers performed right before they were interrupted are used to determine whether these actions correlate with being interruptible.
These approaches represent general methods to mining data, which consists of two main steps: (a) an algorithm for deducing semantic awareness (out of office, interruptible) and (b) a scheme for training the system and evaluating the automatic scheme. Our previous work [7] applied this general approach to the problem of determining progress. This work extended an Eclipse plug-in [24] to log developers’ programming actions and allowed the developers to indicate their status: stuck (which is considered, here, synonymous with having difficulty) and making progress. Based on the event and status logs of six student programmers, it developed the following approach for automatically inferring the status. It categorized user input into five categories: navigation, edit (text insertion/deletion), remove (methods and/or classes), debug, and the programming environment losing/gaining focus. The logs were segmented into sections based on the number of events. Every 50 actions, the tool calculated the ratio of occurrences of each category of actions in that segment to the total number of actions in the segment as percentage, and used these percentages as features over which patterns were identified. This event aggregation technique was used to predict developers’ status. The intuition behind the technique was that when the ratio of edit events to total number of events decreases, programmers are stuck. The approach correctly identified 90% of the time when the students were having difficulty. This result is promising because it recognizes with high accuracy when student programmers are having difficulty even though having difficulty is a rare event. As mentioned in Section 1, this approach left several questions unanswered, which are the focus of this paper.
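A minimal Java sketch of this event-aggregation step is shown below; the enum of event categories follows the five categories listed above, while the class and method names are illustrative rather than taken from the implementation.

```java
import java.util.EnumMap;
import java.util.List;
import java.util.Map;

// Minimal sketch (hypothetical names) of the event-aggregation step: every
// 50 logged actions, the fraction of each event category within the segment
// becomes one feature of the instance handed to the classifier.
public class SegmentFeatureExtractor {
    enum Category { NAVIGATION, EDIT, REMOVE, DEBUG, FOCUS }

    // 'segment' holds the categorized events of one 50-event segment.
    public static Map<Category, Double> ratios(List<Category> segment) {
        Map<Category, Double> features = new EnumMap<>(Category.class);
        for (Category c : Category.values()) {
            features.put(c, 0.0);
        }
        for (Category event : segment) {
            features.put(event, features.get(event) + 1.0 / segment.size());
        }
        return features;
    }
}
```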
3. INITIAL EVALUATION AND ADAPTATIONS
To determine how well the technique developed in our previous approach [7] works in practice, we took two additional implementation and evaluation steps. (1) We incorporated the algorithm in both the Eclipse and Visual Studio programming environments. (2) Some members of our research group, and one industrial software developer, used the Eclipse and Visual Studio implementations for their daily work. We gained important lessons from these steps.
The industrial developer complained about frequent false positives while building a new product – a workflow system. In particular, when he started a new session, the tool gave a relatively high number of false positives because of the navigations performed to build the working set of files. He also needed more time to determine if the predicted change of status was correct, and, thus, often was not sure about his status.
The second author identified two additional problems. The cost of processing incremental input events was noticeable, and sometimes intolerable, on his 3-year old laptop. Moreover, even when the tool accurately predicted he was having difficulty, seeing the status message hurt his ego, as he felt that the change in progress was caused by the difficulty of the problem rather than lack of appropriate skills! A final problem had to do with the implementation architecture: the Visual Studio and Eclipse implementations performed the same functions, but did not share code. Therefore, when a change was made to the code in the Eclipse implementation, the code in Visual Studio had to also change. Put in another way, there would need to be a different implementation of the tool per programming environment, which increases programming time and effort.
We took several steps to address these problems. To address the “hurt ego” issue, we changed the status message from “Having Difficulty” to “Slow Progress.” In addition, we allowed developers to customize the message so that the second author could, for instance, report it as “Complex Programming.”
To address the false positives faced by the industrial programmer, we developed a label aggregation technique that complemented the event aggregation technique. As before, we computed the status every 50 events. However, we notified the developer every 250 events – the value reported was the dominant status in the last five segments.
Together, the two aggregation techniques take into account the fact that the status of a developer does not change instantaneously. In addition, we added an “indeterminate” status value to capture the fact that developers need time to decide if they are stuck. At startup, before 250 events were input, the tool reported the indeterminate value. We also allowed the developer to correct a predicted status to indeterminate.
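The two aggregation steps together might look like the following minimal Java sketch; the class, enum, and method names are illustrative, and the sketch assumes the classifier's per-segment prediction is handed in once per 50-event segment.

```java
import java.util.ArrayDeque;
import java.util.Deque;

// Minimal sketch (hypothetical names) of label aggregation: the classifier
// predicts a status every 50 events, but the displayed status is only
// refreshed every five segments (250 events) with the dominant prediction;
// until the first five segments have been seen, "indeterminate" is shown.
public class StatusAggregator {
    enum Status { HAVING_DIFFICULTY, MAKING_PROGRESS, INDETERMINATE }

    private static final int WINDOW = 5; // 5 segments x 50 events = 250 events
    private final Deque<Status> pending = new ArrayDeque<>();
    private Status lastReported = Status.INDETERMINATE;

    // Called once per 50-event segment; returns the status to display.
    public Status report(Status predictedForSegment) {
        pending.addLast(predictedForSegment);
        if (pending.size() == WINDOW) {
            int difficulty = 0;
            for (Status s : pending) {
                if (s == Status.HAVING_DIFFICULTY) {
                    difficulty++;
                }
            }
            lastReported = difficulty > WINDOW / 2
                    ? Status.HAVING_DIFFICULTY : Status.MAKING_PROGRESS;
            pending.clear();
        }
        return lastReported;
    }
}
```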
Table 1: Field Study of Industrial Software Developer.
| Status | Guessed | # Corrected | Accuracy |
|-----------------|---------|-------------|----------|
| Difficulty | 17 | 2 | 88% |
| Making Progress | 69 | 7 | 89% |
| Indeterminate | 2 | 0 | 100% |
Table 1 shows that the changes resulted in a high accuracy for the industrial developer.
However, the table shows that the aggregation scheme results in a large number of false negatives. In particular, it missed 7 of the 22 cases when the developer was having difficulty. To develop a more accurate scheme, we gathered more data points through a user study.
Before this step can be taken, it was important to address the performance and implementation overhead of the Eclipse and Visual Studio implementations. A reusable architecture is crucial for this research because of its iterative nature. We were able to apply certain standard design patterns and existing libraries to address the reuse issue. To address the performance issue, we offloaded event processing to a separate process that worked asynchronously from the programming environment.
Figure 3 shows the architecture. Naturally, a separate module is needed per programming environment to intercept its events. In addition, a separate module is needed per programming environment to display the current status, which is done by using a Google talk plug-in. Thus, in our implementation we use two different event-interception and status-display modules – one pair for Eclipse, and one for Visual Studio.
An event-interception module asynchronously forwards the events to a separate process, which makes the predictions. As the process was written in C#, serialized events could be sent directly from Visual Studio to this process. Java events, on the other hand, require conversion, and we were able to use standard (WOX and IKVM) libraries to do so.
Consider now the modules in the predicting process. Events are received by the “communication director” of the system, the mediator, which mediates between a pipeline of other modules. The mediator gives the received event to the first module in the pipeline. In addition, it receives output from each of these modules and feeds it as input to the next module, if such a module exists.
The first module to receive input from the mediator is the event aggregator module. This module aggregates 50 events and passes these events to the mediator. The mediator passes these events to the feature extractor module, which computes the ratios that are used to predict a status. The feature extractor passes the ratios to the mediator, and the mediator gives these ratios to the prediction manager. The prediction manager includes the decision tree algorithm (used in [7]), which uses previous data and the ratios to predict a status. This status is passed to the status aggregator, which aggregates each status and gives a final prediction to the mediator. The mediator delivers this status to the status displayer of the appropriate programming environment.
The benefit of using the mediator pattern is that it allows modules to be loosely coupled so that any change in the flow of communication would not require a change to a module. For example, if the status manager had to be omitted, the mediator would have to change. However, the other modules in the system would stay the same.
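A minimal Java sketch of this mediator wiring is given below; the Stage interface and the use of null to mean "still accumulating input" are illustrative assumptions rather than the actual interfaces of our implementation.

```java
import java.util.ArrayList;
import java.util.List;

// Minimal sketch (hypothetical interfaces) of the mediator wiring: stages
// never reference each other, so the pipeline can be re-ordered or a stage
// removed without touching the remaining modules. Returning null means a
// stage is still accumulating input (e.g. fewer than 50 events so far).
public class Mediator {
    interface Stage {
        Object process(Object input);
    }

    private final List<Stage> pipeline = new ArrayList<>();

    public void addStage(Stage stage) {
        pipeline.add(stage);
    }

    // Invoked by an event-interception module (Eclipse or Visual Studio);
    // the non-null result of the final stage is handed to the status displayer.
    public Object onEvent(Object event) {
        Object data = event;
        for (Stage stage : pipeline) {
            data = stage.process(data);
            if (data == null) {
                return null; // wait for more input before running later stages
            }
        }
        return data;
    }
}
```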
The iterative nature of this research requires the ability to easily change also the behavior of each of the individual modules in this pipeline. We used the standard Strategy pattern to achieve this goal. We give below specific uses for it in our context by considering each of the phases in the pipeline, and showing that multiple algorithms could be used in each phase.
Figure 4: Video Coding Tool

1. Event aggregator: There are at least two algorithms that can be run to aggregate events. The current algorithm uses discrete, independent chunks of 50 events. An alternate option is to use a gradual sliding-window approach similar to the approach used in TCP/IP. The code below shows the use of the Strategy pattern to easily switch between the two, assuming both are implemented:
```java
// Configure the event aggregator with the desired aggregation strategy;
// switching strategies only requires changing which line is active.
EventAggregator ea = new EventAggregator();
ea.setEventAggregationStrategy(new DiscreteChunks());   // current: independent 50-event chunks
// ea.setEventAggregationStrategy(new SlidingWindow()); // alternative: gradual sliding window
```
2. Feature extractor: We currently extract features based on the number of events. For example, the edit ratio it computes is the number of edits divided by the total sum of all actions including editing. It would also be useful to extract features based on time, such as editing time/total time. Another useful feature that was observed while watching developers solve problems is the number of exceptions per run.
3. Prediction manager: We currently use two machine learning algorithms, decision tree and classification via clustering, to predict developers’ status. In the future, we plan to test other classification or clustering algorithms, and perhaps build our own algorithm.
4. Status manager: There are at least two ways to aggregate statuses. Currently, we aggregate five statuses and take the most dominant status. This algorithm is similar to aggregating events in discrete chunks. Another approach is to use a sliding window, which corresponds to using a sliding window to aggregate events.
Our experience with the new architecture showed that (a) as expected, when multiple strategy objects were implemented for a stage, it was indeed trivial to replace one with the other, and (b) the asynchronous processing did not result in perceptible delays in user-response times.
We were now ready to do a controlled user study to evaluate the adapted algorithm and investigate additional adaptations based on this study.
4. USER AND CODING STUDY
In a controlled user study, the problems must be chosen carefully. Our previous work [7] found that having difficulty is a rare event. Thus, we must try and ensure that developers face difficulty in the small amount of time available (1-4 hours) for a lab study, and yet do not find the problems impossible.
We used problems from the Mid-Atlantic ACM programming competition. These problems are attractive because they have varying difficulty. We piloted several problems to find problems that were difficult but not impossible to solve by the subjects. Based on these pilots, we settled on the problems shown in Table 2. The table characterizes the difficulty of each problem by showing the number of teams that solved the problem, the total number of teams, and the fraction of teams that solved the problem. The difficulty level of each problem was determined by the number of teams that solved the problem. For example, 100% of teams that attempted the Simple Question of Chemistry problem solved it, while only 16% of teams that attempted the Balanced Budget Initiative Problem solved it.
Five industrial and nine student programmers participated in the study. Participants were instructed to correct an incorrect prediction by the system using status-correction buttons (Figure 5). By measuring how often the developers corrected their status, we could, as in [7], measure the accuracy of our approach with respect to the perceptions of the developers.
However, there is a question as to whether participants would accurately report their status, given the hurt ego problem faced by the second author. Moreover, it is useful to compare the tool’s predictions about a developer’s status with that of a third party manually observing the developer. Therefore, the first author and two independent coders observed participants’ programming activities and made an independent determination of their status.
To allow coders to independently and asynchronously observe participants’ programming activities, we used Microsoft Live Meeting® to record the participants’ screens. Live Meeting® also allowed the first author to observe remote sessions. In fact, Tang et al. [20] argued that screen recording is an effective and
unobtrusive technique when subjects do not feel it invades their privacy.
We obtained participants' consent to record their screens. We recorded 40 hours and 44 minutes of video. To relieve coders from watching hours of video, we created a video observation tool, shown in Figure 4. This video tool shows all segments where the participant, first author (while observing the experiments and later when randomly sampling the video), or system indicated the participant was having difficulty or not sure of their status (indeterminate). As it turned out, in our study, there was one indeterminate segment (indicated by a participant). We shall refer to these segments as “stuck” segments.
As there were few such segments, we asked the coders to classify each of these segments. It was not reasonable, however, to ask them to classify all of the other segments, which would have involved watching over forty hours of video. We could use a statistical sampling approach to reduce the number, but because having difficulty is a rare event, we would have had to sample the vast majority of segments to capture the false negatives.
Therefore, we used the following, somewhat arbitrary approach to choose the “making progress” segments. We randomly chose these segments, and made the number of randomly sampled points about the same as the number of having difficulty or indeterminate segments. If there were fewer than three having difficulty or indeterminate segments, we randomly sampled three segments. We shall refer to the randomly sampled segments as “random segments”.
Each segment was two minutes of video. Coders were not aware of the status of each segment and had to classify the segment as making progress or slow progress. They were shown the video that corresponded to a particular participant and problem. If there were any segments for the coder to classify, they were shown on a line below the track bar. The segments on the line corresponded with the particular point in the video the coder needed to classify.
To classify segments, coders right clicked on the segment to label it as “slow progress” (the message displayed for “having difficulty”), and left clicked to label it “making progress”. An image of a mouse was provided to remind coders what each mouse button meant, and a legend was also provided to help coders remember that a black segment meant the segment was unlabeled, a red segment meant slow progress, and a green segment meant making progress.
Two coders and the first author classified 26 stuck segments and 36 random segments.
### Table 2: ACM problems from Mid-Atlantic contest.
<table>
<thead>
<tr>
<th>Year</th>
<th>Problem Title</th>
<th># of teams that solved problem</th>
<th># of teams</th>
<th>% correct</th>
</tr>
</thead>
<tbody>
<tr>
<td>2006</td>
<td>Shrew-ology</td>
<td>43</td>
<td>138</td>
<td>31%</td>
</tr>
<tr>
<td>2004</td>
<td>Balanced Budget Initiative</td>
<td>23</td>
<td>142</td>
<td>16%</td>
</tr>
<tr>
<td>2002</td>
<td>A Simple Question of Chemistry</td>
<td>124</td>
<td>124</td>
<td>100%</td>
</tr>
</tbody>
</table>
5. STUDY RESULTS
After the user study and coding phases were complete, we were able to answer the following questions: What is the correlation between (a) predictions of the two coders; (b) developers’ and coders’ perception of status, (c) predictions of the tool and the developers’ perception of the status, and (d) predictions of the tool and the coders’ perception of the status? As we see below, the answers depended on whether the segment involved was one of the “stuck” segments or random segments.
Table 3 shows that the coders agreed with each other 88% of the time on stuck segments and 83% of the time on random segments; overall, they agreed 85% of the time.
To determine the level of agreement within the stuck (random) segments we counted the number of times observers agreed with each other and divided that by the total number of stuck (random) segments observed.
Interestingly, coders agreed that in 50% of the random segments, which were classified by the tool as “making progress,” participants were actually having difficulty. We examined these eighteen cases individually and found three segments that were three minutes before a stuck segment, so in these cases, the observers were quicker than the tool in determining the status of these segments. In the remaining fifteen segments, the coders seemed to take the inactivity of developers as being stuck. The three early observations were not counted as incorrect.
So what did the participants themselves feel about their status in case of these segments? By definition, they agreed completely with the predicted status for these segments, as these were the segments that were classified by the tool, participant, and first author as “making progress” segments.
We noticed that coders seemed to have a difficult time classifying participants when they were idle, and apparently thinking. The tool uses developers’ actions to predict their status and does not take into account think times or when developers are idle. Therefore, we consider the fifteen random segments as “making progress” when computing the accuracy of the tool.
Consider now the non-random or “stuck segments.” Again, these are the segments classified either by the first author, or the participant, or the tool as “having difficulty”. These segments tell a very different story. Table 4 shows the agreement of the coders with the tool, the author, and the participants for these segments. Interestingly, coders agreed with the tool 100% of the time that participants were stuck. Perhaps even more interestingly, participants never corrected a “having difficulty” status predicted by the tool.
In four of these segments, participants corrected the “making progress” prediction of the tool. Three of those times, participants indicated they were having difficulty, and one of those times the participant indicated that they were not sure of their status (indeterminate). In nine of these segments, the first author classified the “making progress” prediction of the tool as actually “having difficulty”. The coders agreed with seven of these observations (77%). Coders agreed with the participants 75% of the time. The coders disagreed with the participant who indicated indeterminate as the status. The first author also reviewed this disagreement and agreed with the coders that the participant was indeed having difficulty.
Several (preliminary) conclusions can be drawn from these results.
Table 3: Observers' agreement with each other.
<table>
<thead>
<tr>
<th>Segment Type</th>
<th># of Agreements</th>
<th># of Observations</th>
<th>% Agreement</th>
</tr>
</thead>
<tbody>
<tr>
<td>Stuck segments</td>
<td>23</td>
<td>26</td>
<td>88%</td>
</tr>
<tr>
<td>Random segments</td>
<td>30</td>
<td>36</td>
<td>83%</td>
</tr>
<tr>
<td>Total</td>
<td>53</td>
<td>62</td>
<td>85%</td>
</tr>
</tbody>
</table>
Table 4: Coders' agreement with the tool, first author, and participants (stuck segments).
<table>
<thead>
<tr>
<th>Entity</th>
<th># of Agreements</th>
<th># of Observations</th>
<th>% Agreement</th>
</tr>
</thead>
<tbody>
<tr>
<td>Tool</td>
<td>13</td>
<td>13</td>
<td>100%</td>
</tr>
<tr>
<td>First Author</td>
<td>7</td>
<td>9</td>
<td>77%</td>
</tr>
<tr>
<td>Participant</td>
<td>3</td>
<td>4</td>
<td>75%</td>
</tr>
<tr>
<td>Total</td>
<td>23</td>
<td>26</td>
<td>88%</td>
</tr>
</tbody>
</table>
What is perhaps most remarkable is that when the tool predicts programmers are having difficulty, all three types of humans involved in making the prediction (the participants, the coders, and the first author) also think they are having difficulty. Thus, the tool does not seem to give false positives, which is a very strong result and a significant improvement over the results in our previous work [7].
Moreover, if we take the participants’ perceptions as ground truth, the tool also gives negligible false negatives – only four segments out of 1222 segments in the entire study were corrected. On the other hand, if we take the coders’ agreements as ground truth, the results are not so good, and it seems, based on our sampling, the tool missed half of the positives (stuck status).
There are two ways to interpret these data. The first relies on the viewpoint of the participants rather than the coders. The argument for doing so is that the observers could not read the minds of the participants, and were probably looking only at idle times to deduce the developers' status. Idle times alone are not sufficient to distinguish between thinking and having difficulty. Our tool, on the other hand, keeps track of and computes a larger number of factors, such as the navigation, edit, and focus ratios, and thus agrees more with the participants. In fact, when asked about the accuracy of the tool, participants reported that they were happy with it (Table 5). The survey numbers are illustrated by the following two comments: “I think it worked pretty well; It's non-intrusive, and only pops up with information when the status changes.” “It knew when I was having issues cause it switched to slow progress and when I was flyin doing all the class design it said progress.”
The other interpretation relies on the observers (coders and first author) rather than the participants. The rationale for doing so is that participants tend to underreport their problems [21]. The false negatives of the tool can be explained by two factors:
1. The tool uses developers’ actions to predict their status, and does not take into account idle times, which should probably be considered in a future algorithm.
2. The training set consisted of data from the six student programmers logged in our previous work [7], who used the tool during normal “field work” consisting of assignments and research projects. The behavior of these programmers was different in some ways from those of several of the programmers in this lab study. The first group primarily used the Internet to look for help when they were having difficulty. The participants in this study did not use the Internet often because of the type of tasks and duration of this study. The only times they used the Internet was to remember syntax or look at the Java or .NET API. Moreover, the two groups solved different types of problems, and the group in this study also included industrial programmers. One piece of objective data seems to indicate that the type of programmer may be a factor in automatic status prediction. For three student participants, the automatic predictions were completely in agreement with the perceptions of the coders, when the coders agreed.
Even under this interpretation, our tool seems useful because of its zero false-positive rate. It seems that if a choice has to be made between low false positives and low false negatives, the former is more desirable, as it does not unnecessarily waste the time of the developers and those who offer help. Missing some “having difficulty” statuses is no worse than the current practice of not having any automatic predictions. Our tool did give several positives (thirteen), all of which were correct under this interpretation. Thus, if it is considered desirable to automatically let others know about developers’ difficulties – an assumption of this research based on previous work – then it seems better to use our tool than not to use it.
Naturally, it is attractive to try to reduce the false negative rate (under the second interpretation) without increasing the false positive rate. One way to do so is to train the system using the observers’ conclusions rather than developer corrections (assuming the former are true). Moreover, the accuracy can be further improved if the training data involve the same exercises as the ones used in the testing phase. We could either build a group model, in which the data of multiple developers are aggregated during the training phase, or an individual model, where no aggregation is done. (The approach described so far was also a group model, but in it the training group was smaller and solved different problems.) We therefore decided to explore these directions next.
6. PREDICTING OBSERVER STATUS
To build the individual and our group models, we assumed the following ground truth. All segments classified by the participants as stuck were indeed stuck segments. Participants implicitly classify segments as stuck when they do not correct a stuck prediction of the tool. They explicitly classify them as stuck when they correct a “making progress” segment as “slow progress”.
Of the remaining segments, if the first author and the two coders classified a segment as stuck, then it was also a stuck segment,
regardless of how the participant classified it. All other segments were making progress.
To build and evaluate the individual model, we used a standard technique known as cross-validation, which executes 10 trials of model construction, splitting the data so that 90% of the data are used to train the algorithm and 10% are used to test it. In some participants' training sets, the number of "making progress" segments vastly outnumbered the number of "having difficulty" segments, resulting in low accuracy in predicting the "having difficulty" segments. This is an example of the class imbalance problem in classification algorithms, wherein the accuracy of predicting a rare but important event decreases as its frequency decreases. The SMOTE [22] algorithm implemented in the WEKA toolkit [23] addresses this problem by oversampling the rare class (generating additional synthetic minority records) until it is balanced with the more common class.
Therefore, we used this scheme in the data sets of those participants who experienced the class imbalance problem. In our case, we used an accuracy threshold of 90% to determine if a participant experienced this problem, which was the accuracy of our previous approach [7]. The accuracy of the model without SMOTE was 66% or less for participants who had difficulty 20% or less of the time. For participants who had difficulty more than 20% of the time, the accuracy of the model without SMOTE was 94% or more. Thus, according to our threshold, participants who had difficulty less than 20% of the time faced the class imbalance problem. For these participants, we used SMOTE to oversample the "having difficulty" segments. In the case of the remaining participants, "having difficulty" was either less or about as frequent as "making progress". Thus, there was never a need to use SMOTE to oversample the "making progress" segments. Three of the twelve participants faced so much difficulty that they did not complete two of the three exercises.
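As an illustration only (this is a sketch, not the authors' implementation), the evaluation described above could be set up with the WEKA API roughly as follows: the SMOTE filter (distributed as a separate WEKA package) is wrapped with a J48 decision tree in a FilteredClassifier and evaluated with 10-fold cross-validation. The ARFF file name and attribute layout are assumptions.

```java
import java.util.Random;

import weka.classifiers.Evaluation;
import weka.classifiers.meta.FilteredClassifier;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;
import weka.filters.supervised.instance.SMOTE;

public class IndividualModel {
    public static void main(String[] args) throws Exception {
        // Hypothetical ARFF file: one row per two-minute segment, with the
        // navigation/edit/focus ratios as attributes and a nominal class
        // {making_progress, having_difficulty} as the last attribute.
        Instances data = new DataSource("participant01.arff").getDataSet();
        data.setClassIndex(data.numAttributes() - 1);

        // SMOTE oversamples the rare "having difficulty" class; J48 is WEKA's
        // C4.5-style decision-tree learner used here as the base classifier.
        FilteredClassifier model = new FilteredClassifier();
        model.setFilter(new SMOTE());
        model.setClassifier(new J48());

        // 10-fold cross-validation over this participant's own segments
        // (the individual model); the filter is applied to training folds only.
        Evaluation eval = new Evaluation(data);
        eval.crossValidateModel(model, data, 10, new Random(1));
        System.out.println(eval.toSummaryString("Individual model:\n", false));
        System.out.println(eval.toClassDetailsString());
    }
}
```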
To build the model for a particular individual, we used that individual's data as both the training and test set. To build the group model, we aggregated the data from all of our participants except data from the participant whose status we were trying to automatically predict. The exclusion was meant to test if a tool trained by one set of developers could be used to predict the status of another. We used the group data to predict the status of each individual.
The group data set did not suffer from the class imbalance problem because some of the participants had difficulty just as much as they were making progress. As mentioned before, even those who made relatively smooth progress experienced some difficulty. The decision tree algorithm [23] was used to build both the individual and group models.
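The group model amounts to a leave-one-participant-out evaluation: train on everyone else's segments and test on the held-out participant. A minimal sketch under the same assumptions (one ARFF file per participant, with compatible headers):

```java
import weka.classifiers.Evaluation;
import weka.classifiers.trees.J48;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class GroupModel {
    // Train a decision tree on all participants except the held-out one,
    // then evaluate it on the held-out participant's segments.
    static Evaluation leaveOneParticipantOut(Instances[] perParticipant, int heldOut)
            throws Exception {
        Instances train = new Instances(perParticipant[heldOut], 0); // same header, no rows
        for (int p = 0; p < perParticipant.length; p++) {
            if (p == heldOut) continue;
            for (int i = 0; i < perParticipant[p].numInstances(); i++) {
                train.add(perParticipant[p].instance(i));
            }
        }
        J48 tree = new J48();
        tree.buildClassifier(train);

        Evaluation eval = new Evaluation(train);
        eval.evaluateModel(tree, perParticipant[heldOut]);
        return eval;
    }

    public static void main(String[] args) throws Exception {
        // One ARFF file per participant, passed on the command line.
        Instances[] data = new Instances[args.length];
        for (int p = 0; p < args.length; p++) {
            data[p] = new DataSource(args[p]).getDataSet();
            data[p].setClassIndex(data[p].numAttributes() - 1);
        }
        for (int p = 0; p < data.length; p++) {
            Evaluation eval = leaveOneParticipantOut(data, p);
            System.out.printf("participant %d: %.1f%% correct%n", p, eval.pctCorrect());
        }
    }
}
```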
Figures 5a and 5b show the accuracy of the tool. We considered four accuracies: (a) group stuck: the accuracy of the group model when predicting having difficulty, (b) individual stuck: the accuracy of the individual model when predicting having difficulty, (c) group overall: the accuracy of the group model when predicting both making progress and having difficulty, and (d) individual overall: the accuracy of the individual model when predicting both making progress and having difficulty. The accuracies are shown for all but two participants. These two participants were not included because their data was not collected correctly.
We expected each individual's model to be more accurate than the group model, but surprisingly, the group model was more accurate in predicting both "having difficulty" and "making progress" than the individual model. This unintuitive result is likely because the group model has more data than the individual model.
Table 5: Survey Questions and Results (Scale: 1 = Strongly Disagree to 7 = Strongly agree).
<table>
<thead>
<tr>
<th>Survey Question</th>
<th>Mean</th>
<th>Median</th>
<th>STDDEV</th>
</tr>
</thead>
<tbody>
<tr>
<td>Q1 I felt that the tool was accurate.</td>
<td>6</td>
<td>6</td>
<td>.95</td>
</tr>
<tr>
<td>Q2 I would prefer to use a speech interface (speaking your status) instead of pressing the status buttons.</td>
<td>2.83</td>
<td>3</td>
<td>1.53</td>
</tr>
</tbody>
</table>
It is possible that with more training, the individual model would perform better. Even then, it may not be the preferable approach because participants probably would not like training the tool. In fact, during the debrief one participant commented that pressing buttons "stopped my flow of thought" and another participant felt that pressing buttons "sort of broke my concentration".
We asked participants if they preferred to speak their status because this could help reduce breaking their concentration (Table 5). Participants did not like this feature either, and felt it would be disruptive to those around them.
There were two participants whose accuracy was 50% or below. We examined these cases and determined that the tool believed these participants were making progress while human observers believed the participants were stuck. In each case, the participants were performing significant edits, which indicated to the tool that they were making progress. However, these edits involved a large number of deletions. This kind of activity suggests that, when extracting features, editing actions should be split into two categories: insertion and deletion of text.
The evaluations above show that it is possible to increase the agreement between a tool and a set of observers by (a) keeping the exercises the same in the training and evaluation set, and (b) using the judgments of these observers in the training set. Additional iterations are required to determine if (a) a tool trained using one set of exercises can be used to predict the status for another set of tasks, and (b) judgments of one set of observers can be used to agree with the judgments of another set of observers.
7. PRIVACY
So far, we have assumed that letting others know about a developer's difficulties is good. This assumption is probably true when the observers are mentors or advisors, as suggested in [3]. However, it is possible to have observers who judge programmers without actually helping them. Such judgers can use information about developers being stuck repeatedly in a negative manner, which could cause programmers to lose respect in their team. Even when observers can be trusted, developers may want more time to investigate their problems on their own. There are several ways to address this problem. One approach is to block judgers, a feature readily available in Google Talk and other IM clients. The problem with this approach is that blocked judgers can realize that they are blocked, which could cause them to become hostile. Therefore, a superior approach is to allow programmers to decide which status they want to report. Figure 6 shows a preliminary scheme we have implemented to support this feature, which is also used by developers to train the system. This interface reports two statuses: the true status and the reported status. Buttons are provided to change both statuses.
The buttons that change the true status are used to train the system and the buttons that change the reported status determine what others on their buddy lists see (Figure 1). The true status field is automatically copied to the reported status field after a certain time lag. During this time, developers can manually disable the copying. Assuming that having difficulty is indeed a rare event, this user-interface does not impose substantial overhead.
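A minimal sketch of this two-field scheme (our own illustration, not the tool's actual code): the true status is available immediately for training the predictor, while the reported status (what buddies see) follows it only after a configurable lag that the developer can cancel or override.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

public class StatusController {
    public enum Status { MAKING_PROGRESS, HAVING_DIFFICULTY, INDETERMINATE }

    private final ScheduledExecutorService scheduler =
            Executors.newSingleThreadScheduledExecutor();
    private final long lagSeconds;                  // assumed configurable grace period
    private volatile Status trueStatus = Status.MAKING_PROGRESS;
    private volatile Status reportedStatus = Status.MAKING_PROGRESS;
    private ScheduledFuture<?> pendingCopy;

    public StatusController(long lagSeconds) { this.lagSeconds = lagSeconds; }

    // Called by the predictor or by the status-correction buttons; used for training.
    public synchronized void setTrueStatus(Status s) {
        trueStatus = s;
        if (pendingCopy != null) pendingCopy.cancel(false);
        // Copy the true status to the reported status after the lag,
        // unless the developer disables or overrides the copy in the meantime.
        pendingCopy = scheduler.schedule(
                () -> { reportedStatus = trueStatus; }, lagSeconds, TimeUnit.SECONDS);
    }

    // Called when the developer manually chooses what observers should see.
    public synchronized void setReportedStatus(Status s) {
        if (pendingCopy != null) pendingCopy.cancel(false);
        reportedStatus = s;
    }

    public Status getTrueStatus()     { return trueStatus; }      // trains the system
    public Status getReportedStatus() { return reportedStatus; }  // shown on buddy lists
}
```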
We have not formally evaluated these privacy controls, but we have gotten some initial feedback from those who have used them. Users would indeed like to customize not only what status is reported, but when it is reported, and to whom it is reported.
Thus, this scheme must be extended to control the nature and timing of reported status for different classes of observers such as (a) human observers and tools, (b) a team member sitting on the next seat, radically co-located, and distributed, (c) a close friend, mentor, and boss, and (d) team members who have and do not have the expertise to help solve a problem.
Such elaborate customization could make the overhead required to use the tool high. Future versions of this scheme must allow for setting user-specific defaults. For example, the number of IM messages with team members can be used to identify close friends; organization charts can be used to find mentors and bosses; location information can be used to find the physical distance between developers and various observers; and the difficulty each team member has with different pieces of a project can be used to find expertise. In addition, the tool can adapt how developers morph the reported status. For instance, if they always report the indeterminate status to their boss, then the tool could ask them if they wish to set this value automatically for this observer.
8. CONCLUSIONS AND FUTURE WORK
This paper contributes to both the general area of semantic awareness and the specific subarea of providing awareness about developers’ progress. To the best of our knowledge, other work on semantic awareness has not tied the judgments of third-party observers with those of a tool. Our work shows that (a) these judgments can be different from those of the actors about whom the awareness is being provided, (b) a special tool must be provided to gather third-party observations, and (c) it is possible to train an automatic tool to agree, to a high degree, with those of the observers.
Our main contributions, of course, are in the subarea of difficulty prediction. We have identified a pipeline of modules for predicting and displaying difficulty. We have also shown the usefulness of two well-known design patterns, Mediator and Strategy, in implementing the pipeline. We have created an architecture that allows the pipeline implementation to be reused by and execute asynchronously with multiple programming environments. Our evaluations show that label aggregation can be used to significantly improve the accuracy of a difficulty-prediction algorithm, and a group training model offers more accuracy than an individual one under certain circumstances.
They also show that it is possible to build a tool that does not give false positives, regardless of whether the participant or observer judgments are used as the ground truth. Finally, we motivate
and present new user-interfaces for customizing status messages and exporting the status to others.
As mentioned before, it would be useful to formally evaluate a design space of status customization and exporting interfaces, and determine if (a) a tool trained using one set of exercises can be used to predict the status for another set of tasks, and (b) judgments of one set of observers can be used to agree with the judgments of another set of observers. Perhaps the biggest unresolved issue raised by this work is whether the participants or observers should be relied upon to determine if developers are stuck.
Perhaps more observations are needed to help resolve this issue. Another, more objective approach, is to make the following assumption: The fraction of “having difficulty” segments is proportional to the inherent difficulty of the problem. By using problems of known difficulty, we can determine whether the judgments of observers or developers correspond more closely with problem difficulty.
Once this issue is resolved, the next step would then be to deploy the tool in larger field studies and determine (a) what the developers feel about the accuracy of the tool, (b) how often and to whom they export the “having difficulty” status, and (c) how often and from whom they accept help. Naturally, based on this experience, we can expect additional iterations through the design process identified earlier.
Assuming that observers are not reliable in characterizing developer’s difficulty level, a pair of programmers working side-by-side [6] could use difficulty notifications to determine if they should help each other. Moreover, knowing the rate at which developers get stuck may be useful not only for determining if they need help. It could be used to (a) characterize the inherent difficulty of new problems, (b) determine the expertise of developers to solve certain kinds of problems, (c) estimate how long it will take them to complete their task, (d) compare the effectiveness of the various coupling degrees, mentioned earlier, in reducing the number of times developers face difficulty.
This paper provides a basis and motivation for carrying out these future research directions.
9. ACKNOWLEDGEMENTS
This research was funded in part by NSF grants IIS 0312328, IIS 0712794, and IIS-0810861. We would like to thank the study subjects.
10. REFERENCES
[Pre-print. Published in the Proceedings of the Workshop on Comparative Evaluation in Requirements Engineering (CERE'06), held in conjunction with the 14th IEEE International Requirements Engineering Conference (RE'06), 2006.]
Comparative semantics of Feature Diagrams: FFD vs. vDFD
Jean-Christophe Trigaux, Patrick Heymans, Pierre-Yves Schobbens, Andreas Classen
University of Namur, Computer Science Department
5000 Namur, Belgium
{jtr,phe,pys,aclassen}@info.fundp.ac.be
Abstract
Feature Diagrams are a popular family of modelling languages used for engineering requirements in software product lines. In our previous research, we advocated the use of formal semantics as an indispensable means to clarify discussions about feature diagrams and to facilitate safe and efficient tool automation. We presented a generic formal semantics for feature diagram languages and criteria to compare them. However, other formal semantics exist. We already informally argued in favour of our semantics which, we think, is more abstract, more concise and not tool dependent. However, some of these claims needed to be further objectified. The purpose of this paper is to compare the semantics proposed by van Deursen and Klint with our own following the methodology of comparative semantics. To be made amenable to comparison, van Deursen and Klint’s tool-based definition is first recalled and redefined by correcting some minor mistakes. Their semantics is then mapped to ours through an abstraction function. We then proceed to compare the expressiveness, embeddability and succinctness of both approaches. The study tends to confirm our semantic choices as well as our tool-independent methodology. It also demonstrates that van Deursen and Klint’s language is fully expressive and provides various results likely to help tool developers, especially for implementing model transformations.
1 Introduction
Central to the Product Line (PL) paradigm is the modelling and management of variability, i.e. the commonalities and differences in the applications in terms of requirements, architecture, components, and test artifacts [21]. Variability at the requirement level is commonly modelled through Feature Diagrams (FD). In the last decade, research and industry have developed several FD languages. The first and seminal proposal was introduced as part of the FODA method back in 1990 [16]. An example of a FODA FD is given in Fig. 1. It is inspired by a case study defined in [6] and indicates the allowed combinations of features for a family of systems intended to monitor the engine of a car. As illustrated, FODA features are nodes of a graph, represented by strings and related by various types of edges. At the top of Fig. 1, the node Monitor Engine System is called the root, or concept. Nodes can be mandatory or optional. Optional nodes are represented with a hollow circle above their name, e.g. Coolant. In FODA, mandatory nodes are the ones without a hollow circle (in some other syntaxes [12, 23, 22, 7, 10, 8], they are represented with filled circles). The edges are used to progressively decompose nodes into more detailed features. In FODA, there were only and- and xor-decompositions, as illustrated in Fig. 1:
1. and-decomposition e.g. between Monitor Fuel Consumption and its sons, Measures and Methods, indicating that they should both be present in all feature combinations where Monitor Fuel Consumption is present.
2. xor-decomposition where edges are linked by a line segment, as between Measures and its sons, 1/km and Miles/gallon, indicating that only one of them should be present in combinations where Measures is.
Figure 1. FODA: Monitor Engine System
Since Kang et al.’s initial proposal, several extensions have been devised as part of the following methods: FORM [17], FeatureRSEB [12], Generative Programming [10], PLUSS [11], and in the work of the following authors: Riebisch et al. [23, 22], van Gurp et al. [28], van Deursen and Klint [27], Czarnecki et al. [7, 8], Batory [1] and Benavides et al. [2].
Most of these authors [17, 12, 10, 23, 22, 28, 11] present their semantics only by way of examples. Still, most of them have argued for an “improved expressiveness”. However, without a formal semantics, they have failed to demonstrate it.
In previous publications [4, 26, 25], we have developed and applied a rigorous framework to assess those claims. We have first carried out a comprehensive survey of the informal FD variants. We have generalized their various syntaxes through a generic construction called Free Feature Diagrams (FFD). We gave a formal semantics to FFD, thus providing a (hopefully) unambiguous and very concise definition for all the surveyed FD variants. All formalization choices found a clear answer in the original FODA FD definition, which proved that although informal and scattered throughout many pages, it suffered no ambiguity problem. However, having a proper formal semantics remains extremely important. As remarkably argued in [13], formal semantics is the best way to avoid ambiguities and to start building safe automated reasoning tools. Without a formal semantics, new FD languages might continue to proliferate on the basis of shallow or erroneous motivations, leading to interpretation and interoperability problems.
In [4, 26, 25], we argued that FFD contribute to improve the definition, understanding, comparison and reliable implementation of FD languages. In particular, we have highlighted some subtle points in the interpretation of FD. Additionally, we have defined the main decision problems that a FD tool should implement, i.e. we gave a specification of such tools, and subsequently, we studied their minimal execution time.
Some authors have also started to better define their semantics [27, 8, 7, 1]. However, we found that these semantics are less general, abstract and concise. These approaches typically transform FD to other formalisms (for which tools exist). This naturally gives a more complex transformation and a less abstract semantics. It has the dubious advantage that the transformation is correct by definition. On the contrary, we believe that tools should be built or chosen according to a natural, carefully chosen and well-studied semantics. Our approach is justified by our goals: make fundamental semantic issues of FD languages explicit in order to study their properties and rigorously evaluate them before adopting them or implementing CASE tools.
We are now in a position to proceed to our next phase of study sketched in [26, 25]: given well-defined FD languages, we can start a meaningful discussion of their merits, following the well-established scientific method called comparative semantics. In this paper, we compare the semantics of FFD with the one defined by van Deursen and Klint in [27], which is apparently the first formal semantics of FD to have been published. For brevity, we call their variant of FD vDFD (van Deursen and Klint’s Feature Diagrams).
This paper is structured as follows. In Sec. 2, we will describe the method that we use to compare formal semantics. FFD are then briefly presented in Sec. 3. In Sec. 4, we will recall the semantics of vDFD given in [27] and compare it with our own. The comparison of both formalisms appears in Sec. 5. Finally, related works are examined in Sec. 6 and conclusions are given in Sec. 7.
2 Research method
A proper definition of a formal semantics is preferably done in several steps. The first step in this chain is to have a bidirectional conversion from the concrete syntax (what the user sees), to abstract data structures, called abstract syntax. Indeed, the concrete syntax is usually too cumbersome to be reasoned on efficiently, be it by humans or tools.
The abstract syntax is usually a graph. It is thus essential to specify exactly what are the allowed graphs. There are two common ways to provide this information: (1) mathematical notation (set theory) or (2) meta-model (usually, a UML Class Diagram complemented with OCL constraints). In this paper, we follow [13] and adopt the former for its improved conciseness, rigour and suitability for performing mathematical proofs. The latter might be more readable and facilitate some implementation tasks such as building a repository.
The second step is to provide a formal semantics, i.e. a function from the graphs above to a mathematical structure chosen for being as close as possible to our intuitive understanding. This structure forms the semantic domain. In [3] (and in Def. 3.5 of this paper) we proposed as semantic domain the one of Product Lines (PL) defined as set of products, where a product is characterized by the primitive features it includes.
The works to which we compare often do not follow this methodology, but are amenable to it. For instance, [1] defines a transformation to grammars and propositional formulae. Fortunately, these two formalisms are provided with a standard semantics, so that we can obtain a semantics by composing the transformation followed by the standard semantics. In all similar approaches, the semantic domain of the formalisms used were not designed for features. They are thus usually less abstract: they keep too much syntactic
information, so that fewer diagrams are considered equivalent. To discard this syntactic information, we must introduce abstraction functions between semantic domains (see Fig. 2). As we progress in our comparative semantics study, we will construct a category of semantic domains linked by abstraction functions. The comparative semantics of specification languages [19] of logic programming, of concurrent programming [9] or of coordination languages [5], for instance, is already well developed and integrates developments and tools from different languages.
Given this, we can then evaluate which FD languages are more expressive, succinct, or natural [3] more rigorously. The technical tool for this study are translations or embeddings (see Fig. 2), i.e. transformations (functions between abstract syntaxes) that preserve semantics.
As illustrated in Fig. 2, for each FD language, say X, with an already existing formal semantics, their semantic domain (XSD) and abstract syntax (XFD) should be compared with our semantic domain (PL, see Def. 3.5) and the abstract syntax of FFD (Def. 3.1), respectively, in order to derive abstraction functions and embeddings. Moreover, these translations compose, so that it is useful to have our category of semantic domains, and look at its shape so that most results follow by composition.
3 Free Feature Diagrams: FFD
3.1 Abstract syntax
We further distinguish “primitive” and “compound features”.
Primitives are “features” that are of interest per se, and that will influence the final product. On the contrary, compound features are just intermediate nodes used for decomposition. For generality, we leave it to the modeler to define which nodes in the FD have such a purpose. Primitive features are thus not necessarily equivalent to leaves, though it is the most common case.
Decomposition edges relate a father node f to a son node s and are noted \( f \rightarrow s \).
FD languages vary in several respects: (1) do they consider FD as trees or DAG, (2) what are the allowed types of operators on nodes, (3) what are the allowed types of graphical constraints, (4) what are the allowed textual constraints. These are the parameters (GT, NT, GCT, TCL) of FFD:
1. GT (Graph Type) is either DAG or TREE.
2. NT (Node Type) is a set of Boolean functions (operators), at most one per arity. E.g.: and is the set of operators \( and_s \), one for each arity \( s \), that return true iff all their \( s \) arguments are true. Similarly, or (resp. xor) is the set of operators \( or_s \) (resp. \( xor_s \)) that return true iff some (resp. exactly one) of their \( s \) arguments are true. Operators \( opt_s \) in opt always return true. Operators \( vp(i,j)_s \) in card return true iff at least \( i \) and at most \( j \) of their arguments are true. NT is usually some combination of those sets.
3. GCT (Graphical Constraint Type) is a binary Boolean operator, e.g. Requires (\( \Rightarrow \)) or Mutex (mutual exclusion).
4. TCL (Textual Constraint Language) is a subset of the language of Boolean formulae where the predicates are the nodes of the FD. The sublanguage used in FODA FD, “Composition rules” [16, p.71] is: \( CR := p_1(\text{requires} | \text{mutex})p_2 \) where \( p_1, p_2 \) are primitive features.
The syntactic domain of a particular FD language can be defined simply by providing values for these parameters. For example, the abstract syntax of FODA FD is defined as \( FFD(TREE, and \cup xor \cup \{opt_1\}, \emptyset, CR) \). In [4], the abstract syntax of other FD variants, including [17, 12, 10, 23, 22, 28, 11], is defined similarly. As we will see in Sec. 4.2, van Deursen and Klint’s abstract syntax is defined as \( FFD(DAG, and \cup xor \cup or \cup \{opt_1\}, \emptyset, CR') \) where \( CR' := p_1(\text{requires} \mid \text{excludes})p_2 \mid (\text{include} \mid \text{exclude})p \).
The semantics is defined only once for FFD [26, 25], reproduced in Sec. 3.2. The formal semantics of a particular FD language defined through FFD thus comes for free.
2 We adopt the terminology of [1].
3 “Compound features” are also called “decomposable features”.
Definition 3.1 (Free Feature Diagram, or FFD) A FFD
\(d \in \text{FFD}(\text{GT}, \text{NT}, \text{GCT}, \text{TCL})\) is a tuple \((N, P, r, \lambda, \text{DE}, \Phi, \text{CE})\)
where:
- \(N\) is its set of nodes;
- \(P \subseteq N\) is its set of primitive nodes;
- \(r \in N\) is the root of the FD, also called the concept;
- \(\lambda : N \to \text{NT}\) labels each node with an operator from NT;
- \(\text{DE} \subseteq N \times N\) is the set of decomposition edges;
\((n, n') \in \text{DE}\) will rather be noted \(n \to n'\);
- \(\text{CE} \subseteq N \times \text{GCT} \times N\) is the set of constraint edges;
- \(\Phi \subseteq \text{TCL}\) are the textual constraints.
FFD collect whatever can be drawn. So, FD have additional minimal well-formedness conditions.
Definition 3.2 (Feature Diagram, or FD) A FD is a FFD where:
1. Only \(r\) has no parent: \(\forall n \in N.\ (\neg\exists n' \in N.\ n' \rightarrow n) \Leftrightarrow n = r\).
2. \(\text{DE}\) is acyclic: \(\neg\exists n_1, \ldots, n_k \in N.\ n_1 \rightarrow \ldots \rightarrow n_k \rightarrow n_1\).
3. If \(\text{GT} = \text{TREE}\), \(\text{DE}\) is a tree: \(\neg\exists n_1, n_2, n_3 \in N.\ n_1 \rightarrow n_2 \wedge n_3 \rightarrow n_2 \wedge n_1 \neq n_3\).
4. Nodes are labelled with operators of the appropriate arity: \(\forall n \in N.\ \lambda(n) = \phi_s\) where \(s = \#\{n' \in N \mid n \rightarrow n'\}\).
3.2 Semantics
A formal semantics is given by a function from the syntactic domain of a language to a semantic domain [13]. The syntactic domain was given in Def. 3.1 and 3.2. Here, after some preliminary definitions, we define the semantic domain as the set of product lines (PL)(Def. 3.5, point 3) and then the semantic function (Def. 3.5, point 4).
The notion of model for a FD was introduced in [16, p.64], with the examples of models of X10 terminals.
Definition 3.3 (Model) A model of a FD is a subset of its nodes; the set of models is \(M = \mathcal{P}(N)\).
Definition 3.4 (Valid model) [16, p.70] A model \(m \in M\) is valid for a \(d \in \text{FD}\), noted \(m \models d\), iff:
1. The concept is in: \(r \in m\).
2. The meaning of nodes is satisfied: if a node \(n \in m\) has sons \(s_1, \ldots, s_k\) and \(\lambda(n) = \phi_k\), then \(\phi_k(s_1 \in m, \ldots, s_k \in m)\) must evaluate to true.
3. The model must satisfy all textual constraints: \(\forall \phi \in \Phi.\ m \models \phi\), where \(m \models \phi\) means that we replace each node name \(n\) in \(\phi\) by the truth value of \(n \in m\), evaluate \(\phi\) and get true. For example, if we call \(\phi_1\) the CR constraint \(p_1\) requires \(p_2\), then \(m \models \phi_1\) when \(p_1 \in m \Rightarrow p_2 \in m\) is true.
4. The model must satisfy all graphical constraints: \(\forall (n_1, op, n_2) \in \text{CE}.\ op(n_1 \in m, n_2 \in m)\) must be true.
5. If \(s\) is in the model and \(s\) is not the root, one of its parents \(n\), called its justification, must be too: \(\forall s \in m.\ s \neq r \Rightarrow \exists n \in m.\ n \rightarrow s\).
Definition 3.5 (Product and Product Line, or PL) We define:
1. A product \(c\) is a set of primitive nodes: \(c \subseteq P\).
2. The product named by a model \(m\), noted \(\|m\|\), is \(m \cap P\).
3. A product line (PL) \(pl\) is a set of products: \(pl \subseteq \mathcal{P}(P)\).
4. The product line of a FD \(d\) consists of the products named by its valid models: \(\|d\| = \{m \cap P \mid m \models d\}\).
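To make these definitions concrete, the following sketch (our own simplified illustration, not part of the FFD tooling) checks \( m \models d \) for an FD restricted to and/or/xor/opt operators and to requires/mutex constraints; the names and data layout are assumptions.

```java
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.stream.Collectors;

// A simplified valid-model check following Definitions 3.2, 3.4 and 3.5.
public class ValidModel {
    public enum Op { AND, OR, XOR, OPT }

    public record FD(Set<String> nodes, Set<String> primitives, String root,
                     Map<String, Op> label, Map<String, List<String>> sons,
                     List<String[]> requires, List<String[]> mutex) {}

    // Condition 2 of Def. 3.4: the operator labelling node n holds on its sons.
    static boolean nodeSatisfied(FD d, Set<String> m, String n) {
        List<String> sons = d.sons().getOrDefault(n, List.of());
        long in = sons.stream().filter(m::contains).count();
        return switch (d.label().getOrDefault(n, Op.OPT)) {
            case AND -> in == sons.size();
            case OR  -> sons.isEmpty() || in >= 1;   // leaves are trivially satisfied
            case XOR -> sons.isEmpty() || in == 1;
            case OPT -> true;
        };
    }

    public static boolean isValid(FD d, Set<String> m) {
        if (!m.contains(d.root())) return false;                      // 1. concept is in
        for (String n : m)
            if (!nodeSatisfied(d, m, n)) return false;                // 2. node meanings
        for (String[] r : d.requires())                               // 3-4. constraints
            if (m.contains(r[0]) && !m.contains(r[1])) return false;
        for (String[] x : d.mutex())
            if (m.contains(x[0]) && m.contains(x[1])) return false;
        for (String s : m) {                                          // 5. justification
            if (s.equals(d.root())) continue;
            boolean justified = m.stream().anyMatch(
                n -> d.sons().getOrDefault(n, List.of()).contains(s));
            if (!justified) return false;
        }
        return true;
    }

    // The product named by a model (Def. 3.5): its primitive nodes only.
    public static Set<String> product(FD d, Set<String> m) {
        return m.stream().filter(d.primitives()::contains).collect(Collectors.toSet());
    }
}
```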
4 van Deursen and Klint’s Feature Diagrams: vDFD
van Deursen and Klint have formalized FD, by providing [27]:
1. A syntax presented as a feature description language that we will call vDFD. The authors also provided a feature definition (Def. 4.1) and a grammar for vDFD (Def. 4.3);
2. A semantics presented as a feature diagram algebra defining various sets of rules manipulating vDFD:
(a) Normalization rules (\(N\)) to eliminate duplicate features and degenerate cases of the various constructs (Def. 4.5);
(b) Variability rules to count the number of products allowed in a FD;
(c) Expansion rules (\(E\)) to expand a normalized feature expression into a disjunctive normal form (Def. 4.6);
(d) Satisfaction rules (\(S\)) to determine which feature expressions in disjunctive normal form satisfy the feature constraints (Def. 4.7);
In Fig. 3, we show the sequence of these transformations \((N, E, S)\) on vDFD as proposed in [27]. An alternative sequence of transformations \((N', E', S')\) on vDFD is shown which refers to the small corrections we propose for each of them. In the next sections, we will give the reader more details on this. First, we will present the formalization for vDFD proposed in [27] (Sec. 4.1), then we will discuss their abstract syntax (Sec. 4.2) and revisit their semantics (Sec. 4.3). Finally, we compare the revisited semantics with our own (Sec. 5).
4.1 van Deursen and Klint’s original definition
The primary objective of van Deursen and Klint was to reason on FD using a textual representation rather than a graphical one. Further requirements for this representation were [27]:
1. to contain all the information contained in the graphical form,
2. to be suited for automatic reasoning.
To satisfy these requirements, the authors first produced a feature definition (Def. 4.1). Then they proposed a grammar for their textual FD (Def. 4.3). Finally, they proposed rules to reason on this representation. Rules are used to check the consistency of the representation. \(N\) and \(E\) are used to generate a normal form (syntactic consistency). Rules for computing variability are also defined but are not relevant here as they do not influence the semantics. All these rules are defined and justified in [27]. To present these definitions we reuse the variable naming convention proposed by van Deursen and Klint (Table 1). All the reasoning proposed by the authors is based on their disjunctive normal form (Def. 4.4).
**Definition 4.1 (Feature definition)** A feature definition [27, p.4] is a feature name followed by “:” and a feature expression (Def. 4.2)
**Definition 4.2 (Feature expression)** A feature expression [27, p.4] can consist of
1. an atomic feature,
2. a composite feature: a named feature whose definition appears elsewhere,
3. an optional feature: a feature expression followed by ?,
4. mandatory features: a list of feature expressions enclosed in all( ),
5. alternative features: a list of feature expressions enclosed in one-of( ),
6. non-exclusive selection of features: a list of feature expressions enclosed in more-of( ),
7. a default feature value: default = followed by an atomic feature,
8. features of the form ..., indicating that a given set is not completely specified.
**Definition 4.3 (vDFD Grammar)** A vDFD Grammar [27, p.6] is defined by:
\[
\begin{align*}
[A-Z][a-zA-Z0-9]* &\rightarrow \text{FeatureName} \\
[a-z][a-zA-Z0-9]* &\rightarrow \text{AtomicFeature} \\
\text{FeatureDefinition}+\ \text{Constraint}* &\rightarrow \text{FeatureDiagram} \\
\text{FeatureName} : \text{FeatureExpr} &\rightarrow \text{FeatureDefinition} \\
\{\ \text{FeatureExpr}\ ,\ \}+ &\rightarrow \text{FeatureList} \\
\text{all}(\text{FeatureList}) &\rightarrow \text{FeatureExpr} \\
\text{one-of}(\text{FeatureList}) &\rightarrow \text{FeatureExpr} \\
\text{more-of}(\text{FeatureList}) &\rightarrow \text{FeatureExpr} \\
\text{FeatureName} &\rightarrow \text{FeatureExpr} \\
\text{AtomicFeature} &\rightarrow \text{FeatureExpr} \\
\text{FeatureExpr}\ ? &\rightarrow \text{FeatureExpr} \\
\text{default} = \text{AtomicFeature} &\rightarrow \text{FeatureExpr} \\
\ldots &\rightarrow \text{AtomicFeature} \\
\text{DiagramConstraint} &\rightarrow \text{Constraint} \\
\text{UserConstraint} &\rightarrow \text{Constraint} \\
\text{AtomicFeature requires AtomicFeature} &\rightarrow \text{DiagramConstraint} \\
\text{AtomicFeature excludes AtomicFeature} &\rightarrow \text{DiagramConstraint} \\
\text{include AtomicFeature} &\rightarrow \text{UserConstraint} \\
\text{exclude AtomicFeature} &\rightarrow \text{UserConstraint}
\end{align*}
\]
**Definition 4.4 (Disjunctive normal form)** A disjunctive normal form [27, p.9] is a one-of feature expression with only all feature expressions as arguments, themselves with only atomic features as arguments. A disjunctive normal form is an expression of the form: \( \text{one-of}(\text{all}(A_{1,1}, \ldots, A_{1,n_1}), \ldots, \text{all}(A_{m,1}, \ldots, A_{m,n_m})) \)
This disjunctive normal form indicates explicitly all possible feature combinations. It is obtained by applying the normalization (N) and expansion rules (E). For instance, (N2) removes duplicate features; (N6) transforms a one-of expression containing one optional feature into an optional one-of expression; (E4) translates an all( ) expression containing a more-of expression into three cases: one with the first alternative, one with the first alternative and the remaining more-of expression, and one with only the remaining more-of expression.
**Definition 4.5 (Normalization rules)** The set of normalization rules [27, p.7] is \( \mathcal{N} = \{ N_1, \ldots, N_{12} \} \):
\[
\begin{align*}
(N_1) &\quad Fs, F, Fs', F?, Fs'' = Fs, F, Fs', Fs'' \\
(N_2) &\quad Fs, F, Fs', F, Fs'' = Fs, F, Fs', Fs'' \\
(N_3) &\quad F?? = F? \\
(N_4) &\quad \text{all}(F) = F \\
(N_5) &\quad \text{all}(Fs, \text{all}(Ft), Fs') = \text{all}(Fs, Ft, Fs') \\
(N_6) &\quad \text{one-of}(F) = F \\
(N_7) &\quad \text{one-of}(Fs, \text{one-of}(Ft), Fs') = \text{one-of}(Fs, Ft, Fs') \\
(N_8) &\quad \text{one-of}(Fs, F?, Fs') = \text{one-of}(Fs, F, Fs')? \\
(N_9) &\quad \text{more-of}(F) = F \\
(N_{10}) &\quad \text{more-of}(Fs, \text{more-of}(Ft), Fs') = \text{more-of}(Fs, Ft, Fs') \\
(N_{11}) &\quad \text{more-of}(Fs, F?, Fs') = \text{more-of}(Fs, F, Fs')? \\
(N_{12}) &\quad \text{default}{=}A = A
\end{align*}
\]
**Definition 4.6 (Expansion rules)** The set of expansion rules [27, p.9] is \( \mathcal{E} = \{ E_1, \ldots, E_4 \} \):
\[
\begin{align*}
(E_1) &\quad \text{all}(Fs, F?, Ft) = \text{one-of}(\text{all}(Fs, F, Ft), \text{all}(Fs, Ft)) \\
(E_2) &\quad \text{all}(Ft, F?, Fs) = \text{one-of}(\text{all}(Ft, F, Fs), \text{all}(Ft, Fs)) \\
(E_3) &\quad \text{all}(Fs, \text{one-of}(F, Ft), Fs') = \text{one-of}(\text{all}(Fs, F, Fs'), \text{all}(Fs, \text{one-of}(Ft), Fs')) \\
(E_4) &\quad \text{all}(Fs, \text{more-of}(F, Ft), Fs') = \text{one-of}(\text{all}(Fs, F, Fs'), \text{all}(Fs, F, \text{more-of}(Ft), Fs'), \text{all}(Fs, \text{more-of}(Ft), Fs'))
\end{align*}
\]
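As a small illustration of these rules (our own example, not taken from [27]), the expression all(A, B?, one-of(C, D)) expands into a disjunctive normal form, using \(E_1\) first and then \(E_3\) together with the flattening rules \(N_6\) and \(N_7\):

\[
\begin{align*}
\text{all}(A, B?, \text{one-of}(C, D)) &\ \stackrel{E_1}{=}\ \text{one-of}(\text{all}(A, B, \text{one-of}(C, D)),\ \text{all}(A, \text{one-of}(C, D))) \\
&\ \stackrel{E_3, N_6, N_7}{=}\ \text{one-of}(\text{all}(A, B, C),\ \text{all}(A, B, D),\ \text{all}(A, C),\ \text{all}(A, D))
\end{align*}
\]

Reading each all(...) argument as one product, this normal form names the four products {A, B, C}, {A, B, D}, {A, C} and {A, D}.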
On this disjunctive normal form, satisfaction rules (Def. 4.7) are applied to eliminate products that do not satisfy the constraints. These satisfaction rules use the two following functions (not explicitly defined in [27]):
- \( isElement : AtomicFeature \times FeatureExpr \rightarrow B \) which determines whether the AtomicFeature is contained in the FeatureExpr or not.
- \( sat : FeatureExpr \times Constraints \rightarrow B \) which determines whether the FeatureExpr satisfies the Constraints or not.
If no constraint is applicable to the feature expression then (S9) is used. Otherwise, binary constraints (requires, excludes) and unary constraints (include, exclude) are respectively handled by:
- (S1) and (S2) for excludes;
- (S3) and (S4) for requires;
- (S5) and (S6) for include;
- (S7) and (S8) for exclude.
For instance, (S6) defines that the \( sat \) function must return \( false \) if the constraint Include \( A \) is applicable and if the atomic feature \( A \) is not an element of the FeatureExpr \( Ft \).
**Definition 4.7 (Satisfaction rules)** The set of satisfaction rules [27, p.13] is \( \mathcal{S} = \{ S_1, \ldots, S_9 \} \), where \(\vee\) denotes disjunction (OR). Each rule has the form premise \(\Longrightarrow\) conclusion:
\[
\begin{align*}
(S_1) &\quad isElement(A_2, Fs) \vee isElement(A_2, Fs') = true \Longrightarrow sat(\text{all}(Fs, A_1, Fs'),\ Cs\ A_1\ \text{excludes}\ A_2\ Cs') = false \\
(S_2) &\quad isElement(A_2, Fs) \vee isElement(A_2, Fs') = false \Longrightarrow sat(\text{all}(Fs, A_1, Fs'),\ Cs\ A_1\ \text{excludes}\ A_2\ Cs') = sat(\text{all}(Fs, A_1, Fs'),\ Cs\ Cs') \\
(S_3) &\quad isElement(A_2, Fs) \vee isElement(A_2, Fs') = false \Longrightarrow sat(\text{all}(Fs, A_1, Fs'),\ Cs\ A_1\ \text{requires}\ A_2\ Cs') = false \\
(S_4) &\quad isElement(A_2, Fs) \vee isElement(A_2, Fs') = true \Longrightarrow sat(\text{all}(Fs, A_1, Fs'),\ Cs\ A_1\ \text{requires}\ A_2\ Cs') = sat(\text{all}(Fs, A_1, Fs'),\ Cs\ Cs') \\
(S_5) &\quad isElement(A, Ft) = true \Longrightarrow sat(\text{all}(Ft),\ Cs\ \text{include}\ A\ Cs') = sat(\text{all}(Ft),\ Cs\ Cs') \\
(S_6) &\quad isElement(A, Ft) = false \Longrightarrow sat(\text{all}(Ft),\ Cs\ \text{include}\ A\ Cs') = false \\
(S_7) &\quad isElement(A, Ft) = true \Longrightarrow sat(\text{all}(Ft),\ Cs\ \text{exclude}\ A\ Cs') = false \\
(S_8) &\quad isElement(A, Ft) = false \Longrightarrow sat(\text{all}(Ft),\ Cs\ \text{exclude}\ A\ Cs') = sat(\text{all}(Ft),\ Cs\ Cs') \\
(S_9) &\quad sat(\text{all}(Ft), Cs) = true \text{ when no constraint in } Cs \text{ is applicable}
\end{align*}
\]
### 4.2 Abstract syntax
van Deursen and Klint have clearly defined the concrete syntax of vDFD and the various operations to manipulate the allowed expressions. In terms of FFD, we understand that their abstract syntax is \( FFD(DAG, and \cup xor \cup or \cup \{opt_1\}, \emptyset, CR') \) where \( CR' := p_1(\text{requires} \mid \text{excludes})p_2 \mid (\text{include} \mid \text{exclude})p \). The mapping between their concrete syntax and our abstract syntax is presented in Table 2. vDFD is a restricted graph: the operators construct a tree, except that leaves can be shared by several fathers (Def. 4.8), i.e. \( \forall n_1, n_2, n_3 \in N.\ (n_1 \rightarrow n_3 \wedge n_2 \rightarrow n_3 \wedge n_1 \neq n_2) \Rightarrow \neg\exists n_4 \in N.\ n_3 \rightarrow n_4 \).
<table>
<thead>
<tr>
<th>Concrete Syntax</th>
<th>Abstract Syntax</th>
</tr>
</thead>
<tbody>
<tr>
<td>all</td>
<td>\( and \)</td>
</tr>
<tr>
<td>one-of</td>
<td>\( xor \)</td>
</tr>
<tr>
<td>more-of</td>
<td>\( or \)</td>
</tr>
<tr>
<td>?</td>
<td>\( opt_1 \)</td>
</tr>
</tbody>
</table>
### 4.3 Semantics
van Deursen and Klint have defined rewriting rules that lead to a disjunctive normal form (Def. 4.4). From this normal form, the semantics is trivial and corresponds to an ordered list of ordered lists of primitive features, which we note \( O(O(P)) \).
**Definition 4.9 (vDFD Semantics)** The semantics of a vDFD \(d\) is given by the function \( \mathcal{L} : \text{vDFD} \rightarrow O(O(P)) \) where \( \mathcal{L}(d) = \mathcal{S}(\mathcal{E}(\mathcal{N}(d))) \).
Nevertheless, we have discovered undesirable semantics due to missing rules. Consequently, we provide some additional rules and justify them:
- The rule in \( N_1 \) is not sufficient to avoid feature lists that combine mandatory and optional features. Indeed, a feature list such as \( Fs, F?, Fs', F, Fs'' \) (where \( F? \) and \( F \) are switched wrt the rule \( N_1 \)) would be considered normalized. The set of normalisation rules should be corrected by adding one simple rule (Def. 4.10);
- The set of expansion rules is not sufficient to produce a correct disjunctive normal form. Indeed, terms of the form \( a \), \( \text{one-of}(Fs) \) or \( \text{all}(Fs) \) are still allowed. In order to respect the intentions of the authors, we extend \( \mathcal{E} \) (Def. 4.11);
- The satisfaction function \( sat \) is never explicitly called. Consequently, we propose one rule (Def. 4.12) that calls this function and eliminates invalid products (products which do not satisfy the constraints).
**Definition 4.10 (Normalization rules)** The normalization rules are the set of rules \( \mathcal{N}' = \mathcal{N} \cup \{ N_{13} \} \) where
\[ (N_{13}) \quad Fs, F?, Fs', F, Fs'' = Fs, F, Fs', Fs'' \]
**Definition 4.11 (Expansion rules)** The expansion rules are the set of rules \( \mathcal{E}' = \mathcal{E} \cup \{ E_5, E_6 \} \) where
\[ (E_5) \quad A = \text{all}(A) \qquad (E_6) \quad \text{all}(Fs) = \text{one-of}(\text{all}(Fs)) \]
**Definition 4.12 (Satisfaction call rule)** The satisfaction rules are the set of rules \( \mathcal{S}' = \mathcal{S} \cup \{ S_{10} \} \) where
\[ (S_{10}) \quad \frac{sat(\text{all}(Fs), Cs) = false}{sat(\text{one-of}(Fs', \text{all}(Fs), Fs''), Cs) = sat(\text{one-of}(Fs', Fs''), Cs)} \]
Having revisited van Deursen and Klint’s rewriting rules, we need to redefine the semantics function (Def. 4.13).
**Definition 4.13 (Revisited vDFD Semantics)** The revised semantics function of van Deursen and Klint is defined as \( \mathcal{L}' : \text{vDFD} \rightarrow O(O(P)) \) where \( \mathcal{L}'(\text{vdfg}) = \mathcal{S}'(\mathcal{E}'(\mathcal{N}'(\text{vdfg}))) \).
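To illustrate the revisited pipeline, here is a sketch that computes the products of a small vDFD-like term and then filters out the products rejected by the constraints, reusing the `sat` function sketched after Definition 4.7. It abstracts the order of features away (sets instead of lists), and the tuple encoding of terms (`atom`, `opt`, `all`, `one-of`, `more-of`) is our own assumption, not the authors' notation.

```python
from itertools import combinations

def products(term):
    """Set of products (frozensets of atomic features) denoted by a term.
    An optional sub-term contributes either its products or nothing."""
    kind = term[0]
    if kind == "atom":
        return {frozenset([term[1]])}
    if kind == "opt":
        return products(term[1]) | {frozenset()}
    subs = [products(t) for t in term[1]]
    if kind == "one-of":                       # exactly one alternative
        return set().union(*subs)
    if kind == "all":                          # one product from every son
        result = {frozenset()}
        for s in subs:
            result = {p | q for p in result for q in s}
        return result
    if kind == "more-of":                      # one or more alternatives
        result = set()
        for k in range(1, len(subs) + 1):
            for chosen in combinations(subs, k):
                partial = {frozenset()}
                for s in chosen:
                    partial = {p | q for p in partial for q in s}
                result |= partial
        return result
    raise ValueError(f"unknown operator: {kind}")

def product_line(term, constraints):
    """Products of `term` that satisfy `constraints` (cf. sat above)."""
    return {p for p in products(term) if sat(p, constraints)}

# Example: all(A, one-of(B, C)) with the constraint "B requires A"
# yields the two products {A, B} and {A, C}.
```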
### 5 Comparison
We will now compare the redefined semantics of vDFD with the one of FFD (Section 5.1) and subsequently define further comparison criteria and examine the languages with respect to those criteria (Section 5.2).
### 5.1 Comparative Semantics
Even with the modifications of vDFD that we proposed, when we compare their semantics (Def. 4.13) with the one of FFD (Sec. 3.2) two points of divergence appear:
- First, the semantic domains are different (see Fig. 3):
1. On the one hand, the semantics of vDFD translates a vDFD expression into another expression in disjunctive normal form which is an ordered list of ordered lists of atomic features \( O(O(P)) \).
2. On the other hand, the semantic domain of FFD is a set of sets of primitive features \( P(P(P)) \).
- Second, van Deursen and Klint’s semantics always gives preference to inclusion in terms (not in constraints) and thus behaves like a semantics based on edges rather than on nodes.
Let us examine these two points in turn. Although the semantic domains are different, they can be easily related by an abstraction function. Indeed, in [27], atomic features directly correspond to primitive features. Hence, the remaining difference is the one between the mathematical structures list and set. The order of atomic features is important in van Deursen and Klint’s semantics. For instance, the textual vDFD expression \( \text{one-of}(\text{all}(A, B), \text{all}(B, A)) \) contains two products: \( \text{all}(A, B) \) and \( \text{all}(B, A) \). In FFD’s semantics, only one product would be part of PL: \( \{A, B\} \).
Consequently, we can define an obvious abstraction function \( R \) (definition 5.1) that simply abstracts away order from van Deursen and Klint’s semantic domain and directly maps it to our semantic domain. We do not know if this notion of order between features was deliberate or not, but intuitively we consider that two products with the same features should be identical. However, ordering features could be relevant, for example, when each feature corresponds to one transformation (on code or models) [24] and these transformations do not produce the same result if they are applied in a different order.
**Definition 5.1 (Semantic Domain Abstraction \( R \))**
\[
\begin{align*}
R &: O(O(P)) \rightarrow P(P(P)) \\
R(\text{all}(f_1, \ldots, f_m), \ldots, \text{all}(f'_1, \ldots, f'_{m'})) &= \{\{f_1, \ldots, f_m\}, \ldots, \{f'_1, \ldots, f'_{m'}\}\}
\end{align*}
\]
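As a small, hypothetical illustration of this abstraction (using a plain list-of-lists encoding for the ordered DNF, which is our own choice), the two products of the example one-of(all(A, B), all(B, A)) collapse into a single set-based product:

```python
def R(dnf):
    """Abstract an ordered DNF (a list of lists of atomic features)
    into a set of sets of primitive features."""
    return {frozenset(product) for product in dnf}

# one-of(all(A, B), all(B, A)) as an ordered DNF:
assert R([["A", "B"], ["B", "A"]]) == {frozenset({"A", "B"})}
```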
Nevertheless, this abstraction function is not sufficient to find an equivalence between our semantics and van Deursen and Klint’s semantics. Indeed, the latter implicitly always gives preference to inclusion in terms, which is also a characteristic of edge-based semantics (as opposed to node-based semantics). For instance, contrary to a node-based semantics and classical intuition, an edge-based semantics for the FD illustrated in Fig. 4 will consider as valid the products \( \{A,B,C,D,E\} \) and \( \{A,B,C,E,F\} \). The solution to find a semantic equivalence between both semantics (Theorem 5.1) is to apply a preliminary transformation \( T \) on the FFD representation of a textual vDFD. The idea behind the transformation \( T \) is to replace each atomic feature shared by several fathers with one and-node for each incoming edge, each of these and-nodes having only one son which is the corresponding atomic feature. In our concrete notation, to obtain an edge-based semantics, we add one mandatory circle (and-node with one son) for each incoming edge of a shared feature (see Fig. 4).
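A minimal sketch of the transformation \( T \), under the assumption that a diagram is encoded as a dictionary mapping every node to the list of its sons (atomic features having no sons); the fresh node names are hypothetical:

```python
def T(graph):
    """For every leaf shared by several fathers, redirect each incoming
    edge through a freshly created and-node whose only son is that leaf."""
    fathers = {}
    for node, sons in graph.items():
        for son in sons:
            fathers.setdefault(son, []).append(node)

    new_graph = {node: list(sons) for node, sons in graph.items()}
    fresh = 0
    for leaf, parents in fathers.items():
        if len(parents) > 1 and not graph.get(leaf):      # shared leaf
            for parent in parents:
                fresh += 1
                and_node = f"and_{fresh}"                 # new one-son and-node
                new_graph[and_node] = [leaf]
                sons = new_graph[parent]
                sons[sons.index(leaf)] = and_node
    return new_graph

# For the diagram of Fig. 4, every incoming edge of a shared feature is
# thus given its own mandatory (one-son) and-node, as described above.
```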

**Theorem 5.1**
\[
\forall t \in \text{vDFD} : \llbracket T(t) \rrbracket = R(\mathcal{L}'(t))
\]
### 5.2 Comparison Criteria
Now that we have aligned the definitions of the two languages, we can start to compare them based on various formal criteria. Three important criteria that we have already studied for other notations [26] are expressiveness, embeddability and succinctness.
FFD and vDFD cannot be compared directly with these comparison criteria. Indeed, every possible FD language defined through FFD potentially evaluates differently. Ideally, every FFD-based language should be compared with vDFD separately, which is far beyond the scope of this paper. Hence, in the following, we will introduce the criteria and, for each of them, discuss the comparison between vDFD and one or more representative members from the FD language family that we already studied. More studies could come in the future to complement these results.
### 5.2.1 Expressiveness
We use the formal, well-established definition of the expressiveness of a language as the part of its semantic domain that it can express.
Definition 5.2 (Expressiveness) The expressiveness of a language $L$ is the set $E(L) = \{\llbracket D \rrbracket \mid D \in L\}$, also noted $\llbracket L \rrbracket$. A language $L_1$ is more expressive than a language $L_2$ if $E(L_1) \supseteq E(L_2)$. A language $L$ with semantic domain $\mathbb{M}$ (i.e. its semantics is $\llbracket \cdot \rrbracket : L \rightarrow \mathbb{M}$) is expressively complete if $E(L) = \mathbb{M}$.
The usual way to prove that a language $L_2$ is at least as expressive as $L_1$ is to provide a translation from $L_1$ to $L_2$.
Definition 5.3 (Translation) A translation is a total function $T : L_1 \rightarrow L_2$ that is correct, i.e. preserves semantics: $\llbracket T(D_1) \rrbracket = \llbracket D_1 \rrbracket$.
Given the domain abstraction function $\mathfrak{R}$, we can consider that the semantic domain of vDFD is also made of product lines (Def. 3.5). Thus, every vDFD expresses a PL. Now, we can ask the converse question: can every PL be expressed by a vDFD? Stated otherwise: are vDFD fully expressive?
In [26], we have examined this question for several FD languages [16, 17, 12, 10, 11, 23, 22, 28]. The definitions of those languages are recalled in Table 3.
<table>
<thead>
<tr>
<th>Name</th>
<th>GT</th>
<th>NT</th>
<th>GCT</th>
<th>TCL</th>
</tr>
</thead>
<tbody>
<tr>
<td>OFT</td>
<td>TREE</td>
<td>$\cup \cup \cup \cup \cup$</td>
<td>0</td>
<td>CR</td>
</tr>
<tr>
<td>OFD</td>
<td>DAG</td>
<td>$\cup \cup \cup \cup \cup$</td>
<td>0</td>
<td>R</td>
</tr>
<tr>
<td>RFD</td>
<td>DAG</td>
<td>$\cup \cup \cup \cup \cup$</td>
<td>0</td>
<td>R</td>
</tr>
<tr>
<td>EFD</td>
<td>DAG</td>
<td>$\cup \cup \cup \cup \cup$</td>
<td>0</td>
<td>R</td>
</tr>
<tr>
<td>GPFT</td>
<td>TREE</td>
<td>$\cup \cup \cup \cup \cup$</td>
<td>0</td>
<td>CR</td>
</tr>
<tr>
<td>PFT</td>
<td>TREE</td>
<td>$\cup \cup \cup \cup \cup$</td>
<td>0</td>
<td>CR</td>
</tr>
<tr>
<td>VDFD</td>
<td>DAG</td>
<td>$\cup \cup \cup \cup \cup$</td>
<td>0</td>
<td>CR</td>
</tr>
</tbody>
</table>
Table 3. FD variants defined on top of FFD
If we ignore the graphical and textual constraints, that is, the last two parameters in FFD, we can prove formally [26] that tree languages (OFT [16], GPFT [10], PFT [11]) are not fully expressive. However, DAG languages (OFD [17], EFD [23, 22], RFD [12]) are fully expressive. More precisely, our results show that the disjunction of features cannot be expressed in OFT. In [12], Griss et al. have proposed to solve this problem by (1) adding $or$-nodes, and (2) considering FDs as single-rooted DAGs rather than trees. In [26], we proved that the second extension alone guarantees full expressiveness while adding $or$-nodes only does not.
In vDFD, we have $or$-nodes but only a restricted form of DAG, where only the sharing of leaf nodes is allowed. Studying the expressiveness of vDFD thus requires specific treatment.
The operators that manipulate the vDFD expressions must always have at least one operand. Therefore, vDFD expressions are expressively incomplete with respect to PL, as the empty PL (i.e. the PL containing no product, i.e. $\{\}$) and the base PL (i.e. the PL containing one product in which no feature has been selected, i.e. $\{\{\}\}$) cannot be expressed in their disjunctive normal form. If we add the vDFD textual constraints, these two product lines can be expressed since preference is given to the constraints. An empty PL can be expressed by a normal form one-of$(\text{all}(A))$ and a constraint “exclude A”, where $A$ is an atomic feature. A base PL can be expressed by the expression one-of$(\text{all}(A?))$ and a constraint “exclude A”, where $A$ is an atomic feature.
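These two corner cases can be checked with the set-based sketch given after Definition 4.13 (the optional-feature construction for the base PL is our own reading):

```python
# Empty PL: the only candidate product {A} is rejected by "exclude A".
assert product_line(("one-of", [("all", [("atom", "A")])]),
                    [("exclude", "A")]) == set()

# Base PL: with A optional, the product {A} is rejected and the empty
# product remains, i.e. the PL {{}}.
assert product_line(("one-of", [("all", [("opt", ("atom", "A"))])]),
                    [("exclude", "A")]) == {frozenset()}
```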
The consequence of this result is that we now know that vDFD equipped with constraints (at least $exclude$ constraints) can be used to express any PL. This is interesting because vDFD are supported by a tool environment and so in theory all FD languages with PL semantics can also be supported by this environment, provided that forward and backward translations between vDFD and the other languages are implemented. We now discuss the practical feasibility of these translations with the two remaining criteria.
### 5.2.2 Embeddability
In [26], we have proposed a definition of graphical embeddability (Def. 5.4) which generalizes the definition of embeddability for context-free languages (here, simplified):
Definition 5.4 (Graphical embeddability) A graphical language $L_1$ is embeddable into $L_2$ iff there is a translation $T : L_1 \rightarrow L_2$ that is node-controlled [15]: $T$ is expressed as a set of rules of the form $D_1 \rightarrow D_2$, where $D_1$ is a diagram containing a defined node or edge $n$, and all possible connections with this node or edge. Its translation $D_2$ is a subgraph in $L_2$, plus how the existing relations should be connected to nodes of this new subgraph.
Given this definition, we only need to look at the (graph-based) abstract syntax of FD languages to study their embeddability.
All tree FD languages are clearly embeddable into vDFD. For DAG languages, this is not the case because we can have sharing of intermediate nodes in the graph. However, if we consider sublanguages that restrict the sharing to leaves, we just have to apply the linear transformation $T$ (to guarantee edge-based semantics) and from Theorem 5.1 we can directly infer that we have an embedding. Finally, vDFD are embeddable into RFD.
Embeddings are of practical relevance because they ensure that there exists a transformation from one language to the other which preserves the whole shape of the models. This way, traceability between the two models is greatly facilitated and tool interoperability is made more transparent.
Embedding results must however be completed by examining the blow-up caused by the change of notation. This is what is measured by succinctness.
### 5.2.3 Succinctness
Succinctness (Def. 5.5) allows one to compare the size of a diagram before and after translation.
**Definition 5.5 (Succinctness)** Let \( G \) be a set of functions from \( \mathbb{N} \rightarrow \mathbb{N} \). A language \( L_1 \) is \( G \)-as succinct as \( L_2 \), noted \( L_2 \leq G(L_1) \), iff there is a translation \( T : L_1 \rightarrow L_2 \) that is within \( G \): \( \exists g \in G, \forall n \in \mathbb{N}, \forall l_1 \in L_1, |l_1| \leq n \Rightarrow |T(l_1)| \leq g(n) \). Common values for \( G \) are “identically” \( = \{n\} \), “thrice” \( = \{3n\} \), “linearly” \( = O(n) \), “cubically” \( = O(n^3) \), “exponentially” \( = O(2^n) \).
By definition, wherever there is an embedding, there also exists a linear translation. In our case, a vDFD produced from a tree-shaped FD is identically as succinct as the tree, and a vDFD produced from a restricted DAG is linearly as succinct as the latter (because intermediate nodes and edges need to be added). Also, the translation from a vDFD to an RFD is linear. In all those cases, the transformation engines do not face tractability issues. However, for turning unrestricted DAG into vDFD, we need to precompute all shared cases that vDFD will treat as independent. This will cause an exponential blow-up. In practice, this means that one will be able to apply such transformations only to small diagrams.
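As a hedged illustration of the exponential case (our own example, not one taken from [26], and the name \( T_{\text{unfold}} \) for the unfolding translation is ours): consider a DAG \( D_k \) with intermediate nodes \( n_0, \ldots, n_k \) where each \( n_i \) has two sons and both of them point to the shared node \( n_{i+1} \). Unfolding the sharing, as required to obtain a vDFD, duplicates \( n_{i+1} \) once for every incoming edge, so the unfolded diagram contains \( 2^i \) copies of \( n_i \):
\[
|D_k| = \Theta(k) \quad \text{whereas} \quad |T_{\text{unfold}}(D_k)| = \Theta(2^k).
\]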
### 6 Related work
Besides vDFD, a few more formally defined FD languages exist and need to be compared with FFD (and also possibly between themselves) following the same methodology:
1. Batory [1] provides a translation of FD to both grammars and propositional formulae. His goal is to use off-the-shelf Logic-Truth Maintenance Systems and SAT solvers in feature modelling tools. The semantics of grammars is a set of strings, and thus order and repetition are kept. The semantics of propositional formulae is closer to our products but [1] differs in two respects: (i) decomposable features are not eliminated, and (ii) the translation of operators by an equivalence leads to (we suspect) a counter-intuitive semantics.
2. In [8, 7], Czarnecki et al. define a new FD language to account for staged configuration. They introduce *feature cardinality* (the number of times a feature can be repeated in a product) in addition to the more usual *group* cardinality. Foremost, a new semantics is proposed where the full shape of the unordered tree is important, including repetition and decomposable features. The semantics is defined in a 4-stage process where FD are translated in turn into an extended abstract syntax, a context-free grammar and an algebra. In [7], the authors provide an even richer syntax. The semantics of the latter is yet to be defined, but is intended to be similar to [8].
3. Wang et al. [29] propose a semantics of FD using ontologies. A semantic web environment is used to model and verify FD with OWL DL. The RACER reasoner is used to check inconsistencies during feature configuration. Their semantics slightly differs from ours, since (1) they omit justifications and (2) they do not eliminate auxiliary symbols. Missing justifications yield strongly counter-intuitive results, but keeping auxiliary symbols is harmless for consistency checking (although incorrect for other tasks), as shown in [4].
As mentioned in Sec. 1, we think that these approaches do not rank as well as ours on generality, abstraction and intuitiveness. However, a finer comparison is required in order to further justify these claims, just as we have done in this paper for vDFD. This is a topic for future work.
### 7 Conclusion
This work is a first step towards the comparative semantics of feature diagrams. We have recalled FFD, a generic formalization of feature diagrams. We have also recalled and revisited van Deursen and Klint’s definition of FD, which we called vDFD. We have then compared the two by applying the principles of comparative semantics. We have defined an abstraction function that relates van Deursen and Klint’s semantics to our own and then studied 3 properties of vDFD: expressiveness, embeddability and succinctness.
We can summarize the conclusions and practical implications of our investigations as follows:
- By being able to abstract vDFD’s semantics to ours and assuming that the abstracted information (the order of features) is irrelevant, we further validated the appropriateness of our semantic domain and semantic function for representing product lines. This gives us more assurance that FFD can be used as a reference semantics for implementing future tools for engineering the requirements of software product lines.
- We have discovered a few minor errors in the original formalization of vDFD which can cause an automatic reasoner based on it to yield erroneous results. These findings can help the developers to improve their tool and future developers to avoid the same problems.
- More fundamentally, we analyse the lack of abstraction and the presence of errors in the original formalization as a result of the bias imposed by the tool. This tends to confirm the advantages of our methodology [26, 25] where we recommend tool-independent formalization prior to the adoption or development of any supporting tool. The generic semantic framework of FFD greatly facilitates such a definition (which can boil down to filling a simple 4-entry template as shown in Table 3). From there on, the available results of comparative analyses can guide the adopters in the selection or development of their supporting tools.
- We have proved that vDFD are expressively complete with respect to the semantic domain of product lines, just as some other members of the FFD family. From this, we have concluded that the vDFD language and its supporting tools can be used to model, and reason on, product lines without the fear of being limited in expressiveness. However, this conclusion has to be mitigated by the preceding conclusions concerning the lack of abstraction and presence of errors in the original tool-driven formalization.
- By looking at translations, we have assessed the feasibility of transforming vDFD models into other kinds of feature models and conversely. Based on these results, developers can start writing model transformation scripts to enable tool interoperability. For example, if some reasoning facilities are not supported by tool A but they are supported by tool B, embeddability of A’s notation into B’s guarantees that a transformation from A to B is not only possible but also that B preserves the shape of the original model so that traceability between the two models is greatly facilitated. Succinctness measures the cost of the translations. Regarding translations that have vDFD as a target, we have observed that, roughly stated, the translations from tree-shaped FDs are easily tractable whereas those from DAG-shaped FDs are only tractable for small diagrams. Finally, vDFD have been found embeddable into RFD.
Comparative semantics should be obtained for all variants of feature diagrams, so that we know precisely what they are, what are their respective qualities: expressiveness, succinctness, embeddability, but also axiomatizability, complexity of the reasoning tasks, etc. These objective data can then serve to guide the selection of a common standard for feature diagrams. Such a standard could hopefully accelerate their adoption by industry, increase the competition between tool providers, and help to establish efficient and safe requirements and software product line engineering methods.
### 8 Threats to validity
The main threat to the validity of our method for comparing FD languages is that it considers only the formal aspects of these languages. Hence, we are faced with the usual advantages and drawbacks of any formalization endeavour [13]. On the one hand, a mathematically defined semantics tends (i) to eliminate ambiguity in the interpretation of languages, (ii) to facilitate safe and efficient tool automation and (iii) to allow the definition of objective criteria to evaluate and compare languages. On the other hand, a formal semantics can never guarantee by itself that it captures enough and the right information about the domain being modeled.
More general language evaluation frameworks exist that take into account a wider spectrum of qualities and criteria. For example, Krogstie [18] proposes a comprehensive language and model quality framework covering such notions as domain appropriateness (i.e. is the language considered adequate to convey the relevant information on the subject domain). A complete evaluation and comparison of FD languages should also take such aspects into account. These are topics for future work which need to be addressed by empirical validation once we have developed a concrete (user-oriented) syntax and supporting tools based on FFD. Then, we will be able to evaluate whether there is a good match between the intended (“real world”) and the formal semantic domains and functions of our language. Yet, it should be noted that even if we obtain a good assurance that there is such a match, we will still not be able to guarantee that our language will always be used to represent the relevant information about the requirements of the system at hand, for no formal or informal language can ever guarantee this by itself [14].
Another threat to the validity of our results is that all the formal definitions and reasoning in this paper have been carried out by humans without the assistance of tools (except text editing tools). Therefore, we cannot guarantee that human errors are completely absent from our comparative analysis.
Finally, it should be noted that we have only assessed the language defined by van Deursen and Klint from the description that is made of it in [27]. We have not assessed the tool itself, which might well have been improved to address the issues that we mention.
ISO-Standardized Requirements Activities for Very Small Entities
Philippe Saliou and Vincent Ribaud
Université de Brest, LISyC, CS 93837, 29238 Brest Cedex, France
Université européenne de Bretagne, France
{ Philippe.Saliou@univ-brest.fr, Vincent.Ribaud@univ-brest.fr }
Abstract. The use of Software Engineering standards may promote recognized and valuable engineering practices for Very Small Entities (VSEs) but these standards do not fit the needs of VSEs. The ISO/IEC Working Group 24 (WG24) is developing the ISO/IEC 29110 standard “Lifecycle profiles for Very Small Entities”; this standard is due for approval in June 2010. A pilot project about ISO 29110 use has been established between our Software Engineering group and a 14-person company building and selling counting systems about the frequentation levels of public and private sites. The pilot project aims to help VSEs deliver the Software Requirements Specification, Test Cases and Test Procedures for a new web-based system intended to manage fleets of counting systems. As the project goes along, it appears that the 29110 set of documents was not up to the task of sustaining this VSE in its engineering activities. We supported the VSE in two ways: (i) a Training Session based on the 29110 Requirements Analysis activity, and (ii) Self-Training Packages - a set of resources intended to develop experience and skills in Requirements Identification and SW Requirement Specification (SRS). Our inspiration stems from the 15504-5 standard with a desire to provide software engineers with an exemplar set of base practices providing a definition of the tasks and activities needed to fulfil the process (e.g. requirements) outcomes. Task definition is collected on a task card. The results of this pilot study provide the VSE with a roadmap through the Requirements activity, which is compatible with the ISO/IEC 29110 standard.
Keywords: Very Small Entities, Requirements Specification, ISO/IEC 29110.
### 1 Introduction
Very Small Entities (VSEs) are recognized as being very important to the software economy, and produce stand-alone or integrated software components in large software systems. The use of Software Engineering standards may promote recognized and valuable engineering practices - but these standards do not fit the characteristics of VSEs. The term ‘Very Small Entity’ (VSE) was defined by the ISO/IEC JTC1/SC7 Working Group 24 (WG24) as being “an entity (enterprise, organization, department or project) having up to 25 people”. This definition has
subsequently been adopted for use in the ISO response to VSEs’ specific needs: the emerging ISO/IEC 29110 standard “Lifecycle profiles for Very Small Entities” [1]. The 29110 standard defines a group of Standardized Profiles, including the ISO/IEC IS 29110-4-1 Basic profile [2] which applies more specifically to a VSE that is involved in software development of a single application by a single project team with no special risk or situational factors.
A VSE claiming compliance with ISO/IEC IS 29110-4-1 will implement and use all the profile elements, as identified in Clause 7 of the profile specification [2]. The profile elements concerning requirements are: Project Plan Execution (PM.2) and Project Assessment and Control (PM.3) - producing the Change Request work product, and Software Requirements Analysis (SI.2) - producing work products Change Request and Requirement Specification.
This paper reports some of the conclusions reached by a pilot project the authors conducted with a 14-person VSE that builds and sells systems for counting visitors at private and public sites. Only 3 of the employees are software developers, and the VSE asked for assistance with software project management – mainly managing requirements and establishing a disciplined test process. Deployment Packages (DP) are expected to be particularly helpful. A DP is “a set of artefacts developed to facilitate the implementation of a set of practices, for the selected framework, within a VSE [3]”. As the project went along, it appeared that the 29110 set of documents (including DPs) was not up to the task of sustaining this VSE in its engineering activities. One idea defended here is that implementing standardized software engineering activities in a VSE requires specific and operational materials and mechanisms. We propose to provide VSE employees with Self-Training Packages intended to help the engineer carry out [and learn] the task.
Section 2 presents related work, and offers an overview of a SE standard for VSEs. Section 3 introduces the pilot project, presents Self-Training Packages, and evaluates the system's efficiency. We conclude with brief perspectives.
### 2 Related work
### 2.1 Requirements engineering for small software companies
In 2007, IEEE Software published a special issue on the theme “SE Challenges in Small Software Companies”. The guest editors’ introduction presents common challenges faced by large and small software development companies: “They need to manage and improve their software processes, deal with rapid technology advances, maintain their products, operate in a global software environment, and sustain their organizations through growth [4]”. Yet VSEs also have specific characteristics and needs.
J. A. Calvo-Manzano et al. [5] presented an SPI solution called MESOPYME for small and medium-size enterprises (SME). MESOPYME is based on the Action Package concept - a mechanism which assists faster and affordable SPI program implementation for SMEs. Experimentation with this package has been carried out in
the Requirements Engineering domain. The structure of an Action Package (such as the Requirements Engineering Action Package) presents similarities to our own structure of Self-Training Packages. Training is provided using the Action Package Training component. This component basically comprises four courses: software process model (CMM), the improvement method (MESOPYME), team building, and training in the process selected for improvement (e.g. Requirements Engineering). Our approach is different in that MESOPYME is a Software Process Improvement method for SMEs, whereas we aim to implement a Lifecycle Standardized Profile in VSEs.
The REDEST project [6] aimed to develop a selection of innovative Requirements Engineering methodologies to act as Best Practice Cases for 14 independent software development companies. REDEST disseminated results via a Best Practice Case Booklet [7]. Case Study 8, carried out by a VSE named SignalKomplex, aimed to experiment with the following features: introduction of a systematic RE process; a more thorough understanding of customer requirements; basic tracking of changes in requirements. The size (24 employees) and the products and services (vehicle traffic control equipment) provided are very similar to the VSE case study reported in this paper. SignalKomplex baseline project (development of a vehicle sensor card) presents similarities with the VSE project (a web-based system intended to manage fleets of counting systems). The RE approach selected by SignalKomplex was a method called PAISLEY, which is an approach whose focus couples Requirements Elicitation with the processes of the object being developed. SignalKomplex selected this approach because it was equally operable for hardware and software requirements, a key issue from the SignalKomplex point of view. As SignalKomplex reported in the REDEST Best Practice Case Book [7, p. 114], the RE solution also required input from other areas of the company, such as the sales and business departments. Combining pure, technical specifics with other inputs was mostly achieved by exploiting spreadsheet features. The ISO/IEC 29110 Basic Profile is applicable to VSEs which do not develop critical software products, and the traceability tool provided with the Deployment Package associated with requirements is a spreadsheet-based tool. Our proposal is to perform a preliminary Requirements Elicitation through the building of a Services Identification List (see Figure 1) which is also supported by spreadsheets. Keeping a powerful requirements management tool as simple as possible is a key issue for a VSE.
### 2.2 SE Standards for Very Small Entities
ISO initiative. Software engineering standards and methods often neglect the needs and problems of the small and medium-sized organizations which constitute a major part of the software industry. The ISO/IEC Working Group 24 (WG24) is developing the emerging ISO 29110 standard, which is a set of technical specifications and guides for use by very small software enterprises. This set is based on the concept of VSE profile [1]. The purpose of a VSE profile is to define a subset of ISO/IEC standards relevant to the VSE context - for example, selected processes and outcomes of ISO/IEC 12207 [8] and selected products of ISO/IEC 15289 [9].
taxonomy. Part 3 [11] defines the process assessment guidelines and compliance requirements needed to meet the purpose of the defined VSE profiles.
The document ISO/IEC 29110-4-1 [2] provides the specification for all the Generic Profile Group profiles. The Generic Profile Group is applicable to VSEs which do not develop critical software products [1]. The Basic Profile describes the software development of a single application by a single project team with no special risk or situational factors [2]. The ISO/IEC 29110-3-1-2 document [12] provides an implementation management and engineering guide for the Basic Profile.
The ISO 29110 Set of Documents is due for approval in June 2010. It is possible that VSEs may be intimidated by this set. Moreover, this set includes ISO standards, which are subject to copyright fees. However, guides are targeted at VSEs, and should be VSE-accessible, in terms of both style and cost [1].
### 2.3 Basic Profile
**Basic Profile Processes: Objectives and Tasks Decomposition.** The Basic Profile establishes VSE characteristics, needs and suggested competencies, and uses them to define process objectives. For instance, objectives related to requirements are: the SI.O2 objective “Software requirements are defined, analyzed for correctness and testability, approved by the Customer, baselined and communicated [2, p. 7]”, the SI.O3 “[…] Consistency and traceability [of the design] to software requirements are established [2, p. 8]”, and the SI.O4 “[…] Traceability [of the software components] to the requirements and design are established [2, p. 8]”.
The Basic Profile consists of 2 processes: Project Management (PM) and Software Implementation (SI). A process is defined as “a set of interrelated or interacting activities which transforms inputs into outputs [8]”. An activity is “a set of cohesive tasks of a process [8]”. For each activity of the PM and SI processes, the Basic Profile details the tasks to be performed: role, description of the task, input and output products. For instance, the starting point of the 29110 use for requirements is the SI.2 “Software Requirements Analysis” activity, its list of tasks, SI.2.1 to SI.2.7, and the associated roles. Roles are: TL Technical Leader, WT Work Team, AN Analyst, and CUS Customer. Table 1 provides a task breakdown for the activity SI.2 [2, p. 15].
<table>
<thead>
<tr>
<th>Task List</th>
<th>Role</th>
</tr>
</thead>
<tbody>
<tr>
<td>SI.2.1 Assign tasks to the Work Team members in accordance with their role, based on the current Project Plan.</td>
<td>TL, WT</td>
</tr>
<tr>
<td>SI.2.2 Document or update the Requirements Specification.</td>
<td>AN, CUS</td>
</tr>
<tr>
<td>SI.2.3 Verify the Requirements Specification.</td>
<td>AN</td>
</tr>
<tr>
<td>SI.2.4 Validate the Requirements Specification</td>
<td>CUS, AN</td>
</tr>
<tr>
<td>SI.2.5 Document the preliminary version of the Software User Documentation or update the present manual. *</td>
<td>AN</td>
</tr>
<tr>
<td>SI.2.6 Verify the Software User Documentation</td>
<td>AN</td>
</tr>
<tr>
<td>SI.2.7 Incorporate the Requirements Specification, and *Software User Documentation to the Software Configuration in the baseline.</td>
<td>TL</td>
</tr>
</tbody>
</table>
**Basic Profile Products.** Part 29110-4-1 provides Work product specifications, and Activity input & output specification. For instance, SI.2.1 to SI.2.7 tasks have associated output products: **Requirements Specification**, **Verification Results**, **Change Request**, **Validation Results**, and [preliminary] **Software User Documentation**.
### 2.4 Deployment Package
Significant help is expected from Deployment Packages (DP). C. Laporte, the editor of the ISO/IEC 29110, defines a DP as “a set of artefacts developed to facilitate the implementation of a set of practices, of the selected framework, in a VSE [3]”. The elements of a typical deployment package are: process description (activities, inputs, outputs, and roles), guide, template, checklist, example, presentation material, reference and mapping to standards and models, and list of tools [13]. Packages are designed in such a way that a VSE is able to implement their content without having to implement the entire framework at the same time.
Regarding requirements, the Deployment Package - Software Requirement Analysis [14] adds depth to the standard, providing guidance through a simplified breakdown of the SI.2 SW requirements analysis activity. The DP sums up the SI.2 activity in 4 tasks: requirement identification, requirements refinement and analysis, requirements verification and validation, requirements change management. For each of these 4 tasks, the DP describes a step-by-step method.
This DP follows the SPEM approach promoted by OMG in [15]. In this DP, the tasks required for performing SW requirements analysis are defined through textual step-by-step explanations, describing how specific fine-granular development goals are achieved, through which roles, and with which resources and results. The DP also provides several templates (including a simplification of IEEE 830 [16]) of a Software Requirement Specification Document.
Training materials and an Excel-based Traceability tool can be downloaded from the public WG24 web site http://profs.logti.etsmtl.ca/claporte/English/VSE/index.html.
### 3 A Pilot Project on Requirements
### 3.1 Overview
**Context of the VSE.** A VSE of 14 people (with 3 software engineers) requested our help in Spring 2009. This VSE designs, builds, develops and sells counter systems intended to collect and analyze visitor-frequency data for public or private sites. Counting systems are based on stand-alone counter boxes (including sensors, power supply, data storage, and data exchange) and a software chain able to collect, analyze, present, and report counting data. Data sets were downloaded from the counters via an infrared link or GSM, stored on a PC and exchanged via a file transfer utility.
**The new software project.** The VSE started a complete reconstruction of its software chain in order to transform it into a web-based system called Eco-Visio, intended to host data from fleets of counting systems for each client, and able to process statistics and generate analysis reports on counting. At the end of June 2009,
the VSE hired an Information Technology graduate from our university. At the same time, we initiated a pilot project intended to help the VSE implement just one part of the 29110.
The pilot project. The absence of requirement traceability and systematic testing was rapidly recognized by all stakeholders. Both authors also agreed that project management was in need of improvement, but we deliberately omitted this point. We proposed a 2-stage plan of action: 1- implementation of the “Software Requirements Analysis” Deployment Package and 2- implementation of the “Software Testing” Deployment Package. The first stage is complete, and reported on in this paper.
Deployment Package. The starting point of the 29110 use for requirement is the SI.2 “Software Requirements Analysis” activity, its list of tasks - SI.2.1 to SI.2.7 - and the associated roles. A step-by-step approach to perform the required SI.2 tasks is given in the Deployment Package - Software Requirements Analysis [14]. One VSE employee received a short training course, using the training material associated with this DP, and downloaded the Traceability Tool provided with the DP. Despite all this assistance, the VSE engineer was unable to proceed with 29110 Requirements Engineering. He therefore attended a Training Session on requirements, based on the 29110 materials. A description of this session is presented in section 3.2.
Self-training packages. During the training session, the VSE engineer – like his co-trainees – attained an initial level of proficiency in using the 29110 for Requirements Specification – yet trainees asked for further assistance and guidance. We therefore constructed a dedicated assistance approach, which is presented in section 3.3. This approach relies on Self-Training Packages - a set of resources intended to develop experience and skills in SE activities, e.g. Requirements Identification and SW requirement specification.
Assessment. We built 2 groups: a control group of 9 people and a study group of 10 people performing the 29110 training. We intended to measure the efficiency of the training system by comparing requirements competencies between both groups.
### 3.2 Training session
Training session context. We scheduled a training week on 29110 Software Requirements Analysis in December 2009. 10 young engineers (including our VSE engineer) attended the session. The 29110 Training Session comprises a course on requirements and a case study using the DP - Software Requirement Analysis [14].
Content of the training session. The session begins with an introductory lecture on requirements, but trainees are plunged into 'doing' with the preparation of a peer-review on a requirements analysis guide. This guide is issued by an ISO-9001 major software company (at which both authors had been employed for about ten years). The SW Requirements Specification (SRS) Document is derived from the DOD-STD-2167A software development standard [17]. This guide is intended to facilitate the writing of the SRS. Peer-reviewing this guide provided trainees with initial exposure to standardized requirements management.
During the second phase of the session, trainees have to contribute to the writing of a similar guide, based only on the 29110 standard. Authors provide trainees with a preliminary version of the guide, written in a top-down manner, starting from the 12207 standard processes devoted to requirements (6.4.1 Stakeholder Requirements Definition, 7.1.2 SW Requirements Analysis) to the 29110 Basic Profile SI.2 “Software Requirements Analysis” activity. Trainees have to incorporate both the DP - Software Requirement Analysis and its step-by-step approach into the guide. Finally, trainees have to apply the enhanced guide to a 'real' SRS and update this SRS to satisfy compliance with the guide. The 'real' SRS is for eCompass - an existing system developed by the first author and former graduate students.
### 3.3 Towards requirements management capability
**Objectives.** Despite the path traced in the standard (including the guidance provided by the DP), some young engineers (and this is true of the VSE engineer in particular) may be unable to find their way through managing requirements. Below, we present the step-by-step path proposed by the DP Requirement Analysis.
*Task 1. Requirements identification.* The objective is to clearly define the scope of the project and identify key requirements of the system. Steps are: (i) Collect information about the application domain; (ii) Identify project scope; (iii) Identify and capture requirements; (iv) Structure and prioritize requirements.
*Task 2. Requirements refinement and analysis.* The objective is to detail and analyze all the requirements identified. Steps are: (i) Detail requirements; (ii) Produce a prototype.
*Task 3. Requirements verification & validation.* The objective is to verify requirements and obtain validation from the customer or his representative. Steps are: (i) Clarify fuzzy requirements (verification); (ii) Review SRS (Software Requirements Specification); (iii) Validate requirements.
*Task 4. Requirements change management.* The objective is to manage requirements change in line with a process agreed upon with the customer. Steps are: (i) Track changes to requirements; (ii) Analyze impact of changes; (iii) Identify changes that are beyond the project scope; (iv) Prioritize changes.
The core of requirements gathering and specification must be performed in tasks 1 and 2. We decided to build two Self-Training Packages aimed at helping young engineers with: A - Requirements Identification and B - SW Requirements Specification. A discussion of Self-Training Packages is beyond the scope of this paper, but we will say that one objective of our research group is to provide VSEs with a training complement to the 29110 set of documents called the 'Self-Training Package'. Self-training packages are intended to be performed autonomously by VSE employees, requiring (almost) no interaction with a coach - except at the time of package delivery to the VSE.
The inspiration stems from the 15504-5 standard [19, Part 5] with a desire to provide software engineers with an exemplar model of software engineering activities together with complementary self-training material. While we are designing self-training for an SE activity (such as Requirements Analysis) and its required tasks
Fig. 1. Example of a task card.
<table>
<thead>
<tr>
<th>N°</th>
<th>Date</th>
<th>Origin</th>
<th>Roles assignment</th>
</tr>
</thead>
<tbody>
<tr>
<td>24</td>
<td></td>
<td></td>
<td>ANalyst Employee X Employee Y</td>
</tr>
<tr>
<td>Process: Software Implementation (SI)</td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td>Activity: Software Requirements Analysis (SI.2)</td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td>Task Title: Requirements bootstrap</td>
<td></td>
<td></td>
<td></td>
</tr>
</tbody>
</table>
**WORK DESCRIPTION**
**Objectives**
The goal of this task is to collect and identify requirements using a structured and prioritized list of requirements, and to establish a synthesis of users’ needs.
Objectives are strongly related to SI.2.1 task objectives: “The objective of this activity is to clearly define the scope of the project and identify the key requirements of the system.”
**Step-by-step**
1. **Identify functional and technical needs**
- Extract users’ needs from the eCompas Statement of Work (call for tender) and the preliminary response to tender.
- Write a unique document “Needs Synthesis Document”, gathering together any elements related to a functional or technical need.
2. **Summarize required services (Services Identification List)**
- Identify, classify and sum up users’ needs through a list of high-level services required by the eCompas software.
- Each identified service (or sub-service) shall be documented with:
- Identification number (could be temporarily left blank)
- Type (Functional or Technical) and Domain (one of the five eCompas domain areas)
- Service number (hierarchical numbering inside domains)
- Actors (main users of the service)
- Summary (a very short description of the service)
- Origin (traceability to Statement of Work or Tender response)
- Link to “Need Synthesis Document” (references to corresponding paragraphs)
3. **Establish a glossary of the eCompas domain**
4. **Structure and prioritize the “Needs Synthesis Document”**
- With the help of the “Services Identification List”, rewrite a new version of the “Needs Synthesis Document” complying with the proposed hierarchy.
- Establish traceability.
- Number services with a hierarchical identification number.
5. **Perform a peer-review of an existing SW Requirements Specification**
- Prepare the review of the eCompas SRS following the instructions of the Reviewer Guide
**Resources**
- eCompas Statement of Work and Tendering answer
- SRS Writing Guide and Peer-Reviewer Guide
**Output products**
The main output product of this task is the “Needs Synthesis Document”, which will be used in the next task - “SRS writing” as a preliminary version of the Software Requirements Specification.
<table>
<thead>
<tr>
<th>Products</th>
<th>V.</th>
<th>Milestone</th>
</tr>
</thead>
<tbody>
<tr>
<td>Needs Synthesis Document</td>
<td>A, B</td>
<td>A</td>
</tr>
<tr>
<td>Services Identification List</td>
<td></td>
<td></td>
</tr>
</tbody>
</table>
**Task cards.** The description of the task is designed as a theatre scene: the scene being the reference context in which the action takes place. The scene aims for unity
of place, time and action; it is a situation in which people do [and learn], a scenario of actions, a role distribution, an area mobilizing resources and means. The different components of a scene, along with their articulation, are depicted on a task card (see an example of the Requirements bootstrap card in Figure 1).
Its main elements are:
- **Related 29110 Process / Activity**
This reference (SI / SI.2 SW Requirements Analysis in this instance) provides a smooth link to the 29110 and through the ISP to the 12207 and 15504 standards.
- **Role**
Role (here ANalyst) is a quick reference to the 29110 Role.
- **Task Title and Objectives**
Similar to Process Title, Process Purpose, and Process Outcomes as defined in ISO/IEC 12207
- **Step-by-step**
A comprehensive description of the work to be done - intended to be useful as a practical guide to completion of the task.
- **Resources**
The set of resources required. This may set up the context and/or be required to perform the task. It may include online courses that are affordable to a technology transfer centre, where the cost is beyond the reach of a VSE.
- **Output products**
This is generally a 29110 Work Product, or an intermediary product required to build this Work Product. A hidden goal is to initiate and develop a strategy of capitalizing on the activity, and transferring knowledge to VSE employees.
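For readers who prefer code, the elements listed above can be read as a small record type. The following Python dataclass is only an illustrative sketch; the field names are ours and are not part of the 29110 material:

```python
from dataclasses import dataclass, field
from typing import List

@dataclass
class TaskCard:
    """One task card, mirroring the elements listed above (illustrative only)."""
    process: str          # related 29110 process, e.g. "SI"
    activity: str         # related 29110 activity, e.g. "SI.2 SW Requirements Analysis"
    role: str             # 29110 role, e.g. "ANalyst"
    title: str            # task title
    objectives: str       # task objectives
    steps: List[str] = field(default_factory=list)            # step-by-step work description
    resources: List[str] = field(default_factory=list)        # resources needed to perform the task
    output_products: List[str] = field(default_factory=list)  # 29110 work products or intermediaries
```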
**Self-training.** For the self-training reported in this section, we built two task cards: Requirements bootstrap and SRS writing. Self-training is then performed as a case study: a set of resources is used to set up the context, and engineers have to perform tasks as they should do in a 'real' situation.
Our study group of 10 engineers performed both Self-Training Packages in January and February 2010. The first Training Package was intended to offer an initial level of maturity in ISO/IEC 29110 Requirements Management (through the study of SI.2 activity and a review of a ‘real’ WP11 Requirements Specification) and the second Training Package aims to perform a Requirements Analysis on a ‘real’ case. Very little interaction with the coach (the first author) occurred. Each engineer completed each package in roughly a week.
### 3.4 Process assessment
The Part ISO 29110-3 [11] is an Assessment Guide applicable to all VSE profiles. It is compatible with ISO/IEC 15504-2 and ISO/IEC 15504-3 [18]. As specified in [11], “a VSE-specific Process Assessment Model (PAM) can be derived by selecting only the assessment indicators in the 15504-5 Exemplar PAM, relevant to the corresponding process outcomes defined in ISO/IEC 29110-4.”
For instance, in the Basic Profile, the SI Process defines 7 objectives and SI.02 is the only one relevant to requirements: “Software requirements are defined, analyzed
for correctness and testability, approved by the Customer, baselined and communicated.” [2] Then, reducing the 7.1.2 Software Requirements Analysis Process outcomes (15504 ENG.4) corresponding to the SI.02 objective will give:
1) requirements allocated to the software elements of the system and their interfaces are defined
2) software requirements are analyzed for correctness and testability
6) software requirements are approved and updated as needed
8) software requirements are baselined and communicated to all affected parties
If we apply the profile to the Base Practices of ENG.4, we can remove Base Practices that do not contribute to the selected outcomes (1, 2, 6, and 8). Hence, the list of profiled Base Practices of the ENG.4 Process is reduced to ENG.4.BP1: Specification of software requirements; ENG.4.BP3: Development of criteria for software testing; ENG.4.BP5: Evaluation and updating of software requirements; ENG.4.BP6: Communication of software requirements.
Clause 5 of ISO/IEC 15504-2 [19, Part 2] defines a measurement framework for the assessment of process capability, defined on a six point ordinal scale. Within this measurement framework, the measure of capability is based upon a set of process attributes (PA). Each attribute defines a particular aspect of process capability. The extent of process attribute achievement is characterized on a defined rating scale. Clause 6 of the 15504-5 [19, Part 5] presents the process capability indicators related to the process attributes associated with capability levels 1 to 5. Process capability indicators are the means of achieving the capabilities addressed by the considered process attributes.
ISO/IEC 15504 separates processes and capability levels in two dimensions whilst CMMI handles them in a single dimension. However, it should be pointed out that separate process and capability dimensions may discourage a VSE regarding process assessment. For instance, capability level 2 indicators applied to requirements relate to defining, planning, monitoring and adjusting the performance of the SI.2 Requirements Analysis activity and to identifying, defining, documenting, reviewing and adjusting each work product related to this activity. In our opinion, this kind of assessment will neither determine whether a VSE achieves the Basic Profile, nor help the VSE to improve its Requirements Engineering implementation. We would like VSE employees to understand the importance of the assessment principle, whilst performing regular self-assessment on a reduced set of major objectives. Such an objective should be formulated with a sentence in the “To be able to …” format. This proposal, applied to Requirements Engineering, is detailed in the following section.
### 3.5 Evaluation of the system efficiency
Since 2008, local employers in Brest have significantly increased their take-up of a 12-month work placement system called “Contrat de professionnalisation” (professionalization contract). During these 12 months, fully-paid employees attend university for approximately 250 hours of technical training (about 40 days over the whole year). This academic year, 19 young software engineers, who graduated from our university in June 2009 after a 4-year programme in Computer Science or Information Technology, are benefiting from this system. As mentioned above, 10 people chose the 29110 training; the other 9 chose to attend a UML-based analysis course. Thus, we have a population divided into 2 groups: a control group of 9 people and a study group of 10 people performing the 29110 training reported in previous sections. The UML-based analysis course and the 29110 training were each performed within a period of about 3 weeks between September 2009 and February 2010. Hence, we sought to measure the efficiency of the training system by comparing requirements competencies between the two groups.
We defined three major objectives in requirements:
- To mobilize specification methods and tools in a real project
- To work under the control of a standardized baseline
- To produce a Software Requirement Specification (including traceability)
We decided to assess each objective on a self-assessment scale ranging from 0 to 5:
- 0: does not know anything about the topic;
- 1 - Fog: has only a vague idea;
- 2 - Notion: has a general idea but is unable to achieve the objective;
- 3 - User: is able to achieve the objective with the help of an experienced colleague and has an initial experience of its achievement;
- 4 - Autonomous: is able to work autonomously;
- 5 - Expert: is able to act as an expert to modify, enrich or develop the knowledge area on which the objective focuses.
We asked each of the 19 engineers to self-assess themselves three times:
1 – At job start: at the beginning of their (first) job, young engineers complete the first self-assessment; all participants did this in September 2009;
2 – At 6 months: after 6 months of employment, young engineers complete the second self-assessment; this was done in March 2010 for the whole group;
3 – At 9 months: in order to assess how software engineering practices are maturing, young engineers complete a third self-assessment in June 2010.
Table 2 presents the average self-assessment scores for both groups at the three assessment dates.
<table>
<thead>
<tr>
<th rowspan="2">Objectives</th>
<th colspan="3">Control Group</th>
<th colspan="3">Study Group</th>
</tr>
<tr>
<th>Sept. 2009</th>
<th>March 2010</th>
<th>June 2010</th>
<th>Sept. 2009</th>
<th>March 2010</th>
<th>June 2010</th>
</tr>
</thead>
<tbody>
<tr>
<td>SI.2.1 To mobilize specification methods and tools in a real project</td>
<td>1.56</td><td>2.11</td><td>2.11</td>
<td>1.50</td><td>2.70</td><td>2.80</td>
</tr>
<tr>
<td>SI.2.2 To work under the control of a standardized baseline</td>
<td>0.78</td><td>1.44</td><td>1.44</td>
<td>0.70</td><td>2.60</td><td>2.60</td>
</tr>
<tr>
<td>SI.2.3 To produce a Software Requirement Specification (including traceability)</td>
<td>2.33</td><td>2.67</td><td>2.89</td>
<td>1.40</td><td>2.80</td><td>3.20</td>
</tr>
</tbody>
</table>
No statistical comparison was performed, and requirements training took place for both groups. Nevertheless, the self-assessment scores increase more markedly for the study group than for the control group.
Table 3 presents the score frequency distributions (number of respondents per score, followed by the mean) for both groups at each assessment date.
<table>
<thead>
<tr>
<th rowspan="2">September 2009</th>
<th colspan="7">Control Group</th>
<th colspan="7">Study Group</th>
</tr>
<tr>
<th>0</th><th>1</th><th>2</th><th>3</th><th>4</th><th>5</th><th>Mean</th>
<th>0</th><th>1</th><th>2</th><th>3</th><th>4</th><th>5</th><th>Mean</th>
</tr>
</thead>
<tbody>
<tr><td>SI.2.1</td><td>2</td><td>3</td><td>1</td><td>3</td><td>0</td><td>0</td><td>1.56</td><td>1</td><td>4</td><td>4</td><td>1</td><td>0</td><td>0</td><td>1.5</td></tr>
<tr><td>SI.2.2</td><td>3</td><td>5</td><td>1</td><td>0</td><td>0</td><td>0</td><td>0.78</td><td>5</td><td>3</td><td>2</td><td>0</td><td>0</td><td>0</td><td>0.7</td></tr>
<tr><td>SI.2.3</td><td>0</td><td>1</td><td>4</td><td>4</td><td>0</td><td>0</td><td>2.33</td><td>2</td><td>4</td><td>2</td><td>2</td><td>0</td><td>0</td><td>1.4</td></tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th rowspan="2">March 2010</th>
<th colspan="7">Control Group</th>
<th colspan="7">Study Group</th>
</tr>
<tr>
<th>0</th><th>1</th><th>2</th><th>3</th><th>4</th><th>5</th><th>Mean</th>
<th>0</th><th>1</th><th>2</th><th>3</th><th>4</th><th>5</th><th>Mean</th>
</tr>
</thead>
<tbody>
<tr><td>SI.2.1</td><td>1</td><td>1</td><td>4</td><td>2</td><td>1</td><td>0</td><td>2.11</td><td>0</td><td>0</td><td>4</td><td>5</td><td>1</td><td>0</td><td>2.7</td></tr>
<tr><td>SI.2.2</td><td>2</td><td>2</td><td>4</td><td>2</td><td>1</td><td>0</td><td>1.44</td><td>0</td><td>0</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2.6</td></tr>
<tr><td>SI.2.3</td><td>0</td><td>1</td><td>2</td><td>5</td><td>1</td><td>0</td><td>2.67</td><td>0</td><td>0</td><td>3</td><td>6</td><td>1</td><td>0</td><td>2.8</td></tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th rowspan="2">June 2010</th>
<th colspan="7">Control Group</th>
<th colspan="7">Study Group</th>
</tr>
<tr>
<th>0</th><th>1</th><th>2</th><th>3</th><th>4</th><th>5</th><th>Mean</th>
<th>0</th><th>1</th><th>2</th><th>3</th><th>4</th><th>5</th><th>Mean</th>
</tr>
</thead>
<tbody>
<tr><td>SI.2.1</td><td>1</td><td>1</td><td>4</td><td>2</td><td>1</td><td>0</td><td>2.11</td><td>0</td><td>0</td><td>3</td><td>6</td><td>1</td><td>0</td><td>2.8</td></tr>
<tr><td>SI.2.2</td><td>2</td><td>2</td><td>4</td><td>1</td><td>0</td><td>0</td><td>1.44</td><td>0</td><td>0</td><td>5</td><td>4</td><td>1</td><td>0</td><td>2.6</td></tr>
<tr><td>SI.2.3</td><td>0</td><td>0</td><td>3</td><td>4</td><td>2</td><td>0</td><td>2.89</td><td>0</td><td>0</td><td>3</td><td>5</td><td>2</td><td>0</td><td>3.2</td></tr>
</tbody>
</table>
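The means in Tables 2 and 3 follow directly from the frequency counts. As a quick, illustrative check (our own sketch, not part of the original study), the following Python snippet recomputes one of the cells:

```python
def mean_score(counts):
    """counts[i] = number of respondents giving score i (0..5)."""
    total = sum(counts)
    return sum(score * n for score, n in enumerate(counts)) / total

# Control group, SI.2.1, September 2009 (first row of Table 3): mean 1.56
print(round(mean_score([2, 3, 1, 3, 0, 0]), 2))   # 1.56
```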
Empirical evaluation. The VSE engineer reported that he was now ready to apply SI.2 Software Requirements Analysis to the Eco-Visio project. As the specifications had already been established by another VSE colleague, he only reviewed and rewrote some sections of the existing Requirements Specification in order to establish compliance with the template provided in the DP - Software Requirement Analysis [14]. Once updated, the WP11 Requirement Specification [Validated] served as an input to SI.5 SW Integration and Tests. The system has been deployed since April 2010, and load testing and application optimization should soon be complete. Defects have to be corrected through a short cycle of SI activities.
As an empirical measure of its satisfaction, the VSE asked for a similar approach for SI.5 SW Integration and Tests. In particular, the VSE wants guidance and support in establishing a disciplined Change Request Process. A Self-Training Package is under construction, and we should start with the “Software Testing” DP [19] as a basis for the whole Training Package. Probably because tests occur in many SE activities, this DP is organized so that it spans PM and SI tasks, raising a wealth of new questions.
5 Conclusion and future work
We reported on a system intended to help a VSE with requirements management. Two points were discussed: (1) a Training Session based on 29110 materials; (2) Self-Training Packages intended to support requirements definition and analysis through a step-by-step approach. We used self-assessment to compare a control group of 9 people attending a UML-based analysis course with our 10-person study group performing our proposition. Self-assessment scores increase more markedly for the study group than for the control group. The concept of the Self-Training Package seems to extend to other processes such as design or testing. Further work is required to determine how far the scope of this concept and its main tool - task cards - can be extended.
References
|
{"Source-Url": "https://hal.univ-brest.fr/hal-00504338/document", "len_cl100k_base": 8580, "olmocr-version": "0.1.50", "pdf-total-pages": 14, "total-fallback-pages": 0, "total-input-tokens": 33885, "total-output-tokens": 10235, "length": "2e13", "weborganizer": {"__label__adult": 0.00036835670471191406, "__label__art_design": 0.0005679130554199219, "__label__crime_law": 0.0003345012664794922, "__label__education_jobs": 0.0137176513671875, "__label__entertainment": 8.380413055419922e-05, "__label__fashion_beauty": 0.00022280216217041016, "__label__finance_business": 0.0013217926025390625, "__label__food_dining": 0.0003905296325683594, "__label__games": 0.0006303787231445312, "__label__hardware": 0.0006780624389648438, "__label__health": 0.00035881996154785156, "__label__history": 0.0003116130828857422, "__label__home_hobbies": 0.00013327598571777344, "__label__industrial": 0.0005793571472167969, "__label__literature": 0.0003654956817626953, "__label__politics": 0.00023508071899414065, "__label__religion": 0.00044083595275878906, "__label__science_tech": 0.01491546630859375, "__label__social_life": 0.0001609325408935547, "__label__software": 0.01108551025390625, "__label__software_dev": 0.9521484375, "__label__sports_fitness": 0.00027680397033691406, "__label__transportation": 0.00043892860412597656, "__label__travel": 0.00021696090698242188}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 40803, 0.04505]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 40803, 0.21474]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 40803, 0.89165]], "google_gemma-3-12b-it_contains_pii": [[0, 915, false], [915, 3455, null], [3455, 6525, null], [6525, 10145, null], [10145, 13168, null], [13168, 16206, null], [16206, 19348, null], [19348, 22635, null], [22635, 25526, null], [25526, 28337, null], [28337, 31614, null], [31614, 34318, null], [34318, 37593, null], [37593, 40803, null]], "google_gemma-3-12b-it_is_public_document": [[0, 915, true], [915, 3455, null], [3455, 6525, null], [6525, 10145, null], [10145, 13168, null], [13168, 16206, null], [16206, 19348, null], [19348, 22635, null], [22635, 25526, null], [25526, 28337, null], [28337, 31614, null], [31614, 34318, null], [34318, 37593, null], [37593, 40803, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 40803, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 40803, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 40803, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 40803, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 40803, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 40803, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 40803, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 40803, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 40803, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 40803, null]], "pdf_page_numbers": [[0, 915, 1], [915, 3455, 2], [3455, 6525, 3], [6525, 10145, 4], [10145, 13168, 5], [13168, 16206, 6], [16206, 19348, 7], [19348, 22635, 8], [22635, 25526, 9], [25526, 28337, 10], [28337, 31614, 11], [31614, 34318, 12], [34318, 37593, 13], [37593, 40803, 14]], 
"pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 40803, 0.19905]]}
|
olmocr_science_pdfs
|
2024-11-28
|
2024-11-28
|
0cc99fabe69585b90c0b0356decf43cf9974bb20
|
Solving Constrained Horn Clauses over ADTs by Finite Model Finding
Yurii Kostyukov
y.kostyukov@2015.spbu.ru
Saint Petersburg State University
Russia
Dmitry Mordvinov
dmitry.mordvinov@jetbrains.com
Saint Petersburg State University
Russia
Grigory Fedyukovich
grigory@cs.fsu.edu
Florida State University
Tallahassee, USA
Abstract
First-order logic is a natural way of expressing the properties of computation, traditionally used in various program logics for expressing the correctness properties and certificates. Subsequently, modern methods in the automated inference of program invariants progress towards the construction of first-order definable invariants. Although the first-order representations are very expressive for some theories, they fail to express many interesting properties of algebraic data types (ADTs).
Thus we propose to represent program invariants as regular languages, using tree automata. We show how to automatically infer such regular invariants of ADT-manipulating programs using finite model finders. We have implemented our approach and evaluated it against state-of-the-art engines for invariant inference in first-order logic for ADT-manipulating programs. Our evaluation shows that the automata-based representation of invariants is more practical than the one based on first-order logic, since such invariants are capable of expressing more complex properties of the computation and their automatic construction is less expensive.
1 Introduction
Specifying and proving properties of programs is traditionally achieved with the help of first-order logic (FOL). It is widely used in various techniques for verification, from Floyd-Hoare logic [20, 24] to constrained Horn clauses (CHC) [6] and refinement types [50]. The language of FOL makes it possible to describe the desired properties precisely and makes the verification technology accessible to the end user. Similarly, verification proofs, such as inductive invariants, procedure summaries, or ranking functions, are produced and returned to the user in FOL as well, thus facilitating the explainability of a program and its behaviors.
Algebraic Data Types (ADTs) enjoy a variety of decision procedures [4, 39, 42, 47] and Craig interpolation algorithms [25, 28], but many practical tasks still cannot be solved by state-of-the-art solvers for Satisfiability Modulo Theories (SMT) such as Z3, CVC4 [2] and Princess [45].
With the recent growth in the use of SMT solvers, it is often tempting to formulate verification conditions using a combination of different theories. Specifically, in the ADT case, verification conditions can be expressed using the combination of ADTs and the theory of Equality and Uninterpreted Functions (EUF). Although SMT solvers claim to support EUF, in reality the proof search often gets stuck attempting to conduct structural induction and to discover helper lemmas [51].
In this paper, we introduce a new automata-based class of representations of inductive invariants. The basic idea is to find a finite model of the verification condition and convert this model into a finite automaton. The resulting representations of invariants are regular in the sense that they can "scan" an ADT term to unbounded depth, which cannot be achieved by representations based on first-order formulas (called elementary throughout the paper).
Our contribution is the demonstration that regular invariants of ADT-manipulating programs could be constructed from finite models of the verification condition. Intuitively, the invariant generation problem can be reduced to the satisfiability problem of a formula constructed from the FOL-encoding of the program with pre- and post-conditions where uninterpreted symbols are used instead of ADT constructors. Although becoming an over-approximation of the original verification condition, it can be handled by existing finite model finders, such as MACE4 [38], Finder [46], Paradox [12], or CVC4 [44]. If satisfiable, the detected model is used to construct regular solutions of the original problem.
We have implemented a tool called RegInv for the automated inference of regular invariants of ADT-manipulating programs and evaluated it against state-of-the-art inductive invariant generators, namely Z3/SPACER [30] and ELDARICA [26], the only CHC solvers supporting ADTs, to the best of our knowledge. It managed to find non-trivial invariants of various problems, including the inhabitation checking for STLC.
2 Motivating Example
In this section we demonstrate a verification problem which is intractable for state-of-the-art solvers but is naturally handled by our approach. Basically, this case study demonstrates the expressiveness of regular representations in comparison to FOL-based ones. We believe that this case may be interesting from a theoretical point of view for type theory experts.
Consider the following program sketch:
```
Var  ::= ...
Type ::= arrow(Type, Type)
       | ... <primitive types> ...
Expr ::= var(Var) | abs(Var, Expr)
       | app(Expr, Expr)
Env  ::= empty | cons(Var, Type, Env)

fun typeCheck(Γ: Env, e: Expr, t: Type): bool =
  match Γ, e, t with
  | cons(v, t, _), var(v), t -> true
  | cons(_, _, Γ'), var(_), _ ->
      typeCheck(Γ', e, t)
  | _, abs(v, e'), arrow(t, u) ->
      typeCheck(cons(v, t, Γ), e', u)
  | _, app(e1, e2), _ ->
      ∃u : Type, typeCheck(Γ, e2, u) ∧
                 typeCheck(Γ, e1, arrow(u, t))
  | _ -> false
  end

assert ¬(∃e : Expr, ∀a, b : Type,
          typeCheck(empty, e, arrow(arrow(a, b), a)))
```
This program checks that there is no closed simply typed lambda calculus (STLC) term inhabiting the type \((a \rightarrow b) \rightarrow a\). It is well-known that this type is uninhabited, so this program is safe.
Suppose we wish to infer an inductive invariant of typeCheck proving the validity of the assertion. Using, for example, the weakest liberal precondition calculus [16], we may obtain the verification conditions \(VC\) of this program, presented in Figure 1.
\(VC\) is satisfiable modulo theory of algebraic data types \(Var, Type, Expr\) and \(Env\), if and only if the program is safe. Moreover, the interpretations of \(typeCheck\) satisfying \(VC\) are the inductive invariants of the source program.
The strongest inductive invariant of the program is the least fixed point of a step operator, which is the set of all tuples \((Γ, e, t)\), such that \(Γ \vdash e : t\) in STLC typing rules.
One needs a very expressive assertion language, supporting type theory-specific reasoning, to define this invariant. For example, this approach is typically used in interactive theorem proving, where STLC typing is defined within the sufficiently powerful type system of a proof assistant [10].
Instead, our goal is to verify this program automatically, using general-purpose tools. So it is natural to look for coarser invariants. But does this program have weaker inductive invariants than \(\{(\Gamma, e, t) \mid \Gamma \vdash e : t\}\) that still prove the validity of the assertion? (It should be noted that we did not find an answer to this question in the existing literature.) It turns out that the answer is yes, but it is not a simple task to compose such an invariant. One surprisingly simple invariant \(\mathcal{I}\) (see below) was discovered completely automatically, in less than a second, by our tool RegInv, which is based on the finite model finding engine of CVC4 (see Sec. 4).
Every STLC type can be viewed as a propositional formula, where type variables correspond to atomic variables and arrows correspond to implications. Given a type \(t\), a propositional interpretation \(M\) is a map from the atomic variables of \(t\) to \(\{0, 1\}\). We write \(M \models t\) to denote that the propositional interpretation \(M\) satisfies the propositional formula corresponding to the type \(t\). We also say that a type \(u\) is in \(\Gamma \in Env\) if \(u\) occurs as the type component of some \(cons\) cell of \(\Gamma\), i.e., \(\Gamma = cons(\ldots, cons(\_, u, \ldots) \ldots)\).
Consider the following relation:
\[ \mathcal{I} \equiv \{(\Gamma, e, t) \mid \text{for every interpretation } M, \text{ either } M \models t, \text{ or } M \not\models u \text{ for some type } u \text{ in } \Gamma\}. \]
In the following, we explain the idea behind this invariant.
From the Curry-Howard correspondence we know that the STLC type is inhabited if and only if the propositional formula defined by the type is a tautology of intuitionistic logic. But every intuitionistic tautology is the tautology of classical logic as well. So if the type \(t\) is inhabited, then \(M \models t\) for all propositional interpretations \(M\). Thus, clearly, \(I\) over-approximates the strongest inductive invariant of the program. Also, in our example \((a \rightarrow b) \rightarrow a\) is not a propositional tautology, and \(Γ\) is empty, so interpreting \(typeCheck\) with \(I\) satisfies the last clause of \(VC\).
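To make the definition of \(\mathcal{I}\) concrete, the following Python sketch (ours, not from the paper; names such as `holds` and `in_invariant` are hypothetical) checks membership of a pair \((\Gamma, t)\) in \(\mathcal{I}\) by enumerating all propositional interpretations of the atoms occurring in \(\Gamma\) and \(t\):

```python
from itertools import product

# Types are either atom names (strings) or ("arrow", lhs, rhs) tuples.
def atoms(t):
    if isinstance(t, str):
        return {t}
    _, lhs, rhs = t
    return atoms(lhs) | atoms(rhs)

def holds(m, t):
    """M |= t, reading arrow as propositional implication."""
    if isinstance(t, str):
        return m[t]
    _, lhs, rhs = t
    return (not holds(m, lhs)) or holds(m, rhs)

def in_invariant(gamma, t):
    """(Gamma, e, t) in I: for every M, either M |= t or M |/= u for some u in Gamma."""
    names = sorted(set().union(atoms(t), *map(atoms, gamma)))
    for values in product([False, True], repeat=len(names)):
        m = dict(zip(names, values))
        if not holds(m, t) and all(holds(m, u) for u in gamma):
            return False
    return True

# (a -> b) -> a is not a classical tautology, so ([], (a->b)->a) lies outside I,
# which is what makes the assertion of the motivating program provable.
t = ("arrow", ("arrow", "a", "b"), "a")
print(in_invariant([], t))                      # False
print(in_invariant([("arrow", "a", "b")], t))   # still False (a may be false)
print(in_invariant(["a"], t))                   # True: whenever M |= a, M |= t
```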
One could attempt to interpret \(typeCheck\) with the relation
\[ \mathcal{J} \equiv \{(\Gamma, e, t) \mid t \text{ corresponds to a classical tautology}\}, \]
but this fails because \(\mathcal{J}\) is not inductive: for instance, it violates the first clause. Conversely, \(\mathcal{I}\) satisfies all the clauses. The first clause is satisfied, which can be checked by case splitting: if \(M \models t\), then \((\Gamma, e, t) \in \mathcal{I}\); otherwise \(M \not\models t\), but then \(t\) is in \(\Gamma\) by the premise of the clause, so again \((\Gamma, e, t) \in \mathcal{I}\). Using a similar dichotomy, it is straightforward to check that \(\mathcal{I}\) satisfies the rest of the clauses.
The invariant \(\mathcal{I}\) can be represented by a tree automaton. First, there is an automaton which determines whether \(t\) is satisfied by a given interpretation \(M\). This automaton has two states, 0 and 1; after scanning the constructor \(arrow(\_, \_)\) it transitions from the pair of states (1, 0) to state 0, and to state 1 from the remaining pairs of states, modeling logical implication. Starting from the states corresponding to the interpretation of the leaves of \(t\) by \(M\), the automaton stops in state 1 after scanning \(t\) iff \(M \models t\).
Similarly, we can build an automaton which tests whether there is a type \(u\) in \(\Gamma\) such that \(M \not\models u\). For this purpose, we need two states, \(\in\) and \(\notin\). Scanning the \(empty\) constructor, the automaton moves to the \(\notin\) state. Scanning the \(cons\) constructor, it moves to the \(\in\) state if it is already in the \(\in\) state, or if it is in the \(\notin\) state and the first automaton stops in 0 for the second argument of \(cons\); otherwise it stays in \(\notin\).
Formally, we have \(\mathcal{I} = \{(\Gamma, e, t) \mid A \text{ accepts } (\Gamma, t)\}\) for the tree automaton \(A = \langle \{0, 1, \in, \notin, v, e\}, \Sigma_F, \{(\in, 0), (\notin, 1), (\in, 1)\}, \Delta \rangle\) with the following transition relation \(\Delta\) (the "otherwise" rules summarize the remaining state combinations described above):
\[
\begin{aligned}
&Var_i \mapsto v, \qquad PrimType_i \mapsto 0, \qquad var(v) \mapsto e, \qquad abs(v, e) \mapsto e, \qquad app(e, e) \mapsto e, \\
&arrow(1, 0) \mapsto 0, \qquad arrow(s_1, s_2) \mapsto 1 \text{ otherwise}, \\
&empty \mapsto \notin, \qquad cons(v, 1, \notin) \mapsto \notin, \qquad cons(v, s_1, s_2) \mapsto \in \text{ otherwise}.
\end{aligned}
\]
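As a sanity check, the transition table above can be executed directly. The following Python sketch (our own encoding of terms as nested tuples, not from the paper) runs the automaton bottom-up on a pair \((\Gamma, t)\) and checks acceptance against the final state pairs:

```python
# States: 0/1 evaluate the Type component, "in"/"notin" the Env component,
# "v"/"e" are the states for Var and Expr subterms. Terms are ("ctor", arg, ...) tuples.
def run(term):
    ctor, *args = term
    states = tuple(run(a) for a in args)
    if ctor == "prim":                    # PrimType_i -> 0
        return 0
    if ctor == "varname":                 # Var_i -> v
        return "v"
    if ctor == "arrow":                   # arrow(1, 0) -> 0, otherwise -> 1
        return 0 if states == (1, 0) else 1
    if ctor == "empty":                   # empty -> notin
        return "notin"
    if ctor == "cons":                    # cons(v, 1, notin) -> notin, otherwise -> in
        _, type_state, env_state = states
        return "notin" if (type_state, env_state) == (1, "notin") else "in"
    raise ValueError(ctor)

FINAL = {("in", 0), ("notin", 1), ("in", 1)}   # accepting (Env state, Type state) pairs

def accepts(gamma, t):
    return (run(gamma), run(t)) in FINAL

# (a -> b) -> a with every primitive type read as 0:
t = ("arrow", ("arrow", ("prim",), ("prim",)), ("prim",))
print(accepts(("empty",), t))   # False: the pair (empty, t) is rejected, matching the assertion
```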
In fact, if we replace the type \((a \rightarrow b) \rightarrow a\) in the program assertion by an arbitrary type \(t\) which is not a tautology of classical logic, \(\mathcal{I}\) would still prove the safety of the assertion. We have checked this experimentally. Note that \(\mathcal{I}\) is simple enough to completely ignore the type-checked term \(e\).
One natural question regarding these invariants is: what if we try an uninhabited type which corresponds to a classical tautology, but not to an intuitionistic one? One such example is Peirce's law \(t \equiv ((a \rightarrow b) \rightarrow a) \rightarrow a\). In this case \(\mathcal{I}\) is too weak to prove that \(t\) is uninhabited. Our tool diverged on this input, which might mean that there is no regular inductive invariant which over-approximates the denotational semantics of typeCheck and still proves the validity of the assertion, although this should be investigated more thoroughly.
Thus, tree automata seem to be a balanced representation for ADT program invariants: they can express complex program properties, and their inference can be efficiently automated. Regular invariants are formally defined in Sec. 3, and their automated inference with finite model finders is described in Sec. 4. Our implementation and its comparison against the state of the art on preexisting benchmarks is presented in Sec. 5.
3 Preliminaries
**Many-sorted logic.** A many-sorted first-order signature with equality is a tuple \(\Sigma = \langle \Sigma_S, \Sigma_F, \Sigma_P \rangle\), where \(\Sigma_S\) is a set of sorts, \(\Sigma_F\) is a set of function symbols, and \(\Sigma_P\) is a set of predicate symbols, among which there is a distinguished equality symbol \(=_\sigma\) for each sort \(\sigma\). Each function symbol \(f \in \Sigma_F\) has an associated arity of the form \(\sigma_1 \times \ldots \times \sigma_n \rightarrow \sigma\), where \(\sigma_1, \ldots, \sigma_n, \sigma \in \Sigma_S\), and each predicate symbol \(p \in \Sigma_P\) has an associated arity of the form \(\sigma_1 \times \ldots \times \sigma_n\). Variables are associated with a sort as well. We use the usual definitions of first-order terms of sort \(\sigma\), ground terms, formulas, and sentences.
A many-sorted structure \(\mathcal{M}\) for a signature \(\Sigma\) consists of non-empty domains \(|\mathcal{M}|_\sigma\) for each sort \(\sigma \in \Sigma_S\). For each function symbol \(f\) with arity \(\sigma_1 \times \ldots \times \sigma_n \rightarrow \sigma\), it associates an interpretation \(\mathcal{M}(f) : |\mathcal{M}|_{\sigma_1} \times \ldots \times |\mathcal{M}|_{\sigma_n} \rightarrow |\mathcal{M}|_{\sigma}\), and for each predicate symbol \(p\) with arity \(\sigma_1 \times \ldots \times \sigma_n\) it associates an interpretation \(\mathcal{M}(p) \subseteq |\mathcal{M}|_{\sigma_1} \times \ldots \times |\mathcal{M}|_{\sigma_n}\). For each ground term \(t\) of sort \(\sigma\), we define its interpretation \(\mathcal{M}[t] \in |\mathcal{M}|_{\sigma}\) in the natural way. We call a structure finite if the domain of every sort is finite; otherwise, we call it infinite.
We assume the usual definition of satisfaction of a sentence \(\varphi\) by \(\mathcal{M}\), denoted \(\mathcal{M} \models \varphi\). If \(\varphi\) is a formula, we write \(\varphi(x_1, \ldots, x_n)\) to emphasize that all free variables of \(\varphi\) are among \(\{x_1, \ldots, x_n\}\). In this case, we denote by \(\mathcal{M} \models \varphi[a_1, \ldots, a_n]\) the satisfaction of \(\varphi\) by \(\mathcal{M}\) with the free variables evaluated to elements \(a_1, \ldots, a_n\) of the appropriate domains. The universal closure of a formula \(\varphi(x_1, \ldots, x_n)\), denoted \(\forall \varphi\), is the sentence \(\forall x_1 \ldots \forall x_n.\, \varphi\). If \(\varphi\) has free variables, we define \(\mathcal{M} \models \varphi\) to mean \(\mathcal{M} \models \forall \varphi\).
A Herbrand universe for a sort \(\sigma\) is the set of ground terms of sort \(\sigma\). If the Herbrand universe for a sort \(\sigma\) is infinite, we call \(\sigma\) an infinite sort. We say that \(\mathcal{H}\) is a Herbrand structure for a signature \(\Sigma\) if it associates the Herbrand universe \(|\mathcal{H}|_\sigma\) to each sort \(\sigma\) of \(\Sigma\) as its domain and interprets every function symbol with itself, i.e., \(\mathcal{H}(f)(t_1, \ldots, t_n) = f(t_1, \ldots, t_n)\) for all ground terms \(t_i\) of the appropriate sorts. Thus, there is a family of Herbrand structures for one signature \(\Sigma\) with identical domains and interpretations of function symbols, but with various interpretations of predicate symbols. Every Herbrand structure \(\mathcal{H}\) interprets each ground term \(t\) with itself, i.e., \(\mathcal{H}[t] = t\).
**Assertion language.** An algebraic data type (ADT) is a pair \(\langle C, \sigma \rangle\), where \(\sigma\) is a sort and \(C\) is a set of uninterpreted function symbols (called constructors), such that each \(f \in C\) has an arity \(\sigma_1 \times \ldots \times \sigma_n \rightarrow \sigma\) for some sorts \(\sigma_1, \ldots, \sigma_n\). In what follows, we fix a set of ADTs \(\langle C_1, \sigma_1 \rangle, \ldots, \langle C_n, \sigma_n \rangle\) with \(\sigma_i \neq \sigma_j\) and \(C_i \cap C_j = \emptyset\) for \(i \neq j\). We define the signature \(\Sigma = \langle \Sigma_S, \Sigma_F, \Sigma_P \rangle\), where \(\Sigma_S = \{\sigma_1, \ldots, \sigma_n\}\), \(\Sigma_F = C_1 \cup \ldots \cup C_n\), and \(\Sigma_P = \{=_{\sigma_1}, \ldots, =_{\sigma_n}\}\). (For simplicity, we omit the selectors and testers from the signature because they do not increase the expressiveness of the assertion language.) For brevity, we omit the sorts from the equality symbols. We refer to the first-order language defined by \(\Sigma\) as the assertion language \(\mathcal{L}\).
As \(\Sigma\) has no predicate symbols except the equality symbols (which have a fixed interpretation within every structure), there is a unique Herbrand structure \(\mathcal{H}\) for \(\Sigma\). We say that a sentence (or a formula) \(\varphi\) in the assertion language is satisfiable modulo the theory of ADTs \(\langle C_1, \sigma_1 \rangle, \ldots, \langle C_n, \sigma_n \rangle\) iff \(\mathcal{H} \models \varphi\).
**Constrained Horn Clauses.** Let \( \mathcal{R} = \{ p_1, \ldots, p_n \} \) be a finite set of predicate symbols with sorts from \( \Sigma \), which we refer to as uninterpreted symbols.
**Definition 1.** A constrained Horn clause (CHC) \( \mathcal{C} \) is a \( \Sigma \cup \mathcal{R} \)-formula of the form:
\[ \varphi \land R_1(\overline{t_1}) \land \ldots \land R_m(\overline{t_m}) \rightarrow H, \]
where \(\varphi\) is a formula in the assertion language, called a constraint; \(R_i \in \mathcal{R}\); \(\overline{t_i}\) is a tuple of terms; and \(H\), called the head, is either \(\bot\) or an atomic formula \(R(\overline{t})\) for some \(R \in \mathcal{R}\).
If \(H = \bot\), we say that \(\mathcal{C}\) is a query clause; otherwise we call \(\mathcal{C}\) a definite clause. The premise \(\varphi \land R_1(\overline{t_1}) \land \ldots \land R_m(\overline{t_m})\) of the implication is called the body of \(\mathcal{C}\).
A CHC system \( \mathcal{S} \) is a finite set of CHCs.
**Satisfiability of CHCs.** Let \(\overline{X} = \langle X_1, \ldots, X_n \rangle\) be a tuple of relations such that if \(P_i\) has arity \(\sigma_1 \times \ldots \times \sigma_m\), then \(X_i \subseteq |\mathcal{H}|_{\sigma_1} \times \ldots \times |\mathcal{H}|_{\sigma_m}\). To simplify the notation, we denote the expansion \(\mathcal{H}\{P_1 \mapsto X_1, \ldots, P_n \mapsto X_n\}\) by \(\langle \mathcal{H}, X_1, \ldots, X_n \rangle\), or simply by \(\langle \mathcal{H}, \overline{X} \rangle\).
Let \(\mathcal{S}\) be a system of CHCs. We say that \(\mathcal{S}\) is satisfiable modulo the theory of ADTs if there exists a tuple of relations \(\overline{X}\) such that \(\langle \mathcal{H}, \overline{X} \rangle \models \mathcal{C}\) for all \(\mathcal{C} \in \mathcal{S}\).
For example, the system of CHCs from Example 1 is satisfied by interpreting \(even\) with the relation
\[ X = \{Z, S(S(Z)), S(S(S(S(Z)))), \ldots\} = \{S^{2n}(Z) \mid n \geq 0\}. \]
It is well known that constrained Horn clauses provide a first-order match for many program logics, including Floyd-Hoare logic for imperative programs and refinement types for higher-order functional programs. So, we assume that for every recursive program over ADTs there is a system of CHCs such that the program is safe iff the system is satisfiable. In the rest of the article, we identify programs with their verification conditions expressed as systems of CHCs.
**Definability.** A representation class is a function \(C\) mapping every tuple of sorts \(\langle \sigma_1, \ldots, \sigma_n \rangle \in \Sigma_S^n\) to some class of relations \(C(\sigma_1, \ldots, \sigma_n) \subseteq 2^{|\mathcal{H}|_{\sigma_1} \times \ldots \times |\mathcal{H}|_{\sigma_n}}\). We say that a relation \(X \subseteq |\mathcal{H}|_{\sigma_1} \times \ldots \times |\mathcal{H}|_{\sigma_n}\) is definable in a representation class \(C\) if \(X \in C(\sigma_1, \ldots, \sigma_n)\). We say that a Herbrand structure \(\mathcal{H}\) is definable in \(C\) (or \(C\)-definable) if for every predicate symbol \(p\) with arity \(\sigma_1 \times \ldots \times \sigma_n\), the interpretation \(\mathcal{H}(p)\) belongs to \(C(\sigma_1, \ldots, \sigma_n)\).
**Finite Tree Automata.** In order to define regular representations, we introduce deterministic finite tree automata (DFTA). Let \(\Sigma = \langle \Sigma_S, \Sigma_F, \Sigma_P \rangle\) be a fixed many-sorted signature.
**Definition 2 (cf. [13]).** A deterministic finite tree \(n\)-automaton over \(\Sigma_F\) is a quadruple \(\langle S, \Sigma_F, S^F, \Delta \rangle\), where \(S\) is a finite set of states, \(S^F \subseteq S^n\) is a set of final state tuples, and \(\Delta\) is a transition relation with rules of the form
\[ f(s_1, \ldots, s_m) \rightarrow s, \]
where \(f \in \Sigma_F\) has arity \(m\), \(s, s_1, \ldots, s_m \in S\), and there are no two rules in \(\Delta\) with the same left-hand side.
**Definition 3.** A tuple of ground terms \(\langle t_1, \ldots, t_n \rangle\) is accepted by an \(n\)-automaton \(A = \langle S, \Sigma_F, S^F, \Delta \rangle\) iff \(\langle A[t_1], \ldots, A[t_n] \rangle \in S^F\), where
\[
A[f(t_1, \ldots, t_m)] \equiv \begin{cases}
s, & \text{if } f(A[t_1], \ldots, A[t_m]) \rightarrow s \in \Delta, \\
\bot, & \text{otherwise.}
\end{cases}
\]
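The recursive evaluation \(A[\cdot]\) translates directly into code. The following Python sketch (ours, not from the paper; the class name `TreeAutomaton` is hypothetical) evaluates ground terms bottom-up and checks acceptance of a tuple of terms; it is reused in the examples below:

```python
class TreeAutomaton:
    """A deterministic finite tree n-automaton: delta maps (ctor, child states) to a state."""
    def __init__(self, delta, finals):
        self.delta = delta      # dict: (ctor, (s_1, ..., s_m)) -> s
        self.finals = finals    # set of n-tuples of final states

    def eval(self, term):
        """A[t]: run the automaton bottom-up; None plays the role of the undefined value."""
        ctor, *args = term
        states = tuple(self.eval(a) for a in args)
        return self.delta.get((ctor, states))

    def accepts(self, *terms):
        return tuple(self.eval(t) for t in terms) in self.finals
```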
**Example 1 (Even).** For example, consider the following Peano integers datatype: \(Nat ::= Z : Nat \mid S : Nat \rightarrow Nat\), and the CHC system:
\[
\begin{aligned}
even(x) &\leftarrow x = Z \\
even(x) &\leftarrow x = S(S(y)) \land even(y) \\
\bot &\leftarrow even(x) \land even(S(x))
\end{aligned}
\]
The only possible interpretation of \(even\) satisfying these CHCs is the relation \(\{S^{2n}(Z) \mid n \geq 0\}\), which is not expressible in the first-order language of the \(Nat\) datatype.
However, the solution can be represented by the automaton \(A = \langle \{s_0, s_1\}, \Sigma_F, \{(s_0)\}, \Delta \rangle\), which moves to state \(s_0\) for \(Z\) and flips the state between \(s_0\) and \(s_1\) for \(S\). The alphabet is simply \(\Sigma_F = \{Z, S\}\), and the set of transition rules \(\Delta\) is:
\[
Z \rightarrow s_0 \qquad\qquad S(s_0) \rightarrow s_1 \qquad\qquad S(s_1) \rightarrow s_0
\]
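Using the hypothetical `TreeAutomaton` sketch above, this automaton and its acceptance check can be written as follows (the tuple encoding of terms is our own):

```python
even_automaton = TreeAutomaton(
    delta={("Z", ()): "s0", ("S", ("s0",)): "s1", ("S", ("s1",)): "s0"},
    finals={("s0",)},
)

def nat(n):
    """Build the ground Peano term S^n(Z) as nested tuples."""
    t = ("Z",)
    for _ in range(n):
        t = ("S", t)
    return t

print(even_automaton.accepts(nat(4)))   # True: S^4(Z) belongs to the regular model of even
print(even_automaton.accepts(nat(5)))   # False
```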
**Regular Herbrand Models.** Let \(\mathcal{H}\) be a Herbrand structure for a signature \(\langle \Sigma_S, \Sigma_F, \Sigma_P \rangle\). We say that an \(n\)-automaton \(A\) over \(\Sigma_F\) represents a relation \(X \subseteq |\mathcal{H}|_{\sigma_1} \times \ldots \times |\mathcal{H}|_{\sigma_n}\) iff
\[ X = \{(a_1, \ldots, a_n) \mid (a_1, \ldots, a_n) \text{ is accepted by } A,\ a_i \in |\mathcal{H}|_{\sigma_i}\}. \]
If there is a DFTA representing \(X\), we call \(X\) regular. We denote the class of regular relations by \(\mathrm{Reg}\). A structure \(\mathcal{H}\) is regular if it is \(\mathrm{Reg}\)-definable.
**4 Automated Inference of Regular Invariants**
In this section, we demonstrate an approach to obtaining regular models of CHCs over ADTs using a finite model finder, e.g., [12, 38, 44, 46]. The main outline is shown in Figure 2.
The algorithm works in four steps. Given a system of constrained Horn clauses, we first rewrite it into a system over uninterpreted function symbols by eliminating all disequalities from the clause bodies. Then we reduce satisfiability modulo the theory of ADTs to satisfiability modulo EUF and apply a finite model finder to construct a finite model of the resulting formula; finally, the finite model is translated into tree automata that represent a regular Herbrand model of the original system.
4.1 Translation to EUF
Recall that by definition, we call the system of CHCs over ADTs satisfiable if every clause is satisfied in some expansion of the Herbrand structure. The main insight is that this satisfiability problem can be reduced to checking the satisfiability of a formula over uninterpreted symbols in a usual first-order sense.
Informally, given a system of CHCs, we obtain another system by the replacement of all ADT constructors in all CHCs with uninterpreted function symbols. Thus we allow the interpretations of constructors to violate the ADT axioms (distinctiveness, injectivity, exhaustiveness, etc.). This system with uninterpreted symbols is either satisfiable or unsatisfiable in the usual first-order sense. If it is satisfiable, then every clause is satisfied by some structure \( \mathcal{M} \). We could use this structure \( \mathcal{M} \) to recover the interpretations of uninterpreted symbols in the Herbrand structure \( \mathcal{H} \) which satisfy the original system over \( \mathcal{H} \).
For instance, for the system of CHCs in the even example, we check the satisfiability of the following formula:
\[
\begin{aligned}
&\forall x.\ (x = Z \rightarrow even(x))\ \land \\
&\forall x, y.\ (x = S(S(y)) \land even(y) \rightarrow even(x))\ \land \\
&\forall x, y.\ (even(x) \land even(y) \land y = S(x) \rightarrow \bot)
\end{aligned}
\]
The formula is satisfied by the following finite model \( \mathcal{M} \):
\[
\begin{aligned}
|\mathcal{M}|_{Nat} &= \{0, 1\} \\
\mathcal{M}(Z) &= 0 \\
\mathcal{M}(S)(x) &= 1 - x \\
\mathcal{M}(even) &= \{0\}
\end{aligned}
\]
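As a quick sanity check (ours, not from the paper), one can verify by brute force over the two-element domain that this structure satisfies all three clauses of the formula above:

```python
DOM = [0, 1]
Z = 0
S = lambda x: 1 - x
even = {0}

clause1 = all(not (x == Z) or (x in even) for x in DOM)
clause2 = all(not (x == S(S(y)) and y in even) or (x in even) for x in DOM for y in DOM)
clause3 = all(not (x in even and y in even and y == S(x)) for x in DOM for y in DOM)
print(clause1, clause2, clause3)   # True True True
```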
4.2 Finite Models To Tree Tuples Automata
A procedure for constructing tree tuple automata (and, hence, regular models) from finite models follows immediately from the construction of an isomorphism between finite models and tree automata [35].
Given a finite structure \(\mathcal{M}\), we construct an automaton \(A_P = \langle |\mathcal{M}|, \Sigma_F, \mathcal{M}(P), \tau \rangle\) for every uninterpreted predicate symbol \(P\). The shared set of transitions \(\tau\) is defined as follows: for each \(f \in \Sigma_F\) with arity \(\sigma_1 \times \ldots \times \sigma_n \rightarrow \sigma\) and all \(x_i \in |\mathcal{M}|_{\sigma_i}\), the rule \(f(x_1, \ldots, x_n) \rightarrow \mathcal{M}(f)(x_1, \ldots, x_n)\) is in \(\tau\).
Thus, for the even example we obtain \(A_{even}\) isomorphic to the automaton introduced in Example 1.
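A minimal sketch of this construction (our own naming; it reuses the hypothetical `TreeAutomaton` and `nat` helpers from the earlier sketches) tabulates \(\mathcal{M}(f)\) over the finite domain into transition rules:

```python
from itertools import product

def automaton_from_model(domain, functions, relation):
    """Build A_P = (|M|, Sigma_F, M(P), tau) from a finite structure.

    domain    -- list of domain elements
    functions -- dict: constructor name -> (arity, Python function implementing M(f))
    relation  -- M(P), a set of tuples of domain elements, used as final states
    """
    delta = {}
    for ctor, (arity, f) in functions.items():
        for args in product(domain, repeat=arity):
            delta[(ctor, args)] = f(*args)
    return TreeAutomaton(delta, relation)

# The finite model of the even example from Sec. 4.1:
A_even = automaton_from_model(
    domain=[0, 1],
    functions={"Z": (0, lambda: 0), "S": (1, lambda x: 1 - x)},
    relation={(0,)},
)
print(A_even.accepts(nat(4)), A_even.accepts(nat(5)))   # True False
```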

**Theorem 2.** For the constructed automaton \( A_\mathcal{P} = \langle S, \Sigma_F, S^F, \tau \rangle \), \(\mathcal{L}(A_\mathcal{P}) = \{\langle t_1, \ldots, t_n \rangle \mid \langle \mathcal{M}[t_1], \ldots, \mathcal{M}[t_n] \rangle \in \mathcal{M}(P)\}\).
Proof. The proof is straightforward from the fact that \( A_\mathcal{P} \) reflects checking the satisfiability in \( \mathcal{M} \). \( \square \)
In practice, this means that CHCs over ADTs can be automatically solved by finite model finders, such as MACE4 [38], Finder [46], Paradox [12] or CVC4 in a special mode [44]: if a finite model (in the usual first-order sense) is found, then there exists a regular Herbrand model of the CHC system. In Sec. 5 we evaluate the implemented tool, with the finite model finding engine of CVC4 as a backend, against state-of-the-art CHC solvers.
4.3 Herbrand Models Without Equality
With the correspondence between finite models and tree automata in hand, it remains to show that the Herbrand model induced by the constructed tree automaton is a model of the original CHC system. In this subsection we show that it is straightforward when the system has no disequality constraints, but otherwise some additional steps should be done.
First, let us assume that the signature \(\Sigma\) of the assertion language does not have the equality symbol. Then there are no predicate symbols at all, and thus we may assume that every constraint in every CHC is \(\top\). For instance, the even example can be rewritten to:
\[
\begin{aligned}
even(Z) &\leftarrow \top \\
even(S(S(x))) &\leftarrow even(x) \\
\bot &\leftarrow even(x) \land even(S(x))
\end{aligned}
\]
**Lemma 3.** Suppose that a CHC system \(S\) over uninterpreted symbols \(\mathcal{R} = \{P_1, \ldots, P_k\}\) with no constraints is satisfied by some first-order structure \(\mathcal{M}\), i.e., \(\mathcal{M} \models C\) for all \(C \in S\). Let \(X_i = \{\langle t_1, \ldots, t_n \rangle \mid \langle \mathcal{M}[t_1], \ldots, \mathcal{M}[t_n] \rangle \in \mathcal{M}(P_i)\}\). Then \(\langle \mathcal{H}, X_1, \ldots, X_k \rangle\) is a Herbrand model of \(S\).
Proof. As clause bodies have no constraints, each CHC is of the form \( C \equiv R_1(t_1) \wedge \ldots \wedge R_m(t_m) \rightarrow H \). Then by definition \( \langle \mathcal{H}, X_1, \ldots, X_k \rangle \models C \iff \mathcal{M} \models C \), so every clause in \( S \) is satisfied by \( \langle \mathcal{H}, X_1, \ldots, X_k \rangle \). \( \square \)
For the above example, we put \( X \defeq \{t \mid \mathcal{M}[t] = 0\} = \{S^{2n}(Z) \mid n \geq 0\} \), indeed satisfying the system.
4.4 Herbrand Models With Equality
In the presence of the equality symbol, which has the predefined semantics, a finite model finder searches for a model in a completely free domain, thus, breaking the regular model. Consider the system consisting of the only CHC
\[
Z \neq S(Z) \rightarrow \bot.
\]
This system is unsatisfiable because $\mathcal{H} \models Z \neq S(Z)$. But in a usual first-order sense, i.e., if we treat $Z$ and $S$ as uninterpreted functions, this CHC is satisfiable, e.g., as follows:
$$|\mathcal{M}|_{Nat} = \{0\}$$
In general, every clause with a disequality constraint in the premise may be satisfied by falsifying its premise; it suffices to make the disequality false by picking a sort of cardinality 1.
We propose the following way of attacking this problem. For every ADT $\langle C, \sigma \rangle$, we introduce a fresh uninterpreted symbol $\text{diseq}_\sigma$ and define $\mathcal{R}' \equiv \mathcal{R} \cup \{\text{diseq}_\sigma \mid \sigma \in \Sigma_S\}$.
Below we present the construction of another system of CHCs $S'$ over $\mathcal{R}'$. Without loss of generality, we may assume that the constraint of each clause $C \in \mathcal{S}$ is in Negation Normal Form (NNF). Let $C'$ be the clause obtained from $C$ by substituting every literal of the form $\neg(t = u)$ in the constraint (which we refer to as a disequality constraint) with the atomic formula $\text{diseq}_\sigma(t, u)$; for every clause $C \in \mathcal{S}$, we add $C'$ into $S'$. Finally, for every ADT $\langle C, \sigma \rangle$, we add the following rules for $\text{diseq}_\sigma$ to $S'$ (a small sketch generating these rules is given after the list):
- For all distinct constructors $c, c'$ of sort $\sigma$:
$$\top \rightarrow \text{diseq}_\sigma(c(\overline{x}), c'(\overline{y}))$$
- For all constructors $c$ of sort $\sigma$, all argument positions $i$ with argument sort $\sigma'$, and variables $x$ and $y$ of sort $\sigma'$:
$$\text{diseq}_{\sigma'}(x, y) \rightarrow \text{diseq}_\sigma(c(\ldots, x, \ldots), c(\ldots, y, \ldots))$$
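The following Python sketch (ours; the textual clause format is purely illustrative) generates these diseq rules for an ADT described as a map from constructor names to argument sorts; for Nat it reproduces the first three clauses of Example 2 below.

```python
def diseq_rules(sort, constructors):
    """constructors: dict ctor name -> list of argument sorts."""
    rules = []
    names = list(constructors)
    # Distinct constructors are always disequal.
    for i, c in enumerate(names):
        for c2 in names[i + 1:]:
            rules.append(f"T -> diseq_{sort}({c}(...), {c2}(...))")
            rules.append(f"T -> diseq_{sort}({c2}(...), {c}(...))")
    # Disequality of some argument propagates to the whole term.
    for c, arg_sorts in constructors.items():
        for i, s in enumerate(arg_sorts):
            rules.append(f"diseq_{s}(x, y) -> diseq_{sort}({c}(.., x@{i}, ..), {c}(.., y@{i}, ..))")
    return rules

for r in diseq_rules("Nat", {"Z": [], "S": ["Nat"]}):
    print(r)
# T -> diseq_Nat(Z(...), S(...))
# T -> diseq_Nat(S(...), Z(...))
# diseq_Nat(x, y) -> diseq_Nat(S(.., x@0, ..), S(.., y@0, ..))
```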
Let $D_\sigma \overset{\text{def}}{=} \{(x, y) \in |\mathcal{H}|_\sigma \times |\mathcal{H}|_\sigma \mid x \neq y\}$ for each sort $\sigma$ in $\Sigma_S$.
It is well-known that the universal CHCs admit the least model, which is the denotational semantics of the program modeled by the CHCs, i.e., the least fixed point of the step operator. Thus, the following fact is trivial.
**Lemma 4.** The rules of $\text{diseq}_\sigma$ have the least model over $\mathcal{H}$, which interprets $\text{diseq}_\sigma$ by the relation $D_\sigma$.
As a corollary of this lemma, we state the following fact.
**Lemma 5.** For a CHC system $S$, let $S'$ be the system obtained from $S$ by replacing the disequality constraints as described above. Then, if $\langle \mathcal{H}, X_1, \ldots, X_k, Y_1, \ldots, Y_n \rangle \models S'$, then $\langle \mathcal{H}, X_1, \ldots, X_k, D_{\sigma_1}, \ldots, D_{\sigma_n} \rangle \models S'$ (here $Y_i$ and $D_{\sigma_i}$ interpret the diseq$_{\sigma_i}$ predicate symbols).
**Example 2.** For $S = \{Z \neq S(Z) \rightarrow \bot\}$ we get the following system of CHCs:
$$\top \rightarrow \text{diseq}_{\text{Nat}}(Z, S(x))$$
$$\top \rightarrow \text{diseq}_{\text{Nat}}(S(x), Z)$$
$$\text{diseq}_{\text{Nat}}(x, y) \rightarrow \text{diseq}_{\text{Nat}}(S(x), S(y))$$
$$\text{diseq}_{\text{Nat}}(Z, S(Z)) \rightarrow \bot.$$
Recall that $S$ is satisfiable in the usual first-order sense, but unsatisfiable over $\mathcal{H}$. However, $S'$ is unsatisfiable in the first-order sense as well, since the body of the query clause is derivable from the first rule, which solves our problem. In our workflow, we search for finite models of $S'$ instead of $S$, and then proceed as in the equality-free case. Finally, we end up with the following theorem:
**Theorem 6.** Let $S$ be a CHC system and $S'$ the CHC system obtained from it by replacing the disequality constraints. If there is a finite model of $S'$ over EUF, then there is a regular Herbrand model of $S$.
**Proof.** Without loss of generality, we may assume that each clause $C \in S$ is of the form (otherwise we rewrite the constraint into DNF, split it into different clauses, and eliminate all equality atoms by unification and substitution):
$$C \equiv y_1 \neq t_1 \land \ldots \land y_k \neq t_k \land R_1(\overline{x_1}) \land \ldots \land R_m(\overline{x_m}) \rightarrow H.$$
In $S'$, this clause becomes $C' \equiv \text{diseq}(y_1, t_1) \land \ldots \land \text{diseq}(y_k, t_k) \land R_1(\overline{x_1}) \land \ldots \land R_m(\overline{x_m}) \rightarrow H$.
So, each clause in $S'$ has no constraint (the rules of diseq have no constraints as well), and by Lemma 3 there is a model $\langle \mathcal{H}, X_1, \ldots, X_k, Y_1, \ldots, Y_n \rangle$ of every $C' \in S'$. Then, by Lemma 5 we have $\langle \mathcal{H}, X_1, \ldots, X_k, D_{\sigma_1}, \ldots, D_{\sigma_n} \rangle \models C'$. But $\langle \mathcal{H}, X_1, \ldots, X_k, D_{\sigma_1}, \ldots, D_{\sigma_n} \rangle \models C'$ iff $\langle \mathcal{H}, X_1, \ldots, X_k \rangle \models C$, thus giving us $\langle \mathcal{H}, X_1, \ldots, X_k \rangle \models C$ for every $C \in S$. $\square$
**On finite model existence for CHCs with disequality constraints.** There is an interesting observation about finite models and disequality constraints. It can be shown straightforwardly that if the ADT of sort $\sigma$ has infinitely many terms, then the CHC
$$\text{diseq}_\sigma(x, x) \rightarrow \bot$$
is satisfied only by an infinite structure, i.e., if we force the interpretation of $\text{diseq}_\sigma$ to omit all pairs of equal terms, then such a system has no finite models. By comparison, if we only force diseq to be false on a single tuple, a finite model may exist. For example, the query clause $Q$ over the Nat datatype with
$$Q \equiv \text{diseq}_{\text{Nat}}(Z, Z) \rightarrow \bot$$
is satisfiable in a finite model
$$|M|_{\text{Nat}} = \{0, 1\}, M(Z) = 0, M(S)(\ast) = 1,$$
$$M(\text{diseq}_{\text{Nat}}) = \{(0, 1), (1, 0), (1, 1)\}.$$
Intuitively, if in order to prove the satisfiability of the CHCs we need to assume the disequality of a large number of ground terms, the chance that a finite model exists gets lower. In practice, this means that tests containing disequality constraints are less likely to be satisfiable in a finite model. This is confirmed by our experimental evaluation (see Sec. 5).
**5 Implementation and Experiments**
We have evaluated our tool for inferring regular invariants against the state-of-the-art solvers Z3 and Eldarica on preexisting benchmarks.
**Implementation.** We have implemented a regular invariant inference tool called RegInv based on the preprocessing approach presented in Sec. 4 and an off-the-shelf finite model finder [44]. RegInv accepts input clauses in the SMTLIB2 [3] format and the TIP extension with the define-fun-rec construction [11]. It takes verification conditions together with a property, checks whether the property holds, and returns a safe inductive invariant if it does. Thus RegInv can be run as a backend solver for functional program verifiers, such as MoCHi [29] and RCAML [49].
RegInv can handle existentially-quantified Horn clauses. We run CVC4\(^3\) as a backend multi-sorted finite model finder to find regular models (see Sec. 3).
**Benchmarks.** We empirically evaluate RegInv against state-of-the-art CHC solvers on benchmarks taken from the works of Yang et al. [51] and De Angelis et al. [14] and from the "Tons of Inductive Problems" (TIP) benchmark set by Claessen et al. [11].
We have modified the benchmarks of Yang et al. [51] and De Angelis et al. [14] by replacing all non-ADT sorts with ADTs (e.g., the Int sort in LIA with Peano integers using the Nat ADT) and adding CHC definitions for non-ADT operations (for example, addition was replaced by the addition of Peano numbers expressed as two CHCs). Thus, the aggregated test set\(^6\) consists of 60 CHC systems over binary trees, queues, lists, and Peano numbers.
The test set was divided into two problem subsets, which we call PositiveEq and Diseq. PositiveEq is a set of CHC systems with equality occurring only positively in clause bodies. The Diseq set includes tests with occurrences of disequality constraints in clause bodies, substituted with diseq atoms, which is a sound transformation (see Sec. 4.4).
From TIP [11], we filtered out 377 problems with only ADT sorts (the remaining problems use combinations of ADTs with other theories), converted all of them into CHCs, replaced the disequalities with diseq atoms as described in Sec. 4.4, and replaced all free sorts declared via (declare-sort ... 0) with the Nat datatype. Thus, the TIP benchmark consists of 377 inductive ADT problems over lists, queues, regular expressions, and Peano integers, originally generated from functional programs.
**Compared tools.** The evaluation was performed against Z3/Spacer [15] with the Spacer engine [31] and Eldarica [26], state-of-the-art Horn solvers which construct elementary models and support ADTs. Spacer works with elementary model representations. It incorporates standard decision, interpolation and quantifier elimination techniques for ADTs [5]. Spacer is based on the property-directed reachability (PDR) technique, which alternates counterexample finding and safe invariant construction subtasks by propagating reachability facts and pushing partial safety lemmas in a property-directed way.
Eldarica builds models with size constraints, which count the total number of constructor occurrences in them. It relies on its own Princess SMT solver [45], which offers decision and interpolation procedures for ADTs with size constraints by reduction to a combination of AUF and LIA [25].
Finally, as a baseline we include the CVC4 induction solver [43] in the comparison (denoted CVC4-Ind\(^7\)), which leverages a number of techniques for inductive reasoning in SMT.
\(^3\)Using cvc4 --finite-model-find
\(^6\)The link is omitted for anonymity.
\(^7\)Using cvc4 --quant-ind --quant-cf --conjecture-gen --conjecture-gen-per-round=3 --full-saturate-quant
**Results.** The results are summarized in Table 1. On the PositiveEq and Diseq benchmark sets, Spacer solved 7 problems; for the rest, it ended with 8 UNKNOWN results and 45 timeouts. Eldarica solved 2 problems (which were also solved by Spacer) with 58 timeouts. RegInv found 31 regular solutions and one counterexample, and had 28 timeouts. Most of the solved problems are from the PositiveEq set.
**Figure 3.** Comparison of engine performance. Each point in a plot represents a pair of run times (sec x sec) of RegInv for regular model construction (x-axis) and of a competitor for elementary or size-constrained model construction (y-axis). Timeouts are placed on the inner dashed lines, crashes on the outer dashed lines.
**Table 1.** Results of the experiments on the three ADT problem sets. The number in each cell is the number of correct results within a 300-second time limit. RegInv was used for regular model construction, Spacer for elementary model construction, and Eldarica for building elementary models with size constraints.
On most of the solved benchmarks, RegInv was faster than the other tools. As Figure 3 shows, some unsafe benchmarks were handled faster by CVC4-Ind and Spacer. This is possibly due to a more effective quantifier instantiation procedure in CVC4-Ind and a more balanced tradeoff between invariant inference and counterexample search in the PDR core of Spacer.
**Other experiments.** We have tried 23 hand-written programs related to type theory (recall Sec. 2), questioning the inhabitation of different STLC types and the typability of STLC terms, as well as programs modeling different term-rewriting systems. All these benchmarks were intractable for all the solvers except the finite model finder. For that reason, we omit the detailed statistics. We have also tried to run other finite model finders (for example, MACE4) as a backend, but they showed worse results than CVC4.
**Discussion.** Clearly, finite model finding did much better on the benchmarks from Yang et al. [51], De Angelis et al. [14] and our own experiments. This is due to two reasons: the expressiveness of tree automata for representing the invariants, and the efficiency of RegInv's backend, the CVC4 finite model finding engine. More importantly, Spacer and Eldarica diverged more often because of the inexpressiveness of their FOL-based languages. Within the limits of their invariant representations, they perform smoothly.
On the TIP benchmarks Eldarica solved more test cases than RegInv, but an analysis of the test cases solved only by Eldarica showed that all such tests define the Peano ordering, which is easily handled by Eldarica through a reduction to LIA. On the test cases solved by both engines, RegInv was faster on average. Still, many interesting test cases in the TIP set obtained from proof assistants are currently beyond the abilities of the state-of-the-art engines under comparison.
From this evaluation we conclude that tree automata are very promising for the automated verification of ADT-manipulating programs: they often make it possible to express complex properties of recursive computation, and they can be efficiently inferred by existing engines.
6 Related Work
Language classes considered in this work have already been studied in the literature, albeit in separate works from different subfields of computer science.
**Finite models and tree automata.** The classic book on automated model building by Caferra et al. [7] gives a generous overview of finitely representable models and their features, such as decision procedures and closure properties. Some results on tree automata and their extensions are collected in Comon et al. [13]. There is also ongoing research on extensions of regular tree languages which still enjoy nice decidability and closure properties [8, 9, 17, 21, 27, 33].
A number of tools, like MACE4 [38], Finder [46], Paradox [12] and CVC4 [44], are used to find finite models of first-order formulas. Most of them implement a classic DPLL-like search with propagation of assignments. CVC4, in addition, uses conflict analysis to accelerate the search. These tools have been applied to various verification tasks [34] and even to the construction of infinite models [40]. Yet we are unaware of any application of finite model finders to the inference of invariants of ADT-manipulating programs.
Recently, Haudebourg et al. [23] proposed a regular abstract interpretation framework for invariant generation for higher-order functional programs over ADTs. The authors derive a type system where each type is a regular language and use CEGAR to infer regular invariants. Their procedure is much more complex because they support higher-order reasoning, which is not the goal of this paper, whose focus is comparing ADT invariant representations. Targeting first-order functions over ADTs only, we obtain a more straightforward invariant inference procedure by using effective finite model finders. Moreover, this work makes clear the gap between different invariant representations and their expressivity, and aims not to advertise regular invariants themselves but to overcome mental inertia towards elementary invariant representations.
**Herbrand model representations.** There is a line of work studying different computable representations of Herbrand models [18, 19, 22, 48], which may be fruitful to study in order to find new ADT invariant representations. Even though tree automata enjoy many effective properties, they are limited in their expressive power, so a number of their extensions have been widely studied in the automated model building field [7]. A survey on computational representations of Herbrand models, their properties, expressive power, correspondences and decision procedures can be found in [36, 37].
**ADT solving.** There are plenty of proposed quantifier elimination algorithms and decision procedures for the first-order ADT fragment [4, 39, 41, 42, 47] and for an extension of ADTs with constraints on term sizes [52]. Some works discuss Craig interpolation for ADT constraints [25, 28]. Such techniques are incorporated in various SMT solvers, like Z3 [15], CVC4 [2] and Princess [45].
Some work on automated induction for ADTs has been proposed. Support for inductive proofs exists in deductive verifiers, such as Dafny [32], and in SMT solvers [43]. The technique in CVC4 is deeply integrated at the SMT level: it implements Skolemization with inductive strengthening and term enumeration to find adequate subgoals. De Angelis et al. [14] introduce a technique for eliminating ADTs from a CHC system by transforming it into a CHC system over integers and booleans. Recently, Yang et al. [51] applied a method based on Syntax-Guided Synthesis [1] to leverage induction by generating supporting lemmas based on failed proof subgoals and user-specified templates.
7 Conclusion
We have demonstrated that tree automata are very promising for representing the invariants of computation over ADTs, as they make it possible to express properties of unbounded depth. On the downside, tree automata cannot express relations between different variables.
Using the correspondence between finite models and tree automata, we were able to use finite model finders for the automated inference of regular inductive invariants. We have bypassed the problem of disequality constraints in the verification conditions and implemented a tool which automatically infers regular invariants of ADT-manipulating programs. This tool is competitive with the state-of-the-art CHC solvers Z3/Spacer and Eldarica. Using it, we have managed to detect interesting invariants of various inductive problems, including a non-trivial invariant for the inhabitation checking of STLC.
References
|
{"Source-Url": "https://www.sci.unich.it/hcvs21/papers/HCVS_2021_paper_6.pdf", "len_cl100k_base": 12525, "olmocr-version": "0.1.50", "pdf-total-pages": 10, "total-fallback-pages": 0, "total-input-tokens": 48080, "total-output-tokens": 16712, "length": "2e13", "weborganizer": {"__label__adult": 0.0004351139068603515, "__label__art_design": 0.0005626678466796875, "__label__crime_law": 0.0005197525024414062, "__label__education_jobs": 0.0015249252319335938, "__label__entertainment": 0.00013172626495361328, "__label__fashion_beauty": 0.0002440214157104492, "__label__finance_business": 0.00031685829162597656, "__label__food_dining": 0.0006527900695800781, "__label__games": 0.0011005401611328125, "__label__hardware": 0.0012826919555664062, "__label__health": 0.00095367431640625, "__label__history": 0.00046944618225097656, "__label__home_hobbies": 0.00020515918731689453, "__label__industrial": 0.000823974609375, "__label__literature": 0.0006546974182128906, "__label__politics": 0.0004887580871582031, "__label__religion": 0.000812530517578125, "__label__science_tech": 0.1815185546875, "__label__social_life": 0.0001480579376220703, "__label__software": 0.00795745849609375, "__label__software_dev": 0.79736328125, "__label__sports_fitness": 0.0004072189331054687, "__label__transportation": 0.001068115234375, "__label__travel": 0.00026226043701171875}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 55619, 0.02008]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 55619, 0.31392]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 55619, 0.81635]], "google_gemma-3-12b-it_contains_pii": [[0, 4761, false], [4761, 9208, null], [9208, 16705, null], [16705, 23741, null], [23741, 28938, null], [28938, 35429, null], [35429, 39423, null], [39423, 42233, null], [42233, 48549, null], [48549, 55619, null]], "google_gemma-3-12b-it_is_public_document": [[0, 4761, true], [4761, 9208, null], [9208, 16705, null], [16705, 23741, null], [23741, 28938, null], [28938, 35429, null], [35429, 39423, null], [39423, 42233, null], [42233, 48549, null], [48549, 55619, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 55619, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 55619, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 55619, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 55619, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 55619, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 55619, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 55619, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 55619, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 55619, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 55619, null]], "pdf_page_numbers": [[0, 4761, 1], [4761, 9208, 2], [9208, 16705, 3], [16705, 23741, 4], [23741, 28938, 5], [28938, 35429, 6], [35429, 39423, 7], [39423, 42233, 8], [42233, 48549, 9], [48549, 55619, 10]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 55619, 0.0]]}
|
olmocr_science_pdfs
|
2024-11-27
|
2024-11-27
|
889b956f750695e40600299a877ee4960e718e14
|
[REMOVED]
|
{"Source-Url": "https://hal.laas.fr/hal-02263832/document", "len_cl100k_base": 13678, "olmocr-version": "0.1.53", "pdf-total-pages": 18, "total-fallback-pages": 0, "total-input-tokens": 64777, "total-output-tokens": 17374, "length": "2e13", "weborganizer": {"__label__adult": 0.0004031658172607422, "__label__art_design": 0.0007033348083496094, "__label__crime_law": 0.0005083084106445312, "__label__education_jobs": 0.0022449493408203125, "__label__entertainment": 0.00014150142669677734, "__label__fashion_beauty": 0.00022804737091064453, "__label__finance_business": 0.0005116462707519531, "__label__food_dining": 0.0005273818969726562, "__label__games": 0.0010442733764648438, "__label__hardware": 0.0017251968383789062, "__label__health": 0.0010976791381835938, "__label__history": 0.0006551742553710938, "__label__home_hobbies": 0.0002357959747314453, "__label__industrial": 0.0012083053588867188, "__label__literature": 0.0006289482116699219, "__label__politics": 0.0005106925964355469, "__label__religion": 0.0007853507995605469, "__label__science_tech": 0.4326171875, "__label__social_life": 0.0001628398895263672, "__label__software": 0.00939178466796875, "__label__software_dev": 0.54248046875, "__label__sports_fitness": 0.00036978721618652344, "__label__transportation": 0.001434326171875, "__label__travel": 0.0002758502960205078}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 55460, 0.05967]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 55460, 0.31069]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 55460, 0.85587]], "google_gemma-3-12b-it_contains_pii": [[0, 1150, false], [1150, 3683, null], [3683, 6910, null], [6910, 10338, null], [10338, 13520, null], [13520, 17273, null], [17273, 20843, null], [20843, 25195, null], [25195, 27495, null], [27495, 31231, null], [31231, 34808, null], [34808, 38771, null], [38771, 40808, null], [40808, 43892, null], [43892, 46208, null], [46208, 49297, null], [49297, 52394, null], [52394, 55460, null]], "google_gemma-3-12b-it_is_public_document": [[0, 1150, true], [1150, 3683, null], [3683, 6910, null], [6910, 10338, null], [10338, 13520, null], [13520, 17273, null], [17273, 20843, null], [20843, 25195, null], [25195, 27495, null], [27495, 31231, null], [31231, 34808, null], [34808, 38771, null], [38771, 40808, null], [40808, 43892, null], [43892, 46208, null], [46208, 49297, null], [49297, 52394, null], [52394, 55460, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 55460, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 55460, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 55460, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 55460, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 55460, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 55460, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 55460, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 55460, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 55460, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 55460, null]], "pdf_page_numbers": [[0, 1150, 1], [1150, 3683, 2], [3683, 6910, 3], [6910, 10338, 4], [10338, 13520, 5], [13520, 17273, 6], 
[17273, 20843, 7], [20843, 25195, 8], [25195, 27495, 9], [27495, 31231, 10], [31231, 34808, 11], [34808, 38771, 12], [38771, 40808, 13], [40808, 43892, 14], [43892, 46208, 15], [46208, 49297, 16], [49297, 52394, 17], [52394, 55460, 18]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 55460, 0.09459]]}
|
olmocr_science_pdfs
|
2024-12-07
|
2024-12-07
|
a2370116b926e87541d5461de8ea72126dd4c977
|
Pitfalls and Best Practices in Algorithm Configuration
Katharina Eggensperger
Marius Lindauer
Frank Hutter
Institut für Informatik, Albert-Ludwigs-Universität Freiburg,
Georges-Köhler-Allee 74, 79110 Freiburg, Germany
Abstract
Good parameter settings are crucial to achieve high performance in many areas of artificial intelligence (AI), such as propositional satisfiability solving, AI planning, scheduling, and machine learning (in particular deep learning). Automated algorithm configuration methods have recently received much attention in the AI community since they replace tedious, irreproducible and error-prone manual parameter tuning and can lead to new state-of-the-art performance. However, practical applications of algorithm configuration are prone to several (often subtle) pitfalls in the experimental design that can render the procedure ineffective. We identify several common issues and propose best practices for avoiding them. As one possibility for automatically handling as many of these as possible, we also propose a tool called GenericWrapper4AC.
1. Introduction
To obtain peak performance of an algorithm, it is often necessary to tune its parameters. The AI community has recently developed automated methods for the resulting algorithm configuration (AC) problem to replace tedious, irreproducible and error-prone manual parameter tuning. Some example applications, for which automated AC procedures led to new state-of-the-art performance, include satisfiability solving (Hutter, Babić, Hoos, & Hu, 2007a; Hutter et al., 2017), maximum satisfiability (Ansótegui, Gabás, Malitsky, & Sellmann, 2016), scheduling (Chiarandini, Fawcett, & Hoos, 2008), mixed integer programming (Hutter, Hoos, & Leyton-Brown, 2010a; López-Ibáñez & Stützle, 2014), evolutionary algorithms (Bezerra, López-Ibáñez, & Stützle, 2016), answer set solving (Gebser et al., 2011), AI planning (Vallati, Fawcett, Gerevini, Hoos, & Saetti, 2013) and machine learning (Thornton, Hutter, Hoos, & Leyton-Brown, 2013; Feurer, Springenberg, & Hutter, 2015).
Although the usability of AC systems improved over the years (e.g., SpySMAC, Falkner, Lindauer, & Hutter, 2015), we still often observe fundamental issues in the design and execution of experiments with algorithm configuration methods by both experts and new users. The goals of this work are therefore to:
- highlight the many pitfalls we have encountered in AC experiments (run by ourselves and others);
- present best practices to avoid most of these pitfalls; and
- propose a unified interface between an AC system and the algorithm it optimizes (the so-called target algorithm) that directly implements best practices related to properly measuring the target algorithm’s performance with different parameter settings.
Providing recommendations and best practices on how to empirically evaluate algorithms and avoid pitfalls is a topic of interest cutting across all of artificial intelligence, including, e.g., evolutionary optimization (Weise, Chiong, & Tang, 2012), algorithms for NP-complete problems (Gent et al., 1997), and reinforcement learning (Henderson et al., 2018) to mention only a few. Running and comparing implementations of algorithms is the most commonly used approach to understand the behaviour of the underlying method (McGeoch, 1987). There is a rich literature on how to best conduct such empirical studies (Hooker, 1995; Gent et al., 1997; Howe & Dahlman, 2002; McGeoch, 2002, 2012), and for some journals abiding by such guidelines is even mandatory in order to publish research (Dorigo, 2016; Laguna, 2017). Research in AC depends even more on proper empirical methodology than the rest of artificial intelligence, since AC systems need to automatically evaluate the empirical performance of different algorithm variants in their inner loop in order to find configurations with better performance. Nevertheless, many of the underlying characteristics of empirical evaluations still remain the same as for other domains, and our guidelines thus share many characteristics with existing guidelines and extend them to the setting faced in AC.
The structure of this work is as follows. First, we provide a brief overview of AC, including some guidelines for new users, such as why and when to use AC, and how to set up effective AC experiments (Section 2). Afterwards, we describe common pitfalls in using AC systems and recommendations on how to avoid them. We first discuss pitfalls concerning the interface between AC systems and target algorithms (Section 3), followed by pitfalls regarding over-tuning (Section 4). Throughout, we illustrate pitfalls by AC experiments on propositional satisfiability solvers (Biere, Heule, van Maaren, & Walsh, 2009) as a prototypical AC example, but insights directly transfer to other AC problems. From our own experiences, we provide further general recommendations for effective configuration in Section 5. We end by presenting a package to provide an interface between AC systems and target algorithms that aims to improve the reliability, reproducibility and robustness of AC experiments (Section 6).
2. Background: Algorithm Configuration
The algorithm configuration problem can be briefly described as follows: given an algorithm \( A \) to be optimized (the so-called target algorithm) with parameter configuration space \( \Theta \), a set of instances \( \Pi \), and a cost metric \( c : \Theta \times \Pi \to \mathbb{R} \), find a configuration \( \theta^* \in \Theta \) that minimizes the cost metric \( c \) across the instances in \( \Pi \):
\[
\theta^* \in \arg \min_{\theta \in \Theta} \sum_{\pi \in \Pi} c(\theta, \pi).
\]
A concrete example for this algorithm configuration problem would be to find a parameter setting $\theta \in \Theta$ of a solver $\mathcal{A}$ for the propositional satisfiability problem (SAT) (such as *glucose*, Audemard & Simon, 2009 or *lingeling*, Biere, 2013) on a set of CNF instances $\Pi$ (e.g., SAT-encoded hardware or software verification instances) that minimizes $\mathcal{A}$’s average runtime $c$. Another example would be to find a hyperparameter setting for a machine learning algorithm that minimizes its error $c$ on a given dataset (Snoek, Larochelle, & Adams, 2012; Feurer et al., 2015); in this latter example, $c$ would be validation error, either measured via $k$-fold inner cross-validation (giving rise to $k$ instances for algorithm configuration) or a single validation set (in which case there is just a single instance for algorithm configuration).
The general workflow of a sequential algorithm configuration procedure (short: configurator) is shown in Figure 1. In each step, the configurator picks a configuration $\theta \in \Theta$ and an instance $\pi \in \Pi$, triggers a run of algorithm $\mathcal{A}$ with configuration $\theta$ on instance $\pi$ with a maximal runtime cutoff $\kappa$ (and other resource limitations that apply, such as a memory limit), and measures the resulting cost $c(\theta, \pi)$. As detailed in Section 6, this step is usually mediated by a target-algorithm specific wrapper. The configurator uses this collected data about the target algorithm’s performance to find a well-performing configuration, typically operating as an anytime procedure until its configuration budget is exhausted (e.g., a maximal number of target algorithm calls or a time budget)$^2$; when terminated, it returns its current incumbent, i.e., the best found configuration so far.
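To make this workflow concrete, the following minimal Python sketch shows the inner loop of a generic anytime configurator. The `Configurator` methods (`pick_next`, `update`, `incumbent`) and the wrapper's `run_target` function are hypothetical placeholders introduced only for illustration; they are not the API of SMAC, irace, or any other actual configurator.

```python
import time

def configure(configurator, wrapper, kappa_max, budget_seconds):
    """Generic anytime AC loop: keep evaluating configurations on instances
    until the configuration budget is exhausted, then return the incumbent
    (the best configuration found so far)."""
    start = time.time()
    while time.time() - start < budget_seconds:
        # The configurator proposes a configuration, an instance, a seed and
        # a (possibly adaptively capped) runtime cutoff kappa <= kappa_max.
        theta, pi, seed, kappa = configurator.pick_next(kappa_max)
        # The wrapper runs the target algorithm under the given resource
        # limits and reports the measured cost c(theta, pi).
        cost = wrapper.run_target(theta, pi, seed, kappa)
        # The configurator updates its internal state (e.g., a performance
        # model or racing statistics) and possibly its incumbent.
        configurator.update(theta, pi, seed, kappa, cost)
    return configurator.incumbent
```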
2.1 Why and When to Consider AC?
Algorithm configuration should always be considered if (i) the empirical performance of an algorithm is relevant and (ii) the algorithm has performance-relevant parameters. This is quite obvious for most empirical studies showing that a new algorithm $\mathcal{A}$ establishes a new state-of-the-art performance on benchmark problem $X$. However, in this setting it is also important to tune the parameters of all algorithms to compare against — without this, a comparison would not be fair because one of the algorithms may only perform best because its parameters were tuned with the most effort (Hooker, 1995). Indeed, as shown several times in the AC literature, optimized configurations often perform much better than default ones; in some cases, the default configuration may even be worse than one drawn uniformly at random (e.g., see Figure 6a).

---

2. Alternatively, the termination criterion could be defined as stopping when no (or only little) further improvement is expected. Although this is a common choice for some other anytime algorithms, such as gradient descent, we often observe that AC trajectories are step functions with long periods of time between finding improving configurations, complicating the prediction of whether improvements will still happen. For these reasons and to enable an efficient use of resources, we chose to treat the budgets as discussed in the text.
There are several other advantages of AC compared to manual parameter tuning (cf. López-Ibáñez, Dubois-Lacoste, Caceres, Birattari, & Stützle, 2016), including:
**Reproducibility** Automated algorithm configuration is often more reproducible than manual parameter tuning. Manual parameter tuning strongly depends on the experience and intuition of an expert for the algorithm at hand and/or for the given instance set. This manual procedure often cannot be reproduced by other users. If algorithm developers also make their configuration spaces available (e.g., as the authors of Lingeling, Biere, 2014, and Clasp, Gebser, Kaufmann, & Schaub, 2012, do), reproducing the performance of an algorithm using AC is feasible.
**Less human-time** Assuming that a reasonable configuration space is known, applying algorithm configuration is often much more efficient than manual parameter tuning. While ceding this tedious task to algorithmic approaches can come at the cost of requiring more computational resources, these tend to be quite cheap compared to paying a human expert and are increasingly widely available.
**More thoroughly tested** Since humans are impatient by nature (e.g., during the development of algorithms), they often focus on a rather small subset of instances to get feedback fast and to evaluate another configuration. Compared to humans, configurators often evaluate (promising) configurations more thoroughly on more instances.
**More configurations evaluated** For similar reasons as above, humans tend to evaluate far fewer configurations than most configurators do.
However, there are also two major limitations of AC, which must be considered:
**Homogeneous instances** To successfully apply AC, the instances have to be similar enough such that configurations that perform well on subsets of them also tend to perform well on others; we call such instance sets homogeneous. If the instances are not homogeneous, it is harder to find a configuration that performs well on average; it is even possible that a configurator returns a configuration $\theta$ that performs worse than the default one (although $\theta$ may appear to perform better based on the instances the configurator could consider within its limited budget). Unfortunately, so far, none of the existing AC tools implement an automatic check whether the given instances are sufficiently homogeneous. For heterogeneous instance sets, portfolio approaches (Xu, Hutter, Hoos, & Leyton-Brown, 2008; Kadioglu, Malitsky, Sabharwal, Samulowitz, & Sellmann, 2011; Malitsky, Sabharwal, Samulowitz, & Sellmann, 2012; Lindauer, Hoos, Hutter, & Schaub, 2015) or instance-specific algorithm configuration (Xu, Hoos, & Leyton-Brown, 2010; Kadioglu, Malitsky, Sellmann, & Tierney, 2010) provide alternative solutions.
**Specialization** From the restriction to homogeneous instances, the second limitation of AC follows: the optimized configurations (returned by a configurator) are always
specialized to the instance set and cost metric at hand. It is hard to obtain a robust configuration on a large variety of heterogeneous instances. (In fact, it is not even guaranteed that a single configuration with strong performance on all instances exists.)
2.2 Setting up AC Experiments
In the following, we describe the typical steps to set up and run AC experiments, and provide pointers to the pitfalls and best practices discussed later.
1. Define an instance set of interest, which should be homogeneous (see Section 5.3) and representative of future instances (see Section 5.2);
2. Split your instances into training and test instances (see Section 5.1); the test instances are later used to safeguard against over-tuning effects (see Section 4.2);
3. Define the ranges of all performance-relevant parameters giving rise to the configuration space (see Sections 5.6 and 5.7);
4. Implement the interface between your algorithm and the configurator; take Pitfalls 1-4 into consideration (Section 3);
5. Choose your preferred configurator (e.g., ParamILS, GGA++, irace or SMAC; see Section 2.3)
6. Define the resource limitations your algorithm (cutoff time and memory limit) and the configurator (configuration budget) should respect (see Section 5.4);
7. Define your cost metric to be optimized. If the cost metric is runtime, configurators typically optimize PAR10, the penalized average runtime (in CPU seconds) that counts runs exceeding the cutoff time \( \kappa \) as \( 10 \cdot \kappa \) (a minimal PAR10 computation is sketched after this list); furthermore, please consider Pitfalls 2 and 3 (see Section 3.2) and the recommendations in Section 5.8 for runtime optimization. If the cost metric is related to the quality of the solution, e.g., the error of a model on a dataset, configurators typically minimize validation error;
8. Run the AC experiments on the training instances and obtain the final incumbent—consider using parallel runs (Section 5.5);
9. Evaluate the default configuration and the optimized configuration on the test instances, to obtain an unbiased estimate of generalization performance on new instances, and to assess over-tuning effects (Section 4);
10. Optionally, use further tools to obtain visualizations and gain more insights from the AC experiments, e.g., CAVE (Biedenkapp, Marben, Lindauer, & Hutter, 2018).
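The PAR10 metric mentioned in step 7 is simple to compute from measured runtimes. The sketch below is a minimal illustration; it assumes that each run is reported as a (runtime, solved) pair, a convention introduced here only for the example.

```python
def par10(results, kappa):
    """Penalized average runtime: runs that exceeded the cutoff kappa
    (or did not finish successfully) are counted as 10 * kappa seconds.

    `results` is a list of (runtime_in_seconds, solved) pairs, where
    `solved` is False for timeouts and crashes."""
    penalized = [t if solved and t <= kappa else 10.0 * kappa
                 for t, solved in results]
    return sum(penalized) / len(penalized)

# Example: three runs with a 300-second cutoff, one of them a timeout.
print(par10([(12.3, True), (250.0, True), (300.0, False)], kappa=300))
# -> (12.3 + 250.0 + 3000.0) / 3
```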
As an exemplary application where AC yields dramatic speedups, we ran SMAC to optimize 75 parameters of the solver Clasp (Gebser et al., 2012) to solve N-Rooks (Manthey & Steinke, 2014a) instances. We will return to this scenario in more detail in Subsection 4.2. Here, we used a training set of 484 instances and a test set of 351 instances to evaluate the best found configurations over time. We used a cutoff of 300 seconds, within which the default configuration solves 82% of all training instances. Figure 2 reports results from 16 independent SMAC runs, showing that AC using an adequate setup can robustly yield large speedups compared to not tuning the algorithm.
Figure 2: Exemplary application of AC, optimizing 75 parameters of Clasp to solve N-Rooks problems. At each time step $t$, we show the penalized average runtime (PAR10) score on the training set (orange) and test set (green) of the incumbent configuration at time $t$. I.e., at each time step, we take the best configuration found so far (the one the configurator would return if stopped at that time), run the algorithm with it on the training and test set, and record its PAR10 score. We show the median and quartiles of repeating this process 16 times using different random seeds.
2.3 Approaches for Solving the AC Problem
For subproblems of the AC problem that deal neither with instances nor with capped and censored runs, there exist several approaches in the fields of parameter tuning, hyperparameter optimization and expensive black-box optimization. Prominent examples include Bayesian optimization (Mockus, Tiesis, & Zilinskas, 1978; Shahriari, Swersky, Wang, Adams, & de Freitas, 2016), sequential parameter optimization (Bartz-Beielstein, Lasarczyk, & Preuss, 2010), evolution strategies (Hansen, 2006), and combinations of several classical search strategies (Ansel et al., 2014).
For solving the full AC problem, there are several configurators. *ParamILS* (Hutter, Hoos, Leyton-Brown, & Stützle, 2009) uses local search in the configuration space, employing a racing strategy to decide which of two configurations performs better without running both of them on all instances. Recently, Cáceres and Stützle (2017) also proposed to use variable neighborhood search instead of the iterated local search used in *ParamILS*. *irace* (López-Ibáñez et al., 2016) uses iterative races via F-race (Birattari, Stützle, Paquete, & Varrentrapp, 2002) on a set of sampled configurations to determine the best one. *SMAC* (Hutter, Hoos, & Leyton-Brown, 2011) and its distributed version *dSMAC* (Hutter, Hoos, & Leyton-Brown, 2012) use probabilistic models of algorithm performance, so-called empirical performance models (Hutter, Xu, Hoos, & Leyton-Brown, 2014b), to guide the search for good configurations by means of an extension of Bayesian Optimization (Brochu, Cora, & de Freitas, 2010). *GGA* (Ansótegui, Sellmann, & Tierney, 2009) represents parameters as genes and uses a genetic algorithm with a competitive and a non-competitive gender; its newest version *GGA++* (Ansótegui, Malitsky, Sellmann, & Tierney, 2015) also uses an empirical performance model for guidance. For a more detailed description of these algorithms, we refer the interested reader to the original papers or to the report of the Configurable SAT Solver Challenge (Hutter et al., 2017).
If the cost metric $c$ is runtime using PAR10 scores, several configurators use an adaptive capping strategy (Hutter et al., 2009) to terminate slow algorithm runs prematurely to save time. For example, if the maximal cutoff time used at test time is $\kappa_{\text{max}} = 5000$ seconds and the best configuration known so far solves each instance in 10 seconds, we can save dramatically by cutting off slow algorithm runs after $\kappa > 10$ seconds instead of running all the way to $\kappa_{\text{max}}$. Since $\kappa$ is adapted dynamically, each target algorithm run can be issued with a different one.
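A much simplified sketch of this idea follows. Real configurators such as ParamILS and SMAC use more elaborate capping rules (e.g., comparing runtimes accumulated over several instances and adding bounded slack), so the per-instance rule and the `slack` factor below are illustrative assumptions only.

```python
def adaptive_cutoff(best_runtime_so_far, kappa_max, slack=2.0):
    """Pick the cutoff for running a challenger configuration on an instance:
    there is no need to grant the full kappa_max if the incumbent already
    solves this instance much faster. The slack factor leaves headroom so
    that slightly slower but still competitive configurations survive."""
    if best_runtime_so_far is None:  # no successful run on this instance yet
        return kappa_max
    return min(kappa_max, slack * best_runtime_so_far)

# With kappa_max = 5000s and an incumbent solving the instance in 10s,
# a challenger run is cut off after at most 20s instead of 5000s.
print(adaptive_cutoff(10.0, 5000.0))  # -> 20.0
```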
2.4 The Role of the Target Algorithm Wrapper
As depicted in Figure 1, configurators execute the target algorithm with configurations $\theta \in \Theta$ on instances $\pi \in \Pi$ and measure the resulting cost $c(\theta, \pi)$. To be generally applicable, configurators specify an interface through which they evaluate the cost $c(\theta, \pi)$ of arbitrary algorithms to be optimized. For a new algorithm $A$, users need to implement this interface to actually execute $A$ with the desired configuration $\theta$ on the desired instance $\pi$ and measure the desired cost metric $c(\theta, \pi)$ (e.g., runtime required to solve a SAT instance or validation error of a machine learning model).
In order to avoid having to change the algorithm to be optimized, this interface is usually implemented by a wrapper. In the simplest case, the input to the wrapper is just a parameter configuration $\theta$, but in general AC it also includes an instance $\pi$, and it can also include a random seed and computational resource limits, such as a runtime cutoff $\kappa$. Given these inputs, the wrapper executes the target algorithm with configuration $\theta$ on instance $\pi$, and measures and returns the desired cost metric $c(\theta, \pi)$.
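To make this contract concrete, the following minimal wrapper sketch reads an instance, a seed, a cutoff and a configuration from the command line, runs the target algorithm, and reports the measured cost. The call convention, the solver path and the output format are assumptions made for this illustration; the exact interface differs between configurators (SMAC, irace, GGA) and is documented by each tool.

```python
import subprocess
import sys
import time

def main():
    # Hypothetical call convention:
    #   wrapper.py <instance> <seed> <cutoff> --param1 v1 --param2 v2 ...
    instance, seed, cutoff = sys.argv[1], sys.argv[2], float(sys.argv[3])
    params = sys.argv[4:]

    cmd = ["./target_solver", instance, "--seed", seed] + params
    start = time.time()
    try:
        proc = subprocess.run(cmd, capture_output=True, text=True, timeout=cutoff)
        runtime = time.time() - start
        status = "SUCCESS" if proc.returncode == 0 else "CRASHED"
    except subprocess.TimeoutExpired:
        runtime, status = cutoff, "TIMEOUT"

    # Report the measured cost c(theta, pi) back to the configurator; here we
    # simply print a generic "status, runtime" line to stdout.
    print(f"Result: {status}, {runtime:.2f}")

if __name__ == "__main__":
    main()
```

Note that `subprocess.run` only terminates its direct child on timeout; Section 3.2 discusses why a robust wrapper should terminate the whole process tree.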
3. Pitfalls and Best Practices Concerning Algorithm Execution
In this and the next section, we describe common pitfalls in algorithm configuration and illustrate their consequences on existing benchmarks from the algorithm configuration library AClib (Hutter et al., 2014a). Based on the insights we acquired in thousands of algorithm configuration experiments over the years, we propose best practices to avoid these pitfalls.
Throughout, we will use the state-of-the-art configurator SMAC (Hutter et al., 2011) as an example, typically optimizing PAR10. Where not specified otherwise, we ran all experiments on the University of Freiburg’s META cluster, each of whose nodes shares 64 GB of RAM among two Intel Xeon E5-2650v2 8-core CPUs with 20 MB L3 cache and runs Ubuntu 14.04 LTS 64 bit.
---
3. As a side note, we remark that for model-based methods the internal model needs to handle dynamic timeouts arising from adaptive capping and PAR10 scores for guiding the search are based on predictions of that model. Furthermore, evaluations of incumbents for validation purposes are done purely with a fixed timeout $\kappa_{\text{max}}$, making PAR10 values comparable across configurators.
4. An alternative to a general wrapper would be a programming language-specific reliable interface for the communication between configurator and target algorithm (Hoos, 2012), which would make it easier for users to apply algorithm configuration to new target algorithms. However, the design of such an interface would also need to consider the pitfalls identified in this paper.
5. See www.aclib.net
6. Data and scripts for the experiments in this paper are available at http://www.automl.org/best-practices-in-algorithm-configuration/.
3.1 Pitfall 1: Trusting Your Target Algorithm
Many state-of-the-art algorithms have been exhaustively benchmarked and tested with their default parameter configuration. However, since the configuration space of many algorithms is very large, we frequently observed hidden bugs triggered only by rarely-used combinations of parameter values. For example, Hutter et al. (2010a) reported finding bugs in mixed integer programming solvers and Manthey and Lindauer (2016) bugs in SAT solvers. Due to the size of the associated configuration spaces (e.g., 214 parameters and a discretized space of $10^{86}$ configurations in the state-of-the-art SAT solver Riss, Manthey, 2014b), exhaustive checks are infeasible in practice.
Over the years, the types of bugs we have experienced even in commercial solvers (that are the result of dozens of person-years of development time) include:
- Segmentation faults, Null pointer exceptions, and other unsuccessful algorithm terminations;
- Wrong results (e.g., claiming a satisfiable SAT instance to be unsatisfiable);
- Not respecting a specified runtime cutoff that is passed as an input;
- Not respecting a specified memory limit that is passed as an input;
- Rounding down runtime cutoffs to the next integer (even if that integer is zero); and
- Returning faulty runtime measurements (even negative ones!)
Effects The various issues above have a multitude of negative effects, from obvious to subtle. If the algorithm run does not respect its resource limits this can lead to congested compute nodes (see Pitfall 3) and to configurator runs that are stuck waiting for an endless algorithm run to finish. Wrongly reported runtimes (e.g., close to negative infinity in one example) can lead to endless configuration runs when trusted. Rounding down cutoff times can let configurators miss the best configuration (e.g., when they use adaptive capping to cap runtimes at the best observed runtime for an instance – if that runtime is below one second then each new configuration will fail on the instance due to using a cutoff of zero seconds).
Algorithm crashes can be fairly benign when they are noticed and counted with the highest possible cost, but they can be catastrophic when not recognized as crashes: e.g., when blindly minimizing an algorithm’s runtime the configurator will typically simply find a configuration that crashes quickly. While this can be exploited to quickly find bugs (Hutter et al., 2010a; Manthey & Lindauer, 2016), obtaining faulty configurations is typically the worst possible result of using algorithm configuration in practice. Bugs that lead to wrong results tend to be discovered by configurators when optimizing for runtime, since (at least for $NP$-hard problems) we found that such bugs often allow algorithms to find shortcuts and thus shorten runtimes. Therefore, blindly minimizing runtime without solution checking often yields faulty configurations.
Detailed Example In 2012, we used algorithm configuration to minimize the runtime of the state-of-the-art solver glucose (Audemard & Simon, 2009). We quickly found a parameter configuration that appeared to yield new state-of-the-art performance on the industrial instances of the SAT Challenge 2012\(^7\); however, checking this configuration with the authors of *Glucose* revealed that it led to a bug which made *Glucose* falsely report some satisfiable instances as unsatisfiable.\(^8\)

---

7. http://baldur.iti.kit.edu/SAT-Challenge-2012/

Figure 3: Difference in test set performance as judged when trusting the target algorithm (green) and using external solution checking (orange). We plot the penalized average runtime (PAR10) scores of *Glucose* v2.1 on the industrial instances from the SAT Challenge 2012, as a function of time spent for configuration, when the configuration process trusted *Glucose* v2.1 to be correct. We ran 12 SMAC runs and at each time step show the median and quartiles of their incumbents’ scores. The green curve computes these scores trusting the solutions *Glucose* returns, while the orange curve penalizes faulty configurations with the worst value of 3000 (where faulty configurations are those that yield at least one wrong result on the test instances; such configurations would, e.g., be disqualified in the SAT competition). We emphasize that both curves are based on exactly the same set of 12 SMAC runs (which were broken in that they trusted *Glucose* rather than applying solution checking) and only differ in their validation.
In Figure 3 we reconstruct this behaviour. We ran SMAC on *Glucose* v2.1 and evaluated configurations found over time when trusting *Glucose*’s correctness at configuration time: The green curve shows *Glucose*’s (buggy) outputs on the test instances, whereas the orange curve scored each configuration using solution checking, and returning the worst possible score for configurations that returned a wrong solution. After 300 to 3000 seconds, SMAC found configurations that seemed better when trusting *Glucose*’s outputs, but that actually sometimes returned wrong solutions, resulting in the true score (orange curve) going up (getting worse) to the worst possible PAR10 score.
**Best Practice** Most of the issues above can be avoided by wrapping target algorithm runs with a reliable piece of code that limits their resources and checks whether they yield correct results. Cast differently, the job of this wrapper is to actually measure the cost function \(c(\theta, \pi)\) of interest, which should intuitively heavily penalize any sort of crashes or bugs that lead to wrong results.
If enough computational time is available, we recommend to first run systems such as *SpyBug* (Manthey & Lindauer, 2016) to find bugs in the configuration space, and to either fix them or to exclude the faulty part of the configuration space from consideration. Regardless of whether this is done or not, since it is infeasible to perfectly check the entire configuration space, we always recommend to check the returned solution of the target algorithms during the configuration process. For example, for SAT instances, our example wrapper exploits the standard SAT checker tool routinely used in the SAT competitions to verify the correctness of runs. For solvers that output unsatisfiability proofs, there are also effective tools for checking these proofs (Heule, Hunt, & Wetzler, 2014).

---

8. The bug in *Glucose* version 2.1 was fixed after we reported it to the developers, and we are not aware of any bugs in the newest *Glucose* version 4.1.
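As an illustration of what such solution checking involves for satisfiable answers, the sketch below verifies a solver's reported assignment against a CNF formula. It is deliberately simplified (it assumes one clause per line in the DIMACS file and only checks satisfiable claims), it is not the competition checker referred to above, and verifying unsatisfiability claims requires proof checkers such as the ones cited.

```python
def parse_dimacs(path):
    """Read a CNF formula in (simplified) DIMACS format: comment lines start
    with 'c', the header with 'p', and each remaining line holds one clause
    terminated by 0. Returns a list of clauses (lists of integer literals)."""
    clauses = []
    with open(path) as f:
        for line in f:
            line = line.strip()
            if not line or line.startswith(("c", "p")):
                continue
            lits = [int(tok) for tok in line.split()]
            if lits and lits[-1] == 0:
                lits = lits[:-1]
            if lits:
                clauses.append(lits)
    return clauses

def check_model(clauses, model):
    """Check that every clause contains at least one literal that is true
    under the model. `model` is the set of literals the solver reported,
    e.g. {1, -2, 3} meaning x1=True, x2=False, x3=True."""
    return all(any(lit in model for lit in clause) for clause in clauses)

# Example use (the file name and the reported model are hypothetical):
# clauses = parse_dimacs("instance.cnf")
# ok = check_model(clauses, {1, -2, 3})
```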
3.2 Pitfall 2: Not Terminating Target Algorithm Runs Properly
Given the undecidability of the halting problem, target algorithm runs need to be limited by some kind of runtime cutoff \( \kappa_{\text{max}} \) to prevent poor configurations from running forever. In many AI communities, it is a common practice to set a runtime cutoff as part of the cost metric and measure the number of timeouts with that cutoff (e.g., \( \kappa_{\text{max}} = 5000 \) seconds in the SAT race series). In algorithm configuration, the ability to prematurely cut off unsuccessful runs also enables adaptive capping (see Section 2). Therefore, it is essential that target algorithm runs respect their cutoff. This pitfall is related to Pitfall 1 as the user also needs to trust the target algorithm to work appropriately. While for Pitfall 1 we focus on the returned solution, here we draw attention to the resource limitations.
Effect Consequences of target algorithm runs not respecting their cutoffs can include:
1. If the target algorithm always uses the maximal cutoff \( \kappa_{\text{max}} \) and ignores an adapted cutoff \( \kappa < \kappa_{\text{max}} \), the configuration process is slowed down since the benefits of adaptive capping are given up;
2. If the target algorithm completely ignores the cutoff, the configuration process may stall since the configurator waits for a slow target algorithm to terminate (which, in the worst case, may never happen);
3. If a wrapper is used that fails to terminate the actual algorithm run but nevertheless returns the control flow to the configurator after the cutoff time \( \kappa \), then the slow runs executed by the configurator will continue to run in parallel and overload the machine, messing up the cost computation (e.g., wallclock time).
Example The latter (quite subtle) issue actually happened in a recent publication that compared \textsc{GGA++} and \textsc{SMAC}, in which a wrapper bug caused \textsc{SMAC} to perform poorly (Ansótegui et al., 2015). The authors wrote a wrapper for \textsc{SMAC} that tried to terminate its target algorithm runs (here: \textsc{Glucose} or \textsc{Lingeling}) after the specified cutoff time \( \kappa \) by sending a KILL signal, but since it ran the target algorithm through a shell (using \texttt{subprocess.Popen(cmd, shell=True)} in Python) the KILL signal only terminated the shell process but not the actual target algorithm (which continued uninterrupted until successful, sometimes for days). When attempting to reproduce the paper’s experiments with the original wrapper kindly provided by the authors, over time more and more target algorithms were spawned without being terminated, causing our 16-core machine to slow down and eventually become unreachable.\(^9\) This issue demonstrates that SMAC heavily relies on a robust wrapper that automatically terminates its target algorithm runs properly. To illustrate this issue in isolation, we compared SMAC using a working wrapper and a broken version of it that returns the control flow to the configurator when the runtime cutoff is reached, without terminating the target algorithm run process. Figure 4 shows the performance achieved when SMAC is run with either wrapper to configure Cryptominisat (Soos, 2014) for penalized average runtime (PAR10) to solve Circuit Fuzz instances (Brummayer, Lonsing, & Biere, 2012) as used in the CSSC 2014 (Hutter et al., 2017). We executed 80 SMAC runs for each wrapper, with 16 independent parallel runs each on five 16-core machines. Both SMAC versions performed equally well until too many target algorithm processes remained on the machines and prevented SMAC from progressing further. Only on one of the five machines that ran SMAC with the broken wrapper did the runs terminate after the specified wallclock limit of 2 days; after an additional day, three of the remaining machines were still frozen by overload and the fourth could not be reached at all.

Figure 4: Effect of a broken wrapper that does not terminate target algorithm runs properly. We show PAR10 test set performance for optimizing Cryptominisat with SMAC on Circuit Fuzz instances, when using a correct and a broken wrapper during configuration, respectively. We show median test performance (measured using a correct wrapper) with quartiles across 80 runs of SMAC. Not terminating target algorithm runs properly eventually slowed down the machine, affecting runtime measurements.
Best Practice To avoid this pitfall, we recommend to use some well-tested, external piece of code to reliably control and terminate target algorithm runs.
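One possible realization of this advice in Python, sketched under the assumption of a POSIX system, is to start the target algorithm in its own session and to kill the entire process group on timeout, so that processes spawned through an intermediate shell are terminated as well. This is an illustration of the principle, not the wrapper used in the experiments above.

```python
import os
import signal
import subprocess
import time

def run_with_cutoff(cmd, cutoff):
    """Run `cmd` (a list of arguments) for at most `cutoff` seconds and return
    (status, wallclock_time). The child is started in a new session, so
    killing its process group also terminates any grandchildren (e.g.,
    solver processes launched through a shell)."""
    start = time.time()
    proc = subprocess.Popen(cmd, start_new_session=True)
    try:
        proc.wait(timeout=cutoff)
        status = "SUCCESS" if proc.returncode == 0 else "CRASHED"
    except subprocess.TimeoutExpired:
        # Kill the whole process group, not just the direct child.
        os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
        proc.wait()
        status = "TIMEOUT"
    return status, time.time() - start
```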
3.3 Pitfall 3: Slow File System
Related to Pitfall 2, another way to ruin runtime measurements by slowing down a machine is to overload the used file system. Each target algorithm run typically has to read the given problem instance and writes some log files; thus, executing many algorithm configuration runs in parallel can stress the file system.

---

9. In contrast to SMAC, GGA++ does not require a wrapper; in the experiments by Ansótegui et al. (2015), GGA++ directly sent its KILL signal to the target algorithm and therefore did not suffer from the same problem SMAC suffered from, which confounded the paper’s comparison between GGA++ and SMAC. Additionally, there was also a simple typo in the authors’ wrapper for SMAC in parsing the target algorithm’s output (here: Glucose) that caused it to count all successful runs on unsatisfiable instances as timeouts. Receiving wrong results for all unsatisfiable instances (about half the instance set) severely affected SMAC’s trajectory; this issue was only present in the wrapper for SMAC (and therefore did not affect GGA++), confounding the comparison between GGA++ and SMAC further.
**Effect** Slowdowns caused by an overloaded file system can have a severe impact on run-time measurements; in particular this is problematic because most algorithm configurators measure their own configuration budget as wallclock time. Furthermore, these problems are often not immediately recognizable (because everything runs fine when tested at small scale) and sometimes only affect parts of a large set of experiments (as the overload might only happen for a short time).
**Example 1** Over the years, we have experienced file system issues on a variety of clusters with shared file systems when target algorithm runs were allowed to write to the shared network file system. When executing hundreds (or on one cluster, even thousands) of algorithm configuration runs in parallel, this stressed the file system to the point where the system became very slow for all users and we measured 100-fold overheads in individual target algorithm evaluations. Writing target algorithm outputs to the local file system fixed these issues.
**Example 2** Distributing configuration runs across multiple nodes in a compute cluster (e.g., in *GGA*, *irace*, or *dSMAC*) can be error-prone if the configurators communicate via the file system. In particular, we experienced issues with several shared network file systems with asynchronous I/O; e.g., on one compute node a file was written, but that file was not immediately accessible (or still empty) on other compute nodes. Often a second read access resolved the problem, but this solution can be brittle; a change of parallelization strategy may in that case yield more robust results.
**Example 3** Even when writing target algorithm output to the local file system, we once experienced 200-fold overheads in target algorithm runs (invocations of sub-second target algorithm runs hanging for minutes) due to a subtle combination of issues when performing hundreds of algorithm configuration experiments in parallel. On the Orcinus cluster (part of Compute Canada’s Westgrid cluster), which uses a Lustre file system, we had made our algorithm configuration benchmarks read-only to prevent accidental corruption. While that at first seemed like a good idea, it prevented our Python wrapper from creating `.pyc` bytecode files and forced it to recompile at every invocation, which in turn triggered a stats call (similar to `ls` on the Linux command line) for each run. Stats calls are known to be slow on the Lustre file system, and executing them for each sub-second target algorithm run on hundreds of compute nodes in parallel led to extreme file system slowdowns. After testing many other possible reasons for the slowdowns, removing the read-only condition immediately fixed all issues.
**Best Practice** Issues with shared file systems on compute clusters can have subtle reasons and sometimes require close investigation (as in our Example 3). Nevertheless, most issues can be avoided by using the faster local file system (typically `/tmp/`, or even better, a temporary job-specific subdirectory thereof) for all temporary files, and by measuring CPU time instead of wallclock time (at least for sequential algorithms).
---
10. We note that on some modern Linux distributions, `/tmp/` can be a RAM disk and therefore may use resources allotted to the algorithm runs; in general, we recommend to make the choice about a fast temporary directory specific to the compute cluster used.
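A small sketch of the first part of this recommendation follows: place all temporary output of a target algorithm run in a job-specific directory on node-local storage and measure the CPU time of the child processes rather than wallclock time. The fallback to the `TMPDIR` environment variable is an assumption about typical cluster setups, not a universal convention.

```python
import os
import resource
import subprocess
import tempfile

def run_locally(cmd):
    """Run a target algorithm with its working directory on node-local
    storage and return the CPU time consumed by its child processes."""
    local_root = os.environ.get("TMPDIR", "/tmp")  # node-local scratch space
    with tempfile.TemporaryDirectory(dir=local_root) as workdir:
        before = resource.getrusage(resource.RUSAGE_CHILDREN)
        subprocess.run(cmd, cwd=workdir, check=False)
        after = resource.getrusage(resource.RUSAGE_CHILDREN)
    # CPU time (user + system) accumulated by terminated children in between.
    return (after.ru_utime - before.ru_utime) + (after.ru_stime - before.ru_stime)
```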
3.4 Pitfall 4: Handling Target Algorithm Runs Differently
The required functionalities of the target algorithm wrapper differ slightly for different configurators. For example, SMAC and ParamILS trust the wrapper to terminate target algorithms, but GGA sends a KILL signal on its own (see also Pitfall 2). Therefore, sometimes configurators are compared by using different target algorithm calls and measurements. However, if this is not done properly, it can lead to a biased comparison between configurators.
**Effect** Calling the target algorithm differently for different configurators can lead to different behaviors of the target algorithm and hence, to different returned performance values for the same input. If the configurators receive different performance measurements, they will optimize different objective functions and their runs become incomparable.
**Example** During the early development of SMAC (before any publication), we used the same wrappers for ParamILS and SMAC but an absolute path to the problem instance for one and a relative path for the other. Even this tiny difference led to reproducible differences in runtime measurements of up to 20% when optimizing an algorithm implemented in UBCSAT 1.1.0 (Tompkins & Hoos, 2005). The reason was that that version of UBCSAT stored its callstring in its heap space such that the number of characters in the instance name affected data locality and therefore the number of cache misses and the runtime (whereas the number of search steps stayed the same).\(^{11}\) This subtle issue demonstrates the importance of using the same wrapper for all configurators being compared such that exactly the same target algorithm calls are used.
**Best Practice** We recommend to use a single wrapper when comparing configurators against each other, in order to guarantee that all configurators optimize the same objective. For studies comparing configurators, it is also paramount to use tried-and-tested publicly available benchmark scenarios (lowering the risk of typos, etc; see also Footnote 9); our algorithm configuration benchmark library AClib (Hutter et al., 2014a) provides a very broad collection of such benchmarks.
4. Pitfalls and Best Practices Concerning Over-Tuning
A common issue in applying algorithm configuration is the over-tuning effect (Birattari, 2004; Hutter, Hoos, & Stützle, 2007b; Birattari & Kacprzyk, 2009; Hutter et al., 2009). Over-tuning is closely related to the concept of over-fitting in machine learning and denotes the phenomenon of finding parameter configurations that yield strong performance for the training task but do not generalize to test tasks. We emphasize that over-tuning effects are not necessarily only related to the set of training instances used, but can also include the characteristics of the experimental setup such as the resource limitations and bugs in the solver (see Pitfall 1). To safeguard against over-tuning effects, it is crucial to evaluate generalization performance (typically, using a set of benchmark instances disjoint from the benchmarks used for training). In the following, we discuss three pitfalls related to over-tuning.
---
\(^{11}\) This issue is fixed in later versions of UBCSAT.
4.1 Pitfall 5: Over-tuning to Random Seeds
Many algorithms are randomized (e.g., SAT solvers or AI planners). However, in many communities, the random seeds of these algorithms are fixed to simulate a deterministic behavior and to ensure reproducibility of benchmark results.
**Effect** Ignoring the stochasticity of an algorithm in algorithm configuration by fixing the random seed can lead to over-tuning to this seed, i.e., finding a configuration that yields good performance with this fixed random seed (or set of seeds) but poor performance when used with other random seeds. The extreme case is not merely to fix the random seed, but to tune it, which can lead to an even stronger over-tuning effect.\(^\text{12}\)
**Example** To illustrate over-tuning to a random seed in its purest form, independent of a difference between training and test instances, we optimized the parameters of the local-search SAT solver *Saps* (Hutter, Tompkins, & Hoos, 2002) on a single instance, the only difference between training and test being the set of random seeds used. We used different settings of *SMAC* to handle random seeds: We compared *SMAC* using a fixed set of 1, 10 or 100 random seeds for each target algorithm run and standard *SMAC*, which handled the random seed itself (using a larger number of seeds to evaluate the best configurations).
As a cost metric, we minimized the average number of local search steps (the solver’s so-called runlength) since this is perfectly reproducible. For the parameter configurations recommended at each step of each *SMAC* run, we measured *SMAC*’s training cost (as the mean across the respective sets of seeds discussed above) as well as its test cost (the mean runlength across 1000 fixed random seeds that were disjoint from the sets of seeds used for configuration).

---

\(^{12}\) We note that, in principle, one could construct situations where fixing or even optimizing the seed could lead to good performance if that seed is used in all future experiments and a large number of instances is available to obtain generalization to other instances. However, we believe that the potential misuse of tuning seeds outweighs any potential benefits.
Figure 5 shows median costs across 10 SMAC runs, contrasting training cost (left) and test cost (right). On training, SMAC using 1 seed per evaluation quickly improved and achieved the best training cost on its one random seed, but its performance does not generalize to the test seeds. SMAC using 10 or 100 seeds per evaluation was slower but generalized better, and standard SMAC was both fast and generalized best by adaptively handling the number of seeds to run for each configuration.
**Best Practice** For randomized algorithms, we recommend to tune parameter configurations across different random seeds—most configurators will take care of the required number of random seeds if the corresponding options are used. If a configuration’s performance does not even generalize well to new random seeds, we expect it to also not generalize well to new instances. Furthermore, the number of available instances is often restricted, but there are infinitely many random seeds which can be easily sampled. Likewise, when there are only a few test instances, at validation time we recommend to perform multiple runs with different random seeds for each test instance.
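As a small illustration of this validation advice, the sketch below estimates a configuration's test cost by averaging over several random seeds per test instance. The `evaluate` callable stands in for a call to the target algorithm wrapper and is a hypothetical placeholder.

```python
import random

def validate(configuration, test_instances, evaluate, n_seeds=10, rng_seed=0):
    """Estimate the test cost of a configuration by averaging over several
    random seeds per instance. `evaluate(configuration, instance, seed)` is
    assumed to return the measured cost c(theta, pi) for that seed."""
    rng = random.Random(rng_seed)
    seeds = [rng.randrange(2**31) for _ in range(n_seeds)]
    costs = [evaluate(configuration, pi, s)
             for pi in test_instances for s in seeds]
    return sum(costs) / len(costs)
```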
### 4.2 Pitfall 6: Over-tuning to Training Instances
The most common over-tuning effect is over-tuning to the set of training instances, i.e., finding configurations that perform well on training instances but not on new unseen instances. This can happen if the training instances are not representative of the test instances; in particular this is often an issue if the training instance set is too small or the instances are not homogeneous (Hutter, Hoos, & Leyton-Brown, 2010b; Schneider & Hoos, 2012), i.e., if there exists no single configuration with strong performance for all instances.
**Effect** In practice, over-tuned configurations that only perform well on a small finite set of instances are of little value, because users are typically interested in configurations that also perform well on new instances. Phrasing this more generally, research insights should also generalize to experiments with similar characteristics.
**Example** To illustrate this problem, we studied training and test performance of various configurations for three exemplary benchmarks (see Figure 6):
**Clasp on N-Rooks** We studied the runtime of the solver Clasp (Gebser et al., 2012) on N-Rooks instances (Manthey & Steinke, 2014a), a benchmark from the Configurable SAT Solver Challenge (CSSC 2014; Hutter et al., 2017). In this case, the runtimes on the training and test set were almost perfectly linearly correlated, with a Spearman correlation coefficient of 0.99, i.e., the ranking of the configurations on both sets is nearly identical; this is also visualized in Figure 6a. This is a very good case for applying algorithm configuration, and, correspondingly, in the CSSC 2014 algorithm configuration yielded large improvements for this benchmark.
**Lingeling on mixed SAT** We reconstructed a benchmark from Ansótegui et al. (2015) in which they optimized Lingeling (Biere, 2014) on a mixed set of industrial SAT instances. Instead of randomly splitting the data into train and test instances, they first created a training set by removing hard instances (i.e., those not solved within the cutoff time by reference solvers) and used these remaining hard instances as test instances. Figure 6b shows that SMAC improved the runtime of Lingeling on the training set but that these improvements did not generalize to the test instances. In fact, the training and test scores of the optimized configurations (orange squares) are only weakly correlated (Spearman correlation coefficient of 0.15). The benchmark’s heterogeneity and the mismatch between training and test set make this benchmark poorly suited for algorithm configuration.

---

13. Note that Hutter et al. (2007b) used the median to aggregate across the 1000 seeds, resulting in slightly lower training and test runlengths.
**Clasp on LABS** Figure 6c shows another benchmark from the CSSC: configuration of Clasp on SAT-encoded low autocorrelation binary sequence (LABS) benchmarks (Muegrauer & Balint, 2013). This illustrates a rare worst case for algorithm configuration, in which performance even degrades on the training set, which is possible due to SMAC’s (and any other configurator’s) racing approach: the configurator already changes the incumbent before all training instances have been evaluated, and if a subset is not representative of the full set this may lead to performance degradation on the full set.
While we have occasionally observed such strong heterogeneity on instances with very heterogeneous sources, it was very surprising to observe this in a case where all instances stemmed from the same instance family. We therefore analyzed this benchmark further (Hutter et al., 2017), showing that twice as many SMAC runs with a fivefold larger configuration budget managed to improve training performance slightly. However, that improvement on the training set still did not generalize to the test set due to the benchmark’s heterogeneity. (Although visually not apparent from Figure 6c, for this benchmark, the correlation between scores on training and test instances was quite low (0.42) for the 20% best-performing randomly sampled configurations). Again, for such heterogeneous benchmarks we recommend the usage of portfolio approaches.
**Best Practice** Over-tuning is often not easy to fully rule out by design, since the effect can only be measured by assessing test performance after the configuration process completed (for example by scatter plots, such as in Figure 6). Nevertheless, the following strategies minimize the risk of over-tuning (see also Section 5):
1. The training instances should be representative of the test instances;
2. The training set should be relatively large (typically hundreds to thousands of instances) to increase the chance of being representative;
3. The instance sets should stem from a similar application, use context, etc., increasing the likelihood that they have similar structures which can be exploited with similar solution strategies;
4. If the instance set is heterogeneous, portfolio approaches (Xu et al., 2008; Kadioglu et al., 2011; Malitsky et al., 2012; Lindauer et al., 2015) or instance-specific algorithm configuration (Xu et al., 2010; Kadioglu et al., 2010) should be used.
Figure 6: Comparing training and test performance of different configurations to study whether these performances on both sets are correlated. Green dots indicate randomly sampled configurations, the black cross marks the performance of the default configuration of the solver, and orange squares correspond to incumbent configurations of 16 SMAC runs.
4.3 Pitfall 7: Over-tuning to a Particular Machine Type
In the age of cloud computing and large compute clusters, an obvious idea is to use these remotely-accessible compute resources to benchmark algorithms and configure them. However, these remote machines are not always the production systems the algorithms are used on in the end. Geschwender, Hutter, Kotthoff, Malitsky, Hoos, and Leyton-Brown (2014) indicated in a preliminary study that it is possible in principle to configure algorithms in the cloud, and that the found configurations perform well on another machine. Unfortunately, other recent experiments showed that this does not hold for all kinds of algorithms – for example, the performance of solvers for SAT (Aigner, Biere, Kirsch, Niemetz, & Preiner, 2013) and mixed integer programming (Lodi & Tramontani, 2014; Koch et al., 2011) can depend strongly on the machine type used (including hardware, operating system and installed software libraries).
Effect Some algorithms are machine-dependent and obtain different results depending on the hardware they run on. Being unaware of this can ruin both a successful application and a comparison of configuration methods, in two ways: Firstly, when configuring on one system, the best found configuration might perform poorly on another system. Secondly, the ranking of the best found configurations of target algorithms on one system might change when rerunning the experiments on a different system.
Example Examples of such machine-dependent algorithms are SAT solvers, which are often highly optimized against cache misses (Aigner et al., 2013). To study the effect of different machines, we optimized three SAT solvers from the configurable SAT solver challenge (Hutter et al., 2017), namely Minisat-HACK-999ED (Oh, 2014), Clasp (Gebser et al., 2012) and Lingeling (Biere, 2014) on Circuit Fuzz instances (Brummayer et al., 2012). As different machine types, we used AWS m4.4xlarge instances with 2.4-GHz Intel Xeon E5-2676 v3 CPUs with 30 MB level-3 cache and the META-cluster at the University of Freiburg (described in Section 3).
Table 1 lists the ranking and the PAR10 scores of the solvers on each machine (showing the test cost of the configuration performing best on training); we note that the PAR10 scores are only comparable on the same system. In both environments, Lingeling ended up on rank 3, but the ranks of Clasp and Minisat-HACK-999ED differed between the two environments: if the AWS cloud were our environment for running AC experiments, we would choose Minisat-HACK-999ED, but this would not be the best choice on the META-cluster. We note that, since we picked the best of 12 SMAC runs, due to the high variance of extremal statistics, the exact numbers of this experiment might vary in a rerun. Since we did not have enough compute resources on AWS for carrying out multiple runs, to gain additional confidence in our conclusions, we carried out an additional experiment: we validated the configurations found on AWS on the META-cluster and found that in that setting the configured Minisat-HACK-999ED performed even worse than Lingeling and Clasp. Therefore, we conclude that the ranking of configured algorithms depends on the hardware.
**Best Practice** We note that this pitfall exists only for machine-sensitive algorithms. Therefore, we recommend to investigate whether an algorithm at hand has machine-dependent performance, for example, by validating the performance of various configurations on both the system used for configuration and the production system.
5. Further Recommendations for Effective Configuration
In the following, we describe recommendations for users of algorithm configuration systems to obtain parameter configurations that will perform better in production. Some of these recommendations are rules of thumb, since the factors involved in a successful configuration can be very complex and can change across configuration scenarios. For general empirical algorithmics, McGeoch (2012) recommends further best practices, including the design, reporting and analysis of computational experiments.
5.1 Training and Test Sets
As discussed before, following standard practice, we strongly recommend splitting the available instances into a training and a test set to obtain an unbiased estimate of generalization performance from the test set (Birattari & Kacprzyk, 2009). To obtain trivial parallelization of randomized configuration procedures, we recommend running \( n \) independent configuration runs and using the training set to select the best of the \( n \) resulting configurations (Hutter et al., 2012). Only that single chosen configuration should be evaluated on the test set; we explicitly note that we cannot select the configuration that performs best on the test set, because that would amount to peeking at our test data and render performance estimates on the test set biased.
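As a minimal sketch of this protocol (assuming hypothetical callables `run_configurator`, which performs one configuration run with a given seed, and `evaluate`, which returns the cost of a configuration on one instance), the best-of-\( n \) selection could look as follows:

```
import statistics

def best_of_n(n, run_configurator, evaluate, train_instances, test_instances):
    """Run n independent configuration runs, select on the training set only,
    and evaluate the single chosen configuration once on the test set."""
    candidates = [run_configurator(seed=s) for s in range(n)]
    train_costs = [statistics.mean(evaluate(c, inst) for inst in train_instances)
                   for c in candidates]
    best = candidates[train_costs.index(min(train_costs))]
    # The test set is touched exactly once, for the single selected configuration.
    test_cost = statistics.mean(evaluate(best, inst) for inst in test_instances)
    return best, test_cost
```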
5.2 Representative Instances and Runtime Cutoff
Intuitively, instances for which every parameter configuration times out do not help the configurator to make progress. One strategy can be to remove these from the training set. However, this comes with the risk of biasing the training set towards easy instances and should be used with caution. Generally, we therefore recommend using training instances for the configuration process that are representative of the ones to be solved later. Using training instances from a range of hardness can also often help yield configurations that generalize (Hoos, Kaufmann, Schaub, & Schneider, 2013). If feasible, we recommend selecting instances and runtime cutoffs such that roughly 75% or more of the training instances used during configuration can be solved by the initial parameter configuration within the cutoff. We emphasize that – while the configuration protocol may in principle choose to subsample the training instances in arbitrary ways – the test set should never be touched and not pre-evaluated, to ensure an unbiased cost estimate of the optimized configurations in the end (see Pitfall 6). To select a good training instance set, Bayless, Tompkins, and Hoos (2014) proposed a way to quantify whether an instance set is a good proxy for another instance set. Furthermore, Styles and Hoos (2015) proposed a splitting strategy for better scaling to hard instances: they split the instances into a training, validation and test set, use easy instances during configuration for fast progress, and select a configuration on the harder validation set such that the configuration will perform well on the hard test set.
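The 75% rule of thumb above can be checked before starting a configuration run; a minimal sketch, assuming a hypothetical `run_default(instance, cutoff)` that returns the default configuration's runtime or `None` on timeout:

```
def fraction_solved(train_instances, cutoff, run_default):
    """Fraction of training instances solved by the default configuration
    within the runtime cutoff."""
    solved = sum(1 for inst in train_instances
                 if run_default(inst, cutoff) is not None)
    return solved / len(train_instances)

# Rule of thumb from the text: aim for roughly 0.75 or more before configuring.
```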
5.3 Homogeneous vs Heterogenous Instance Sets
Sometimes configurators are used to obtain well-performing and robust configurations on a heterogeneous instance set. However, we know from algorithm selection (Rice, 1976; Kotthoff, 2014) that often no single configuration exists that performs well for all instances in a heterogeneous set, but a portfolio of configurations is required to obtain good performance (Xu, Hutter, Hoos, & Leyton-Brown, 2011; Kadioglu et al., 2010). Furthermore, the task of algorithm configuration becomes a lot harder if different instances are solved best by very different configurations. Therefore, we recommend using algorithm configuration mainly on homogeneous instance sets. Furthermore, the size of the instance set should be adjusted according to the homogeneity of the instance set: on homogeneous instance sets, 50 instances might suffice for good generalization performance to new instances, but on fairly heterogeneous instance sets, we recommend using at least 300 or, if possible, more than 1000 instances to obtain a robust parameter configuration.
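One rough way to probe how heterogeneous an instance set is, given measured costs of a handful of sampled configurations on the training instances, is to compare the single best configuration against the per-instance oracle (virtual best); a large gap suggests that no single configuration will do well everywhere. A minimal sketch:

```
def single_best_vs_oracle(cost):
    """cost[c][i]: cost (e.g., PAR10) of sampled configuration c on instance i.
    Returns the average cost of the single best configuration and of the
    per-instance oracle that picks the best configuration for each instance."""
    n_instances = len(cost[0])
    single_best = min(sum(row) / n_instances for row in cost)
    oracle = sum(min(col) for col in zip(*cost)) / n_instances
    return single_best, oracle

# If single_best is much larger than oracle, the set is likely heterogeneous
# and a portfolio of configurations (rather than one) may be needed.
```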
5.4 Appropriate Configuration Settings
To use configurators, the user has to set the budget available to the configurator. If the configuration budget is too small, the configurator might make little or no progress within it. In contrast, if the configuration budget is too large, we waste time and computational resources because the configurator might converge long before the budget is used up. A good rule of thumb in our experience is to use a budget that equals at least the expected runtime of the default configuration on 200 to 1000 instances. In practice, an effective configuration budget strongly depends on several factors, including the heterogeneity of the instance set (more heterogeneous instance sets require a larger configuration budget) and the size of the configuration space (larger configuration spaces require more time to search effectively, Hutter et al., 2017). Finally, if the configurator finds better performing configurations quickly, then the estimate of the total runtime based on the runtime of the default configuration might be too conservative.
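The rule of thumb above translates into a quick estimate; a minimal sketch, assuming we have measured the default configuration's runtime on a small sample of instances:

```
def budget_estimate(default_runtimes, n_low=200, n_high=1000):
    """Rough configuration budget range (in the same time unit as the inputs):
    the expected runtime of the default configuration on 200 to 1000 instances."""
    mean_runtime = sum(default_runtimes) / len(default_runtimes)
    return n_low * mean_runtime, n_high * mean_runtime
```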
5.5 Efficient Use of Parallel Resources
Some configurators (such as GGA, irace and dSMAC) can make use of parallel resources, while others (such as ParamILS and SMAC) benefit from executing several independent parallel runs\textsuperscript{14} (and using the result from the one with the best training set performance; see, e.g., Hutter et al., 2012). In the special case of GGA, using more parallel resources can actually improve the adaptive capping mechanism. Given $k$ cores, we therefore recommend executing one GGA run with $k$ cores, but $k$ independent ParamILS or SMAC runs with one core each. While this protocol was not used in early works\textsuperscript{15}, it has been used in more recent evaluations (Ansótegui et al., 2015; Hutter et al., 2017).
5.6 Reasonable Configuration Space
Another challenge in using algorithm configuration systems is to find the best configuration space. The user has to decide which parameters to optimize and which ranges to allow. The optimal set of parameters to configure is often not clear, and in case of doubt, we recommend adding more parameters to the configuration space and using generous value ranges. However, we note that unreasonably large configuration spaces are hard to configure and require substantially larger configuration budgets. For example, the state-of-the-art SAT solver Lingeling (Biere, 2013) has more than 300 parameters, and most of them have a value range between 0 and the 32-bit maximum integer, but most of these parameters are either not really relevant for optimizing Lingeling’s runtime or the relevant value ranges are much smaller. Even though Lingeling can already substantially benefit from configuration, we expect that with a more carefully designed configuration space even better results could be
\textsuperscript{14} In order to perform $k$ independent runs with ParamILS or SMAC, one should use a different seed (equivalent to the numRun parameter) for each run.
\textsuperscript{15} Hutter et al. (2011) only used a single core per run of GGA, but still followed the protocol by Ansótegui et al. (2009) to race groups of 8 runs in parallel per core; therefore, GGA’s adaptive capping mechanism was the same in that work as in Ansótegui et al. (2009).
obtained. Therefore, we recommend avoiding the inclusion of such parameters and using smaller value ranges if corresponding expert knowledge is available.
Nevertheless, configurators have already been successfully applied to such large configuration spaces: GGA++ has been used to optimize over 100 parameters of Lingeling (Ansótegui et al., 2015), irace has been used to optimize over 200 parameters of the mixed integer programming solver SCIP (López-Ibáñez & Stützle, 2014; Achterberg, 2009) and with SMAC, we have optimized configuration spaces with over 900 parameters (Lindauer, Hoos, Leyton-Brown, & Schaub, 2017a).
5.7 Which Parameters to Tune
Parameters should never be part of the configuration space if they change the semantics of the problem to be solved; e.g., do not tune the allowed memory or parameters that control whether a run is counted as successful (such as the allowed optimality gap in an optimization setting). Furthermore, to obtain an unbiased estimate of a configuration’s performance across seeds, one should not include the seed (or parameters with a similar effect) as a tunable parameter.
5.8 Runtime Metrics
A common cost metric in algorithm configuration is runtime. Obtaining clean runtime measurements is a problem that is by no means limited to algorithm configuration and also appears in general empirical algorithmics (McGeoch, 2012). However, in algorithm configuration, this problem can be even trickier, because benchmark machines can be influenced by heavy I/O load on a shared file system created by multiple configuration runs (see Pitfall 3). Furthermore, other processes running on the same machine can influence the measurements. The latter issue can be mitigated by using processor affinity to bind processes to a certain CPU. Therefore, we recommend measuring CPU time instead of wallclock time. However, binding processes does not grant exclusive usage of the assigned cores; thus other interfering factors, such as operating system load and shared caches, remain. Also, CPU time can sometimes be brittle; e.g., its resolution can be insufficient for very short target algorithm runs, on the order of milliseconds. We note that algorithm configuration can be used to optimize runtime at such very small scales, but extreme care needs to be taken to avoid any pitfalls associated with measuring runtimes. When possible, a better solution for this case is to measure and optimize elementary operations, such as search steps of a local search algorithm or MEMS (number of memory accesses, Knuth, 2011); however, it has to be ensured that such proxy metrics correlate well with runtime. Additionally, expensive one-time operations, such as downloading files or setup steps, should not be part of the measured runtime and need to be ignored, e.g., via the wrapper. Finally, it remains an open question how robust different ways of measuring runtime and related metrics are, and how they influence algorithm configuration.
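As an illustration only (not the paper's tooling), the CPU time of a target run can be obtained on a UNIX system from the child-process times reported by the operating system, and the run can be pinned to a single core; note that `os.sched_setaffinity` is Linux-specific and that, as discussed above, pinning does not grant exclusive use of the core:

```
import os
import subprocess

def run_pinned(cmd, cpu=0):
    """Run a target algorithm (cmd given as an argument list) pinned to one CPU
    core (Linux) and return its CPU time (user + system of all child processes)
    and wallclock time."""
    before = os.times()
    subprocess.run(cmd, preexec_fn=lambda: os.sched_setaffinity(0, {cpu}))
    after = os.times()
    cpu_time = ((after.children_user - before.children_user)
                + (after.children_system - before.children_system))
    wallclock = after.elapsed - before.elapsed
    return cpu_time, wallclock

# Example: run_pinned(["./solver", "instance.cnf"])
```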
5.9 Monitoring Experiments
Even a well-designed experiment can go wrong because of software and hardware issues. This makes conducting a flawless experiment challenging. However, the risk for falling for a pitfall can be minimized when carefully monitoring ongoing experiments.
Investigating at the first bad sign can save a lot of time and resources. An unexpectedly high load on a machine or swapping memory can be signs of misconfigured scripts. More subtle effects that should also raise one’s attention include the following: (1) the target algorithm uses much more wallclock time than the CPU time reported to the configurator; (2) many configurations crash; or (3) there is a large variation between the performances of independent configuration runs that only differ in their seeds.
We recommend analyzing ongoing experiments with respect to these signs and making use of automated tools, e.g., CAVE (Biedenkapp et al., 2018), to analyze and visualize experimental results in a common and unified way, independently of the underlying configurator and problem.
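A small sketch of such an automated check over finished target algorithm runs (represented here simply as dictionaries with hypothetical keys) that flags the warning signs listed above:

```
def warning_signs(runs, wallclock_factor=2.0, crash_fraction=0.1):
    """runs: list of dicts with 'wallclock', 'cputime' and 'status' entries.
    Returns human-readable warnings about an ongoing experiment."""
    warnings = []
    if any(r["wallclock"] > wallclock_factor * r["cputime"] for r in runs):
        warnings.append("target runs use much more wallclock time than CPU time")
    crashed = sum(r["status"] == "CRASHED" for r in runs)
    if crashed > crash_fraction * len(runs):
        warnings.append("%d of %d target runs crashed" % (crashed, len(runs)))
    return warnings
```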
5.10 Comparing Configurators on Existing, Open-Source Benchmarks
Although algorithm configuration has been well established for over a decade, nearly every new paper on this topic uses a new set of benchmarks to compare different configurators. This makes it harder to assess progress in the field, and every new benchmark could again suffer from one of the pitfalls described above. Therefore, we recommend using existing, open-source algorithm configuration benchmarks that are already well tested and can be freely used by the community. The only existing library of such benchmarks we are aware of is the algorithm configuration library AClib (Hutter et al., 2014a), which comprises 326 benchmarks (in version 1.2) based on open-source scripts and allows users to pick benchmarks from different domains (e.g., mixed integer programming, AI planning, SAT, and machine learning) and with different characteristics (e.g., small or large configuration spaces).
6. A Generic Wrapper: Towards a Reliable and Unified AC Interface
Learning from the pitfalls above, our conclusion is that most of these pitfalls can either be completely prevented or have their risk of occurrence substantially reduced by using a generic wrapper which wraps the execution of all target algorithm runs and has the following features:
1. Parsing the input arguments provided by the configurator in a uniform way such that a user only needs to implement a function to translate them into a call of the target algorithm;
2. Reliably limiting the run’s computational resources (runtime and memory consumption);
3. Measuring the cost metric in a standardized way (for which a user only needs to implement a function to parse the output of the target algorithm); and
4. Returning the output in a standardized way.
We note that some pitfalls cannot be tested easily. For example, the user is still responsible for domain-dependent solution checking and for ensuring that the configurator is used as intended. However, when using a wrapper with the features above, most pitfalls can be avoided. To demonstrate the usefulness of such a generic wrapper, and to provide a practical proposal for avoiding many of the described pitfalls, we implemented such a wrapper and are already using it in the algorithm configuration library AClib (Hutter et al., 2014a) to wrap 20 different target algorithms.¹⁶ To address the pitfalls mentioned above, our generic wrapper implements the following best practices:
**Resource Limitation** The tool runsolver (Roussel, 2011) has been used for several years by the SAT community, in SAT competitions and by many SAT developers, to limit the runtime and memory consumption of an algorithm run.¹⁷ We also use this tool in the generic wrapper to reliably limit such resources and to measure algorithm runtimes. This addresses both Pitfall 1 (“Trusting Your Target Algorithm”) and Pitfall 2 (“Not Terminating Target Algorithm Runs Properly”).
**Solution Checking for SAT** One of the exemplary instantiations of the generic wrapper we provide for SAT solvers implements solution checking to avoid issues of algorithm correctness (Pitfall 1: “Trusting Your Target Algorithm”).
**Writing to $TMPDIR** On most high-performance clusters these days, the environment variable $TMPDIR specifies a temporary directory on a local file system (not on a shared file system) of a compute node that allows for fast write and read access without affecting the remaining cluster. If this environment variable is set, the generic wrapper writes all temporary files (e.g., log files of the runsolver) to this folder. It only copies these files to a permanent file system in case of a crash of the target algorithm to allow debugging of these crashes. This addresses Pitfall 3 (“Slow File System”).
Furthermore, the use of the generic wrapper has the following advantages compared to implementing the same features directly in an algorithm configurator (which is nevertheless a feasible approach for some use cases):
**Fair Comparisons** As discussed in Pitfall 4 (“Handling Target Algorithm Runs Differently”), to compare different configurators, using a uniform wrapper will ensure that all configurators optimize the same objective function. Even if a wrapper turns out to have a bug, at least all configurators would be affected in the same way.
**Easy Use of Different Configurators** So far, most configurators implement different interfaces to call target algorithms. Therefore, users often implement only one of the interfaces and have not explored which of the available configurators is in fact the best one for their configuration problem. Using a generic wrapper (implementing
¹⁶. Our package called GenericWrapper4AC is available at https://github.com/automl/GenericWrapper4AC.
¹⁷. The runsolver uses process group IDs to keep track of running processes. For example, if the memory or time limit is exceeded, it traverses the process tree bottom-up to terminate all processes that run. However, we note that it is possible to bypass this procedure if a process forks itself or starts a process on a different machine, which can neither be detected nor monitored by the runsolver.
either a unified interface or several configurator-specific interfaces) will also help users to easily use several configurators for their target algorithms.
**Easier Implementation of New Configurators** The implementation of new configurators is not an easy task, mainly because the handling of target algorithm runs may require many lines of code and is often still brittle. To reduce the burden on configurator developers, the generic wrapper can take over some of the functions required in this setting (e.g., resource limitation). Also, when translating a configurator to a new programming language, one can ensure that the functionality for handling the target algorithm remains exactly the same.
**Open Source and Community** Since the generic wrapper is an open-source implementation, we believe that the community will improve the code base and thus improve its quality and robustness over time.
Appendix A provides additional details about our generic wrapper, and an example wrapper for a SAT solver.
7. Conclusion
Empirically comparing algorithms correctly is hard. This is well known and true for almost every empirical study that involves running third-party code, stochastic algorithms and computationally expensive computations, and it therefore also applies to algorithm configuration. Subtle mistakes, such as measuring the wrong metric or running parallel experiments without meticulous resource management, can heavily bias the outcome. In this work, we pointed out several pitfalls that can occur in running algorithm configuration experiments and provided concrete examples of how these can impact results. We found that many of these pitfalls result from treating the objective function differently in different configurators, from issues in allocating and monitoring resource consumption, and from various issues concerning over-tuning. To prevent most of these pitfalls, we share recommendations and best practices for conducting algorithm configuration experiments, which we hope to be useful for both novices and experts. We also provide an open-source implementation of a generic wrapper that provides a unified interface for the communication between target algorithms and configurators and for limiting resource consumption.
Acknowledgements
We thank Manuel López-Ibáñez and Kevin Tierney for adapting the interfaces of irace and GGA to work together with GenericWrapper4AC, Yuri Malitsky and Horst Samulowitz for providing the wrappers and benchmarks of Ansótegui et al. (2015), and Kevin Tierney, Manuel López-Ibáñez and Lars Kotthoff for very helpful feedback on the first draft of the paper that led to the inclusion of some further possible issues. Some of the recommendations in Section 5 were inspired by a discussion at a Dagstuhl seminar (see Lindauer & Hutter, 2017b, for more details), and we are thankful for the valuable contributions of the attendees of that discussion: Aymeric Blot, Wanru Gao, Holger Hoos, Laetitia Jourdan, Lars Kotthoff, Manuel López-Ibáñez, Nysret Musliu, Günter Rudolph, Marc Schoenauer, Thomas Stützle
Appendix A. Details on GenericWrapper4AC
Listing 1 shows an example of how to extend the GenericWrapper4AC to wrap the well-known SAT solver MiniSAT (Eén & Sörensson, 2004). Since the output format is standardized in the SAT community, we already provide a domain-specific generic wrapper, called SatWrapper, which can parse and verify the SAT solver’s output using standard tools from the annual SAT competitions. Therefore, SAT solver users only need to implement one method, which constructs a command line call string for their SAT solver from the provided input arguments (parameter settings, instance, cutoff time, seed).
```
class MiniSATWrapper(SatWrapper):
    def get_command_line_args(self, runargs, config):
        cmd = "minisat -rnd-seed=%d" % runargs["seed"]
        for name, value in config.items():
            cmd += " %s=%s" % (name, value)
        cmd += " %s" % runargs["instance"]
        return cmd
```
Listing 1: Example GenericWrapper for SAT Solver MiniSAT, building on our domain-specific SatWrapper
In the example shown, the command line call of MiniSAT consists of passing the random seed (Line 3), adding all parameters in the format parameter=value (Lines 4 and 5), and appending the CNF instance name at the end (Line 6). Importantly, the wrapper takes care of all aspects of handling cutoff times, measuring runtimes, etc., to avoid the pitfalls discussed in Section 3.
```
class SimpleWrapper(AbstractWrapper):
    def get_command_line_args(self, runargs, config):
        ...

    def process_results(self, fp, exit_code):
        try:
            resultMap = {'status': 'SUCCESS', 'cost': float(fp.read())}
        except ValueError:
            resultMap = {'status': 'CRASHED'}
        return resultMap
```
Listing 2: Example GenericWrapper from scratch
For users of algorithm configuration outside SAT solving, Listing 2 shows an example of how to write a function process_results to parse algorithm outputs. Let us assume that the target algorithm only prints the target cost to be minimized (similar to the format of irace, López-Ibáñez et al., 2016). Reading the output of the provided file pointer `fp`, the
function builds and returns a dictionary which includes the cost value and a status, which is either **SUCCESS** if the target algorithm printed only a single number or **CRASHED** otherwise. Other states can be **TIMEOUT** for using more than the cutoff time $\kappa$ or **ABORT** to signal the configurator to abort the AC experiment because of major issues. Furthermore, the exit code of the target algorithm run is also provided (but not used in our example). Another possible functionality that is not shown here is to implement a (domain-specific) method to verify the target algorithm’s returned solution.
Apart from these two target algorithm-specific functions, the **GenericWrapper4AC** handles everything else, including:
- Parsing the input format; native interfaces to **ParamILS**, **ROAR** and **SMAC** are currently supported, and an additional layer to run GGA(++) and irace is available as well (see AClib2\textsuperscript{18} for examples).
- Calling the target algorithm and limiting its resource consumption using the runsolver tool (Roussel, 2011)
- Measuring the CPU time of the target algorithm run (using runsolver)
- Returning the cost of the target algorithm run to the configurator
The **GenericWrapper4AC** is available at GitHub and can be easily installed via python setup.py install (including the runsolver) and runs on UNIX systems.
\textsuperscript{18} https://bitbucket.org/mlindauer/aclib2
|
{"Source-Url": "https://www.jair.org/index.php/jair/article/download/11420/26488/", "len_cl100k_base": 16178, "olmocr-version": "0.1.53", "pdf-total-pages": 33, "total-fallback-pages": 0, "total-input-tokens": 82337, "total-output-tokens": 24366, "length": "2e13", "weborganizer": {"__label__adult": 0.00044846534729003906, "__label__art_design": 0.0007920265197753906, "__label__crime_law": 0.0005931854248046875, "__label__education_jobs": 0.002902984619140625, "__label__entertainment": 0.00018310546875, "__label__fashion_beauty": 0.0003123283386230469, "__label__finance_business": 0.0006346702575683594, "__label__food_dining": 0.0004429817199707031, "__label__games": 0.0014295578002929688, "__label__hardware": 0.0013399124145507812, "__label__health": 0.0009665489196777344, "__label__history": 0.0007224082946777344, "__label__home_hobbies": 0.00021278858184814453, "__label__industrial": 0.0008616447448730469, "__label__literature": 0.0004992485046386719, "__label__politics": 0.0006442070007324219, "__label__religion": 0.000820159912109375, "__label__science_tech": 0.358642578125, "__label__social_life": 0.00017976760864257812, "__label__software": 0.01308441162109375, "__label__software_dev": 0.6123046875, "__label__sports_fitness": 0.0005197525024414062, "__label__transportation": 0.0010843276977539062, "__label__travel": 0.0002987384796142578}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 95983, 0.02941]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 95983, 0.26113]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 95983, 0.84252]], "google_gemma-3-12b-it_contains_pii": [[0, 2576, false], [2576, 5717, null], [5717, 8670, null], [8670, 11965, null], [11965, 14978, null], [14978, 17654, null], [17654, 21258, null], [21258, 24588, null], [24588, 27265, null], [27265, 30614, null], [30614, 33508, null], [33508, 37083, null], [37083, 40316, null], [40316, 42423, null], [42423, 45776, null], [45776, 48934, null], [48934, 51426, null], [51426, 53449, null], [53449, 56878, null], [56878, 60358, null], [60358, 63310, null], [63310, 66175, null], [66175, 69565, null], [69565, 72642, null], [72642, 74793, null], [74793, 77523, null], [77523, 80547, null], [80547, 83598, null], [83598, 86574, null], [86574, 89533, null], [89533, 92575, null], [92575, 95801, null], [95801, 95983, null]], "google_gemma-3-12b-it_is_public_document": [[0, 2576, true], [2576, 5717, null], [5717, 8670, null], [8670, 11965, null], [11965, 14978, null], [14978, 17654, null], [17654, 21258, null], [21258, 24588, null], [24588, 27265, null], [27265, 30614, null], [30614, 33508, null], [33508, 37083, null], [37083, 40316, null], [40316, 42423, null], [42423, 45776, null], [45776, 48934, null], [48934, 51426, null], [51426, 53449, null], [53449, 56878, null], [56878, 60358, null], [60358, 63310, null], [63310, 66175, null], [66175, 69565, null], [69565, 72642, null], [72642, 74793, null], [74793, 77523, null], [77523, 80547, null], [80547, 83598, null], [83598, 86574, null], [86574, 89533, null], [89533, 92575, null], [92575, 95801, null], [95801, 95983, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 95983, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 95983, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 95983, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, 
false], [5000, 95983, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 95983, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 95983, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 95983, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 95983, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 95983, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 95983, null]], "pdf_page_numbers": [[0, 2576, 1], [2576, 5717, 2], [5717, 8670, 3], [8670, 11965, 4], [11965, 14978, 5], [14978, 17654, 6], [17654, 21258, 7], [21258, 24588, 8], [24588, 27265, 9], [27265, 30614, 10], [30614, 33508, 11], [33508, 37083, 12], [37083, 40316, 13], [40316, 42423, 14], [42423, 45776, 15], [45776, 48934, 16], [48934, 51426, 17], [51426, 53449, 18], [53449, 56878, 19], [56878, 60358, 20], [60358, 63310, 21], [63310, 66175, 22], [66175, 69565, 23], [69565, 72642, 24], [72642, 74793, 25], [74793, 77523, 26], [77523, 80547, 27], [80547, 83598, 28], [83598, 86574, 29], [86574, 89533, 30], [89533, 92575, 31], [92575, 95801, 32], [95801, 95983, 33]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 95983, 0.0]]}
|
olmocr_science_pdfs
|
2024-12-09
|
2024-12-09
|
cdbb31345e6efaf50b48dc1cb0fd52ed2acd5025
|
[REMOVED]
|
{"Source-Url": "https://www.ethz.ch/content/dam/ethz/special-interest/infk/inst-infsec/information-security-group-dam/people/andreloc/lochbihler12esop.pdf", "len_cl100k_base": 14988, "olmocr-version": "0.1.53", "pdf-total-pages": 20, "total-fallback-pages": 0, "total-input-tokens": 63756, "total-output-tokens": 17815, "length": "2e13", "weborganizer": {"__label__adult": 0.0003266334533691406, "__label__art_design": 0.0002243518829345703, "__label__crime_law": 0.0003159046173095703, "__label__education_jobs": 0.0003266334533691406, "__label__entertainment": 4.392862319946289e-05, "__label__fashion_beauty": 0.0001455545425415039, "__label__finance_business": 0.00019097328186035156, "__label__food_dining": 0.0003058910369873047, "__label__games": 0.0006303787231445312, "__label__hardware": 0.0009517669677734376, "__label__health": 0.0003514289855957031, "__label__history": 0.00022089481353759768, "__label__home_hobbies": 7.426738739013672e-05, "__label__industrial": 0.0003609657287597656, "__label__literature": 0.0002510547637939453, "__label__politics": 0.00030112266540527344, "__label__religion": 0.0004730224609375, "__label__science_tech": 0.01155853271484375, "__label__social_life": 5.6743621826171875e-05, "__label__software": 0.0041961669921875, "__label__software_dev": 0.9775390625, "__label__sports_fitness": 0.0003020763397216797, "__label__transportation": 0.0006279945373535156, "__label__travel": 0.0002084970474243164}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 64149, 0.03795]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 64149, 0.45136]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 64149, 0.87647]], "google_gemma-3-12b-it_contains_pii": [[0, 2375, false], [2375, 5403, null], [5403, 8519, null], [8519, 11967, null], [11967, 14586, null], [14586, 17662, null], [17662, 20651, null], [20651, 23616, null], [23616, 26663, null], [26663, 30183, null], [30183, 33744, null], [33744, 37225, null], [37225, 40458, null], [40458, 43581, null], [43581, 47165, null], [47165, 50791, null], [50791, 53869, null], [53869, 57483, null], [57483, 60555, null], [60555, 64149, null]], "google_gemma-3-12b-it_is_public_document": [[0, 2375, true], [2375, 5403, null], [5403, 8519, null], [8519, 11967, null], [11967, 14586, null], [14586, 17662, null], [17662, 20651, null], [20651, 23616, null], [23616, 26663, null], [26663, 30183, null], [30183, 33744, null], [33744, 37225, null], [37225, 40458, null], [40458, 43581, null], [43581, 47165, null], [47165, 50791, null], [50791, 53869, null], [53869, 57483, null], [57483, 60555, null], [60555, 64149, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 64149, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 64149, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 64149, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 64149, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 64149, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 64149, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 64149, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 64149, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 64149, null]], 
"google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 64149, null]], "pdf_page_numbers": [[0, 2375, 1], [2375, 5403, 2], [5403, 8519, 3], [8519, 11967, 4], [11967, 14586, 5], [14586, 17662, 6], [17662, 20651, 7], [20651, 23616, 8], [23616, 26663, 9], [26663, 30183, 10], [30183, 33744, 11], [33744, 37225, 12], [37225, 40458, 13], [40458, 43581, 14], [43581, 47165, 15], [47165, 50791, 16], [50791, 53869, 17], [53869, 57483, 18], [57483, 60555, 19], [60555, 64149, 20]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 64149, 0.0]]}
|
olmocr_science_pdfs
|
2024-12-06
|
2024-12-06
|
786682578d8ab0b9df7003c3ec4e0bf29ba8edd6
|
A NET STRUCTURE BASED RELATIONAL QUESTION ANSWERER: DESCRIPTION AND EXAMPLES
by
Stuart C. Shapiro & George H. Woodmansee
Technical Report #59
March 1969
Computer Sciences Department
1210 West Dayton Street
Madison, Wisconsin 53706
A NET STRUCTURE BASED RELATIONAL QUESTION ANSWERER: DESCRIPTION AND EXAMPLES.*†
ABSTRACT
A question answering system is described which uses a net structure for storage of information. The net structure consists of nodes and labelled edges, which represent relations between the nodes. The labels are also nodes, and therefore definitions of relations may be stored in the net. It is demonstrated that the generality and complexity of this memory structure allows a surprisingly powerful question answering system to be constructed using comparatively simple executive routines. Output from the question answerer, which is currently running on an interactive, time sharing system, is included, showing its range of applicability including question answering, inductive and deductive inference, simple theorem proving and problem solving.
Key words and phrases: relational question answering, question answering, memory net, memory structure, data structure, semantic memory, semantic information retrieval, deductive inference, inductive inference, problem solving, concept formation, relational logic, learning, theorem proving, fact retrieval.
*The research reported herein was partially supported by a grant from the National Science Foundation (GP-7069). Use of the University of Wisconsin Computing Center was made possible through support, in part, from the National Science Foundation, other United States Government agencies and the Wisconsin Alumni Research Foundation (WARF) through the University of Wisconsin Research committee.
†This paper is to be presented at the International Joint Conference on Artificial Intelligence in Washington D.C. (May 7-9, 1969).
1. INTRODUCTION
Our main research interest has been in the organization of data structures for question answering systems, systems that retrieve facts and have deductive and inductive capabilities to derive new information from the facts explicitly given to them. Our two main aims have been to maintain as much generality as possible, so that no additional programming is needed regardless of the domain of knowledge for which the system is used, and to put as much question answering power as possible into the memory structure itself rather than into the executive routines. This latter aim supports the first in that it would allow special instructions for particular domains to be entered into the memory in the same way as any other information. SAMENLAQ II, the system described in this paper, represents progress toward reaching these aims. Further progress is being made in a later system (see Section 5).
SAMENLAQ II is based upon binary relations. This was a natural starting point because of the generality of binary relations, and the fact that they provide a reasonable test environment for our ideas. Since our major interest is the memory structure, we have not used natural language input, thus avoiding the attendant problems. Instead, all statements input to the system are in the form \( x \, R \, y \). We hope it will become evident that even with this restriction to binary relations and with basically simple executive routines the system attains a surprising
amount of power and range of applicability. This derives from the following characteristics of the system:
1. The memory is a net structure, with the relations serving as labels on directed edges. Each statement $x \, R \, y$ is also stored in the converse form $y \, R(CNV) \, x$ so that all the information about a name is reachable from the node in the net which represents it.
2. The relations, though used as labels on the edges, are actually also nodes themselves, so information about them may be stored in the memory structure. The major use of this capability is to define a relation in terms of other relations.
3. The system has the ability to use the information stored about a relation when searching memory. Such information may be entered at any time and in the same manner as any other type of data or it may be constructed and entered by the system itself. In our system, any relation may be used as an undefined term, may be defined in terms of other relations, or may be defined recursively. Any single relation may be used in any or all of these ways. Complex relations may be built out of simple relations using the relative product operation and node restrictions to restrict the domains or ranges of the simple relations. Thus, quite complicated relations may be defined.
The following sections give more detailed information about and examples of the SAMENLAQ II system. The final section describes a later system which is being developed to satisfy more completely the goals discussed above.
2. IMPLEMENTATION AND OPERATION
SAMENLAQ II is a revision of SAMENLAQ, "A Semantic Association Memory Net that Learns and Answers Questions". Both programs are written in SNOBOL3 as interactive question answering systems, but SAMENLAQ II, unlike SAMENLAQ which was run in batch mode with simulated interaction on a CDC 3600, is fully interactive and is currently running under the University of Wisconsin B5500 time sharing system. SAMENLAQ II differs from SAMENLAQ in that it provides aids to the user, who inputs data directly via a teletype, allows for storage of input files and memories in disk files, and, most importantly, allows for recursive definitions of relations and allows the user to control in real time how much effort the system should spend searching its memory to discover more information for use in answering a question.
Figures 2a and 2b depict the overall flow of control in SAMENLAQ II. Figure 1 demonstrates its operation. At the top level of operation, three types of input are allowable: statements, questions and requests to the system executive. [B1,2a]* Representative examples are given in fig. 1
*Bracketed references refer to the flow charts in fig. 2. E.g. "[B1,2a]" refers to Box 1 in fig. 2a.
a ALBANY,BUFFALO,NEW.YORK,POUGHKEEPSIE IN NEW.YORK.STATE--
STATEMENT - ALBANY,BUFFALO,NEW.YORK,POUGHKEEPSIE IN NEW.
YORK.STATE
OK?...YES--
WAIT...
READY
BOSTON,WORCESTER IN MASSACHUSETTS--
STATEMENT - BOSTON,WORCESTER IN MASSACHUSETTS
OK?...YES--
WAIT...
READY
b FLAG ECHO OFF--
REQUEST - FLAG ECHO OFF
READY
c BOSTON EAST.OF WORCESTER--
READY
WORCESTER EAST.OF ALBANY--
READY
ALBANY EAST.OF BUFFALO--
READY
ALBANY NORTH.OF POUGHKEEPSIE--
READY
POUGHKEEPSIE NORTH.OF NEW.YORK--
READY
IN MEMBER LOCATION.RELS--
READY
d NORTH.OF,EAST.OF MEMBER COMPASS.RELS--
READY
e SOUTH.OF IMPLBY NORTH.OF(CNV)--
READY
WEST.OF IMPLBY EAST.OF(CNV)--
READY
WHAT,HAS.WHAT.RELATIONSHIP.TO IS QUESTION--
READY
f BOSTON EAST.OF WHAT*--
I WILL USE THE FOLLOWING RELATIONS:
EAST.OF/
I FIGURE BOSTON EAST.OF/WORCESTER
IF THIS IS SUFFICIENT, RESPOND - OK
OTHERWISE TYPE AN INTEGER FOR FURTHER SEARCH
OR - LIST TROUBLES OR AN INPUT STATEMENT OR - EXAMPLES
g 1--
I WILL USE THE FOLLOWING RELATIONS:
NO FURTHER RELATIONS FOUND.
Fig. 1a A Conversation With SAMENLAQ II.
I FIGURE BOSTON EAST.OF/ WORCESTER
IF THIS IS SUFFICIENT, RESPOND - OK
OTHERWISE TYPE AN INTEGER FOR FURTHER SEARCH
OR - LIST TROUBLES OR AN INPUT STATEMENT OR - EXAMPLES
h EAST.OF IMPLBY EAST.OF/EAST.OF--
STATEMENT - EAST.OF IMPLBY EAST.OF/EAST.OF
OK?...YES--
WAIT
ENTER ANY OF THE ABOVE OPTIONS.
2--
I WILL USE THE FOLLOWING RELATIONS:
EAST.OF/EAST.OF/
AND EAST.OF/EAST.OF/EAST.OF/
I FIGURE BOSTON EAST.OF/WORCESTER AND ALBANY AND BUFFALO
IF THIS IS SUFFICIENT, RESPOND - OK
OTHERWISE TYPE AN INTEGER FOR FURTHER SEARCH
OR - LIST TROUBLES OR AN INPUT STATEMENT OR - EXAMPLES
OK--
WAIT...
ANSWER - WORCESTER [Answer to Question posed at line "f".]
AND ALBANY
AND BUFFALO
READY
j BUFFALO HAS.WHAT.RELATIONSHIP.TO NEW.YORK*2--
ENTER COMMA LIST OF RELATION CLASSES TO BE USED OR - "ANY."
ANY--
BUFFALO IN NEW.YORK.STATE IN(CNV) NEW.YORK
ENTER - OK - OR AN INTEGER INDICATING NUMBER OF ADDITIONAL
PATH LINKS.
1--
WAIT...
BUFFALO EAST.OF(CNV) ALBANY NORTH.OF POUGHKEEPSIE NORTH.OF
NEW.YORK
ENTER - OK - OR AN INTEGER INDICATING NUMBER OF ADDITIONAL
PATH LINKS.
OK--
READY
FLAG ASK OFF®
READY
FLAG TRACE OFF®
READY
k WHAT (IN-NEW.YORK.STATE,) SOUTHWEST.OF BOSTON*--
I FIGURE BOSTON SOUTHWEST.OF(CNV)/(IN-NEW.YORK.STATE,)
UNKNOWN
Fig. 1b A Conversation With SAMENLAQ II.
WHAT NOW?
LIST TROUBLES—
AT THE FOLLOWING NAMES COULD NOT APPLY THE LISTED RELATIONS.
BOSTON - SOUTHWEST.OF(CNV)
WHAT NOW?
SOUTHWEST.OF IMPLBY SOUTH.OF/WEST.OF, WEST.OF/SOUTH.OF—
STATEMENT - SOUTHWEST.OF IMPLBY SOUTH.OF/WEST.OF, WEST.
OF/SOUTH.OF
OK?...YES—
WAIT...
WHAT NOW?
3--
I FIGURE BOSTON SOUTHWEST.OF(CNV)/(IN-NEW.YORK.STATE,)
POUGHKEEPSIE
WHAT NOW?
SOUTH.OF IMPLBY SOUTH.OF/SOUTH.OF—
STATEMENT - SOUTH.OF IMPLBY SOUTH.OF/SOUTH.OF
OK?...YES—
WAIT...
WHAT NOW?
3--
I FIGURE BOSTON SOUTHWEST.OF(CNV)/(IN-NEW.YORK.STATE,)
POUGHKEEPSIE
AND NEW.YORK
WHAT NOW?
OK--
ANSWER - POUGHKEEPSIE [Answer to question posed at line "k"].
AND NEW.YORK
READY
Fig. 1c A Conversation With SAMENLAQ II.
Fig. 2a Overall Flow of Control
*See figure 2b.*
Fig. 2b Flow Chart of Main Question Answering Routines
*This routine is used at [B4,2a], [B5,2a] and [B6,2a].
by lines "a," "f," and "b" respectively. Notice that user inputs are indicated by terminal left arrows whereas unterminated lines indicate SAMENLAQ II responses. The simple statement at line "c" results in the construction of a net substructure containing the nodes "BOSTON", "EAST.OF", "EAST.OF(CNV)", and "WORCESTER" in which "BOSTON" and "WORCESTER" are tied together via the "EAST.OF" node and "WORCESTER" and "BOSTON" are tied together via the "EAST.OF(CNV)" node. This structure is considered in more detail in the next section. More complicated statements such as line "a" are interpreted by the system as a series of simple statements.
The system provides the user with various types of feedback, some of which may be turned off by appropriate requests to the executive.[B7,2a] Lines prior to "f" demonstrate input in the full and limited response modes. Line "b" requests that the full response mode be turned off. Requests are identified by a terminating "<e>". Other request options are indicated in fig. 2.[B2,2a]
Several relation words are built into the system. "MEMBER" allows a particular net search to be limited to a subclass of all the relations represented in the net. Subclass definition may take place at any point during a conversation and is determined solely by the user. Line "d" represents the introduction of the subclass "COMPASS.RELS". Such classes are useful for handling questions involving paths in the net which connect prescribed nodes.
"IMPLBY" allows a given relation to be defined in terms of other relations. It is one of the most important features of SAMENLAQ II. The system has the ability to utilize "IMPLBY" information about a relation during the question answering process by using it as a generalized substitution rule.
Line "e" demonstrates the use of IMPLBY to introduce NORTH. OF(CNV) as an acceptable replacement for SOUTH.OF. The line also results in SOUTH.OF(CNV) IMPLBY NORTH.OF being incorporated in memory.
To enhance readability, the system allows the user to introduce his own interrogatives by means of the form "x IS QUESTION".
Questions are terminated by "*". There are four possible types -- one verification type (x R y *) and three fill-in-the-blank types (x R _ *), (_ R y *) and (x _ y *).[B3,2a] Line "f" illustrates the x R _ * type, line "j" the x _ y * type and line "k" the _ R y * type. Notice that non-simple relations can be handled by the system.
There are three types of relations used in SAMENLAQ II, simple, compound and complex. A simple relation is of the form R or R(CNV) where the character string R is not meaningfully decomposable and Y R(CNV) X if and only if X R Y. A compound relation is a simple relation or a relative product of simple relations and is of the form R1/R2/R3, (or sometimes, as a stylistic variant, R1/R2/R3/).
X R1/R2/R3 Y holds if and only if there exists some z and w such
that $X R_1 z$, $z R_2 w$ and $w R_3 Y$. A complex relation is a compound relation or a compound relation with node restrictions. A node restriction is of the form $(R - \emptyset)$ where $R$ is a simple relation and $\emptyset$ is a string of names, each one followed by a comma. Node restrictions are used to restrict the domain or range of a simple relation which forms part of the complex relation. For example, $(R - \emptyset)R_1$ is the relation $R_1$ with a restricted domain, and $R_1(R - \emptyset)$ is $R_1$ with a restricted range.
\[
x \;\; (R_1 - \emptyset_1)(R_2 - \emptyset_2)\,R_3\,/\,(R_4 - \emptyset_3)(R_5 - \emptyset_4)\,R_6\,/\,(R_7 - \emptyset_5) \;\; y
\]
Here $(R_1 - \emptyset_1)$ and $(R_2 - \emptyset_2)$ restrict the domain of $R_3$, $(R_4 - \emptyset_3)$ and $(R_5 - \emptyset_4)$ restrict the range of $R_3$ (which is also the domain of $R_6$), and $(R_7 - \emptyset_5)$ restricts the range of $R_6$.
A name $x$ satisfies the node restriction $(R - \emptyset)$ if, for all $y$ in the string $\emptyset$, $xRy$ is explicitly stored in memory. Thus the relational statement above holds if $x$ satisfies $(R_1 - \emptyset_1)$ and $(R_2 - \emptyset_2)$, $y$ satisfies $(R_7 - \emptyset_5)$, and there exists some $z$ such that $z$ satisfies $(R_4 - \emptyset_3)$ and $(R_5 - \emptyset_4)$, $xR_3z$, and $zR_6y$.
A complex relation may also consist solely of node restrictions, in which case it is an identity relation on a restricted domain (viz. the set of all names which satisfy all the node restrictions) and is the statement of a conjunctive concept. The executive routines have built into them the ability to deal with converse relations and with compound and complex relations. When a relation serves to label an edge of the net, i.e. in its appearance in the value of a name, it is treated as a
simple relation. Compound and complex relations are used when defining other relations and may themselves have definitions. Lines beginning at "k" illustrate these ideas.
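To make these semantics concrete, the following is a small sketch in Python (not SNOBOL3, and not the original implementation) of applying a relative product and a node restriction over a toy net stored as nested dictionaries; the names and data are illustrative only:

```
# net[name][relation] -> set of names reachable from name via that relation
net = {
    "BOSTON":    {"EAST.OF": {"WORCESTER"}},
    "WORCESTER": {"EAST.OF": {"ALBANY"}, "WEST.OF": {"BOSTON"}},
    "ALBANY":    {"EAST.OF": {"BUFFALO"}, "IN": {"NEW.YORK.STATE"}},
}

def satisfies(name, restriction):
    """Node restriction (R - phi): name must be explicitly R-related to
    every element of the string phi."""
    rel, phi = restriction
    return phi <= net.get(name, {}).get(rel, set())

def apply_compound(start, relations):
    """Apply a relative product R1/R2/.../Rn starting from one node: a node y
    is in the result iff there is a chain of edges labelled R1, ..., Rn."""
    frontier = {start}
    for rel in relations:
        frontier = {y for x in frontier for y in net.get(x, {}).get(rel, set())}
    return frontier

# X EAST.OF/EAST.OF Y holds iff Y is reachable via two EAST.OF edges:
print(apply_compound("BOSTON", ["EAST.OF", "EAST.OF"]))           # {'ALBANY'}
# A node restriction can then filter the result, e.g. (IN-NEW.YORK.STATE,):
print({y for y in apply_compound("BOSTON", ["EAST.OF", "EAST.OF"])
       if satisfies(y, ("IN", {"NEW.YORK.STATE"}))})              # {'ALBANY'}
```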
Line "f" represents one of the four question types. To answer it, SAMENLAQ II attempts to apply "EAST.OF" to "BOSTON". Since the system has the statement "BOSTON EAST.OF WORCESTER" represented explicitly in its memory, EAST.OF can be successfully applied — yielding "WORCESTER." Since the system does not know explicitly that "BOSTON EAST.OF ALBANY" or that EAST.OF is a transitive relation, it is incapable of finding further nodes satisfying "BOSTON EAST.OF X". This is illustrated in line "g" where "l" indicates that SAMENLAQ II is to execute one substitution cycle — i.e. substitute for each relation it is currently attempting to apply to a node, all acceptable replacements contained on the relations IMPLBY list.[B1,2h] (The IMPLBY list for a relation R is a list of all relations, X, such that R IMPLBY X.) In line "h" the system is informed that EAST.OF is transitive. Two additional IMPLBY substitution cycles are then requested by typing in "2".
Figure 3 illustrates an application of SAMENLAQ to rudimentary conjunctive concept formation. The statements presented in Fig. 3a supply the system with a small data base concerning the relations of various objects in a room. Note that in line "a" two of these are grouped into the class "SPLREL". In line "b" SAMENLAQ is asked to answer a question concerning a relation it has never seen before. Failing to
CHAIR, SHELF, TABLE HAS PART ELEVATED, HORIZONTAL, SUPPORTING.
SURFACE—
READY
DRESSER, BENCH HAS PART ELEVATED, HORIZONTAL, SUPPORTING, SURFACE—
READY
SHELF HAS PART BRACKET, SUPPORTS—
READY
CHAIR, TABLE, DRESSER, BENCH HAS PART LEGS—
READY
DRESSER HAS PART DRAWERS—
READY
BENCH, CHAIR, SHELF, TABLE, DRESSER ELEMENT FURNITURE—
READY
TV1, TTY1, TEL1, TEL3, PEN1, SIGNAL, LIGHT1, RADIO1 USED FOR
COMMUNICATION—
READY
TV1, TTY1, TEL3, TEL1, TEL2, SIGNAL, LIGHT1, RADIO1 CONSTRUCTION
ELECTRICAL—
READY
PEN1 CONSTRUCTION MECHANICAL—
READY
CHAIR1, CHAIR2 ELEMENT CHAIR—
READY
SHELF1, SHELF2, SHELF3 ELEMENT SHELF—
READY
TABLE1, TABLE2 ELEMENT TABLE—
READY
DRESSER1 ELEMENT DRESSER—
READY
CHAIR1, CHAIR2, SHELF2 CONSTRUCTION METAL—
READY
BENCH1, SHELF1, TABLE1, TABLE2, DRESSER1 CONSTRUCTION WOOD—
READY
RADIO1, PEN1 ON TOP DRESSER1—
READY
SIGNAL, LIGHT1, PICTURE1 ON TOP SHELF1—
READY
TV1 ON TOP TABLE2—
READY
TTY1 ON TOP BENCH1—
READY
BENCH1 ELEMENT BENCH—
READY
Fig. 3a A Conjunctive Concept Formation Example: Data Base Input.
PICTURE3, TEL3 ATTACHED TO WALL1—
READY
TEL1 ON TOP TABLE1—
READY
WHAT IS QUESTION—
READY
a ON TOP ELEMENT MEMBER SPLREL—
READY
b WHAT IS AN ELECTRICAL COMMUNICATION DEVICE THAT IS ON TOP OF A PIECE OF WOODEN FURNITURE*—
I FIGURE FURNITURE IS AN ELECTRICAL COMMUNICATION DEVICE THAT IS ON TOP OF A PIECE OF WOODEN(CNV)/ UNKNOWN WHAT NOW?
EXAMPLES—
RELATION WHOSE DEFINITION IS TO BE FOUND...CONCEPT1—
RELATION CLASS CONCEPT1 IS A MEMBER OF...SPLREL—
DEPTH OF ALTERNATE DEFINITIONS...2—
TYPE PAIRS X Y SUCH THAT X CONCEPT1 Y.
WHEN FINISHED, TYPE - END.
TEL1 TABLE—
RADIO1 DRESSER—
SIGNAL1 LIGHT1 SHELF—
END—
OK...YES—
THANK YOU.
WAIT...
FROM THE EXAMPLES YOU HAVE GIVEN ME, I WOULD GUESS THAT CONCEPT1 IS THE SAME AS
(CONSTRUCTION-ELECTRICAL,)(USED FOR COMMUNICATION,)ON TOP/
(CONSTRUCTION-WOOD,)ELEMENT HAS PART ELEVATED HORIZONTAL.
SUPPORTING SURFACE,)
WHAT NOW?
IS AN ELECTRICAL COMMUNICATION DEVICE THAT IS ON TOP OF A PIECE OF WOODEN IMPLY CONCEPT1/ELEMENT—
STATEMENT—
IS AN ELECTRICAL COMMUNICATION DEVICE THAT IS ON TOP OF A PIECE OF WOODEN IMPLY CONCEPT1/ELEMENT
OK...YES—
WAIT...
WHAT NOW?
2—
Fig. 3b A Conjunctive Concept Formation Example: Interrogation.
I FIGURE FURNITURE IS AN ELECTRICAL COMMUNICATION DEVICE THAT IS ON TOP OF A PIECE OF WOODEN (CNV) TTY1 AND SIGNAL LIGHT1 AND TEL1 AND TV1 AND RADIO1 WHAT NOW?
OK-- WAIT...
ANSWER - TTY1 [Answer to Question posed at line "b".]
AND SIGNAL LIGHT1
AND TEL1
AND TV1
AND RADIO1
READY
Fig. 3c A Conjunctive Concept Formation Example: Interrogation.
apply this relation, it is given a series of examples whose relationship to one another is arbitrarily designated CONCEPT1. Restricting its search to the relation class SPLREL, the system obtains all paths of length 2 or less connecting the example pairs. The properties common to each class of nodes at the same level along the path are also calculated. For example (CONSTRUCTION-ELECTRICAL,), (CONSTRUCTION-WOOD,), and (HAS.PART-ELEVATED,HORIZONTAL,SUPPORTING SURFACE,) are node properties common to the first, second, and third levels respectively. The path connecting the example pairs is "ON,TOP/ELEMENT". This relation is then placed upon the IMPLBY list for CONCEPT1. Finally CONCEPT1 is used to define the original concept IS,AN,ELECTRICAL, COMMUNICATION,DEVICE,THAT,IS,ON,TOP,OF,A,PIECE,OF,WOODEN. The system then has sufficient information to answer the question and responds with the correct answer.
3. SAMENLAQ II DESCRIPTION
Statements entered into the memory are of the form "NAME1 RELATION NAME2" where NAME1 and NAME2 are non-decomposable names and RELATION is a simple relation. Information contained in such statements is stored on paren lists associated with NAME1 and NAME2. Thus, the above statement produces the following paren list for "NAME1": "(RELATION-/1)" where the contents of the slash name "/1" is the comma list "NAME2,". In the example below, the paren pair (WEST.OF-/3) on the paren list for
WORCESTER indicates that Worcester is West of each of the elements found on the comma list named /3. Note that the value of a slash name is a comma list, similarly the value of a name is its paren list.
```
BOSTON = (EAST.OF-/1)
/1 = WORCESTER,ALBANY,BUFFALO,
WORCESTER = (EAST.OF-/2)(WEST.OF-/3)
/2 = ALBANY,BUFFALO,
/3 = BOSTON,
EAST.OF = (IMPLBY-/4)(MEMBER-/5)
/4 = EAST.OF/EAST.OF/,
/5 = COMPASS.RELS,
```
Although the paren list looks like a conventional attribute-value list it differs in that both the relations (attributes) and the names on the comma lists (values) are themselves names of paren lists and these various paren lists mutually occur as elements of each other. The memory may be thought of as a directed graph whose nodes are the names (BOSTON, WORCESTER, etc.) and whose edges are labelled by the relations. Since, however, the relations are also names and thus should be thought of as nodes in the graph, we should, perhaps, think of the edges as being labelled by passing through a node, and all edges bearing the same label as passing through the same node.
The statement NAME1 RELATION NAME2 is not only stored as such on NAME1's paren list, but its converse, NAME2 RELATION(CNV) NAME1 is stored on NAME2's paren list. This is done so that the information contained in the statement is recoverable from either name. Although this involves duplicate storage of information, changing the statement
to its converse form for storage under the second argument allows all statements about a name to be stored in the same place (the name's value) regardless of whether the name was the first or second argument in the original statements. This contrasts with the methods for retrieving a relational statement from either argument used by the Relational Data File$^4$ (RDF) and by DEACON$^{1,6}$. In RDF, statements are stored in only one direction, but in different files which are ordered on different parts of the statement. Thus, to get all information about a single name, either one file must be searched exhaustively or the name must be looked up in all files. In DEACON, the statements are stored in the form of closed "connecting rings" through the three parts of the statement. Thus the statement is reachable from any part of it without recourse to several files, but it is impossible to tell from a connecting ring where the statement should begin, i.e. whether the ring through $x$, $R$, and $y$ represents the statement $xRy$, $Ryx$, or $yxR$.
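A minimal sketch of this storage scheme (in Python, with the paren lists represented as nested dictionaries; illustrative only), showing that each statement is stored under both of its arguments:

```
from collections import defaultdict

# memory[name][relation] -> list of names (the "comma list" of a paren pair)
memory = defaultdict(lambda: defaultdict(list))

def converse(relation):
    """The converse of R is R(CNV); the converse of R(CNV) is R again."""
    return relation[:-5] if relation.endswith("(CNV)") else relation + "(CNV)"

def store(name1, relation, name2):
    """Store NAME1 RELATION NAME2 under NAME1 and the converse statement
    under NAME2, so the fact is reachable from either argument."""
    memory[name1][relation].append(name2)
    memory[name2][converse(relation)].append(name1)

store("BOSTON", "EAST.OF", "WORCESTER")
# memory["BOSTON"]["EAST.OF"]         == ["WORCESTER"]
# memory["WORCESTER"]["EAST.OF(CNV)"] == ["BOSTON"]
```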
The generality of SAMENLAQ derives largely from the ability to introduce new relations at any time, to introduce definitions of new relations or relations that had previously been undefined and to extend the definition of a relation. Definitions may also be added at any time and are stored in the memory net structure just like any other data. Definitions are not given in terms of relation properties that have been built into the system, but in terms of other relations. Nevertheless, various standard relation properties can be dealt with, for example:
1. Entering the statement "R1 IMPLBY R1/R1" causes R1 to be transitive.
2. Entering "RI IMPLYR RI(CNV)" causes RI to be symmetric.
3. It was previously pointed out (Section 2) that the complex relation consisting only of a node restriction (R-∅) serves as an identity relation for all names x such that, for every name y in the string ∅, xRy is explicitly in memory. If ∅ were the string consisting only of the delimiter ",", (R-∅) would be the identity relation for all x such that for any y, xRy were explicitly stored. If this were true for all x in memory, (R-,) would be the universal identity relation. In that case, entering the statement "R1 IMPLBY (R-,)" would cause R1 to be reflexive.
Thus, R1 might be defined to be an equivalence relation by entering "R1 IMPLBY (MEMBER-,), R1(CNV), R1/R1/". In addition to these forms of definition, a relation may be defined a) by using only other relations, and b) by combining other relations along with the relation being defined, thus forming a general recursive definition. A wide range of relations may thus be used without programming them into the executive routines as is done in Raphael's SIR. This method of defining relations also contrasts with that used by Elliott in "GRAIS". In "GRAIS" relational properties are built into the executive routines. In fact, this is done in such a way as to provide
specific routines for 32 classes of relations. The user introduces a new relation by specifying which of the 32 classes it belongs in and this determines how it will be handled. This does not allow a user to use a relation whose properties he either does not know completely or does not wish to make specific initially. A user may also define a relation in terms of a Boolean function of previously introduced relations. However, these relations may not be stored in the data structure, only used for question answering.
It is interesting to note that the user builds his own logic system into SAMENLAQ II when he specifies IMPLBY information (used as the rules of inference) and other statements (the axioms). The only logical structure imposed on the user is the metatheoretic substitution rule embodied in the procedures which apply IMPLBY, and the limits on the form of a rule of inference imposed by the syntax of complex relations. If the user specifies a strange or even self-contradictory "logic", SAMENLAQ II will produce deductions that are equally strange or contradictory; interpretation is in the mind of the user. (For example, in Section 4, deductions arising from the relation "IS.PART.OF" only make sense if the interpretation of "x IS.PART.OF y" motivating the rule of inference "IS.PART.OF IMPLBY IS.PART.OF/IS(CNV)" is that every member of y has a part which is a member of x, and the interpretation of "IS" is "is a subset of.")
It would be possible to append an executive to SAMENLAQ II which would constrain the type of logical system to one with certain prescribed properties.
4. SOME VARIED APPLICATIONS OF SAMENLAQ II
The following sample conversations demonstrate SAMENLAQ II's ability to deal with relations arising from a variety of problem areas.
The first conversation (Figure 4a-d) involves a modified subdialogue from SIR. It demonstrates SAMENLAQ's ability to handle relations such as part, subset, owns and element and the interdependence between such relations. Although specific relations and relational properties are not built into the system, SAMENLAQ can utilize the information $(x)(y)(z)(x \text{ IS.PART.OF } y \;\&\; z \subseteq y \rightarrow x \text{ IS.PART.OF } z)$ via the IMPLBY statement "IS.PART.OF IMPLBY IS.PART.OF/IS(CNV)". (See comments at the end of section 3.)
In attempting to answer a question, it may be necessary to supply further information to the system. Such a situation is illustrated by line "b" in the conversation starting at line "a".
Notice that in the case of a "WHAT R Y" question, SAMENLAQ II proceeds by attempting to apply the R(CNV) relation to the node Y.
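A minimal sketch of that behaviour, reusing the memory index and converse helper from the earlier sketch (an assumption of this example, not part of the system):

```python
def what(relation, y):
    """Answer 'WHAT relation y' by applying relation(CNV) to the node y."""
    return sorted(memory[y][converse(relation)])

store("NEW.YORK", "IS", "CITY")
store("LOS.ANGELES", "IS", "CITY")
print(what("IS", "CITY"))   # ['LOS.ANGELES', 'NEW.YORK']
```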
Figure 5 shows an application of SAMENLAQ II to census, airplane and airline flight data. Note especially that even though the
IS.PART.OF IMPLBY IS.PART.OF/IS.PART.OF,IS.PART.OF/IS(CNV)←
READY
SOMETIMES IMPLBY IS(CNV)←
READY
IS IMPLBY IS/IS←
READY
NOSE IS.PART.OF PERSON←
READY
NOSTRIL IS.PART.OF NOSE←
READY
PROFESSOR IS TEACHER←
READY
TEACHER IS PERSON←
READY
NOSTRIL IS.PART.OF PROFESSOR*1←
I FIGURE NOSTRIL IS.PART.OF/ NOSE AND PERSON
WHAT NOW?
1←
I FIGURE NOSTRIL IS.PART.OF/ NOSE AND PERSON AND TEACHER
WHAT NOW?
1←
I FIGURE NOSTRIL IS.PART.OF/ NOSE AND PERSON AND TEACHER
AND PROFESSOR
WHAT NOW?
OK←
WAIT...
ANSWER - TRUE
READY
PERSON IS LIVING CREATURE←
BAD INPUT. TRY AGAIN.
READY
PERSON IS LIVING,CREATURE←
READY
HAS,AS,PART IMPLBY IS.PART.OF(CNV)←
READY
LIVING,CREATURE SOMETIMES/HAS,AS,PART NOSTRIL*3←
I FIGURE LIVING,CREATURE SOMETIMES/HAS,AS,PART/ NOSTRIL
WHAT NOW?
OK←
ANSWER - TRUE
READY
CRT IS DISPLAY.DEVICE←
READY
Fig. 4a Learning and Deduction Using Several Relations From SIR
CRT IS.PART.OF B5500—
READY
BRUTUS IS B5500—
READY
SCREEN IS.PART.OF DISPLAY.DEVICE—
READY
d SCREEN IS.PART.OF BRUTUS*1—
I FIGURE SCREEN IS.PART.OF/ DISPLAY.DEVICE AND CRT
WHAT NOW?
1—
I FIGURE SCREEN IS.PART.OF/ DISPLAY.DEVICE AND CRT AND B5500
WHAT NOW?
1—
I FIGURE SCREEN IS.PART.OF/ DISPLAY.DEVICE AND CRT AND
B5500 AND BRUTUS
WHAT NOW?
OK—
WAIT...
ANSWER - TRUE [Answer to question posed at line "d" above]
READY
OWNS IMPLBY IS/OWNS—
READY
FIREMAN OWNS PAIR.OF,RED.SUSPENDERS—
READY
DOCTOR OWNS PAIR.OF,RED.SUSPENDERS*1—
I FIGURE DOCTOR OWNS/ UNKNOWN
WHAT NOW?
OK—
WAIT...
ANSWER - UNKNOWN
READY
FIRECHIEF IS FIREMAN—
READY
FIRECHIEF OWNS PAIR.OF,RED.SUSPENDERS*1—
I FIGURE FIRECHIEF OWNS/ PAIR.OF,RED.SUSPENDERS
WHAT NOW?
OK—
WAIT...
ANSWER - TRUE
READY
a EXAMPLE.OF IMPLBY EXAMPLE.OF/IS—
READY
A IMPLBY EXAMPLE.OF—
READY
Fig. 4b Learning and Deduction Using Several Relations From SIR
STU OWNS LOG,LOG,DECITRIG1--
READY
LOG,LOG,DECITRIG1 EXAMPLE,OF LOG,LOG,DECITRIG--
READY
LOG,LOG,DECITRIG IS SLIDE,RULE--
READY
STU OWNS/A SLIDE RULE*1--
I FIGURE STU OWNS/A/ LOG,LOG,DECITRIG
WHAT NOW?
1--
I FIGURE STU OWNS/A/ LOG,LOG,DECITRIG AND SLIDE,RULE
WHAT NOW?
OK--
ANSWER TRUE
READY
ENGINEERING,STUDENT OWNS SLIDE,RULE--
READY
GEORGE EXAMPLE,OF TECH,MAN--
READY
TECH,MAN IS ENGINEERING,STUDENT--
READY
GEORGE OWNS/A SLIDE,RULE*1--
I FIGURE GEORGE OWNS/A/ UNKNOWN
WHAT NOW?
1--
I FIGURE GEORGE OWNS/A/ UNKNOWN
WHAT NOW?
OK--
ANSWER - UNKNOWN
READY
ENGINEERING,STUDENT EXAMPLE,OF(CNV)/OWNS/EXAMPLE,OF LOG,LOG,DECITRIG*1--
I FIGURE ENGINEERING,STUDENT EXAMPLE,OF(CNV)/OWNS/EXAMPLE,OF/ UNKNOWN
WHAT NOW?
2--
I FIGURE ENGINEERING,STUDENT EXAMPLE,OF(CNV)/OWNS/EXAMPLE,OF/ LOG,LOG,DECITRIG AND SLIDE,RULE
Fig. 4c Learning and Deduction Using Several Relations From SIR.
WHAT NOW?
OK—
ANSWER - TRUE [Answer to question posed at line "c".]
READY
Fig. 4d Learning and Deduction Using Several Relations From SIR
NEW, YORK, LOS, ANGELES, SANTA, BARBARA, ORLANDO IS CITY--
READY
NEW, YORK HAS, POPULATION 7781984--
READY
LOS, ANGELES HAS, POPULATION 2479015--
READY
ORLANDO HAS, POPULATION 88135--
READY
SANTA, BARBARA HAS, POPULATION 58768--
READY
7781984, 2479015 IS, GREATER, THAN 100000--
READY
88135, 58768 IS, LESS, THAN 100000--
READY
BOEING, 707, BOEING, 727, DC8 IS JET, PLANE--
READY
CONVAIR, 240 IS PROP, PLANE--
READY
JET, PLANE, PROP, PLANE SUBSET AIRPLANE--
READY
IS IMPLBY IS, SUBSET--
READY
BOEING, 707, DC, 8 CARRIES 150--
READY
BOEING, 727 CARRIES 120--
READY
CONVAIR, 240 CARRIES 50--
READY
150 IS, GREATER, THAN 120--
READY
50 IS, LESS, THAN 120--
READY
a IS, LARGE IMPLBY (IS, CITY,) HAS, POPULATION/(IS, GREATER, THAN-100000,) HAS, POPULATION(CNV)/IS--
READY
b IS, LARGE IMPLBY (IS, AIRPLANE,) CARRIES/(IS, GREATER, THAN, 120,) CARRIES(CNV)/IS--
READY
WHAT IS QUESTION--
WHAT IS AIRPLANE*1--
I FIGURE AIRPLANE IS(CNV)/ BOEING, 707 AND BOEING, 727 AND
DC, 8 AND CONVAIR, 240
Fig. 5a Application to census, airplane and airline flight data with
an ambiguous relation.
WHAT NOW?
OK--
WAIT...
ANSWER - BOEING.707
AND BOEING.727
AND DC.8
AND CONVAIR.240
READY
DC.8 IS LARGE WHAT*1--
I FIGURE DC.8 IS LARGE/JET.PLANE AND AIRPLANE
WHAT NOW?
OK--
WAIT...
ANSWER - JET.PLANE
AND AIRPLANE
READY
WHAT IS LARGE CITY*1--
I FIGURE CITY IS LARGE(CNV)/ NEW.YORK AND LOS ANGELES
WHAT NOW?
OK--
WAIT...
ANSWER - NEW.YORK
AND LOS ANGELES
READY
SANTA.BARBARA, ORLANDO HAS POPULATION WHAT*--
I FIGURE SANTA.BARBARA, ORLANDO HAS POPULATION/ 58768 AND 88135
WHAT NOW?
OK--
WAIT...
ANSWER - 58768
AND 88135
READY
FLT.207 FLIES FROM NEW.YORK--
READY
FLT.207 FLIES TO LOS ANGELES--
READY
DC.8 USED ON FLT.207--
READY
FLT.207 DEPARTS AT 10:00AM--
READY
FLT.207 ARRIVES AT 12:30PM--
READY
FLT.308 FLIES FROM NEW.YORK--
Fig. 5b Application to census, airplane and airline flight data with an ambiguous relation.
READY
FLT.308 FLIES TO ORLANDO—
READY
BOEING.727 USED ON FLT.308—
READY
FLT.45 FLIES FROM LOS ANGELES—
READY
FLT.45 FLIES TO SANTA BARBARA—
READY
CONVAIR.240 USED ON FLT.45—
READY
FLT.45 DEPARTS AT 1:30PM—
READY
FLT.45 ARRIVES AT 2:15PM—
READY
1:30PM IS LATER THAN 12:30PM—
READY
CONNECTS WITH IMPLBY ARRIVES AT/IS LATER THAN (CNV)/DEPARTS AT (CNV)—
READY
WHAT (FLIES FROM NEW YORK, )CONNECTS WITH FLIES TO SANTA BARBARA*1—
I FIGURE SANTA BARBARA FLIES TO (CNV)/CONNECTS WITH (CNV)/(FLIES FROM NEW YORK, )
FLT.207
WHAT NOW?
OK—
ANSWER — FLT.207
READY
FLT.207 USED ON (CNV) IS LARGE AIRPLANE*—
I FIGURE FLT.207 USED ON (CNV) IS LARGE/JET PLANE AND AIRPLANE
WHAT NOW?
OK—
ANSWER — TRUE
READY
Fig. 5c Application to census, airplane and airline flight data with an ambiguous relation.
relation "IS_LARGE" is defined ambiguously in lines "a" and "b" as to its application to cities or airplanes, SAMENLAQ II can disambiguate it from context (lines "c" and "d").
In Figure 6, SAMENLAQ II works with simple logic and set theory. The rule of inference Modus Ponens is entered at line "a" of Figure 6a, and with this and the transitivity of IMPLIES given at line "b", simple "chain implication" problems can be solved. In Figure 6b the relations SUBSET and ELEMENT are introduced along with the rule $(\forall x)(x \in A \;\&\; A \subseteq B \rightarrow x \in B)$. Then some set membership problems are solved and, finally, a simple proof is constructed.
Figure 7 shows SAMENLAQ II being taught its first lesson in arithmetic. Although neither numbers nor arithmetic functions have been built into the SAMENLAQ II structure or executive routines, SAMENLAQ II is capable of being taught arithmetic the way school children used to be taught: by first memorizing tables, and then being taught certain rules. Notice especially that divisibility by 2 was defined recursively in lines "a", "b" and "c". Similarly, SAMENLAQ II could have been taught multiplication, division, the recursive definitions for less than and greater than as well as other arithmetic relations.
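Schematically, the recursion of lines "a"-"c" bottoms out at the memorized facts for 1 and 2 and otherwise subtracts 2 and asks the question again; the following Python sketch is only an illustration of that rule, with the tables written out by hand:

```python
# Memorized tables (lines "a" and "b") and the subtraction facts behind MINUS.2.IS.
divisible_by_2 = {1: "FALSE", 2: "TRUE"}
minus_2_is = {3: 1, 4: 2}

def div_by_2(n):
    """Line "c": DIVISIBLE.BY.2 IMPLBY MINUS.2.IS/DIVISIBLE.BY.2 --
    if no explicit answer is stored, subtract 2 and ask again."""
    if n in divisible_by_2:
        return divisible_by_2[n]
    return div_by_2(minus_2_is[n])

print(div_by_2(4))   # TRUE
print(div_by_2(3))   # FALSE
```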
Figure 8 shows SAMENLAQ II solving the Missionary - Cannibal Problem, with three missionaries, three cannibals and a boat that holds a maximum of two people. The problem was described to SAMENLAQ II as a set of all the legal states in the problem with all the possible
a TRUE IMPLBY IMPLIES(CNV)/TRUE--
READY
FALSE IMPLBY IMPLIES/FALSE--
READY
A IMPLIES B--
READY
A TRUE PROPOSITION--
READY
B TRUE PROPOSITION*1--
I FIGURE B TRUE/ PROPOSITION
WHAT NOW?
OK--
WAIT...
ANSWER - TRUE
READY
FOLLOWS,FROM IMPLBY IMPLIES(CNV)--
READY
b IMPLIES IMPLBY IMPLIES/IMPLIES--
READY
C IMPLIES D--
READY
D FALSE PROPOSITION--
READY
C FALSE PROPOSITION*1--
I FIGURE C FALSE/ PROPOSITION
WHAT NOW?
OK--
WAIT...
ANSWER - TRUE
READY
FORGET PROPOSITION*2--
FORGET - PROPOSITION
OK?...YES--
WAIT...
READY
B IMPLIES C--
READY
D FALSE PROPOSITION--
READY
A FALSE PROPOSITION*1--
I FIGURE A FALSE/ UNKNOWN
WHAT NOW?
2--
Fig. 6a A Simple Problem in Logic Using Modus Ponens
I FIGURE A FALSE/ PROPOSITION
WHAT NOW?
OK--
WAIT...
ANSWER - TRUE
READY
ELEMENT IMPLBY ELEMENT/SUBSET--
READY
S1 SUBSET S2--
READY
S2 SUBSET S3--
READY
E1 ELEMENT S1--
READY
WHAT IS QUESTION--
READY
E1 ELEMENT WHAT*1--
I FIGURE E1 ELEMENT/ S1 AND S2
WHAT NOW?
1--
I FIGURE E1 ELEMENT/ S1 AND S2 AND S3
WHAT NOW?
OK--
WAIT...
ANSWER - S1 AND S2 AND S3
READY
c D FOLLOWS,FROM A*2--
I FIGURE D FOLLOWS,FROM/ C AND B
WHAT NOW?
1--
I FIGURE D FOLLOWS,FROM/ C AND B AND A
WHAT NOW?
OK--
WAIT...
ANSWER - TRUE [Answer to question posed at line "c".]
READY
F IS AXIOM--
READY
FIND,PROOF,FROM IS QUESTION--
READY
Fig. 6b Set Theory and Simple Theorem Proving
A DERIVABLE.FROM F--
READY
D FIND.PROOF.FROM Axiom6--
ENTER COMMA LIST OF RELATION CLASSES TO BE USED OR - "ANY"
ANY--
D FOLLOWS.FROM A DERIVABLE.FROM F IS Axiom
WHAT NOW?
OK--
READY
Fig. 6c Set Theory and Simple Theorem Proving
1 PLUS.1.IS 2←
READY
2 PLUS.1.IS 3←
READY
3 PLUS.1.IS 4←
READY
PLUS.2.IS IMPLBY PLUS.1.IS/PLUS.1.IS←
READY
WHAT IS QUESTION←
READY
2 PLUS.2.IS WHAT*1←
I FIGURE 2 PLUS.2.IS/ 4
WHAT NOW?
OK←
WAIT...
ANSWER - 4
READY
MINUS.2.IS IMPLBY PLUS.2.IS(CNV)←
READY
a 1 DIVISIBLE.BY.2 FALSE←
READY
b 2 DIVISIBLE.BY.2 TRUE←
READY
c DIVISIBLE.BY.2 IMPLBY MINUS.2.IS/DIVISIBLE.BY.2←
READY
4 DIVISIBLE.BY.2 WHAT*2←
I FIGURE 4 DIVISIBLE.BY.2/ TRUE
WHAT NOW?
OK←
WAIT...
ANSWER - TRUE
READY
3 DIVISIBLE.BY.2 WHAT*3←
I FIGURE 3 DIVISIBLE.BY.2/ FALSE
WHAT NOW?
OK←
WAIT...
ANSWER - FALSE
READY
Fig. 7 Arithmetic and Handling Recursive Definitions
3M3CL 1C 0M1CR--
READY
3M3CL 1M1C 1M1CR--
READY
3M3CL 2C 0M2CR--
READY
3M2CL 1C 0M2CR--
READY
3M2CL 1M 1M1CR--
READY
3M2CL 2C 0M3CR--
READY
3M1CL 1C 0M3CR--
READY
3M1CL 2M 2M2CR--
READY
2M2CL 1M1C 2M2CR--
READY
2M2CL 2M 3M1CR--
READY
1M1CL 1M 3M2CR--
READY
1M1CL 1M1C 3M3CR--
READY
0M3CL 1C 3M1CR--
READY
0M3CL 2C 3M2CR--
READY
0M2CL 1C 3M2CR--
READY
0M2CL 2C 3M3CR--
READY
0M1CL 1C 3M3CR--
READY
GET,TO IS QUESTION--
READY
3M3CL GET,TO 3M3CR*11--
ENTER COMMA LIST OF RELATION CLASSES TO BE USED OR - "ANY"
ANY-- [First solution follows:]
3M3CL 2C 0M2CR 1C(CNV) 3M2CL 2C 0M3CR 1C(CNV) 3M1CL 2M 2M2CR 1M1C(CNV) 2M2CL 2M 3M1CR 1C(CNV) 0M3CL 2C 3M2CR 1M(CNV) 1M1CL 1M1C 3M3CR [Second solution follows:]
3M3CL 2C 0M2CR 1C(CNV) 3M2CL 2C 0M3CR 1C(CNV) 3M1CL 2M 2M2CR 1M1C(CNV) 2M2CL 2M 3M1CR 1C(CNV) 0M3CL 2C 3M2CR 1C(CNV) 0M2CL 2C 3M3CR
Fig. 8a Solving the Missionary-Cannibal Problem
WHAT NOW?
OK—
READY
Fig. 8b Solving the Missionary-Cannibal Problem
transitions between the states. For example, the first line represents the fact that if 3 missionaries and 3 cannibals are on the left bank with the boat on the left bank, then 1 cannibal can take the boat to the right bank, which will result in there being 0 missionaries and 1 cannibal on the right bank with the boat on the right bank. SAMENLAQ II solves the problem by showing how the boat should be used to get from the initial state, 3M3CL, to the final state, 3M3CR. Any problem solving task that can be represented as finding a path from an initial state to a final state through a state transition graph can in theory be solved similarly by SAMENLAQ II.*
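A sketch of that search, using only the transitions needed for the first solution of Figure 8 (the move and state names follow the figure; the breadth-first strategy is our illustration of an exhaustive search, not the system's actual routine):

```python
from collections import deque

# Transitions (state, boat move, resulting state) taken from Figure 8a,
# with (CNV) marking a return trip, as in the printed solution.
transitions = [
    ("3M3CL", "2C", "0M2CR"), ("0M2CR", "1C(CNV)", "3M2CL"),
    ("3M2CL", "2C", "0M3CR"), ("0M3CR", "1C(CNV)", "3M1CL"),
    ("3M1CL", "2M", "2M2CR"), ("2M2CR", "1M1C(CNV)", "2M2CL"),
    ("2M2CL", "2M", "3M1CR"), ("3M1CR", "1C(CNV)", "0M3CL"),
    ("0M3CL", "2C", "3M2CR"), ("3M2CR", "1M(CNV)", "1M1CL"),
    ("1M1CL", "1M1C", "3M3CR"),
]

def get_to(start, goal):
    """Exhaustive breadth-first search for a sequence of moves from start to goal."""
    queue, seen = deque([(start, [])]), {start}
    while queue:
        state, path = queue.popleft()
        if state == goal:
            return path
        for a, move, b in transitions:
            if a == state and b not in seen:
                seen.add(b)
                queue.append((b, path + [move]))
    return None

print(get_to("3M3CL", "3M3CR"))   # the eleven moves of the first solution
```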
5. EXTENSION OF THE SAMENLAQ STRUCTURE
Work is now proceeding on the design and implementation of a memory net structure, MENS, which goes further than SAMENLAQ II toward satisfying goals discussed in the first section of this paper. The two major improvements needed in the SAMENLAQ structure are:
1. the ability to deal with a name which is itself a statement
2. the ability to store names which represent some unspecified other names, i.e. act as variables.
*Since SAMENLAQ II exhaustively searches the state transition graph, the threat of exponential growth is ever present. Thus in certain interesting problems exhaustive search would be infeasible and heuristic search techniques would be necessary.
The first would facilitate the handling of n-ary relations and statements which serve to modify or give further information about other statements. The second would allow generalizations to be stored, and would also permit the storage of statements of the predicate calculus directly in the memory structure. These statements could then be interpreted by the executive and used as rules of inference to direct the memory search routines in a manner similar to the way SAMENLAQ II deals with IMPLBY definitions. The currently extant implementation of MENS (which is programmed in Burroughs Extended ALGOL and uses ASLIP, a SLIP-like package of list processing routines) incorporates the first improvement, and work is progressing on the design of the implementation of the second improvement.
The main generalization involved in going from SAMENLAQ to MENS was to let xRy statements be nodes in the net along with arguments and relations. The basic element of the MENS structure is called an item, which may be an unstructured unit or may be a structure consisting of a pair or triple of items. Thus, items are similar to the "events" used by Simmons et al. in Protosynthex II. As in SAMENLAQ, a major characteristic of the MENS structure is that there is no duplication of items or structures; the physically same item is used everywhere that the structure it represents is referred to in a containing structure. Several implications of this uniqueness of items are: (1) two structures which have a substructure in common actually overlap in the net, (2) if there is an item representing logical implication, all structures interpretable as rules of inference will be discoverable directly from that item since it will be a central substructure of all of them, (3) in general statements involving a quantified variable, the separate occurrences of the variable will all be pointers to a single item, so that a substitution attached to that item will serve as a substitution for all occurrences of the variable.
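This uniqueness of items is essentially what is now called interning or hash-consing: constructing a pair or triple returns the already existing item if an identical one was built before, so structures with a common substructure physically overlap. A small illustrative sketch (class and field names are ours, not MENS's):

```python
class Item:
    """A MENS-like item: an unstructured unit, or a pair/triple of items."""
    _interned = {}

    def __new__(cls, *parts):
        key = tuple(id(p) if isinstance(p, Item) else p for p in parts)
        if key not in cls._interned:        # build each distinct structure only once
            obj = super().__new__(cls)
            obj.parts = parts
            cls._interned[key] = obj
        return cls._interned[key]

s1 = Item(Item("JOHN"), Item("OWNS"), Item("CAR"))
s2 = Item(Item("JOHN"), Item("OWNS"), Item("CAR"))
print(s1 is s2)   # True: the physically same item represents the shared structure
```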
Allowing a structure to be formed from a pair of substructures provides for the representation of unary relations such as negation and quantification. Allowing a structure to be formed from a triple of substructures provides for the representation of binary relations and, since any of the substructures may in fact be structures as well as unstructured items, provides for the representation of n-ary relations. A more direct representation of n-ary relations is provided in another version of MENS being implemented*, which will allow any structure to consist of any number of substructures.
It is demonstrated in the examples given in Section 4 that SAMENLAQ II is capable of answering questions in formal logic involving simple chains of inference and basic set theory. With its ability to store statements of the predicate calculus, the MENS structure
---
*A paper describing this version of MENS and the natural language question answering system which will use it is forthcoming from the RAND Corporation as a RAND Memorandum by Martin Kay, Ronald M. Kaplan, and Stuart C. Shapiro.
should enable simple question answering routines to perform more complicated theorem proving. Although we should perhaps not expect a high-powered theorem prover to be developed in this way, the MENS extension of SAMENLAQ will provide an interesting contrast to systems, such as Green and Raphael's QA2$^3$, which use theorem proving techniques to answer questions.
APPENDIX - MEMORY STRUCTURE
READY
DUMP MEMORY ON TELETYPEWRITER←
THE MEMORY IS ---
2 = (DIV.BY.2/-20)(PLUS.2.IS/-13)(PLUS.1.IS/-4)(PLUS.1.IS(CNV)/-1)
/1 = 1,
/2 = 2,
3 = (DIV.BY.2/-25)(PLUS.1.IS/-6)(PLUS.1.IS(CNV)/-3)
/3 = 2,
/4 = 3,
4 = (DIV.BY.2/-24)(PLUS.2.IS(CNV)/-12)(PLUS.1.IS(CNV)/-5)
/5 = 3,
/6 = 4,
PLUS.1.IS/PLUS.1.IS = (IMPLBY(CNV)/-7)
/7 = PLUS.2.IS,
PLUS.2.IS(CNV) = (IMPLBY(CNV)/-14)(IMPLBY/-8)
/8 = PLUS.1.IS(CNV)/PLUS.1.IS(CNV)/,
PLUS.2.IS = (IMPLBY/-9)
/9 = PLUS.1.IS/PLUS.1.IS/,
QUESTION = (IS(CNV)/-10)
/10 = WHAT
WHAT = (IS/-11)
/11 = QUESTION,
/12 = 2,
/13 = 4,
/14 = MINUS.2.IS,
MINUS.2.IS(CNV) = (IMPLBY/-15)
/15 = PLUS.2.IS/,
MINUS.2.IS = (IMPLBY/-16)
/16 = PLUS.2.IS(CNV)/,
FALSE = (DIV.BY.2(CNV)/-17)
/17 = 1,3,
/18 = FALSE,
TRUE = (DIV.BY.2(CNV)/-19)
/19 = 2,4,
/20 = TRUE,
Fig. 9 Shows SAMENLAQ II's actual memory structure after its arithmetic lesson, which was shown in figure 7. The underlined material was learned after being discovered as implicit information while answering questions.
MINUS.2.IS/DIV.BY.2 = (IMPLBY(CNV)/-21)
/21 = DIV.BY.2,
DIV.BY.2(CNV) = (IMPLBY/-22)
/22 = DIV.BY.2(CNV)/MINUS.2.IS(CNV)/,
DIV.BY.2 = (IMPLBY/-23)
/23 = MINUS.2.IS/DIV.BY.2/,
/24 = TRUE,
/25 = FALSE,
READY
Fig. 9 (Cont.)
References
Dynamic and Extensible Exception Handling for Workflows: A Service-Oriented Implementation
Michael Adams¹, Arthur H. M. ter Hofstede¹, David Edmond¹, and Wil M. P. van der Aalst¹,²
¹ Business Process Management Group
Queensland University of Technology, Brisbane, Australia
{m3.adams,a.terhofstede,d.edmond}@qut.edu.au
² Department of Mathematics and Computer Science
Eindhoven University of Technology, Eindhoven, The Netherlands
w.m.p.v.d.aalst@tue.nl
Abstract. This paper presents the realisation, using a Service Oriented Architecture, of an approach for dynamic, flexible and extensible exception handling in workflows, based not on proprietary frameworks, but on accepted ideas of how people actually work. The approach utilises an established framework for workflow flexibility called worklets and a detailed taxonomy of workflow exception patterns to provide an extensible repertoire of self-contained exception-handling processes which may be applied at the task, case or specification levels, and from which a dynamic runtime selection is made depending on the context of the exception and the particular work instance. Both expected and unexpected exceptions are catered for in real time, so that ‘manual handling’ of the exception is avoided.
Key words: workflow exception handling, workflow flexibility, service oriented architecture, worklet
1 Introduction
Workflow management systems (WfMS) are used to configure and control structured business processes from which well-defined workflow models and instances can be derived [1, 2]. However, the proprietary process definition frameworks imposed by WfMSs make it difficult to support (i) dynamic evolution (i.e. modifying process definitions during execution) following unexpected or developmental change in the business processes being modelled [3]; and (ii) exceptions, or deviations from the prescribed process model at runtime [4–6].
For exceptions, the accepted practice is that if an exception can conceivably be anticipated, then it should be included in the process model. However, this approach can lead to very complex models, much of which will never be executed in most cases, and adds orders-of-magnitude complexities to workflow logic; mixing business logic with exception handling routines complicates the verification and modification of both [7], in addition to rendering the model almost unintelligible to most stakeholders.
Conversely, if an exception occurs that is unexpected, the model is deemed to be simply deficient, and thus needs to be amended to include the previously unimagined event (see for example [8]). This approach, however, tends to gloss over the frequency of such events and the costs involved with their correction. Most often, suspension of execution while the deviation is handled manually or the termination of the entire process instance are the only available options, but since most processes are long and complex, neither option presents a satisfactory solution [9]. Manual handling incurs an added penalty: the corrective actions undertaken are not added to ‘organisational memory’ [10, 11], and so natural process evolution is not incorporated into future iterations of the process. Associated problems include those of migration, synchronisation and version control [4].
These limitations imply that a large subset of business processes do not easily map to the rigid modelling structures provided [12], due to the lack of flexibility inherent in a framework that, by definition, imposes rigidity. This is further supported by our work on process mining. When considering processes where people are expected to execute tasks in a structured way but are not forced by some workflow system, process mining shows that the processes are much more dynamic than expected, that is, people tend to deviate from the “normal flow”, often with good reasons.
Thus, process models are ‘system-centric’, or straight-jacketed [1] into the supplied framework, rather than truly reflecting the way work is actually performed [13]. As a result, users are forced to work outside of the system, and/or constantly revise the static process model, in order to successfully support their activities, thereby negating the efficiency gains sought by implementing a workflow solution in the first place.
Since the mid-nineties many researchers have worked on problems related to workflow flexibility and exception handling (cf. Section 7). This paper is based on and extends the ‘worklets’ approach described in [14] and [15] and applies the classification of workflow exception patterns from [16]. It introduces a realisation of a service that utilises an extensible repertoire of self-contained exception handling processes and associated selection rules, grounded in a formal set of work practice principles called Activity Theory, to support the flexible modelling, analysis, enactment and support of business processes. This approach directly provides for dynamic change and process evolution without having to resort to off-system intervention and/or system downtime. It has been implemented as a discrete service for the well-known, open-source workflow environment YAWL [17, 18] using a Service Oriented Architecture (SOA), and as such its applicability is in no way limited to that environment. Also, being open-source, it is freely available for use and extension.
The paper illustrates aspects of the approach throughout using the organisation of a rock concert as an example process and is organised as follows: Section 2 provides a brief overview of the theoretical underpinnings of the approach. Section 3 provides an overview of the design and operation of the service, while Section 4 details the service architecture. Section 5 discusses exception types handled by the service and the definition of exception handling processes. Section 6 describes how the approach utilises Ripple Down Rules (RDR) to achieve contextual, dynamic selection of handling processes at runtime. Section 7 discusses related work, and finally Section 8 outlines future directions and concludes the paper.
2 Theoretical Framework
In [19], we undertook a detailed study of Activity Theory, a broad collective of theorising and research in organised human activity (cf. [20–22]) and derived from it a set of principles that describe the nature of participation in organisational work practices. Briefly, the principles relevant to this paper are:
1. Activities (i.e. work processes) are hierarchical (consist of one or more actions), communal (involve a community of participants working towards a common objective), contextual (conditions and circumstances deeply affect the way the objective is achieved), dynamic (evolve asynchronously), and mediated (by tools, rules and divisions of labour).
2. Actions (i.e. tasks) are undertaken and understood contextually. A repertoire of applicable actions is maintained and made available to each activity, which is performed by making contextual choices from the repertoire.
3. A work plan is not a prescription of work to be performed, but merely a guide which may be modified during execution depending on context.
4. Deviations from a plan will naturally occur with every execution, giving rise to learning experiences which can then be incorporated into future instantiations of the plan.
Consideration of these principles has delivered a discrete service that transforms otherwise static workflow processes into fully flexible and dynamically extensible process instances by offering full support for realtime handling of both expected and unexpected exceptions, using a Service-Oriented Architecture. The service:
– regards the process model as a guide to an activity’s objective, rather than a prescription for it;
– provides a repertoire (or catalogue) of applicable actions to be made available at each execution of a process model;
– provides for choices to be made dynamically from the repertoire at runtime by considering the specific context of the executing instance; and
– allows the repertoire of actions to be dynamically extended at runtime, thus incorporating unexpected process deviations, not only for the current instance, but for other current and future instantiations of the process model, leading to natural process evolution.
As detailed in the following sections, the service has been implemented directly on top of an Activity Theory framework, and thus provides workflow support for processes from a wide variety of work environments.
3 Worklet Service Description
The Worklet Service (essentially, a worklet is a small, discrete workflow process that acts as a late-bound sub-net for an enabled workitem) comprises two distinct but complementary sub-services: a Selection sub-Service, which enables dynamic flexibility for YAWL process instances [14]; and an Exception sub-Service (the focus of this paper), which provides facilities to handle both expected and unexpected process exceptions (i.e. events and occurrences that may happen during the life of a process instance that are not explicitly modelled within the process) at runtime.
The Selection Service: The Selection Service enables flexibility by allowing a process designer to designate certain workitems to each be substituted at runtime with a dynamically selected worklet, which contextually handles one specific task in a larger, composite process activity. Each worklet instance is dynamically selected and invoked at runtime and may be designed and provided to the Selection Service at any time, as opposed to a static sub-process that must be defined at the same time as, and remains a static part of, the main process model.
An extensible repertoire of worklets is maintained by the Service for each task in a specification. Each time the Service is invoked for a workitem, a choice is made from the repertoire based on the contextual data values within the workitem, using an extensible set of rules to determine the most appropriate substitution.
The workitem is checked out of the workflow enactment engine, the corresponding data inputs of the original workitem are mapped to the inputs of the worklet, and the selected worklet is launched as a separate case. When the worklet has completed, its output data is mapped back to the original workitem, which is then checked back into the engine, allowing the original process to continue.
An extensive discussion of the implementation of the Worklet Selection Service may be found in [14].
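The selection cycle can be summarised as: check the workitem out of the engine, choose a worklet from the repertoire using the item's contextual data and the rule set, launch it as a separate case, then map its output data back and check the item in again. The sketch below is illustrative only; DemoEngine and its method names are placeholders standing in for the engine interfaces, not YAWL's actual API, and the ticket-sales rule and worklet names are hypothetical:

```python
class DemoEngine:
    """Stand-in for the engine interfaces used below (not YAWL's actual API)."""
    def __init__(self):
        self.items = {"book_venue": {"expected_tickets": 4000}}
        self.log = []
    def check_out(self, item_id):            # take the enabled item from the engine
        return dict(self.items[item_id])
    def check_in(self, item_id, data):       # return the item so the parent continues
        self.log.append(("checked in", item_id, data))
    def launch_case(self, spec, inputs):     # a worklet runs as a separate case
        self.log.append(("launched", spec))
        return "case-1"
    def wait_for_completion(self, case_id):
        return {"expected_tickets": 4000, "venue": "TOWN_HALL"}

def handle_enabled_workitem(engine, repertoire, select, item_id):
    """Illustrative selection cycle for one nominated workitem."""
    data = engine.check_out(item_id)
    choice = select(data)                          # contextual choice from the repertoire
    if choice is None:
        engine.check_in(item_id, data)             # no rule fired: run the item normally
        return
    case_id = engine.launch_case(repertoire[choice], inputs=data)
    engine.check_in(item_id, engine.wait_for_completion(case_id))

engine = DemoEngine()
handle_enabled_workitem(
    engine,
    repertoire={"ChangeToSmallVenue": "change_to_small_venue.yawl"},
    select=lambda d: "ChangeToSmallVenue" if d["expected_tickets"] < 5000 else None,
    item_id="book_venue")
print(engine.log)
```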
The Exception Service: Virtually every process instance (even if it follows a highly structured process definition) will experience some kind of exception (or deviation) during its execution. It may be that these events are known to occur in a small number of cases, but not often enough to warrant their inclusion in the process model; or they may be things that were never expected to occur (or may be never even imagined could occur). In any case, when they do happen, since they are not included in the process model, they must be handled ‘off-line’ before processing can continue (and the way they are handled is rarely recorded). In some cases, the process model will be later modified to capture this unforeseen event, which involves an, often large, organisational cost (downtime, remodelling, testing and so on), or in certain circumstances the entire process must be aborted.
Alternately, an attempt might be made to include every possible twist and turn into the process model so that when such events occur, there is a branch in
the process to take care of it. This approach often leads to very complex models where much of the original business logic is obscured, and doesn’t avoid the same problems when the next unexpected exception occurs.
The Exception Service addresses these problems by allowing designers to define exception handling processes (called *exlets*) for parent workflow instances to be invoked when certain events occur and thereby allow the process to continue unhindered. Additionally, exlets for unexpected exceptions may be added at runtime, and such handling methods automatically become an implicit part of the process specification for all current and future instances of the process, which provides for continuous evolution of the process while avoiding the need to modify the original process definition.
The Exception Service uses the same repertoire and dynamic rules approach as the Selection Service. There are, however, two fundamental differences between the two sub-services. First, where the Selection Service selects a worklet as the result of satisfying a rule in a rule set, the result of an Exception Service selection is an exlet (which may contain a worklet to be executed as a compensation process – see Section 5). Second, while the Selection Service is invoked for certain nominated tasks in a YAWL process, the Exception Service, when enabled, is invoked for *every* case and task executed by the YAWL engine, and will detect and handle up to ten different kinds of process exceptions (these exception types are described in Section 5). As part of the exlet, a process designer may choose from various actions (such as cancelling, suspending, completing, failing and restarting) and apply them at a workitem, case and/or specification level. And, since the exlets can include compensatory worklets, the original parent process model only needs to reveal the actual business logic for the process, while the repertoire of exlets grows as new exceptions arise or different ways of handling exceptions are formulated. Table 1 summarises the differences between the two sub-services (the interfaces are described in the next section).
| Cause | Interface | Selection | Action Returned |
|---|---|---|---|
| Workitem Enabled | B | Case & item context data | Worklet |
| Internal Exception | X | Exception type and Case & item context data | Exlet |
| External Exception | – | Exception type and Case & item context data | Exlet |
*Table 1. Summary of Service Actions*
An extensible repertoire of exlets is maintained by the service for each workflow specification, and may be applied at the workitem, case or specification level. Each time the service is notified of an event or checkpoint, the service first determines whether an exception has in fact occurred, and if so makes a choice from the repertoire based on the type of exception and the data attributes and values associated with the workitem/case, using a set of rules to select the most appropriate exlet to execute (see Section 6).
If the exlet contains a compensation action (i.e. a worklet to be executed as a compensatory process) it is run as a separate case in the enactment engine, so that from an engine perspective, the worklet and its ‘parent’ (i.e. the process that invoked the exception) are two distinct, unrelated cases. The service tracks the relationships, data mappings and synchronisations between cases, and maintains a process log that may be combined with the engine’s process logs via case identifiers to provide a complete operational history of each process. Figure 1 shows the relationship between a ‘parent’ process, an exlet repertoire and a compensatory worklet, using the Organise Concert example.

Any number of exlets can form the repertoire of an individual task or case. An exlet may be a member of one or more repertoires – that is, it may be re-used for several distinct tasks or cases within and across process specifications.
The repertoire for a task or case can be added to at any time, as can the rules base used, including while the parent process is executing. Thus the service provides for dynamic ad-hoc change, exception handling and process evolution, without having to resort to off-system intervention and/or system downtime, and avoiding the need to modify the original process specification.
The Selection and Exception sub-services can be used in combination within particular case instances to achieve dynamic flexibility and exception handling simultaneously. The Worklet Service is extremely adaptable and multi-faceted, and allows a designer to provide tailor-made solutions to runtime process exceptions.
4 Service Architecture
The Worklet Service has been implemented as a YAWL Custom Service [17, 18]. The YAWL environment was chosen as the implementation platform since it provides a very powerful and expressive workflow language based on the workflow patterns identified in [23], together with a formal semantics. It also provides a workflow enactment engine, and an editor for process model creation, that support the control flow, data and (basic) resource perspectives. The YAWL environment is open-source and offers a service-oriented architecture, allowing the service to be implemented completely independent to the core engine. Thus the deployment of the Worklet Service is in no way limited to the YAWL environment, but may be ported to other environments (for example, BPEL engines) by making the necessary links in the service interface. As such, this implementation may also be seen as a case study in service-oriented computing whereby dynamic exception handling for workflows, orthogonal to the underlying workflow language, is provided.
Figure 2 shows the external architecture of the Worklet Service. The YAWL system allows workflow instances and external services to interact with each other in order to delegate work, to signal the creation and completion of process instances and workitems, or to notify of certain events or changes in the status of existing workitems and cases. These services interact with the YAWL engine across a number of interfaces designed for particular purposes, supporting the ability to send and receive both messages and XML data to and from the engine. Three interfaces are used by the Worklet Service (see Figure 2):
- Interface A provides endpoints for process definition, administration and monitoring [18] – the service uses Interface A to upload worklet specifications to the engine;
- Interface B provides endpoints for client and invoked applications and workflow interoperability [18] – used by the service for connecting to the engine, to start and cancel case instances, and to check workitems in and out of the engine after interrogating their associated data; and
- Interface X (‘X’ for ‘eXception’), which has been designed to allow the engine to notify custom services of certain events and checkpoints during the
execution of each process instance where process exceptions either may have occurred or should be tested for. Thus Interface X provides the service with the necessary mechanisms to dynamically capture and handle process exceptions.
In fact, Interface X was created to enable the Exception sub-service to be built. However, one of the overriding design objectives was that the interface should be structured for generic application – that is, it can be applied by a variety of services that wish to make use of checkpoint and/or event notifications during process executions.
Since it only makes sense to have one custom service acting as an exception handling service at any one time, services that implement Interface X have two distinct states – enabled and disabled. When enabled, the engine generates notifications for every process instance it executes – that is, the engine makes no decisions about whether a particular process should generate the notifications or not. Thus it is the responsibility of the designer of the custom service to determine how best to deal with the notifications. When the service is disabled, the engine generates no notifications across the interface. Enabling and disabling an Interface X custom service is achieved via parameter setting in a configuration file.

**Fig. 2. External Architecture of the Worklet Service**
The entities ‘Worklet specs’, ‘Rules’ and ‘Logs’ in Figure 2 comprise the worklet repository. The service uses the repository to store rule sets, worklet specifications for uploading to the engine, and generated process and audit logs. The YAWL editor is used to create new worklet specifications, and may be invoked from the Rules Editor, which is used to create new or augment existing
rule sets, making use of certain selection logs to do so, and may communicate with the Worklet Service via a JSP/Servlet interface to override worklet selections following rule set additions (see Section 6). The service also provides servlet pages that allow users to directly communicate with the service to raise external exceptions and carry out administration tasks.
5 Exception Types and Handling Primitives
This section introduces the ten different types of process exception that have been identified, seven of which are supported by the current version of the Worklet Service. It then describes the handling primitives that may be used to form an exception handling process (i.e. an exlet) for the notified exception event. The exception types and primitives described here are based on, and extend, those identified by Russell et al., who define a rigorous classification framework for workflow exception handling independent of specific modelling approaches or technologies [16].
The Exception sub-service maintains a set of rules (described in detail in Section 6) that is used to determine which exlet, if any, to invoke. If there are no rules defined for a certain exception type for a specification, the exception event is simply ignored by the service. Thus rules are needed only for those exception events that are desired to be handled for a particular task and/or specification.
5.1 Exception Types
Constraint Types Constraints are rules that are applied to a workitem or case immediately before and after execution of that workitem or case. Thus, there are four types of constraint exception:
- CasePreConstraint - case-level pre-constraint rules are checked when each case instance begins execution;
- ItemPreConstraint - item-level pre-constraint rules are checked when each workitem in a case becomes enabled (i.e. ready to be checked out);
- ItemPostConstraint - item-level post-constraint rules are checked when each workitem moves to a completed status; and
- CasePostConstraint - case-level post constraint rules are checked when a case completes.
The service receives notification from the YAWL Engine when each of these constraint events occur within each case, then checks the rule set associated with the case to determine, firstly, if there are any rules of that exception type defined for the case, and if so, if any of the rules evaluate to true using the contextual data of the case or workitem. If the rule set finds a rule that evaluates to true for the exception type and data, an associated exlet is selected and invoked.
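That decision can be pictured as a lookup keyed on the exception type and (for item-level types) the task, followed by an evaluation of the associated rules against the current data. The sketch below uses a flat list of condition/exlet pairs purely for illustration; the real service evaluates Ripple Down Rule trees (Section 6), and the task, data and exlet names here are hypothetical:

```python
# Toy rule base: {exception type: {task or "case": [(condition, exlet), ...]}}.
# Conditions are tried in order here; the real service evaluates RDR trees.
rules = {
    "ItemPreConstraint": {
        "BookStadium": [
            (lambda d: d["expected_tickets"] < d["venue_capacity"] * 0.5,
             "SuspendItem + ChangeToSmallerVenue + ContinueItem"),
        ],
    },
}

def on_constraint_event(event_type, task, data):
    """Return the exlet to invoke for this notification, or None to ignore it."""
    for condition, exlet in rules.get(event_type, {}).get(task, []):
        if condition(data):
            return exlet
    return None

print(on_constraint_event("ItemPreConstraint", "BookStadium",
                          {"expected_tickets": 4000, "venue_capacity": 20000}))
# SuspendItem + ChangeToSmallerVenue + ContinueItem
```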
**TimeOut** A timeout event occurs when a workitem is linked to the YAWL Time Service and the deadline set for that workitem is reached. In this case, the YAWL Engine notifies the Worklet Service of the timeout event, and passes to the service a reference to the workitem and each of the other workitems that were running in parallel with it. Therefore, timeout rules may be defined for each of the workitems affected by the timeout (including the actual timed out workitem itself).
**Externally Triggered Types** Externally triggered exceptions occur, not through the case’s data parameters, but because of an occurrence outside of the process instance that has an effect on the continuing execution of the process. Thus, these events are triggered by a user; depending on the actual event and the context of the case or workitem, a particular exlet will be invoked. There are two types of external exceptions, CaseExternalTrigger (for case-level events) and ItemExternalTrigger (for item-level events).
These seven types of exceptions are supported by our current implementation. Three more exception types have been identified but are not yet supported:
**ItemAbort** An ItemAbort event occurs when a workitem being handled by an external program (as opposed to a human user) reports that the program has aborted before completion.
**ResourceUnavailable** This event occurs when an attempt has been made to allocate a workitem to a resource and the resource reports that it is unable to accept the allocation or the allocation cannot proceed.
**ConstraintViolation** This event occurs when a data constraint has been violated for a workitem during its execution (as opposed to pre- or post- execution).
### 5.2 Exception Handling Primitives
When any of the above exception events occur, an appropriate exlet, if defined, will be invoked. Each exlet may contain any number of steps, or *primitives*, and is defined graphically using the Worklet Rules Editor.
An example of a definition of an exlet in the Rules Editor can be seen in Figure 3. On the left of the graphical editor is the set of primitives that may be used. The available primitives (reading left-to-right, top-to-bottom) are:
- **Remove WorkItem**: removes (or cancels) the workitem; execution ends, and the workitem is marked with a status of cancelled. No further execution occurs on the process path that contains the workitem.
- **Remove Case**: removes the case. Case execution ends.
- **Remove All Cases**: removes all case instances for the specification in which the workitem is defined, or of which the case is an instance.
Fig. 3. Example Handler Process in the Rules Editor
- **Suspend WorkItem**: suspends (or pauses) execution of a workitem, until it is continued, restarted, cancelled, failed or completed, or the case that contains the workitem is cancelled or completed.
- **Suspend Case**: suspends all 'live' workitems in the current case instance (a live workitem has a status of fired, enabled or executing), effectively suspending execution of the entire case.
- **Suspend All Cases**: suspends all 'live' workitems in all of the currently executing instances of the specification in which the workitem is defined, effectively suspending all running cases of the specification.
- **Continue WorkItem**: un-suspends (or continues) execution of the previously suspended workitem.
- **Continue Case**: un-suspends execution of all previously suspended workitems for the case, effectively continuing case execution.
- **Continue All Cases**: un-suspends execution of all workitems previously suspended for all cases of the specification in which the workitem is defined or of which the case is an instance, effectively continuing all previously suspended cases of the specification.
- **Restart WorkItem**: rewinds workitem execution back to its start. Resets the workitem's data values to those it had when it began execution.
- **Force Complete WorkItem**: completes a 'live' workitem. Execution of the workitem ends, and the workitem is marked with a status of ForcedComplete, which is regarded as a successful completion, rather than a cancellation or failure. Execution proceeds to the next workitem on the process path.
- **Force Fail WorkItem**: fails a 'live' workitem. Execution of the workitem ends, and the workitem is marked with a status of Failed, which is regarded as an unsuccessful completion, but not as a cancellation – execution proceeds to the next workitem on the process path.
- **Compensate**: runs a compensatory process (i.e. a worklet). Depending on previous primitives, the worklet may execute simultaneously to the parent case, or execute while the parent is suspended.
Worklets can in turn invoke child worklets to any depth. The primitives ‘Suspend All Cases’, ‘Continue All Cases’ and ‘Remove All Cases’ may be edited when being added to an exlet definition in the Rules Editor so that their action is restricted to ancestor cases only. Ancestor cases are those in a hierarchy of worklets back to the original parent case – that is, where a process invokes an exlet which invokes a compensatory worklet which in turn invokes an exlet, and so on. Also, the ‘continue’ primitives are applied only to those workitems and cases that were previously suspended by the same exlet.
A compensation primitive may contain an array of one or more worklets – when multiple worklets are defined for a compensation primitive via the Rules Editor, they are launched concurrently as a composite compensatory action when the exlet is executed. Execution moves to the next primitive in the exlet when all worklets have completed.
In the same manner as the Selection sub-service, the Exception sub-service also supports data mapping from a case to a compensatory worklet and back again. For example, if a certain variable has a value that prevents a case instance from continuing, a worklet can be run as a compensation, during which a new value can be assigned to the variable and that new value mapped back to the parent case, so that it may continue execution.
Referring back to Figure 1, the centre tier shows the exlets defined for ItemPreConstraint violations. As mentioned above, there may actually be up to eleven different members of this tier. Also, each exlet may refer to a different set of compensatory processes, or worklets, and so at any point there may be several worklets operating on the upper tier.
**Rollback:** A further primitive identified by Russell et al. is ‘Rollback’ [16], where the execution of the process may be unwound back to a specified point and all changes to the case’s data from that point forward are undone. The term ‘rollback’ is taken from database processing, where it serves the essential purpose of reverting the database to a previous stable state if, for some reason, a problem occurs during an update. Thus, rollback certainly applies in terms of workflow systems at the transactional level. However, for this implementation we considered that a rollback action serves no real purpose at the control-flow level and so has not been included. For tasks that have already completed, erasing the outcomes of those tasks as if they had never been carried out is counterproductive; better to execute a compensation exlet that corrects the problem so that both the original and corrective actions are maintained – that is, a redo is more appropriate than an undo at the control-flow level. In so doing, a complete picture of the entire process is available. There is enough flexibility inherent in the primitives above to accommodate any kind of compensatory action. For example, if a loan is approved before it becomes evident that an error of judgement has been made by the approving officer, it is better to run some compensation to redo the approval process again (so that a record of both approval processes remains), rather than roll back the approval process, and thus lose the details of the original approval.
6 Contextual Selection of Exlets
The runtime selection of an appropriate exlet relies on the type of exception that has occurred and the relevant context of each case instance, derived from case and historical data. The selection process is achieved through the use of modified Ripple Down Rules (RDR), which comprise a hierarchical set of rules with associated exceptions, first devised by Compton and Jansen [24]. The fundamental feature of RDR is that it avoids the difficulties inherent in attempting to compile, a-priori, a systematic understanding, organisation and assembly of all knowledge in a particular domain. Instead, it allows for general rules to be defined first with refinements added later as the need arises [25].
Any specification may have an associated rule set, which consists of a collection of RDR trees stored as XML data. Each RDR tree is a collection of simple rules of the form “if condition then conclusion”, conceptually arranged in a binary tree structure (see Fig. 4). When a rule tree is queried, it is traversed from the root node of the tree along the branches, each node having its condition evaluated along the way. If a node’s condition evaluates to True, and it has a true child (that is, it has a child node connected on a True branch), then that child node’s condition is also evaluated. If a node’s condition evaluates to False, and there is a false child, then that child node’s condition is evaluated [26]. When a terminal node is reached, if its condition evaluates to True then that conclusion is returned as the result of the tree traversal; if it evaluates to False, then the last node in the traversal that evaluated to True is returned as the result.
Effectively, each rule node on the true branch of its parent node is an exception rule of the more general one of its parent (that is, it is a refinement of the parent rule), while each rule node on the false branch of its parent node is an “else” rule to its parent (or an alternate to the parent rule). This tree traversal provides implied locality - a rule on an exception branch is tested for applicability only if its parent (next-general) rule is also applicable.
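To make the traversal concrete, here is a minimal sketch in C. It is illustrative only: the actual service is implemented in Java and stores its rule sets as XML, so the structure and function names below are assumptions.

```c
#include <stdbool.h>
#include <stddef.h>

/* Illustrative sketch of a ripple-down rule node; names are assumptions. */
typedef struct RdrNode {
    bool (*condition)(const void *case_data);  /* the "if" part              */
    const char *conclusion;                    /* exlet selected if this rule fires */
    struct RdrNode *true_child;                /* exception (refinement) branch      */
    struct RdrNode *false_child;               /* "else" (alternative) branch        */
} RdrNode;

/* Traverse from the root; return the conclusion of the last node whose
   condition evaluated to true (NULL if even the root is not satisfied). */
const char *rdr_evaluate(const RdrNode *node, const void *case_data) {
    const char *last_true = NULL;
    while (node != NULL) {
        if (node->condition(case_data)) {
            last_true = node->conclusion;
            node = node->true_child;    /* try a more specific refinement */
        } else {
            node = node->false_child;   /* try an alternative rule        */
        }
    }
    return last_true;
}
```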
The hierarchy of a worklet rule set is (from the bottom up):
- **Rule Node**: contains the details (condition, conclusion, id, parent and so on) of one discrete ripple-down rule.
- **Rule Tree**: consists of a number of rule nodes conceptually linked in a binary tree structure.
- **Tree Set**: a set of one or more rule trees. Each tree set is specific to a particular rule type (Timeout, ExternalTrigger, etc.). The tree set of a case-level exception rule type will contain exactly one tree. The tree set of an item-level rule type will contain one rule tree for each task of the specification that has rules defined for it (not all tasks in the specification need to have a rule tree defined).
- **Rule Set**: a set of one or more tree sets representing the entire set of rules defined for a specification. Each rule set is specific to a particular specification. A rule set will contain one tree set for each rule type for which rules have been defined.
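The same hierarchy can be pictured as nested containers. The C structures below are a hypothetical rendering for illustration only; the service itself persists the rule set as XML.

```c
#include <stddef.h>

typedef struct RdrNode RdrNode;   /* one ripple-down rule, as sketched above */

typedef struct {                  /* Rule Tree: one binary tree of rule nodes   */
    const char *task_id;          /* task it applies to (item-level types only) */
    RdrNode    *root;
} RuleTree;

typedef struct {                  /* Tree Set: all trees for one rule type        */
    int       rule_type;          /* Selection, Timeout, ItemPreConstraint, ...   */
    RuleTree *trees;              /* exactly one tree for case-level types,       */
    size_t    tree_count;         /* one per task with rules for item-level types */
} TreeSet;

typedef struct {                  /* Rule Set: everything defined for one specification */
    const char *specification_id;
    TreeSet    *tree_sets;        /* at most eleven: selection + ten exception types    */
    size_t      tree_set_count;
} RuleSet;
```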
Each specification has a unique rule set (if any), which contains between one and eleven tree sets (or sets of rule trees), one for selection rules (used by the Selection sub-service) and one for each of the ten exception types. Three of those ten relate to case-level exceptions (i.e. CasePreConstraint, CasePostConstraint and CaseExternalTrigger) and so each of these will have at most one rule tree in the tree set. The other eight tree sets relate to workitem-level events (seven exception types plus selection), and so may have one rule tree for each task in the specification - that is, the tree sets for these eight rule types may consist of a number of rule trees.
It is not necessary to define rules for all eleven types for each specification, only for those types that are required to be handled; the occurrence of any exception types that aren’t defined in the rule set file is simply ignored. So, for example, if an analyst is interested only in capturing pre- and post-constraints at the workitem level, then only the ItemPreConstraint and ItemPostConstraint tree sets need to be defined (i.e. rules defined within those tree sets). Of course, rules for other event types can be added later if required.
Figure 4 shows the ItemPreConstraint rule tree for the Organise Concert example, which represents the rule tree for the exlets shown on the centre tier of Figure 1. The condition part is the rule that is evaluated, and the conclusion is the exlet selected by that rule if the condition evaluates to true.
The third task in the OrganiseConcert specification, Do Show, has a pre-item constraint rule tree (refer Fig. 1), and so when a workitem of the task becomes enabled (and thus the engine notifies the service), the rule tree is queried. The Rules Editor provides a textual representation of the relevant rule tree (called the effective composite rule), as can be seen in Figure 5.
if TicketsSold ≤ (Seating * 0.75) then suspend workitem; run worklet ChangeToMidVenue; continue workitem

  except if TicketsSold ≤ (Seating * 0.5) then suspend workitem; run worklet ChangeToSmallVenue; continue workitem

end if
Fig. 5. Effective Composite Rule for Do Show’s Pre-Item Constraint Tree
When *Do Show* is enabled and the value of the case data attribute ‘TicketsSold’ is less than 75% of the attribute ‘Seating’ (i.e. the seating capacity of the venue), an exlet is run that suspends the workitem, runs the compensatory worklet ChangeToMidVenue, and then, once the worklet has completed, continues (or unsuspends) the workitem. That is, this pre-constraint exception allows organisers to change the venue of the concert to a mid-sized stadium when there are insufficient tickets sold to fill the original venue. Following the structure of the ripple-down rule, if the tickets sold are also less than 50% of the capacity, then we want instead to suspend the workitem, run the ChangeToSmallVenue worklet, and then unsuspend the workitem. Finally, if less than 20% of the tickets have been sold, we want to suspend the entire case, run a worklet to perform the tasks required to cancel the show, and then remove (i.e. cancel) the case.
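As a concrete illustration of this rule chain, the nested conditions can be evaluated directly; the numbers below are assumed for the sketch (a 10,000-seat venue with 6,000 tickets sold, i.e. 60%), not taken from the actual specification.

```c
#include <stdio.h>

int main(void) {
    double seating = 10000, tickets_sold = 6000;           /* assumed values */
    const char *exlet = "no exception";                    /* default        */

    if (tickets_sold <= seating * 0.75) {                  /* root rule      */
        exlet = "suspend workitem; run ChangeToMidVenue; continue workitem";
        if (tickets_sold <= seating * 0.5)                 /* refinement     */
            exlet = "suspend workitem; run ChangeToSmallVenue; continue workitem";
        /* a further refinement at 20% would suspend and then cancel the case */
    }
    printf("%s\n", exlet);   /* at 60% sold, the ChangeToMidVenue exlet is selected */
    return 0;
}
```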
The effects of a scenario where 60% of tickets have been sold can be seen in the Available Work screen of the YAWL worklist handler (Figure 6). The *Do Show* workitem is marked as ‘Suspended’ and thus is unable to be selected for execution, while the ChangeToMidVenue worklet has been launched and its first workitem, *Cancel Stadium*, is enabled and may be executed. The ChangeToMidVenue worklet is being treated by the YAWL Engine as just another case, and so the service receives notifications from the engine for pre-case and pre-item constraint events for the worklet also – thus the worklet may also respond to its own exception notifications.
When the ChangeToMidVenue worklet has completed, the engine will notify the service of the case completion, at which time the service completes the third and final part of the exlet, that is continuing (unsuspending) the *Do Show* workitem so that the parent case can continue. Back at the Available Work screen, the *Do Show* workitem will now be shown as enabled and thus will be able to be checked out, and will contain the data values entered in the worklet’s workitems mapped back to the *Do Show* workitem – that is, the changes to venue and capacity data values captured by the worklet are now found in *Do Show*’s input data.
As mentioned previously, the service also allows for external events to be handled on-system by providing a means for exceptions to be raised by users external to the process itself. The service provides a set of servlet pages that can be invoked directly by the user via add-ins to the YAWL worklist handler, which are visible only when the service is enabled. One of the servlet pages allows a user to raise an exception directly with the service (i.e. bypassing the engine). When invoked, the Exception Service retrieves from the rule set for the selected case the list of existing external exception triggers (if any) for the case’s specification. See Figure 7 for the **Raise Case-Level Exception** page listing the case-level external triggers defined for the Organise Concert specification. Note that these triggers describe events that may be considered either adverse (e.g. Band Broken Up) or beneficial (e.g. Ticket Sales Better Than Expected) to the current case, or may simply represent new or additional tasks that need to be carried out for the particular case instance (e.g. Band Requests Backstage Refreshments). In any case, the methods for handling these kinds of exceptions become an implicit part of the process for all future instantiations, rather than being lost.
This list contains all of the external triggers either conceived when the specification was first designed or added later as new kinds of exceptional events occurred and were added to the rule set for the specification. When a trigger is selected by the user, the conclusion for that trigger’s rule is invoked by the service as an exlet for the current case.
Item-level external exceptions can be raised in a similar way. External exceptions can be raised at any time during the execution of a case - the way they are handled may depend on how far the process has progressed (via the definition of an appropriate rule tree or trees that consider, as part of the rule conditionals, the status of the case and/or its workitems).
Notice that at the bottom of the list (Figure 7) the option to add a New External Exception is provided. If an unexpected external exception arises that none of the available triggers represent, a user can use that option to notify an administrator of the new exception, its context and possible ways to handle it. The administrator can then create a new exlet in the Rules Editor and, from the Editor, connect directly to the service to launch the new exlet for the parent case.
New exlets for unexpected internal exceptions are raised and launched using the same approach as that described for the Selection sub-service, as detailed in [14].
7 Related Work
Since the mid-nineties much research has been carried out on issues related to exception handling in workflow management systems. While it is not the intention of this paper to provide a complete overview of the work done in this area, reference is made here to a number of quite different approaches; see [16] for a more systematic overview, where different tools are evaluated with respect to their exception handling capabilities using a patterns-based approach.
Generally, commercial workflow management systems provide only basic support for handling exceptions [9, 27] (besides modelling them directly in the main ‘business logic’), and each deals with them in a proprietary manner: they typically require the model to be fully defined before it can be instantiated, and changes must be incorporated by modifying the model statically. Staffware provides constructs called event nodes, from which a separate pre-defined exception handling path or sequence can be activated when an exception occurs. It may also suspend a process either indefinitely or until a timeout occurs. If a work item cannot be processed it is forwarded to a ‘default exception queue’ where it may be manually purged or re-submitted. COSA provides for the definition of external ‘triggers’ or events that may be used to start a sub-process. All events and sub-processes must be defined at design time. MQ Workflow supports timeouts and, when they occur, will branch to a pre-defined exception path and/or send a message to an administrator. SAP R/3 provides for pre-defined branches which, when an exception occurs, allow an administrator to manually choose one of a set of possible branches.
Among the non-commercial systems, the OPERA prototype [9] incorporates language constructs for exception handling and allows for exceptions to be handled at the task level, or propagated up various ancestor levels throughout the running instance. It also removes the need to define the exception handler a-priori, although the types of exceptions handled are transactional rather than control flow oriented. The eFlow system [28] uses rules to define exceptions, although they cannot be defined separately to the standard model. ADEPT [29] supports modification of a process during execution (i.e. add, delete and change the sequence of tasks) both at the type (dynamic evolution) and instance levels (ad-hoc changes). Such changes are made to a traditional monolithic model and must be achieved via manual intervention. The ADOME system [30] provides templates that can be used to build a workflow model, and provides some support for (manual) dynamic change. A catalog of ‘skeleton’ patterns that can be instantiated or specialised at design time is supported by the WERDE system [5]. Again, there is no scope for specialisation changes to be made at runtime. AgentWork [31] provides the ability to modify process instances by dropping and adding individual tasks based on events and ECA rules. However, the rules do not offer the flexibility or extensibility of Ripple Down Rules, and changes are limited to individual tasks, rather than the task-process-specification hierarchy supported by the Worklet Service. Also, the possibility exists for conflicting rules to generate incompatible actions, which requires manual intervention and resolution.
It should be noted that only a small number of academic prototypes have had any impact on the frameworks offered by commercial systems [32]. Nevertheless, there are some interesting commercial products that offer innovative features with respect to handling exceptions, for example FLOWER supports the concept of case-handling; the process model only describes the preferred way of doing things and a variety of mechanisms are offered to allow users to deviate in a controlled manner [1].
The implementation discussed in this paper differs considerably from the above approaches. Exlets, which may include worklets as compensatory processes, dynamically linked to extensible Ripple Down Rules, provide a novel alternative method for the provision of dynamic flexibility and exception handling in workflows.
8 Conclusion and Future Work
Workflow management systems impose a certain rigidity on process definition and enactment because they generally use frameworks based on assembly line metaphors rather than on the ways work is actually planned and carried out. An analysis of Activity Theory provided principles of work practices that were used as a template on which a workflow service has been built that better supports flexibility and dynamic evolution through innovative exception handling techniques. By capturing contextual data, a repertoire of actions is constructed that allows for contextual choices to be made from the repertoire at runtime to efficiently carry out work tasks. These actions, whether exlets or worklets, directly provide for process evolution, flexibility and dynamic exception handling, and mirror accepted work practices.
This implementation presents several key benefits, including:
- A process modeller can describe the standard activities and actions for a workflow process, and any deviations, using the same methodology;
- It allows re-use of existing process components and aids in the development of fault tolerant workflows using pre-existing building blocks [7];
- Its modularity simplifies the logic and verification of the standard model, since individual worklets are less complex to build and therefore easier to verify than monolithic models;
- It provides for a variety of workflow views of differing granularity, which offers ease of comprehensibility for all stakeholders;
- It allows for gradual and ongoing evolution of the model, so that global modification each time a business practice changes or a deviation occurs is unnecessary; and
- In the occurrence of an unexpected event, the process modeller needs simply to choose an existing exlet or build a new one for that event, which can be automatically added to the repertoire for current and future use as necessary, thus avoiding manifold complexities including downtime, model restructuring, versioning problems and so on.
This implementation uses the open-source, service-oriented architecture of YAWL to develop a service for dynamic exception handling that is completely independent of the core engine. Thus, the implementation may be viewed as a successful case study in service-oriented computing. As such, the approach and resultant software can also be used in the context of other process engines (for example BPEL-based systems, classical workflow systems, and the Windows Workflow Foundation). One of the more interesting things to be incorporated in future work is the application of process mining techniques to the various logs collected by the Worklet service; a better understanding of when and why people tend to “deviate” from a work plan is essential for providing better tool support. Archival and resource data will also be useful for refining the contextual choices defined in the rule set.
All system files, source code and documentation for YAWL and the worklet service, including the examples discussed in this paper, may be downloaded via www.yawl-system.com.
References
Advances in Parallel-Stage Decoupled Software Pipelining
Feng Li, Pop Antoniu, Albert Cohen
To cite this version:
Feng Li, Pop Antoniu, Albert Cohen. Advances in Parallel-Stage Decoupled Software Pipelining, WIR, Apr 2011, France. <hal-00870687>
HAL Id: hal-00870687
https://hal.archives-ouvertes.fr/hal-00870687
Submitted on 7 Oct 2013
HAL is a multi-disciplinary open access archive for the deposit and dissemination of scientific research documents, whether they are published or not. The documents may come from teaching and research institutions in France or abroad, or from public or private research centers.
Advances in Parallel-Stage Decoupled Software Pipelining
Leveraging Loop Distribution, Stream-Computing and the SSA Form
Feng Li
INRIA
feng.li@inria.fr
Antoniu Pop
Centre de recherche en informatique, MINES ParisTech
antoni.pop@mines-paristech.fr
Albert Cohen
INRIA
albert.cohen@inria.fr
Abstract
Decoupled Software Pipelining (DSWP) is a program partitioning method enabling compilers to extract pipeline parallelism from sequential programs. Parallel Stage DSWP (PS-DSWP) is an extension that also exploits the data parallelism within pipeline filters.
This paper presents the preliminary design of a new PS-DSWP method capable of handling arbitrary structured control flow, a slightly better algorithmic complexity, the natural exploitation of nested parallelism with communications across arbitrary levels, with a seamless integration with data-flow parallel programming environments. It is inspired by loop-distribution and supports nested/structured partitioning along with the hierarchy of control dependences. The method relies on a data-flow streaming extension of OpenMP.
These advances are made possible thanks to progresses in compiler intermediate representation. We describe our usage of the Static Single Assignment (SSA) form, how we extend it to the context of concurrent streaming tasks, and we discuss the benefits and challenges for PS-DSWP.
Categories and Subject Descriptors D.3.4 [Programming Languages]: Processors-Compilers, Optimization
General Terms optimization
Keywords automatic parallelization, stream-computing, loop distribution
1. Introduction
In recent years, CPU manufacturers have embraced chip multiprocessors because of technology, power consumption and thermal dissipation constraints, and because of diminishing returns in instruction-level parallelism. The amount of performance gained by the use of a multicore processor depends highly on the fraction of the software that can be parallelized to run on multiple cores simultaneously. Multiprocessor programming leaves the burden to the programmer, who faces the extra complexity, heisenbugs, deadlocks and other problems associated with parallel programming. The situation is worse when dealing with the migration of legacy code.
Decoupled Software Pipelining (DSWP) is an automatic thread partitioning method that can partition a sequential program to run on multiple cores, and Parallel-Stage DSWP (PS-DSWP) additionally exposes data parallelism within the task pipelines extracted by DSWP. These automatic thread partitioning methods free the programmer from manual parallelization. They also promise much wider flexibility than data-parallelism-centric methods for processors, aiming for the effective parallelization of general-purpose applications.
In this paper, we provide another method to decouple control-flow regions of serial programs into concurrent tasks, exposing pipeline and data parallelism. The power and simplicity of the method rely on the restriction that all streams should retain a synchronous semantics [8]. It amounts to checking the sufficient condition that the source and target of any decoupled dependence are control-dependent on the same node in the control dependence tree (this assumes structured control flow). This restriction may appear as a severe one for experienced parallel programmers; but at the potential expense of adding extra levels of nested parallelism, it does not restrict the degree of pipeline parallelism. In fact, any pair of computational statements can be decoupled and assigned to different concurrent tasks. The partitioning algorithms also handle DOALL parallelization within task pipelines, and arbitrarily nested data-parallel pipelines following the control dependence tree of a structured control flow graph. Unlike existing DSWP algorithms, our method does not explicitly copy conditional expressions and can handle arbitrary backward data and control dependences.
We are using two intermediate representations.
- A conventional SSA-based representation, annotated with natural loop and control dependence trees (for structured control flow).
- And a streaming data-flow extension of the latter representation as a backend for our partitioning algorithm, still in SSA form but with explicit task boundaries (for single-entry single-exit regions) and multi-producer multi-consumer streams to communicate across tasks.
The backend representation streamlines the decoupling of multi-producer multi-consumer data flow through explicit, compiler-controlled sampling and merging stages. Multi-producer multi-consumer semantics is absolutely essential to handle general decoupling patterns where data-parallel stages feature an unbalance in the number of worker threads. Sampling is handled transparently by nesting tasks into enclosing control flow. Merging is captured by \( \Phi \) functions at task boundaries, introducing a minor variant of the SSA form satisfying the so-called task-closed property that multiple incoming flows targeting the same use in a given task should be explicitly merged by a dedicated \( \Phi \) function at the task entry point.
Relying on SSA avoids building the complete program dependence graph; with the exception of the array dependence graph, our method only processes linear-size data structures, as opposed to the worst-case quadratic program dependence graph in DSWP.
2. Related Work
The most closely related work to this paper is decoupled software pipelining and loop distribution. We recall the state-of-the-art in both and present the original finding at the source of this work: by extending loop distribution with pipelining and asserting a synchronous concurrency hypothesis, arbitrary data and control dependences can be decoupled very naturally with only minor changes to existing algorithms that have been proposed for loop distribution [10].
2.1 Decoupled software pipelining
Decoupled Software Pipelining (DSWP) [13] is one approach to automatically extract threads from loops. It partitions loops into long-running threads that communicate via inter-core queues. DSWP builds a Program Dependence Graph (PDG) [7], combining control and data dependences (scalar and memory). Then DSWP
introduces a load-balancing heuristic to partition the graph according to the number of cores, making sure no recurrence spans across multiple partitions. In contrast to DOALL and DOACROSS [4] methods, which partition the iteration space into threads, DSWP partitions the loop body into several stages connected by pipelining to achieve parallelism. It exposes parallelism in cases where DOACROSS is limited by loop-carried dependences on the critical path. Generally speaking, DSWP partitioning algorithms handle uncounted loops, complex control flow and irregular pointer-based memory accesses.
Parallel-Stage Decoupled Software Pipelining [16] (PS-DSWP) is an extension to combine pipeline parallelism with some stages executed in a DOALL, data-parallel fashion. For example, when there are no dependences between loop iterations of a DSWP stage, the incoming data can be distributed over multiple data-parallel worker threads dedicated to this stage, while the outgoing data can be merged to proceed with downstream pipeline stages.
These techniques have a few caveats however. They offer limited support for decoupling along backward control and data dependences. They provide a complex code generation method to decouple dependences among source and target statements governed by different control flow, but despite its complexity, this method remains somewhat conservative.
By building the PDG, DSWP also incurs a higher algorithmic complexity than typical SSA-based optimizations. Indeed, although traditional loop pipelining for ILP focuses on innermost loops of limited size, DSWP is aimed at processing large control flow graphs after aggressive inter-procedural analysis optimizations. In addition, the loops in DSWP are handled by the standard algorithm as ordinary control flow, missing potential benefits of treating them as a special case. To address these caveats, we turned our analysis to the state of the art in loop distribution.
2.2 Loop distribution
Loop distribution is a fundamental transformation in program restructuring systems designed to extract data parallelism for vector or SIMD architectures [10].
In its simplest form, loop distribution consists of breaking up a single loop into two or more consecutive loops. When aligning loop distribution to the strongly connected components of the data-dependence graph, one or more of the resulting loops expose iterations that can be run in parallel, exposing data parallelism. Barriers are inserted after the parallel loops to enforce precedence constraints with the rest of the program. An example is presented in Figure 1.
```
for (i = 1; i < N; i++) {
S1 A[i] = B[i] + 1;
S2 C[i] = A[i-1] + 1;
}
```
Figure 1. Barriers inserted after loop distribution.
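The distributed form of the loop (the right-hand side of the original figure) is not reproduced above; assuming OpenMP work-sharing loops, it would look roughly as follows, with the implicit barrier at the end of the first loop playing the role of the inserted barrier:

```c
/* Sketch of the distributed loop (assumed form): each loop is now DOALL,
 * and the implicit barrier at the end of the first "omp for" enforces the
 * precedence constraint carried by array A. */
void distributed(int N, double *A, const double *B, double *C) {
    #pragma omp parallel
    {
        #pragma omp for
        for (int i = 1; i < N; i++)
            A[i] = B[i] + 1;          /* S1 */
        /* implicit barrier here */
        #pragma omp for
        for (int i = 1; i < N; i++)
            C[i] = A[i - 1] + 1;      /* S2 */
    }
}
```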
3. OpenMP Extension for Stream-Computing as a Code Generation Target
A recently proposed stream-computing extension to OpenMP [14] allows the expression of pipeline parallelism by making explicit the flow dependences, or producer-consumer patterns, between OpenMP tasks. It provides a simple way for explicitly building dynamic task graphs, where tasks are connected through streams that transparently privatize the data.
The extension consists of two additional clauses, input and output to the task construct, that define the producer-consumer relationships between tasks. The OpenMP language, with this extension, is a natural fit as a target for our code generation. It provides for dynamic task creation and connection in the task graph, it handles arbitrary nesting of pipelined tasks in control-flow, and it allows the hierarchical nesting of tasks.
The task construct is extended with input and output clauses as presented on Figure 2. Both clauses take a list of items, each of which describes a stream and its behavior w.r.t. the task to which the clause applies. In the abbreviated item form, stream, the stream can only be accessed one element at a time through the same variable s. In the second form, stream >> window, the programmer uses the C++-flavoured << and >> stream operators to connect a sliding window to a stream, gaining access, within the body of the task, to horizon elements in the stream.
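As an illustration, the two item forms just described might be written as follows; this is a sketch reconstructed from the prose (the extension is not standard OpenMP, and its exact window syntax is not reproduced here):

```c
#include <stdio.h>

void pipeline_sketch(void) {
    int x;                /* abbreviated form: accessed one element at a time    */
    int s, win[2];        /* windowed form: sliding window 'win' over stream 's' */

    #pragma omp task output (x)        /* producer stage writes to stream x */
    { x = 42; }

    #pragma omp task input (x)         /* consumer stage reads one element  */
    { printf("%d\n", x); }

    #pragma omp task input (s >> win)  /* window exposes several elements   */
    { printf("%d %d\n", win[0], win[1]); }
}
```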
One of the main issues that needs to be addressed in order to distribute a PDG to the OpenMP stream-computing extension is that, in the latter, the data flow bypasses the control flow. In other words, when a task produces values on an output stream, these values will all reach the consumers of the stream, even if, in the serial semantics, the values would have been overwritten before reaching the consumers. This means that the only case where a direct annotation scheme would work is if all tasks are in the same control flow. There are multiple ways this issue can be handled, the most systematic one being to always ensure that every producer-consumer pair share the same control dependence.
This is achieved by sinking all control flow surrounding the tasks, and not shared by both producer and consumer, in the tasks. To avoid the loss of parallelization opportunities, each task’s body can be further partitioned into nested pipelines.
The GCC implementation of the OpenMP extension for stream computing has been shown to be efficient to exploit mixed pipeline- and data-parallelism, even in dynamic task graphs [14]. It relies on compiler and runtime optimizations to improve cache locality and relies on a highly efficient lock-free and atomic operation-free synchronization algorithm for streams.
4. Observations
It is quite intuitive that the typical synchronization barriers in between distributed data-parallel loops can be weakened, resulting into data-parallel pipelines. We aim to provide a comprehensive treatment of this transformation, generalizing PS-DSWP in the process.
4.1 Replacing loops and barriers with a task pipeline
In the previous example, we can remove the barriers between the two distributed loops and introduce pipelining, so that the two loops can run in parallel.
```
/* Initialize the stream, inserting a delay. */
void INIT_STREAM() {
    produce(stream, A[0]);
}

/* Decoupled producer thread. */
for (i = 1; i < N; i++) {
S1  A[i] = B[i] + 1;
    produce(stream, A[i]);
}
```
Figure 3. Pipelining inserted between distributed loops. Initialize the stream (left), producer and consumer thread (right).
Figure 3 shows that pipelined execution is possible: the INIT_STREAM function inserts one delay into a communication stream; the produce/consume primitives implement a FIFO, enforcing the precedence constraint of the data dependence on array A and communicating the value in case the hardware needs this information.
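The consumer thread from the right-hand side of Figure 3 is not reproduced in the extracted text; reconstructed from Figure 1's statement S2, it would look roughly like this:

```
/* Decoupled consumer thread (sketch): at iteration i it consumes the value
   produced one step earlier, i.e. A[i-1], thanks to the initial delay. */
for (i = 1; i < N; i++) {
S2  C[i] = consume(stream) + 1;
}
```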
When distributing loops, scalar and array expansion (privatization) is generally required to eliminate memory-based dependences. The conversion to a task pipeline avoids this complication through the usage of communication streams. This transformation can be seen as an optimized version of scalar/array expansion in bounded memory and with improved locality [15].
4.2 Extending loop distribution to PS-DSWP
The similarity between DSWP and distributed loops with data-parallel pipelines is striking. First, both of them partition the loop into multiple threads. Second, both of them avoid partitioning the loop iteration space: they partition the instructions of the loop body instead. But four arguments push in favor of refining DSWP in terms of loop distribution.
1. Loop distribution leverages the natural loop structure, where the granularity of thread partitioning can be easily controlled. Moreover, it is useful to have a loop control node to which to attach information about the iteration of the loop, including closed forms of induction variables; this node can also be used to represent the loop in additional transformations.
2. Using a combination of loop distribution and fusion, then replacing barriers with pipelining leads to an incremental path in compiler construction. This path leverages existing intermediate representations and loop nest optimizers, while DSWP relies on new algorithms and a program dependence graph.
3. Considering the handling of control dependences, a robust and general algorithm already exists for loop distribution. McKinley and Kennedy’s technique handles arbitrary control flow [10] and provides a comprehensive solution. The same methods could be applied for DSWP, transforming control dependences into data dependences and storing boolean predicates into streams. After restructuring the code and updating the control dependence graph and data dependence graph, the code generation algorithm for PDGs [2, 5, 6] can be used to generate parallel code. This solution would handle all cases where the current DSWP algorithm fails to clone a control condition.
4. Since loop distribution does not partition the iteration space, it can also be applied to uncounted loops. Unfortunately, the termination condition needs to be propagated to downstream loops. This problem disappears through the usage of a conventional communication stream when building task pipelines.
From this high-level analysis, it appears possible to extend loop distribution with pipelining to implement PS-DSWP and handle arbitrary control dependences. Yet the method still seems rather complex, especially the if-conversion of control dependences and the code generation step from the PDG. We go one step further and propose a new algorithm adapted from loop distribution but avoiding these complexities.
4.3 Motivating example
Our method makes one more assumption to reduce complexity and limit risks of overhead. It amounts to enforcing the synchronous hypothesis on all communicating tasks in the partition [8]. A sufficient condition is to check that the source and target of any decoupled dependence are control-dependent on the same node.
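A minimal sketch of this check, assuming each statement records the node it is control-dependent on (names are illustrative):

```c
#include <stdbool.h>

/* A node of the control dependence tree. */
typedef struct CDNode {
    struct CDNode *cdep_parent;   /* node this statement is control-dependent on */
} CDNode;

/* Sufficient condition from the text: a dependence src -> dst may be
   decoupled into a stream only if both ends share the same control parent. */
static bool may_decouple(const CDNode *src, const CDNode *dst) {
    return src->cdep_parent == dst->cdep_parent;
}
```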
Consider the example in Figure 4. S1 and S7 implement the loop control condition and induction variable, respectively. S2, S3 and S6 are control dependent on S1. S3 is a conditional node; S4, S5 and L1 are control dependent on it. In the inner loop, L2 and L3 are control dependent on L1. When we apply DSWP to the outer loop, the control dependences originating from S1 must be if-converted by creating several streams (the number of streams depends on the number of partitions). When decoupling along the control dependence originating from S3, a copy of the conditional node must be created as well as another stream.
```plaintext
S1 while (p != NULL) {
S2 x = p->value;
S3 if(c1) {
S4 x = p->value/2;
S5 ip = p->inner_loop;
L1 while (ip) {
L2 do_something(ip);
L3 ip = ip->next;
}
}
S6 ... = x;
S7 p = p->next;
}
```
Figure 4. Uncounted nested loop before partitioning.
```plaintext
S1 while (p1 = φ^loop(p0, p2)) {
S2 x1 = p1->value;
S3 if(c1) {
S4 x2 = p1->value/2;
S5 ip1 = p1->inner_loop;
L1 while (ip2 = φ^loop(ip1, ip3)) {
L2 do_something(ip2);
L3 ip3 = ip2->next;
}
}
S6 x3 = φ_c1(x1, x2);
S7 p2 = p1->next;
}
```
Figure 5. Uncounted nested loop in SSA form.
```plaintext
//task0-0 (main task)
S1 while (p1 = φ^loop(p0, p2)) {
  //persistent-task1-1
  #pragma omp task firstprivate(p1) output(x1)
  { S2 x1 = p1->value; }
  //persistent-task1-2
  #pragma omp task firstprivate(p1) output(c1, x2)
  { S3 if (c1) {
      //persistent-task2-1
      #pragma omp task firstprivate(p1) output(ip1) lastprivate(ip1, x2)
      { S4 x2 = p1->value/2;
        S5 ip1 = p1->inner_loop;
      }
      //persistent-task2-2
      #pragma omp task input(ip1)
      { L1 while (ip2 = φ^loop(ip1, ip3)) {
          //parallel-task3-1
          #pragma omp task firstprivate(ip2)
          { L2 do_something(ip2);
            L3 ip3 = ip2->next;
          }
        }
      }
    }
  }
  //persistent-task1-3
  #pragma omp task input(c1, x1, x2)
  { x3 = φ_c1(x1, x2);
    S6 ... = x3;
    S7 p2 = p1->next;
  }
}
```
Figure 6. Loops after partitioning and annotated with OpenMP stream extension.
Figure 5 shows the conversion to SSA form. Just like GCC, we use a loop-closed SSA form distinguishing between loop-Φ and cond-Φ nodes. The latter take an additional condition argument, appearing as a subscript, to make the selection condition explicit. The partitioning technique will build a stream to communicate this condition from its definition site to the cond-Φ node’s task.
We build on the concept of treegion, a single-entry multiple-exit control-flow region induced by a sub-tree of the control dependence graph. In the following, we assume the control flow is structured, which guarantees that the control dependence graph forms a tree. Every sub-tree can be partitioned into concurrent tasks according to the control dependences originating from its root. Any data dependence connecting a pair of such tasks induces communication over a dedicated stream. We call task_{M,N} the N-th task at level M of the control dependence tree.
In Figure 5, after building the control dependence tree, one may partition it into 3 tasks (task_{1,1}, task_{1,2} and task_{1,3}) at the root level, and task_{1,2} may be further partitioned into inner nested tasks task_{2,1} and task_{2,2}. One may then check for data parallelism in the inner loops: if they do not carry any dependence, one may isolate them in additional data-parallel tasks, such as task_{3,1} in this example.
Figure 6 shows the task and stream-annotated code using an OpenMP syntax. Figure 7 shows the nested pipelining and data parallelization corresponding to the partitioned code. The main task will be executed first, and a pipeline will be created for the main task and its three inner tasks task_{1,1}, task_{1,2} and task_{1,3}. Among these, the same variable x used to be defined in the control flow regions of both task_{1,1} and task_{1,2}, to be used in task_{1,3}. This output dependence must be eliminated prior to partitioning into tasks, so that task_{1,1} and task_{1,2} can be decoupled, while task_{1,3} may decide which value to use internally.
Nested tasks are introduced to provide fine grained parallelism. It is of course possible to adapt the partition and the number of nesting levels according to the load balancing and synchronization overhead. The generated code will be well structured, and simple top-down heuristics can be used.
In the execution model of OpenMP 3.0, a task instance is created whenever the execution flow of a thread encounters a task construct; no ordering of tasks can be assumed. Such an execution model is well suited for unbalanced loads, but the overhead of creating tasks is significantly more expensive than synchronizing persistent tasks. To improve performance, we use the persistent task model for pipelining, in which a single instance will handle the full iteration space, consuming data on the input stream and producing on the output stream [14]. In Figure 7, all the tasks except task_{3,1} use the persistent model to reduce the overhead of task creation; task_{3,1} is an ordinary task following the execution model of OpenMP 3.0 (instances will be spawned every time the control flow encounters the task directive). All these tasks will be scheduled by the OpenMP runtime.
One problem with the partitioning algorithms is the fact that the number of def-use edges (scalar dependences) can become very large, sometimes quadratic with respect to the number of nodes [9]. Figure 8 (left) presents an example that illustrates this problem. Statements S1 and S2 define the variable x. These definitions all reach the uses in the statements S3 and S4 by passing through S5. Because each definition can reach every use, the number of definition-use edges is proportional to the square of the number of statements. These dependences constitute the majority of the edges in a PDG. SSA provides a solution to this problem. In SSA form, each assignment creates a different variable name and, at points where control flow joins, a special operation is inserted to merge the different incarnations of the same variable. The merge nodes are inserted just at the places where control flow joins. Figure 8 (right) shows the original program in SSA form. A merge node (Φ) is inserted at S3, killing the definitions of S1 and S2. We can see that, in SSA form, the number of definition-use edges is reduced from quadratic to linear.
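Figure 8 itself is not reproduced in the extracted text; the situation it describes can be sketched as follows (the statement bodies are assumed for illustration):

```
/* Before SSA: both definitions of x reach both uses via the join S5,
   so the number of def-use edges grows as (defs x uses).            */
S1 if (c) x = a;
S2 else   x = b;
S5 /* control flow joins here */
S3 ... = x;
S4 ... = x + 1;

/* SSA form: a merge node inserted at S3 kills the definitions of S1 and S2,
   so each use has exactly one reaching definition (linear number of edges). */
S1 if (c) x1 = a;
S2 else   x2 = b;
S3 x3 = Φ(x1, x2);  ... = x3;
S4 ... = x3 + 1;
```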
The systematic elimination of output dependences is also facilitated by the SSA form, with a Φ node in task_{1,3}. Notice that the conditional expression from which this Φ node selects one or another input also needs to be communicated through a data stream.
When modifying loop distribution to rely on tasks and pipelining rather than barriers, it is not necessary to distribute the loop control nodes: one may run them all in the master task, which in turn will activate tasks for the inner partitions. The statements inside each partition form a treegion whose root is the statement that is dependent on the loop control node. With pipelining inserted, the distributed loops are connected through streams wherever there are data dependences.
One concern here is that loop distribution with task pipelines may not provide enough expressiveness to extract pipeline parallelism. This is not a problem, however: we may apply the same method to every treegion rooted at a conditional statement and, with some special care for the nested tasks, obtain fine-grained parallelism without explicitly decoupling the control dependences. Considering again the example in Figure 4, its control dependence tree is given in Figure 9. The root treegion includes all the nodes in the control dependence graph; treegion_{1,2} represents the treegion at conditional level 1 and its root is node 2; treegion_{1,3} is at conditional level 1 and includes nodes (S3, S4, S5, L1, L2, L3); treegion_{2,1} is at conditional level 2 and its root is node (L1), which is the loop control node of the inner loop.
So, following our approach, we may start from the treegion at conditional level 0, which is the whole loop; an implicit task will be created for it as the master task. The treegions at level 1 can be created as sub-tasks running in the context of the main task. If there are data dependences between treegions at the same level and no recurrence, we connect them with communication streams. If there is a dependence from the master task to an inner task, the value from the enclosing context can be forwarded to the inner task, as with a firstprivate clause of OpenMP. Dependences from an inner task to the master task are also supported: a lastprivate(x) clause is associated with a synchronization point at the end of the task and makes the value of x available to the enclosing context. The same algorithm can be applied recursively to the treegions at the next inner level, e.g. for treegion_{1,3} at level 1, the sub-treegion at level 2 is
The canonical definition of a treegion is a non-linear, single-entry multiple-exit region of code containing basic blocks that constitute a sub-graph of the CFG. We alter this definition to bear on the Control-Dependence Graph (CDG) instead, so we will be looking at single-entry multiple-exit sub-graphs of the CDG.
**Loop Control Node** In the representation we employ later, we will use the loop control node to represent the loop. The loop control node includes the statements that evaluate the loop control expression and determine the next iteration.
**Conditional Level** The control dependence graph of the structured code is a tree after building the loop control node. The root of the tree is the loop control node at the loop’s outermost level. We define the conditional level for every node in the control dependence graph as the depth of the node in the tree. The root of the tree with depth 0 has conditional level 0.
We define the conditional level of a treegion as the conditional level of the root node of the treegion (subtree). We write treegion_{M,N} to identify a treegion, where M is the conditional level of the treegion and N is the root node number of the treegion.
**5.2 The algorithm**
The algorithm takes an SSA representation of a single function, and returns a concurrent representation annotated with tasks and communication streams.
**Step 1: Transform Conditional Statements to Conditional Variables** To achieve fine-grained pipelining, conditional statements are split into conditional variables, as shown in Figure 10. Full conversion to three-address SSA form is also possible (as is performed in GCC or LLVM, for example).
```
if (condition(i))
// is transformed to
c1 = condition(i)
if (c1)
```
**Step 2: Build the Program Dependence Graph under SSA** By building the program dependence graph, the control dependence graph, data dependence graph (through memory) and scalar dependence graph (through registers) are built together.
The control dependence graph for structured code is a tree; the root of the tree is the loop control node. The leaves of the tree are non-conditional statements and the other nodes inside the tree are the conditional statements or the loop control nodes of the inner loops. We start from building the control dependence graph and evaluate the conditional level for each node in the graph. Every node inside the control dependence graph is a statement from the compiler’s intermediate representation of the loop, except for the loop control node. The loop control node is built by searching for the strongly connected component starting from the loop header node (at each loop nest level) in the program dependence graph.
The data dependence graph can be built by array dependence analysis [9] for the loop. We should analyse every pair of data dependences to mark the irreducible edges in a later step if there are recurrences.
**Step 3: Marking the Irreducible Edges** A partition can preserve all dependences if and only if there exists no dependence cycle spanning more than one output loop [1, 12]. In our case, for the treegions at the same conditional level, if there are dependences that form a cycle, we mark the edges in between as irreducible. If two statements are at different conditional levels, we promote the inner one to its ancestor until both of them are in the same treegion, and mark the edge between the promoted root node and the other root node as irreducible. The algorithm is presented in Figure 11.
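Figure 11 itself is not reproduced in the extracted text. A minimal sketch of the marking step, with illustrative names and a placeholder for the cycle test, could look like this:

```c
#include <stdbool.h>

typedef struct CDGNode {
    int level;                  /* conditional level (depth in the CDG tree) */
    struct CDGNode *parent;     /* node it is control-dependent on           */
} CDGNode;

/* Promote the deeper endpoint until both sit at the same conditional level. */
static void promote(CDGNode **u, CDGNode **v) {
    while ((*u)->level > (*v)->level) *u = (*u)->parent;
    while ((*v)->level > (*u)->level) *v = (*v)->parent;
}

/* Placeholder for the recurrence test: true if a and b lie on a common
   dependence cycle at this level (an SCC test in the dependence graph). */
static bool on_common_cycle(const CDGNode *a, const CDGNode *b) {
    (void)a; (void)b;
    return false;               /* stub only, so that the sketch compiles */
}

/* A dependence (src -> dst) is irreducible (must not be cut) if, after
   promotion, its two roots take part in a dependence cycle.            */
static bool is_irreducible(CDGNode *src, CDGNode *dst) {
    promote(&src, &dst);
    return src != dst && on_common_cycle(src, dst);
}
```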
**Step 4: Structured Typed Fusion** Before partitioning, to reveal data parallelism, we type every node in the dependence graph as parallel or !parallel. If there is a loop-carried dependence inside the node, then it is typed as !parallel; otherwise, it is typed as parallel.
The parallel-type nodes are candidates for data parallelization. The goal is to merge nodes of this type to create the largest parallel loops, reducing synchronization overhead and (generally) improving data locality. Further partitioning can happen in the following step, starting from this maximally type-fused configuration. Given a DAG with edges representing dependences and vertices representing statements in the loop body, we want to produce an equivalent program with a minimal number of parallel loops, each as large as possible, to amortize the synchronization.
level 0 for our partitioning algorithms, and for all of its child trees at conditional level 1, we should decide where to partition. The partition point could be any point between each of these trees at the same level except the irreducible edges that we have created in step 3. The algorithm may decide at every step if it is desirable to further partition any given task into several sub-tasks.
Look at the example in Figure 13:
```plaintext
for (i, ...) {
S1   x = work(i);
S2   if (c1) {
S3     y = x + i;
S4     ... = y;
     }
   }
```
Figure 13. Before partitioning (left), and After partitioning (right). Loop with control dependencies.
The code in Figure 13 (left) is partitioned into 2 tasks, and one task (task1_2) is partitioned further into 3 sub-tasks.
6. Code Generation
After the partitioning algorithm has decided the partition points between the original trees, we generate code for the stream extension of OpenMP by inserting the input and output clauses, relying on its support for nested tasks and on the downstream, extended OpenMP compilation algorithm (called OpenMP expansion). But some challenges remain, especially in the presence of multiple producers and consumers. We use the SSA form as an intermediate representation when generating the streaming code.
6.1 Decoupling dependences across tasks belonging to different trees
Clearly, if we decouple a dependence between tasks in the same tree, the appropriate input and output clauses can be naturally inserted. But what about the communication between tasks at different levels?
Consider the example in Figure 14: we partition the loop into 3 main tasks, task1_1 with S1, task1_2 with (S2, S3), and task1_3 with S4; task1_2 is further divided into task2_1 with S3. If we insert the produce and consume operations directly into the loop, unmatched production and consumption will result.
```plaintext
for (i, ...) {
S1   x = work(i);
S2   if (c1)
S3     y = x + i;
S4   ... = y;
   }
```
Figure 14. Normal form of code (left) and using streams (right).
The answer comes from following the synchronous hypothesis and slightly modifying the construction of the SSA form in presence of concurrent streaming tasks.
6.2 SSA representation
We are using the Static Single Assignment (SSA) form as an intermediate representation for the source code. A program is in SSA form if every variable used in the program appears a single time on the left-hand side of an assignment. We are using the SSA form to eliminate the output dependences in the code, and to disambiguate the flow of data across tasks in multiple-producer configurations.
Consider the example in Figure 15, if we partition the statements into (S1), (S2,S3), (S4), we need to implement precedence constraints for the output dependence between partition (S1) and (S2,S3), which decreases the degree of parallelism and induces synchronization overhead.
Eliminating the output dependences with the SSA form leads to the introduction of multiple streams in the partitioned code. In order to merge the information coming from different control flow branches, a Φ node is introduced in the SSA form. The Φ function is not normally implemented directly; after the optimizations are completed, the SSA representation is transformed back to an ordinary one with additional copies inserted at the incoming edges of (some) Φ functions. We need to handle the case where multiple producers in a given partition reach a single consumer in a different partition. When decoupling a dependence whose sink is a Φ node, the exact conditional control flow leading to the Φ node is not accessible for the out-of-SSA algorithm to generate ordinary code.
**Task-closed Φ node** In SSA loop optimization, there is a concept called the loop-closed Φ node, which implements the additional property that no SSA name is used outside of the loop where it is defined. When enforcing this property, Φ nodes must be inserted at the loop exit node to catch the variables that will be used outside of the loop. Here we give a similar definition for the task-closed Φ node: if multiple SSA variables are defined in one partition and used in another, a Φ node is created at the end of the partition for this variable. This is the place where we join/split the stream. We need to make sure that the different definitions of the variable are merged in this partition before it continues to a downstream one. This node will be removed when converting back from SSA.
Task-closed stream. Our partitioning algorithms generate nested pipelining code to guarantee that all communications follow the synchronous hypothesis. For each boundary, if one or more definitions of a variable come through from different partitions, we insert a consumer at this boundary to merge the incoming data, and immediately insert a producer to forward the merged data at the rate of the downstream control flow.
1. When partitioning from a boundary, if inside the treegion there are multiple definitions of a scalar that is used in other treegions at the same conditional level, we create a Φ node at the end of this partition to merge all the definitions, and we also update the SSA variable in the later partitions.
2. If there is a Φ node at the end of a partition, insert a stream named with the left-hand side variable of the Φ node.
3. At the place where this variable is used, which is also a Φ node, add a special stream-Φ node to consume.
4. To generate code for the stream-Φ, use the boolean condition associated with the conditional Φ node it originates from.
Let us consider the SSA-form example in Figure 15, where we partition the code into (S1,S2,S3) and (S4,S5). A Φ node is inserted at the end of the first partition, \( r_{1,4} = \phi(r_{1,1}, r_{1,2}) \), and the Φ node in the later partition is updated from \( r_{1,3} = \phi(r_{1,1}, r_{1,2}) \) to \( r_{1,5} = \phi(r_{1,4}) \). In the second step, we find that partition (S1,S2,S3) ends with a Φ node, so we insert a stream and produce into it there. In partition (S4,S5), the variable is used after the Φ node, so we insert a stream consume. The generated code will look like Figure 16.
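Since Figure 16 is not reproduced here, the following sketch only suggests the shape of the decoupled code. The `stream_push()`/`stream_pop()` helpers, the conditions, and the computations are placeholders invented for this illustration; in the generated code the two loops would run as concurrent persistent tasks communicating through a stream rather than one after the other.

```c
#include <stdio.h>

#define N 8

/* A toy bounded stream standing in for the runtime's stream support. */
typedef struct { int buf[N]; int head, tail; } stream_t;
static void stream_push(stream_t *s, int v) { s->buf[s->tail++] = v; }
static int  stream_pop(stream_t *s)         { return s->buf[s->head++]; }

static int f(int i) { return i + 1; }   /* placeholder for one branch  */
static int g(int i) { return i - 1; }   /* placeholder for the other   */

int main(void) {
    stream_t s_r = { {0}, 0, 0 };

    /* Producer: partition (S1, S2, S3). */
    for (int i = 0; i < N; i++) {
        int r;
        if (i % 2 == 0)
            r = f(i);                   /* r_{1,1} */
        else
            r = g(i);                   /* r_{1,2} */
        /* Task-closed phi r_{1,4} = phi(r_{1,1}, r_{1,2}), materialised as a
         * single push on the stream at the partition boundary. */
        stream_push(&s_r, r);
    }

    /* Consumer: partition (S4, S5). */
    for (int i = 0; i < N; i++) {
        int r = stream_pop(&s_r);       /* stream-phi: consumes r_{1,4}  */
        printf("%d\n", r);              /* placeholder for the use in S4 */
    }
    return 0;
}
```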
This example illustrates the generality of our method and shows how fine-grain pipelines can be built in presence of complex, multi-level control flow.
If we decide to partition the statements into (S1), (S2,S3), (S4,S5), which is the multiple-producer case, the generated code will look like Figure 17.
For multiple consumers, the stream extension of OpenMP will broadcast to its consumers, which is appropriate for our case.
7. Conclusion
In this paper, we propose a method to decouple independent tasks in serial programs in order to extract scalable pipelining and data parallelism. Our method leverages a recent proposal of a stream-processing extension of OpenMP with persistent task semantics, which eliminates the overhead of scheduling task instances each time a pair of tasks needs to communicate. Our method is inspired by the synchronous hypothesis: communicating concurrent tasks share the same control flow. This hypothesis simplifies the coordination of communicating tasks over nested levels of parallelism. Synchrony also facilitates the definition of generalized, structured typed-fusion and partitioning algorithms that preserve loop structure information. These algorithms have proven essential to adapting the grain of parallelism to the target and to the effectiveness of compile-time load balancing. The partitioning algorithms also handle DOALL parallelization inside a task pipeline. We are using a combination of SSA, the control dependence tree and (non-)
...
Comparing Product Development Processes and Managing Risk
As Published: http://dx.doi.org/10.1504/IJPD.2009.025253
Publisher: Inderscience Enterprises
Persistent URL: http://hdl.handle.net/1721.1/67011
Version: Final published version (the final published article, as it appeared in a journal, conference proceedings, or other formally published context)
Terms of Use: Article is made available in accordance with the publisher’s policy and may be subject to US copyright law. Please refer to the publisher’s site for terms of use.
Comparing product development processes and managing risk
Darian W. Unger*
School of Business
Howard University
2600 6th St. NW, Office 453
Washington, DC 20059, USA
Fax: +1 (202)–806–1642
E-mail: dwunger@howard.edu
Steven D. Eppinger
Sloan School of Management
Massachusetts Institute of Technology
50 Memorial Drive, Room E52-474
Cambridge, MA 02142, USA
E-mail: eppinger@mit.edu
Abstract: Product Development Processes (PDPs) require careful design to reduce development time, create better products and manage the risks of bringing new products to market. This paper investigates the relationship between product development risk and PDP management. We begin by identifying risks and proposing several iteration- and review-based metrics by which PDPs can be effectively identified and compared.
Data from ten company case studies demonstrate the utility of the proposed metrics and exemplify how different PDPs manage different risks. The cases also show that software companies face different risks and employ more flexible PDPs than manufacturing companies. We conclude that PDPs vary more than previously documented, that the proposed metrics are useful in distinguishing PDPs and that companies can tailor their PDP designs to suit their unique risk profiles.
Keywords: Product Development Process; PDP; risk management; innovation; technology management.
Biographical notes: Dr. Darian W. Unger earned his PhD from the Massachusetts Institute of Technology (MIT) and is an Assistant Professor at the Howard University School of Business. His research and teaching interests include project management, technology strategy and new product development in the energy industry.
Dr. Steven D. Eppinger is the Deputy Dean and Professor of Management Science at the MIT Sloan School of Management. He also holds the General Motors Leaders for Manufacturing Chair and has a joint appointment in the
1 Introduction
Successful Product Development (PD) is critical to industrial performance. Rapid and innovative PD can provide critical competitive advantages to firms (Jachimowicz et al., 2000; Ulrich and Eppinger, 2004). Despite the importance of PD, companies currently have difficulty designing or choosing from an extensive array of PD processes. If companies design their processes poorly, they may endanger the success of their products, their competitiveness and possibly, their survival. There are currently no established criteria for comparing, selecting or designing PD processes, nor is any single process ideal for all circumstances and companies.
This article explains a variety of Product Development Processes (PDPs) and aims to help companies better design their own PDPs. A literature review reveals that current categorisations of PDPs are insufficient for effective management comparison or application. Using a combination of existing literature and case study research, we propose that two risk management activities – development iterations and reviews – can be used as metrics to describe and compare different PDPs. We then use the case studies to examine the variation among PDPs and to demonstrate the utility of the proposed metrics.
2 Background and literature review
Published literature and industry practice in product development management provide useful background for this study. This review examines PDP characteristics and explains different PDPs.
2.1 PDP steps and risks
PDPs are not uniform, but they often use similar actions to manage development risks. Prevailing literature and industry practices present PDPs that involve a common series of actions, steps or stages. Most companies follow at least some form of the following steps: product planning, project planning, concept creation, system-level design, detailed design, testing/prototyping, and release. The purpose of PDPs that include these steps is to provide a structure for managing the many uncertainties and risks that companies face. Segregating the process into smaller actions is one way of controlling risks.
Risk management is a fundamental PD concern because risk, defined as exposure to danger or loss, is prevalent in all development projects. Balancing risks and potential rewards is an enduring theme of engineering and programme management (Ansell and
Wharton, 1992; Foster and Kaplan, 2001; MacCrimmon and Wehrung, 1986). The risks of PD can lead to several forms of development failure: a slow or late product may miss a market opportunity and incur too many development costs; a technically challenging product might be impossible to design, may lack the expected features, or be of poor quality; and a product with misguided specifications may not fulfil customer needs and therefore completely miss a market niche (Awny, 2006).
Existing literature suggests several ways of categorising PD risks. This research uses a traditional categorisation of risk by source of uncertainty underlying the risk (Cross and Sivaloganathan, 2005). A successful PD process should be able to manage or mitigate the following four major types of risk:
1. Technical risk – uncertainty regarding whether a new product is technologically feasible and whether it will perform as expected, given clear and valid product specifications.
2. Market risk – uncertainty regarding whether a new product accurately addresses changing customer needs and whether the product is well positioned relative to competition. Unlike the technical difficulty of building ‘to a specification’, market risk concerns whether an achievable specification brings the wrong product to market.
3. Schedule risk – uncertainty regarding whether a new product can be developed in the time available.
4. Financial risk – uncertainty regarding whether a new product can be developed on budget and whether the project will pay back the investment.
These four general types of risk are neither comprehensive nor entirely independent of each other.
Many other factors may also present uncertainty, but they can be subsumed by the larger risks detailed above (Bstieler, 2005). For example, quality assurance or integration risk may be considered technical risk. The risks are also occasionally interdependent and overlapping. For example, ‘scope creep’, a common problem involving feature addition during development, frequently occurs in an attempt to address market risk, but tends to increase technical, schedule, and budget risks. It is therefore impossible to completely separate the types of risks faced in PD, although the categorisations are useful in planning PDPs.
Prior research explores the roles, categorisations, and management of risk. De Meyer et al. organise risk by type and warn of the need to observe these risks carefully in order to improve project and development management (De Meyer et al., 2002; Hartmann and Myers, 2001). More general risk literature stresses the importance of maximising expected values and introduces traditional risk management methods such as hedging, decision analysis, and parallel development (Ansell and Wharton, 1992; De Neufville, 1990). Other sources point towards the importance of managing information flows in mitigating risks and improving design efficiency (Varma Citrin et al., 2007; Blanco et al., 2007).
Building on this literature, we will consider how different PDPs address risk through product development iterations, integrations, and reviews. Iterations often address market risk while reviews often address technical risk. Planned iterations – often in the form of early prototypes, simulations, or analytical models – provide feedback for improved
As later sections show, the cost, time, and fidelity or quality of integrations vary widely across industries. For example, some hardware-based prototypes are difficult or expensive to build because they require tooling, construction, and complex electro-mechanical integrations. In contrast, computer-based, soft prototypes may be easier to build and integrate, but may not provide as much quality feedback if the computer models do not capture key real-world aspects of the product. PD managers must weigh the benefits and costs of system integrations to ensure that they reduce more risks than they create; early integrations or prototypes are not always practical or possible. However, information gained from system integrations, tests, and feedback generally improves the evolving product. Similarly, performance validations and testing are critical to reducing companies’ technical risks (Boehm, 1988; Otto and Wood, 2001; Cooper, 2001). Both integrations and validations manage risk, although the risks they manage are often different.
2.2 The spectrum of PDPs
PD literature provides many examples of how companies manage development risks. This section presents and describes two common PDPs that constitute the two ends of a spectrum of PDPs. At one end of the spectrum is the staged process, the traditional and dominant PDP in American industry. The spiral process, at the other end, incorporates cross-phase iteration and is commonly used in the software industry.
The most widely used type of PDP, and the standard for comparison in this research, is the traditional staged process shown in Figure 1 (Cooper, 2001; Smith and Reinertsen, 1992; Ulrich and Eppinger, 2004). This process, also called waterfall, stage gate, phase gate, toll gate, checkpoint, life cycle, or structured PD by various authors and practitioners, has been dominant in US industry for almost 30 years. Variants of this process have also evolved into design-to-schedule and design-to-budget processes (McConnell, 1996).
Figure 1 The traditional, staged PDP (see online version for colours)
The ideal staged process proceeds in distinct stages, or phases, from product planning to product release. The intermediate phases include concept design and specification analysis, system-level design, detailed design, and testing or prototyping. At the end of each phase is a review, or gate, to evaluate whether the previous phase was successfully completed. If the project is reviewed positively, work proceeds to the next phase. If not, then the project iterates within that phase until it can successfully pass the review or the project may be terminated.
In Figure 1, the reverse arrows, or cross-phase iterations, indicate that it is possible to revisit earlier phases, but such iterations are difficult and costly. These major unplanned iterations are generally avoided whenever possible. Instead, most iterations occur within stages; these narrow iterations result in both advantages and disadvantages.
One major advantage is that staged processes impose structure on development by reaching sharp product definitions and specifications early in PD. Technical risk is reduced because narrow iterations and reviews freeze specifications early. Rigid specifications and stable product definitions help to avoid errors by avoiding midstream corrections.
Staged processes perform well in cases when products have stable product definitions and use well-understood technologies (as in the case of upgrades or maintenance improvements to existing products). In these cases, staged processes help to find errors in the early stages of a project, when costs of changes are low (McConnell, 1996). Staged processes also work well for projects that are dominated by quality requirements rather than cost or schedule requirements. In these cases, where quality and error-avoidance are high priorities, the most attractive path is a direct one with early specification freeze and no subsequent changes that increase the likelihood of mistakes.
Inflexibility is the main disadvantage of narrow iterations constrained within phases. Because they do not cross phase boundaries, narrow iterations cannot incorporate feedback from later phases. It is difficult to fully specify requirements in the beginning of a project, especially in a dynamic market. Poor or misleading specifications can lead to great market risk. Failure may result if early specifications and assumptions are proven wrong by subsequent market research or prototyping. The staged process does not handle these midstream changes well and can be ill-suited for projects in which requirements are poorly understood in the beginning. They may also face difficulty handling parallel tasks within stages (Smith and Reinertsen, 1992).
The spiral PDP differs from the staged process because of its emphasis on flexibility and comprehensive iteration. Unlike the staged processes, the spiral process includes a series of planned iterations that span several phases of development. It is a relatively recent PDP that has been adopted by many in the software industry, with its notably different design requirements (Easingwood et al., 2006). Spiral process proponents assert that it reduces burdensome and expensive rework in software, thus lowering development time and cost (Boehm, 1988; Gilb, 1988; McConnell, 1996).
The spiral PDP can lead to the development of a competitive product on schedule and within budget by managing risks early. As shown in Figure 2, its spiralling form repeats regular steps, including concept development, system level design, detailed design, and integration and testing. The process is flexible; the actual number and span of loops can vary.
Figure 2 The spiral PDP (see online version for colours)
The spiral process requires managers to evaluate risk early in the project, when costs are still relatively low. Risk in this context entails all four major areas of risk described earlier, including poorly understood requirements and architecture, performance problems, market changes, and potential problems in developing specific technologies. These risks can all threaten a project, but the spiral process helps to screen them early, before major costs are incurred (Boehm, 1988).
A simple spiral process with minimal uncertainty and only one loop would closely resemble a staged process. However, most projects entail uncertainty; companies that evaluate and manage their risks with multiple cross-phase iterations choose a significantly different path. By going through many stages with the full expectation of returning to them later, the spiral process allows a brief glimpse into the future which is not allowed by staged processes. This glimpse yields information from later stages that can be incorporated in early concepts, requirement specifications, and architectures, thus reducing risk. The risk reduction comes at the cost of more flexible product specifications, but this flexibility can be advantageous in dynamic environments. In this way, the spiral process overcomes difficulties presented by unclear initial product requirements, a challenge which is poorly handled by the classic staged process.
The spiral process has several disadvantages. First, it is more sophisticated and complex than other processes, and thus requires more management attention. Second, the lack of rigid specifications can potentially lead to delays in developing complex subsystems. Finally, the spiral process may be overkill for simple projects that could use a simpler waterfall process (Boehm and Bose, 1994).
A key distinguishing feature of the spiral process is the planned, large-scale nature of iterations. Risks are assessed in each iteration, allowing managers to plan an effective approach for the next iteration. Unlike the expected small iterations which occur within individual stages of staged processes, and unlike the large but unplanned and unwanted feedback loops which can occur in less successful staged processes, iterations in the spiral process are planned and span several phases of the development process. Despite this distinction, critics may consider it similar to a staged process if the milestones and deliverables between each spiral round act merely as stage reviews.
Recent literature sources often recognise PDPs as risk management structures but often focus on one process rather than comparative PDPs. For example, Cooper argues persuasively for the effectiveness of the stage gate PDP (Cooper, 2001). Other sources, including those general sources mentioned in the beginning of this section, implicitly endorse this point of view (Pahl and Beitz, 1996; Smith and Reinertsen, 1992). Boehm advocates the use of the spiral process in software development, and is joined by others who denounce the deficiencies of rigid waterfall processes and call for flexible prototyping (Boehm, 1988; Boehm and Bose, 1994; Gilb, 1988; Hekmatpour and Ince, 1988). This stream of PD literature is strengthened by many studies of individual companies’ PD efforts, ranging from software designers to automobile manufacturers (Cusumano and Selby, 1995; Cusumano and Nobeoka, 1998; MacCormack, 2000; Ward et al., 1995).
Finally, some sources begin to compare different PDPs. Krubasik (1998) argues for the need to customise PD, suggesting that “product development is not monochromatic...not all product development is alike. Each situation has a different context...[implying] different managerial actions.” Other authors offer brief and balanced comparisons of different PDPs, but limit the scope to theoretical examples (McConnell, 1996). Finally, some sources use comparative empirical studies to suggest a method of matching PDPs and context, but do not relate these to PD success (MacCormack, 2000; MacCormack et al., 2001).
2.3 PDP variation and problem definition
Staged PDPs facilitate managerial control while spiral PDPs allow more flexibility, and there are many other PDP variations that fall between these extremes. The array of variants includes modified staged processes, evolutionary prototyping and delivery processes, and design-to-schedule or design-to-budget processes. Each of these PDP variants has distinct advantages and disadvantages, but PDP differences are poorly understood and not yet fully acknowledged in existing literature and practice. As a result, companies have difficulty designing or selecting PDPs.
Our research has two goals, both of which help to bridge the knowledge gap in existing literature and industrial decision making. First we seek to identify different PDPs and establish that variety exists. To do so, we define parameters that allow for evenhanded comparisons between PDPs. Second, we demonstrate how different PDPs can address different risks through integrations, iterations, and reviews. Our overall research goal is to help academics and business managers with the difficult task of identifying, comparing, and successfully designing PDPs for risk management.
3 Characteristics for specific PDP comparison
This section proposes characteristics by which different PDPs can be defined, compared, and contrasted. Companies try to balance structure and flexibility in their PDPs, but have difficulty measuring degrees of either structure or flexibility. Characterising PDPs requires identifying basic traits that are shared by all processes: all PDPs employ design reviews, which uphold standards and/or mark milestones; and all PDPs include iterations, which incorporate changes and feedback between design groups or project phases. Characterising PDPs also requires tenets that set PDPs apart: although all PDPs use reviews and iterations, the manner of reviews and iterations varies dramatically. They may vary in rigidity, frequency, scope, or several other parameters that affect risk management. Thus, reviews and iterations – incorporating specifications, milestones, integrations, and tests – are advanced as useful characteristics for distinguishing PDPs. These two characteristics are useful metrics for PDP comparison because all PDPs include some combination of iterations and reviews.
3.1 Design/Integrate/Test cycles
We focus our attention on the design-build-test-redesign and the design-analyse-redesign cycles (the hard and soft forms of design iteration, respectively). Given the uncertainties inherent in PD, iteration is inevitable and must be managed effectively. Iteration is technically defined as the repetition of an action or process. This definition can be perceived positively (as in renewal and improvement) or negatively (as in wasteful repetition). Our research defines iterations broadly to include almost any kind of stepwise work that involves correction or feedback between interdependent parts, people, or processes. Integrations and tests are methods of iteration that allow feedback from early versions of products.
Interdependent and complex tasks that require feedback introduce the potential of burdensome and expensive rework if poorly managed. Rework, a combination of feedback and corrective action, is also a type of iteration but is generally wasteful because it is a response to avoidable mistakes. Although rework can be considered a specific and unfortunate type of iteration, iteration is not synonymous with rework. Instead, well-managed design iteration can prevent rework and therefore reduce technical, schedule and budget risks. Other types of iteration, such as presenting a customer with a prototype to gauge consumer demands, can also alleviate market risk. Effective iteration can prevent waste and overcome the uncertainties inherent in interdependent tasks.
Iterations in PD can vary in three main ways. First, they can vary in breadth or scope of iteration. Second, they can vary in the number of inter-phase loops they entail. Finally, iterations can vary in degree of planning. These three parameters are shown in Figure 3, along with the scales we use to measure each one.
The first parameter, the breadth or scope of iteration, is a critical descriptor of a company’s PDP. Breadth can range from narrow to comprehensive. Narrow iteration is within phases, exemplified by several rounds of interdependent detailed design tasks. Comprehensive iteration is across phases, exemplified by processes that cycle not just around a specific stage, but rather over a range of process stages from concept to prototyping.
Figure 3 Three parameters for measuring PDP iterations
The number of iterations can also greatly affect the nature of a PDP and its success in managing risks. Whether a design is considered several times or just once is a major distinguishing feature between processes. Only the cross-phase loops are of importance to this part of the study because intra-phase loops are so common (and often automated in CAD programs) that they can barely be distinguished from one another.
Finally, the degree to which cross-phase iterations are planned also varies. Processes may have unplanned, anticipated, or scheduled iterations. Unplanned iterations occur when mistakes or feedback loops unexpectedly require a step backward, often in the form of rework. Anticipated iterations are iterations that are planned or expected, but that do not have specific schedules and which may not happen at all. For example, a manager who expects several rounds of detailed design on a specific component may be familiar with the design process and expect to succeed on the third try. A fourth try is not out of the question, and a lucky estimation might allow for success on the first try. Here, the iteration is anticipated – it is tacitly expected and the routine is known – but the number and time of iterations is not planned. Finally, scheduled iterations are both anticipated and planned. The number of cross-phase cycles may be planned, may be subject to time and budget constraints, or may be dependent on customer satisfaction and quality assurance.
3.2 Design reviews
Design reviews are critical to product development. Like iterations, they are present but different in all PDPs. Design reviews can be termed gates, checkpoints, approvals, or milestones, but always involve a decision or assessment of progress. Reviews examine the deliverable of previous action and decide whether to continue on to the next step, stage, or series of stages.
Companies developing products handle reviews in different ways. The goal of some reviews is to assess completion, while the role of others is to ensure that there are no technical design problems. Sometimes the reviews are internal and performed by the design groups themselves, while other times reviews are performed by upper management or by peers from other projects. The level of formality of the reviews also varies dramatically.
Figure 4 shows two parameter scales, rigidity and frequency, which we use as metrics to characterise design reviews. Rigidity of review is defined by the degree to which deliverables are held to previously-established criteria. In a rigid review, a project is probed for problems and not allowed to continue until each deliverable meets established criteria. In more flexible situations, projects or designs may conditionally pass reviews, subject to assurances of future change. In the most flexible cases, reviews can be merely a team check-in or a project status report.
Frequency of reviews also affects the character of the PDP. Some companies have reviews at rigid time intervals, thus forcing the completion of activities or integrations on a regular schedule. However, most companies schedule design reviews at the planned completion of certain deliverables. Deliverable-based reviews have the advantage of always having deliverables in existence to judge, but may occur at irregular intervals. Irregular timing can be due to schedule delays, to variation in the amount of time it takes to complete different phases, or to variation in whether the deliverables are the result of either one or several phases. For example, in staged PDPs, reviews occur after each stage. In spiral PDPs, reviews may occur after each spiral, or series of stages.
3.3 Identifying and distinguishing PDPs
We distinguish the variety of PDPs as combinations of iterations and reviews. For example, staged processes entail narrow iterations and rigid reviews after each stage. Conversely, spiral processes employ more comprehensive iterations and flexible reviews after several stages. These measures of iteration and review allow PDPs to be compared more precisely than before. Earlier investigations of PDPs either identified
only one main process or identified a few and distinguished them only with descriptions of their diagram shapes or broad generalisations of their perceived strengths and weaknesses. Here, the characterisations of iterations and reviews become the basis on which all PDPs can be distinguished.
Each iteration/review combination also manages risk differently; no single PDP is suitable for all risk circumstances. A product with many interfaces and interdependencies between hardware and software may face a high degree of technical uncertainty. That technical uncertainty might be best addressed with predictable, early iterations that test the technological feasibility of the concept design and early specifications. In contrast, a product in an immature industry may face entirely different risks if specifications are defined and frozen early. A company in this situation may opt to employ early market tests to make sure that the specifications accurately reflect rapidly changing customer needs.
4 Research method
This section explains the methodology of the company case studies that underlie our research findings. Case study methodology suits the goals of this research for four reasons. First, it provides empirical data to help build theory about the complex and poorly understood relationship between PDPs and risk. Second, it demonstrates the utility of using quantitative iteration and review metrics to characterise PDPs and distinguish them from each other. Third, the resulting understanding of several real PDPs provides counterexamples to conventional wisdom regarding the applicability of certain processes. Finally, case study research is useful for understanding phenomena and building theory, especially in the immature field of PD management. The case study methodology also supports our proposal of new, quantitative characteristics that describe and distinguish different PDPs while comparing them to earlier qualitative process information (Judd et al., 2001). The limitations of case study research were of relatively minor consequence to this research. Case study methodology has difficulty in proving causality because cases demonstrate only their own existence. However, this research does not attempt to prove causality between development risk management and PDP design. Rather, its main goal is to establish the existence and identities of different PDPs and to build grounded theory relating PDP design to effective management (Dougherty, 2002).
4.1 Case study method
The goal of each case study was to gain a rich understanding of the company’s risks and PDPs. The challenges were to identify what type of subjective risks were greatest and to learn of any differences between official company PDPs and the processes that were actually implemented. Meeting those challenges required conducting interviews, administering questionnaires, reviewing public company literature, and studying private company PDP documentation.
In most cases, one company manager served as a lead contact and provided process documents and lists of employees working on specific product development teams. In some cases, the lead contact would also recommend studying certain product lines in
response to the request to examine both ‘new’ and ‘variant’ products. When available, official process documents were always read first. Later, project team members were interviewed or given questionnaires about their PDPs.
Interviews followed the procedures for semi-structured “interview-conversations” described by Blum (1952), Buchanan et al. (1988) and Burgess (1984). Some common PDP questions were asked consistently in all interviews, but in most interviews the latter half was conversational and varied according to the person interviewed. Areas of questioning included both the PDP and the development context. PDP questions dealt with review and iteration characteristics, implementation of the official PDP, and perceived problems and advantages of the PDP. Contextual questions probed the types and timing of prototypes, tests and validations, programme schedules, budgets, and major risks.
Most interviews were one-on-one discussions with employee expectations of anonymity. Anonymity remains important because of the sensitivity of some questions about PDP implementation. In some cases, official PDPs were not followed faithfully or were criticised by interviewees, who were more at ease making admissions or accusations because they were assured that they would not be personally identified. In addition to private interviews, case studies at two companies also included public group discussions of the companies’ PDPs, prompting open and lively debate on the implementation, merits and disadvantages of their development processes. Some companies were investigated with the help of public data in addition to interviews. In these cases, such as Microsoft and Ford, existing literature and previous sources were considered first, followed by data from interviews.
Because the case studies attempt to paint a realistic, ‘as-is’ portrait of the PDPs, they do not simply repeat official company process documentation. What companies say they do is not always what they actually do. The case studies in this section reach beyond formal company descriptions to include individual engineers’ and managers’ assessments of how the PDPs are actually implemented.
5 PDP case studies and analysis of results
This section presents nine of our company PDP case studies. The primary company case studies examine Siemens Westinghouse Power Generation (SWPG), Integrated Development Enterprise (IDe), ITT Industries, Xerox, Aviation Technology Systems (ATS), Ford Motor Company, United Technologies Corporation (UTC), DeskArtes, and Microsoft. A tenth company was used for method validation.
These case study companies represent several different industries and operating environments. Four of the case study companies produce mostly software. Five of the case study companies produce mostly manufactured goods, although several include important software components in their products. Most case study subjects are large corporations, although three of them are smaller companies with hundreds of employees rather than tens of thousands. Some of the companies provided multiple case studies. ITT Industries and Xerox, for example, have units following different processes. In those cases, two different projects were investigated. ITT was also included because it was anticipated that its role as a defense contractor would lead it to have a uniquely different risk profile from most other companies in this study. Two of the case studies, Ford and
Microsoft, were selected in part because of the availability of public PDP information. This use of public data provides other researchers or reviewers with the means of independently examining source data. It also allows readers who are familiar with these companies’ PDPs – which have been extensively investigated by several other researchers – to compare these research findings to their own knowledge and interpretations. The case study companies can be seen together in Table 1.
5.1 Company and process descriptions
The first case study company, SWPG, is a large engineering and manufacturing company that employs a strict staged PDP to develop turbomachinery for power generation. It faces major technical risk, especially in the areas of quality assurance and thermal efficiency. Market risks are mitigated by early contracts and a system of guaranteed liquidated damages, which effectively transfer market risk to technical risk by driving up engineering requirements. Cycle times for this company are slow, with up to several years between the introduction of new products.
The second case study company, IDe, is a small software company that employs an evolutionary delivery PDP to develop its internet-based development management products. It faces major market risk, frequently must customise products to customers’ specifications, and operates with a very fast cycle time of only a few months.
The third case study company, ITT Industries, is a large defense contractor whose products include military electronics. ITT faces technical and schedule risks, but market risk is often limited by the monopsonistic nature of the defense industry. The company uses a staged PDP with ‘progressive freezes’, but applies it differently to different products. Progressive freezes mean that specifications can be set in a piecewise fashion without delaying the entire development programme. Subsequent work can start on those requirements or design aspects that are known to be solidly defined and unlikely to change. ITT’s experimentation with PDPs yielded two different results because one process was used for development of a Global Positioning Satellite (GPS) product while another process was used in developing a military Special Unit Operations (SUO) radio.
The fourth case study company, Xerox, is a large manufacturing and software company that develops copiers and document centres. Its considerable market risk forces a corporate culture of on-time delivery and thus translates to schedule risk among design engineering groups. The company uses a hybrid PDP that employs a staged process to develop the electro-mechanical systems and a spiral process for the software systems.
The fifth case study, a collaboration of DeskArtes and Arabia, investigates small software and manufacturing companies that use ray-tracing CAD software developed by DeskArtes to design ceramic tableware manufactured by Arabia. The major risks include market risk inherent in visual product aesthetics and industrial design. Use of the software allows an evolutionary prototyping PDP and extensive customer testing of electronic prototypes.
### Table 1 Comparative summary of findings

| Company | Risk profile |
| --- | --- |
| SWPG | Tech risks dominate (i.e., heat rate). Mkt. risk muted by contract structure |
| IDe | Major risk is market -- new company is highly customer sensitive |
| ITT | Major risks are tech and schedule, depending on product. Military contracting limits market risk |
| Xerox | Market risk translates to schedule concerns |
| ATS | Major risks are tech, primarily quality assurance |
| Markem | Market on one project, tech on the other from late integration |
| UTC | Major risks are tech -- FAA regulations and quality requirements |
| Ford | Market risk greatest, but complex tech and budget risk also high |
| DeskArtes/Arabia | Market risk dominates -- customer aesthetics |
| Microsoft | All risks muted by market dominance; features driven by mkt. risk, tech risk dominates |

*Table continues...*
The sixth and seventh case studies, ATS and Microsoft, both examine companies that use spiral PDPs to develop software. Both faced primarily technical risk because industry dynamics subdued market risk for both companies. The final case studies, UTC and Ford, both analyse large manufacturing companies which employ staged PDPs despite facing considerably different risks. Ford is more concerned with market risk and meeting disparate customer needs, while UTC is more concerned with technical risks stemming from quality assurance.
5.2 Comparative case study findings
Qualitative comparison between individual case studies reaffirms the difficulty companies have in designing PDPs. The quantitative data applies the parameters proposed in Section 3 to actual company processes. The resulting view demonstrates the existence of considerable variety among company processes. The PDP distinctions also suggest that PDPs address risk and integration differently, a principal finding that will be used later in the development of a PDP design method.
Qualitatively, the case studies reveal management difficulty in designing and implementing PDPs. The cases demonstrate various reasons and inconsistent methods for choosing PDPs. The cases also display frequent discrepancies between companies’ written and implemented processes.
There appears to be no consistent method by which companies design or select their PDPs. Although the case studies did not examine the underlying philosophy of management decisions that led to PDP definitions, several disparate paths were evident. First, some companies changed their PDPs due to organisational shifts. For example, SWPG formalised and added rigidity to its process after a corporate acquisition and merger. Second, some companies redesigned their processes when leading individuals perceived and wanted to address specific problems. For example, IDe progressed slowly from a loosely-designed and flexible process to a more rigid evolutionary development decision as its four lead managers determined that the company’s rapidly-growing workforce required more order. Similarly, the Xerox process was reformed by the company’s chief engineer in part to overcome persistent PD delays. Finally, some companies had their own idiosyncratic reasons for PDP designs. Several of these companies hired consultants to help them design or redesign their PD efforts; one of them specifically adopted a process ‘as a management fad’ that was promoted by a consultant. On the other hand, Microsoft modelled its PDP after the culture of its developers by retaining ‘hacker’ traits of frequent changes in development code. In summary, some companies carefully consider the PDPs they implement, but others employ PDPs with little regard for the suitability of those processes to company-specific risks or challenges. Companies seem to have based their PDP decisions on many different factors, including their disparate risk scenarios, but none had an analytical process to follow.
We observed that companies also have difficulty implementing the official processes they design. The case studies investigate and probe actual, implemented PDPs because they frequently differ from companies’ written processes. One of the few commonalities among all case studies is that every one reveals discrepancies between written and implemented processes. Sometimes, those differences are due to informal improvements to the written PDP, such as when ITT allows programme managers to omit process sections that they deem extraneous. Other times, differences between
written and actual PDPs are harmful and the result of poor implementation of good ideas. These discrepancies must be noted in order to gain accurate understanding of companies’ PDPs.
Quantitatively, the case studies demonstrate the utility of the proposed metrics and display differences among multiple, distinct processes. Section 3 reasoned that all companies use iterations and reviews, and these findings confirm that this is true for each of the case study companies. No two columns of defining characteristics are identical in Table 1, suggesting that the corresponding PDPs are also different. As described in Section 3.3, each column represents a PDP’s ‘signature’ and identifies a different PDP.
Figure 5 plots company process data by composite review flexibility and iteration. High iteration and review values indicate a process favouring flexibility while low iteration and review values indicate a process favouring control. Companies are plotted individually, except for Xerox and ITT, which are represented by two points each due to their internal divisions. In the figure, manufacturing companies tend to be positioned in the lower left of the chart because they employ more rigid reviews and have fewer cross-phase iterations. Arabia and ITT SUO are notable exceptions, and demonstrate the use of flexible processes in the development of manufactured products. Software companies tend to be positioned in the upper right of the chart, while companies with mixed manufacturing and software components fall in the middle. However, the considerable scatter in the plot suggests a key finding: although software developers are more likely than manufacturers to favour flexibility in their PDPs, the software versus manufacturing distinction alone does not predict PDP flexibility.
Figure 5 Overall PDP flexibility by iteration and review (see online version for colours)
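As a rough illustration of how such a composite plot can be produced, the sketch below encodes each PDP as a signature of the three iteration parameters and the two review parameters and averages them into two flexibility scores. The 0-2 scales, the averaging, and the example signatures are assumptions made purely for demonstration; they are not the scoring actually used to build Figure 5.

```c
#include <stdio.h>

typedef struct {
    const char *name;
    int breadth;    /* 0 = narrow (within phase)  ... 2 = comprehensive        */
    int loops;      /* 0 = single pass            ... 2 = many cross-phase     */
    int planning;   /* 0 = unplanned, 1 = anticipated, 2 = scheduled           */
    int rigidity;   /* 0 = rigid gate             ... 2 = informal check-in    */
    int frequency;  /* 0 = after every stage      ... 2 = after several stages */
} pdp_signature;

/* Composite flexibility scores in [0, 1]; higher means more flexible. */
static double iteration_flexibility(const pdp_signature *p) {
    return (p->breadth + p->loops + p->planning) / 6.0;
}
static double review_flexibility(const pdp_signature *p) {
    return (p->rigidity + p->frequency) / 4.0;
}

int main(void) {
    /* Hypothetical signatures for a classic staged and a spiral process. */
    pdp_signature procs[] = {
        { "staged", 0, 0, 0, 0, 0 },
        { "spiral", 2, 2, 2, 1, 2 },
    };
    for (int i = 0; i < 2; i++)
        printf("%-6s iteration=%.2f review=%.2f\n", procs[i].name,
               iteration_flexibility(&procs[i]), review_flexibility(&procs[i]));
    return 0;
}
```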
5.3 Case study analysis
Our case studies suggest that a company’s ability to integrate and test products can be a critical descriptor and determinant of PDPs. Our analysis moves beyond the useful but imperfect software/manufacturing distinctions of Figure 5 to better understand how companies can operate more widely across the spectrum of PDPs. Product integration often includes an early model, test, simulation, or prototype involving interdependent components or modules. A key question for companies is whether the value of the information they gain from such a test or feedback loop is worth the cost and time of the integration. Some integrations, such as prototypes of complex mechanical products, may require too much time or be too costly to afford multiple tests. One example of integration difficulty can be seen in the SWPG case study, where the company often sells its first ‘prototype’ to a customer willing to take a risk on buying an untested product in exchange for a reduced price or service guarantees. SWPG of course also uses computer simulations to model overall systems, but the fidelity and quality of these simulations are not as good as the first actual prototype. Other companies, such as DeskArtes/Arabia, Microsoft and ATS, can test their products more easily because integrations of their software products require no physical construction or major production expense. Their simulations are not merely models of reality; they are actual parts of the code that later become the final product.
The importance of integration differences among companies can be seen in Figure 6, which offers additional insight into why companies use certain PDPs. Figure 6 resolves the case studies into clear groups or clusters by categorising the companies by the ease with which they can integrate and test prototypes as part of the PDP. The ease of integration categorisation reflects our understanding of the feasibility, cost, and time required for integrations relative to the value of information gained from such tests. Some companies call these integrations ‘prototypes’, while others call them ‘builds’ or ‘stubs’, but they all represent a form of iteration and attempted risk reduction. Such integrations not only address certain risks by providing information feedback from prototypes, but also represent risks of their own by potentially costing a company time, funds and effort. Thus, these integrations become particularly useful lenses by which to view and categorise companies. Figure 6 shows distinct clusters of companies in the two corners of the chart, each grouped with other companies in the same category. This suggests that ease of integration is a powerful determinant of the process type that companies may employ.
These comparative charts of Figures 5 and 6 suggest that many PDPs in commercial practice cluster towards two corners. Exceptions tend to occupy the lower right hand quadrant, suggesting that companies are more flexible with their iterations than their reviews. It may be possible in the future to find PDPs that would occupy the upper left corner of the charts, but such processes are likely rare because of the difficulty of maintaining rigid iterations while simultaneously loosening reviews.
5.4 Additional research findings
The case studies and application of PDP metrics have already demonstrated the existence of multiple PDP variants. Our research also suggests that risk and integration characteristics are useful indicators of which process can be applied most effectively. The ensuing section points out two additional, but related, findings. First, the proposed metrics are a useful means of comparing processes. Second, the relationship between PD cycle times and design flexibility is counterintuitive.
The proposed iteration and review parameters are found to be useful metrics for several reasons. First, they fill a gap in PDP practice and literature. The metrics are necessary because previous literature provided no equitable way of comparing or contrasting PDPs on a common scale. Indeed, prior literature that attempted to compare PDPs did so based on either diagram shape or subjective advantages and disadvantages. These proved to be difficult criteria by which to compare the initial case studies. These iteration and review metrics provided a much-needed common language in which different PDPs could be identified. Every PDP encountered could be described in terms of review and iteration metrics. Once described quantitatively, the PDPs could be uniquely identified, compared, and contrasted. Finally, these metrics have proven to be both understood and welcomed by practitioners, who valued metrics as a way to better understand their own processes. The metrics are easy to communicate and access: managers are frequently able to describe major iterations and engineers are intimately familiar with the character of design and development reviews. Together, the conceptual ease of communication and general applicability of the metrics made them useful.
The data also suggest that PD cycle time can be a misleading indicator of PDP choice. One might expect that companies with long cycle times would be particularly attuned to market risk because market needs can change over the duration of PD. Thus, companies with long cycle times would emphasise prototyping, customer involvement, and cross-phase iterations. Conversely, one would expect that companies with short cycle times, software companies for example, could afford to avoid such market feedback efforts because customer testing would take valuable time and any potential improvements could be included in the next product version, usually already in the pipeline.
Such assumptions about PDP choice would be misleading. Although most companies face the common difficulty of writing specifications, companies in fast-paced markets tend to favour flexible processes, such as the spiral process or evolutionary delivery process, that incorporate frequent customer interaction or testing. This preference may be because the benefits of market feedback outweigh the costs of prototyping and testing the product. Meanwhile, manufacturing companies that release products less frequently tend to use fewer planned, cross-phase iterations and therefore build fewer integrated prototypes. This occurs because of product complexity, steep prototyping costs, and the long lead times necessary to build physical models. The counterintuitive result of this mismatch is that companies with the greatest need for market flexibility are sometimes the least likely to generate customer feedback during a PD cycle. Companies that are less sensitive to market changes because of short cycle times nevertheless frequently incorporate market feedback.
6 Implications and conclusions
Product development is a necessary risk for innovative companies. Although it holds the promise of increased sales, market share and profits, PD can fail due to technical difficulties, cost overruns, and missed market opportunities. PDPs must therefore not only focus on the final outcome – a new product – but also on mitigating the many development risks. We exhibit and explain PDPs as risk management structures. In exploring the relationships between risk management and PDP design, we make two key contributions.
First, we analyse several PDPs both theoretically and empirically to demonstrate how PDPs substantively differ from each other. We build upon previous literature that either does not adequately distinguish between different processes or makes comparisons based on subjective criteria. We also contribute to the field by proposing and supporting new metrics with which PDPs can be identified and compared. The metrics are based on design reviews and iterations, which are characteristics of all PDPs.
Second, we describe how various PDPs manage different development risks. PDPs with planned iterations and integrations can generate valuable data that are fed back to early process stages and reduce risk. Software companies tend to favour such processes, but the root cause of differences in PDP applicability lies not in whether a product is manufactured with parts or written with code, but rather in a company’s ability to integrate or prototype effectively.
ABSTRACT
We present Raceboat, a novel framework for developing and managing censorship circumvention channels. The Raceboat framework simplifies using signaling channels for low-bandwidth and/or latency-tolerant tasks like bridge distribution and authentication. We further develop a novel decomposition of application tunneling circumvention channels that is well suited to signaling channel usage. This decomposition enables modular components that are reusable across varied channels. We demonstrate the flexibility and extensibility of Raceboat for signaling uses by mixing-and-matching seven different channels.
KEYWORDS
Censorship, networks, privacy, application tunneling
1 INTRODUCTION
Censorship of user access to the internet is growing increasingly sophisticated and common across the globe. In response, a number of censorship circumvention channels have been developed in recent years to combat network-level censorship (blocking at the level of individual network connections) and to facilitate users reaching applications from behind their censors’ firewalls. The canonical use-case is browsing censored websites, but enabling instant messaging, video streaming, and other arbitrary internet-connected applications is also prevalent. To serve this use case, most of these circumvention channels emphasize low latency and high bandwidth, and so often rely on a direct IP connection to a circumvention channel proxy server or, in Tor parlance, a bridge (we will use bridge throughout this paper to avoid ambiguity, although we do not assume the server necessarily connects the user to the Tor network). This creates a second-order problem: censors can enumerate bridges based on a variety of techniques and then block any connections to those IPs. Creating new bridges or changing their IPs is easy, but the problem lies in how to communicate these new bridge addresses to users who have no connectivity to the uncensored internet (because their bridges were blocked).
This is known as the bridge distribution problem [34] or, more generally, a rendezvous problem (illustrated in Fig. 1): users need a circumvention channel that remains available even when the user and the adversary have all the same information (i.e. no secrecy is required to prevent blocking). We refer to channels that can serve this purpose as signaling channels. A number of channels have been developed for this purpose; the most prominently deployed is Domain Fronting [14] but prototype systems like Raven and SWEET that use email [37], CloudTransport [4] that uses cloud-storage, and even MoneyMorph that uses cryptocurrencies [24], have been demonstrated.
A necessary aspect of signaling channels is to remain available when the censor is employing IP-blocking; therefore these channels rely on the client-side of the channel (the user’s device) making IP connections to a third-party server that services some non-circumvention use-case the censor is reticent to block. We refer to such channels as indirect since they avoid direct IP connections between channel endpoints. Indirectness implies the channel must make a legitimate connection on the protocol served by the intermediary server and the messages are transmitted as content within that protocol. Therefore, all signaling channels of this sort fit within the application protocol tunneling [19] approach: e.g. Domain Fronting tunnels within the TLS protocol; Raven within email; CloudTransport within cloud-stored documents; and MoneyMorph within cryptocurrency transactions. In each case the application protocol is unaltered but the content conveyed by the protocol is an encoded version of the signaling channel’s messages, indiscernible to the censor either because of application-layer encryption preventing inspection or through the use of steganography.
Development of signaling channels has often been ad hoc; e.g. TorBrowser’s Moat functionality uses meek [14] (domain fronting) to fetch bridge addresses and is packaged as a Pluggable Transport [25]. However, Snowflake, another Pluggable Transport that uses short-lived proxies, also needs rendezvous functionality and thus re-packages its own implementation of domain fronting within its codebase. The alternative signaling channels listed above are each implemented as standalone prototypes and, at best, adopt the Pluggable Transports (PT) specification as their application interface. Even the use of the PT interface is a hindrance because it is designed for continuous socket-like connections (and, in its widely adopted original interface, is explicitly supposed to expose a SOCKS-proxy interface). In contrast, signaling channel tasks are often much lighter-weight, e.g. sending a single client request and receiving a single response. Additionally, there is no reason why the same channel needs to be used for both directions of communication (indeed some signaling use cases may only need a unidirectional push of information). However, existing solutions always seek to function as standalone channels providing bidirectional connectivity even when this does not suit the channel.
In the case of application protocol tunneling signaling channels, development is also hampered by an ad hoc one-off approach. Looking across many such systems we observe that there are marked similarities in internal functionality that hint at opportunities for abstraction, modularization, and re-use.
Fig. 2 illustrates the end-to-end usage of the Raceboat system and highlights our contributions: we formalize the properties and functionalities of signaling channels and then design and implement the Raceboat framework\(^1\) to provide a more flexible interface for signaling channels that supports a variety of use cases, including seamless mixing-and-matching of unidirectional channels. Additionally, we formalize a decomposition of application protocol tunneling channels into components and design and implement the decomposed application tunneling framework for dynamically assembling these components to synthesize functional channels. These contributions can both drastically increase re-use of research products and decrease developer effort when extending the existing capabilities of channels.
Our contributions in this paper are as follows:
- Formalization of signaling channel functionalities
- Implementation of a generic multi-channel interface for multiple signaling use cases
- Formal decomposition of application tunneling channels into modular components
- Implementation of a framework for runtime construction and use of application tunneling channels based on modular components
- Implementations of exemplar components providing varying signaling functionality over email, AWS S3, and redis services
2 BACKGROUND
In this section we provide background on the censorship circumvention layers Raceboat innovates in: the bridge distribution problem; the generalized type of circumvention channels needed; and application tunneling.
2.1 Bridge Distribution and Signaling
Most circumvention channels are developed with an assumption that some secret information can be exchanged out-of-band and assumed unknown by the censor. These “shared secrets” can vary in number and nature but, for almost any channel that uses a direct network connection between endpoints, it includes the IP address of the bridge. If the censor knows this IP then they can apply an IP-based block of all connections to it. This is indeed the way most circumvention communications are blocked - by enumerating and blocking bridges by their IP address [12].
\(^{1}\)https://github.com/tst-race/raceboat-pets2024
Bridge servers can rotate to new IP addresses, but this creates the problem that they must share this new secret to their users. This forms the lowest-layer of the bridge distribution problem: how to get users a bridge address when the reason they need the bridge is because their internet access is censored. Further concerns exist around how to decide to allocate bridge addresses [8, 30], but these are out of the scope of Raceboat’s contribution.
Existing methods for bootstrapping bridge information include a mix of “out-of-band” methods and domain fronting. Examples of the former include Tor-run email and Telegram accounts to which users can send specific “bridge request” messages, receive responses back, and copy these responses into their Tor bridge settings [11]. These require many specific user interactions: getting the information about where to send a message and the exact content of that message; manually sending the message; and manually copying data back from the response to their circumvention application. Domain fronting approaches, specifically meek [14] for the TorBrowser, offer more automation: they automatically run the meek pluggable transport to a static domain-fronted URL and then employ a CAPTCHA service to try to prevent censors from obtaining all the bridge addresses. In this case, the request and response are automatically handled by the user clicking a button. Overall, bridge distribution is provided by a very small number of channels and relies entirely on domain fronting for the most usable one.
Other circumvention apps and services similarly rely on domain fronting for these types of tasks. We generalize the bridge distribution problem to the problem of signaling: trying to communicate “control plane” types of information with circumvention app clients when they may have no user-level connection to the uncensored internet. Examples of other kinds of signaling could be communicating information about bridges being blocked, or trying to perform authentication of some kind. In general, we assume these communications are smaller, more latency tolerant, and less frequent, than the user-driven circumvention channel uses like messaging, browsing the web, or streaming content.
2.2 Signaling Channels
We target this signaling use case to be fulfilled by signaling channels. We consider these to be a subset of circumvention channels that have greater leeway for high latency, low bandwidth, and/or sparse use. In turn, we require greater degrees of blocking resilience. Specifically, we require two related properties: first, we require indirect channels, meaning endpoints connect to one or more neutral intermediate hosts and not directly to one another at the IP-layer; second, we require public addressability, meaning there is no information that must be kept secret from the censor to initiate the connection. Indirectness is actually a requirement produced by public addressability if the censor is assumed to be able and willing to block the IP of any signaling server it learns about. Public addressability is necessary because it allows for indiscriminate distribution of the signaling server address without compromising its reachability.
Various systems meeting this signaling channel definition exist: meek and other domain fronting channels achieve this despite not satisfying indirectness; refraction routing approaches use routing-level indirection [16]; various channels use email [20, 32, 37]; some use cloud storage services [4]; some use video streaming platforms [23]. A common factor in signaling channel unblockability is a reliance on some existing service that the censor has either not thought to block or finds too costly to block, e.g. hiding within an international communication application used by large segments of the population, or within hosting or routing infrastructure shared by many important internet services. This leaves the functioning of any given channel ultimately up to whether the censor decides that blocking the channel is worthwhile after all (or convinces the utilized service to take action on the censor’s behalf). Individual signaling channels, then, are inherently tenuous, and it is therefore valuable to have more of them to reduce the impact on circumvention (and thus also the value to the censor) of blocking any individual channel.
2.3 Application Tunneling
Application tunneling refers to a circumvention channel that functions by running a legitimate application and tunneling covert messages through its existing network connections in some manner. This can and has been done in many different and application-specific ways [2, 3, 15, 17, 18, 21, 23, 26–28, 31, 32, 37]. However, to satisfy our signaling channel properties we are particularly interested in indirect application tunnels: the covert data is actually tunneled not just from the application client to a server, but through that server and on to another instance of the application client, while assuming no modification to the intermediary server(s).
This distinction is important because it requires the content to be fully valid application content that is properly processed by a server. E.g. this excludes approaches like balboa [26] or rook [31] that inject and extract covert data below the application layer, as well as those that rely on directly peer-to-peer applications like FreeWave [18]. We observe that these content-based channels largely perform four operations:
• Control running an application
• Encode and decode (often steganographically) covert messages into/out-of valid application content
• Inject encoded content into the application input interface
• Address content to enable the other side to receive it
We make use of this abstraction in Section 4 to decompose application tunneling channels into modularized interoperable components to decrease development time and multiply the impact of novel research developments.
2.4 Domain Fronting and Refraction Routing
Domain fronting and refraction routing both function by "tunneling" a connection at or below the IP-layer. These are both valid methods for obtaining the unblockable public addressability requirements stated above. However, they also both rely on at least tacit cooperation from the relevant infrastructure providers (hosting and internet service providers, respectively). Therefore, we seek to improve the development of additional signaling channels.
3 RACEBOAT CHANNEL MANAGER
This section describes the design and implementation of the channel management layer of Raceboat, which handles using multiple channels simultaneously.
3.1 Definitions and Concepts
First, we define a few higher-level concepts used to abstract signaling channels and design Raceboat (but applicable to communications more broadly):
3.1.1 Channel. We use the term channel to refer to an abstract implementation of communications such that two or more instances of the channel can instantiate a link between them and communicate (potentially only unidirectionally).
3.1.2 Creators, Loaders, Addresses, and Directionality. We define two new terms, loader and creator, to capture the roles in establishing a link. These terms avoid the ambiguity of the overloaded terms client and server, and establish semantics that information is only ever needed to be transferred from one side (the creator side) to the other (the loader side). This required information is called a link-address and is generated by the creator and consumed by the loader. This is analogous to a traditional socket connection: the creator is the server-side that binds the socket, the link-address is the <IP, port, protocol> tuple, and the loader is the client-side which connects to the socket. The channel is publicly addressable if this link-address can be known by the censor without harm; in this traditional socket analogy, the channel is not publicly-addressed because if the censor knew the link-address they could block access via IP/port-blocking.
The second concept is the directionality of links relative to the creator and loader. That is, is the link bidirectional, or unidirectional from creator to loader or from loader to creator. These seemingly trivial semantics actually have significant implications on how channels can be used in practice when out-of-band communication is limited and public vs. secret information is critical to viability.
Figure 4: The semantics of link creation: a creator called to instantiate a new link creates new Link-Address that is shared via some pre-existing method to the loader; the loader uses just this Link-Address as a source of shared secrets for completing the link. The directionality of the new link is based on the directionality of the channel.
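To make these semantics concrete, the following is a minimal sketch of how a channel, link-address, and link directionality could be expressed; all type and method names here are illustrative assumptions, not Raceboat’s actual API:

```cpp
#include <cstdint>
#include <memory>
#include <optional>
#include <string>
#include <vector>

// Illustrative sketch only; names and signatures are assumptions.
enum class Directionality { CreatorToLoader, LoaderToCreator, Bidirectional };

// Opaque, serializable information generated by the creator and consumed by
// the loader (analogous to an <IP, port, protocol> tuple for a socket).
using LinkAddress = std::string;

struct Link {
    LinkAddress address;
    Directionality direction;
    virtual ~Link() = default;
    virtual void send(const std::vector<uint8_t>& pkg) = 0;
    virtual std::optional<std::vector<uint8_t>> receive() = 0;
};

struct Channel {
    virtual ~Channel() = default;
    virtual Directionality directionality() const = 0;
    // True if the link-address may be known by the censor without harm.
    virtual bool publiclyAddressable() const = 0;
    // Creator side: binds whatever resources are needed and emits a link-address.
    virtual std::unique_ptr<Link> createLink() = 0;
    // Loader side: consumes a link-address produced by some creator.
    virtual std::unique_ptr<Link> loadLink(const LinkAddress& addr) = 0;
};
```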
3.2 Bridge Request/Response Use Case
We examine how these semantics impact signaling channels in the common use-case of bridge distribution: a client within the censor’s sphere of control, with a copy of publicly available software and no shared secret information, needs to successfully send a request to a broker and get a response containing a bridge-address back. The lack of shared secrets immediately imposes several conditions:
- The client cannot use a direct IP connection to the broker because the censor would know this IP and block connections to it.
- Using an indirect connection implies an application tunneling approach because there must be (one or more) 3rd-party servers facilitating the client-to-broker connection.
- The request and response need not traverse the same channel.
- The broker-to-client connection can use a shared secret, so long as the client creates it.
Current approaches take this scenario and a novel application tunnel and develop a bespoke set of messages over that tunnel in both directions to provide a publicly-addressable link. On occasion, some systems [33] have explicitly designed asymmetric hybrid connections that employ different uplink/downlink channels but these are always rigidly built to support a specific pair of channels, effectively just creating a doubly-complex channel.
In contrast, Raceboat abstracts over arbitrary signaling channels implementing the creator/loader semantics above. We are able to state exactly the channel primitives sufficient to handle the use-case - which are not restricted to a publicly-addressable bidirectional channel (that is sufficient, but not necessary). Instead, we require a loader-to-creator publicly-addressed channel for the client-to-broker request and either a publicly-addressed creator-to-loader or just a loader-to-creator channel for the response. In the latter case, we can piggyback on the client request to also pass a link-address for the broker to respond on (semantically a “reply-to-address”). Note that this still collapses to a single publicly-addressed bidirectional channel if that use of such a channel is desired, and Raceboat makes no requirement that multiple channels be used.
This may initially seem like unnecessary additional theory, and all that is really useful is a common interface for using one-of-a-suite of signaling channels. However, there are often practical considerations of usability and scalability that can make some channels infeasible to use in a bidirectional manner.
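To make the request/response flow above concrete, here is a rough client-side sketch reusing the illustrative Channel/Link types from the earlier sketch; again, none of these names are Raceboat’s real API, and framing of the piggybacked reply-to-address is elided:

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// Client side of the bridge request/response use case (illustrative sketch).
// requestChannel: publicly-addressed, loader-to-creator; the broker's
//   link-address ships with the client software.
// replyChannel: any loader-to-creator channel; the client acts as creator.
std::vector<uint8_t> fetchBridgeInfo(Channel& requestChannel,
                                     const LinkAddress& brokerAddress,
                                     Channel& replyChannel,
                                     const std::vector<uint8_t>& request) {
    // Create a fresh link for the response; its link-address is the
    // "reply-to-address" piggybacked onto the request.
    auto replyLink = replyChannel.createLink();

    // Load the broker's public link-address and send request + reply-to-address.
    auto uplink = requestChannel.loadLink(brokerAddress);
    std::vector<uint8_t> payload(request);
    payload.insert(payload.end(),
                   replyLink->address.begin(), replyLink->address.end());
    uplink->send(payload);

    // Wait for the broker's response (e.g. a bridge address) on the reply link.
    std::optional<std::vector<uint8_t>> response;
    while (!(response = replyLink->receive())) {
        // A real implementation would poll with back-off or use callbacks.
    }
    return *response;
}
```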
3.3 Communication Modes
Raceboat supports more than just the single-request single-response "protocol" above. There are actually four modes that drive use of channels:
1. **Unidirectional Push**
2. **Request-Response**
3. **Socket**
4. **Bootstrapping Socket**
(1) Unidirectional Push is just a one-sided version of (2) Request-Response, described in more detail above. This again may seem a trivial use-case, but explicitly building support for it means Raceboat avoids performing any unnecessary (and potentially more detectable or resource-wasting) handshakes as e.g. a SOCKS-wrapped implementation of a channel would. Similarly, the request-response mode explicitly avoids more than a single round-trip of messages. The (3) Socket mode is provided for completeness in cases where a continuous connection is desired. Finally, the (4) Bootstrapping Socket is a specialized case in which Raceboat bootstraps a socket-like connection with shared secrets from a publicly-addressed initial channel, illustrated in Figure 5. This is equivalent to embedding both the bridge request and subsequent bridge connection into a single application-level step and could be used when larger amounts of data need to be conveyed in either direction.
3.4 Implementation
We implemented Raceboat with a simple CLI-based application layer to perform any of its four communication modes (see above) and take a configuration bundle of channel names and special parameters (e.g. account credentials required for account-based services, etc.) as either CLI-arguments or a manifest file. Raceboat can also be directly included as a C++ library. Internally, Raceboat implements a plugin-based architecture where each plugin provides one or more channels (or components of decomposed channels, see Section 4.2 below). These plugins are dynamically loaded at runtime to support flexible deployment scenarios and to minimize the difficulty of updating.
Note, for the purposes of mobile use, plugins are not run as separate processes but are all run within the Raceboat process (or a parent process, if Raceboat itself is included as a library).
Plugins implement a straightforward asynchronous API covering activation/deactivation of a channel, creating/loading/destroying links and connections, sending/receiving packages, and callbacks to update the status of the channel, links, or packages. This API is slightly more complex than the Pluggable Transports API, but also allows support for more nuanced use of the channels and better handling of error conditions.
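As a rough illustration of the shape such an asynchronous plugin API could take (the handle types, enums, and method names below are our own guesses standing in for the described activate/deactivate, create/load/destroy, send/receive, and status-callback calls, not the framework’s published headers):

```cpp
#include <cstdint>
#include <string>
#include <vector>

// Illustrative only: handle and status types standing in for the framework's own.
using Handle = uint64_t;
enum class ChannelStatus { Available, Unavailable, Failed };
enum class LinkStatus    { Created, Loaded, Destroyed };
enum class PackageStatus { Sent, Received, Failed };

// Callbacks the framework exposes to plugins (status updates, inbound data).
struct PluginCallbacks {
    virtual ~PluginCallbacks() = default;
    virtual void onChannelStatusChanged(Handle channel, ChannelStatus s) = 0;
    virtual void onLinkStatusChanged(Handle link, LinkStatus s) = 0;
    virtual void onPackageStatusChanged(Handle package, PackageStatus s) = 0;
    virtual void onPackageReceived(Handle link, std::vector<uint8_t> data) = 0;
};

// Interface a channel plugin implements; every call is asynchronous and
// reports its outcome through the callbacks above.
struct ChannelPlugin {
    virtual ~ChannelPlugin() = default;
    virtual void activateChannel(Handle channel) = 0;
    virtual void deactivateChannel(Handle channel) = 0;
    virtual void createLink(Handle link) = 0;                        // creator role
    virtual void loadLink(Handle link, std::string linkAddress) = 0; // loader role
    virtual void destroyLink(Handle link) = 0;
    virtual void sendPackage(Handle link, std::vector<uint8_t> data) = 0;
};
```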
3.4.1 Cross-Language Bindings
While the Raceboat framework is implemented in C++, we have built language bindings for Python, Java, Rust, and Go to seamlessly support running plugins in those languages.
The cross-language implementation varies depending on the language: Python and Go are handled via SWIG auto-generated bindings [29], Java and Rust bindings are both explicitly built as bidirectional translation layers. This is more time consuming to develop, but also provides greater transparency over SWIG’s auto-generated translation layers.
3.4.2 Communication Modes
Raceboat provides several distinct communication modes to support different use cases and implements these as separate protocol state machines for easier extensibility. Each state machine manages the asynchronous use of the channels involved in the connection. We will now walk through the state machine for the complex bootstrapping-socket case (see Fig 5): first it activates each channel involved in the protocol (up to three in this case); then links are created or loaded as appropriate for their role in the protocol (sending and/or receiving) and directionality (see Fig 4). For links that are created, their link-addresses are extracted and concatenated with User App messages: this enables bootstrapping a new link in-band of an existing one. Finally, User App messages are batched into a minimum number of channel sends based on a reported maximum-transmission-unit (MTU) for the channel being used.
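A simplified sketch of that batching step is shown below; it is purely illustrative (real framing, fragmentation of oversized messages, and control-message multiplexing in Raceboat are more involved):

```cpp
#include <cstdint>
#include <string>
#include <vector>

// Batch an optional piggybacked link-address plus queued user-app messages
// into the fewest channel sends that fit the channel's reported MTU.
// Sketch only: assumes messages are self-delimiting and never exceed the MTU.
std::vector<std::vector<uint8_t>> batchForChannel(
        const std::string& newLinkAddress,               // empty if none to piggyback
        const std::vector<std::vector<uint8_t>>& messages,
        size_t mtu) {
    std::vector<std::vector<uint8_t>> sends;
    std::vector<uint8_t> current(newLinkAddress.begin(), newLinkAddress.end());

    for (const auto& msg : messages) {
        if (!current.empty() && current.size() + msg.size() > mtu) {
            sends.push_back(std::move(current));
            current.clear();
        }
        current.insert(current.end(), msg.begin(), msg.end());
    }
    if (!current.empty()) sends.push_back(std::move(current));
    return sends;
}
```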
The protocol transparently multiplexes Raceboat control messages (namely, link-addresses of new links) with User App messages to minimize channel usage. Since some links cannot exist until after the connection starts (e.g. in-band bootstrapped links), the protocol state machines encompass the entire connection, not just an initial setup phase.
4 DECOMPOSED APPLICATION TUNNELING

We observe that application tunneling channels share a common set of internal functionalities, which suggests they can be decomposed into modular, reusable components.
4.1 Components
There are likely many ways to modularize application tunneling channels - our design divides channels into three components and aims to enable swapability of components without impacting the ability to express novel application tunneling techniques or domains. Additionally, we sought to separate the concerns of the component developers to allow experts in one area to contribute novel components while using existing versions of other components. We observe that application tunneling protocols broadly function as: managing use of an application; encoding/decoding messages into/out-of application content; interfacing with the application to send and receive content. We will use a running example of an application tunneling channel based on sending emails containing steganographic image attachments throughout the following descriptions of our design.
4.1.1 User Models. The user model component controls when and what application actions are executed. Conceptually this is targeted at user-level behaviors - e.g. when an email is sent, how large it is, whether it has an attachment or not. Many existing application tunneling channels do not include any functionality of this sort. However, as recent research shows, this can leave a significant gap in security because it causes the cover application usage to be shaped by the user (whether a human or a circumvention application). Breaking this linkage provides behavioral independence and enables evaluating the security of a channel independent of its eventual communications usage.
User models represent application usage as a timeline of actions. These actions correspond to implementations provided by the transport component and can contain parameters about the actions. E.g. our email user model specifies a timeline of send-email actions, the size of the body text, and whether there should be an attachment or not. This is fairly simple, but more complex behaviors are expressible in the user model framework: in particular, the set of action types is only restricted by what the transport component implements.
In some use cases, true behavioral independence is unnecessary and introduces significant performance loss. E.g. an email channel operating in a permissive network environment may only need to avoid exceeding some sending limit rather than make its messages adhere to a rich model of real user behavior. For these cases we provide an optional “on-demand” API that informs the user model of new data to send and allows it to provide a set of “supplemental” actions to facilitate sending. Even in this case the user model may still add nuance to its response, e.g. providing delayed sending actions or sometimes responding with supplemental actions and sometimes sticking to its original timeline.
In addition to actions, there are events that are produced by the transport and consumed by the user model. This feedback path enables reactive user models that could, e.g., update the action timeline to include a new send action for a fraction of new emails received (to mimic a user sending more replies when they receive more emails). Again the space of events is only constrained by the implementation choices of the user model and transport. This means an arbitrarily complex user model can be expressed, such as the very-reactive OUStralopithicus model.
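Putting this subsection together, a user model interface could look roughly like the sketch below; the field names, action strings, and method signatures are assumptions modeled on the email example (only the timeline and event feedback are described in the text), not the actual component interfaces:

```cpp
#include <chrono>
#include <map>
#include <string>
#include <vector>

// An action the transport knows how to execute, scheduled at a point in time.
// The set of action types and parameter keys is whatever the transport defines.
struct Action {
    std::chrono::system_clock::time_point when;
    std::string type;                           // e.g. "send-email"
    std::map<std::string, std::string> params;  // e.g. {"bodyBytes","2048"}, {"attachment","true"}
};

struct UserModel {
    virtual ~UserModel() = default;
    // A timeline of upcoming cover-application actions.
    virtual std::vector<Action> getTimeline() = 0;
    // Optional "on-demand" hook: new covert data exists; the model may return
    // supplemental (possibly delayed) actions to help send it.
    virtual std::vector<Action> onDemand(size_t pendingBytes) { (void)pendingBytes; return {}; }
    // Feedback path: events produced by the transport (e.g. "email-received").
    virtual void onEvent(const std::string& event) { (void)event; }
};
```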
Despite requiring the user model and transport agree on the types of actions and events there is still value in separating them: it enables independent development, distribution, and use of new incompatible versions: e.g. once a single email transport and user model are developed, new user models can be built and deployed with no interaction with the email transport code or developer knowledge. Imposing a formal separation also encourages modular code that is more easily built upon: e.g. adapting an approach taken in one user model is easier when its code is not deeply intertwined with other functionalities.
4.1.2 Transports. The transport component handles all interactions with external entities; e.g. the email transport includes a library which functions as a client for 3rd-party email servers and handles actually sending and receiving emails. As described above, the transport implements translating user model actions into interactions with the cover application and detecting and sending events back to the user model.
Transports can define their actions and events arbitrarily, but to maximize utility we encourage transports of similar types to share actions and events as much as possible. E.g. a Twitter transport and a Mastodon transport will need independent implementations to deal with different application APIs, but ideally they can provide identical action and event types to enable user model interoperability. We could have pushed the decomposed design further and explicitly built multiple types of transports with different interfaces to enforce this pattern. However, that could restrict the expression of new transports.
The transport is also responsible for handling “addressing.” Conceptually, this is how a message sent by one transport instance is able to be (efficiently) received by a counterpart instance and it requires implementing the semantics of link-addresses described above (Section 3.1.2). E.g. the email transport straightforwardly implements this by simply sending emails to specific email addresses. However, something like publicly posting to a series of pseudorandomly rotating hashtags (and the receiver polling the same) can make this logic more complex. Theoretically the addressing functionality and the application interaction could be decomposed into separate components. However, addressing is often deeply intertwined with application-specific implementations, even among similar applications: e.g. Twitter supports searching for an intersection of multiple hashtags while Mastodon only supports searching for single hashtags, so a hashtag-based addressing scheme would need to be different as well.
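As a toy illustration of the “pseudorandomly rotating hashtag” style of addressing mentioned above (our own sketch, not taken from any cited system), both endpoints could derive the current hashtag from a seed carried in the link-address and the current time window:

```cpp
#include <chrono>
#include <cstdint>
#include <random>
#include <string>

// Derive the hashtag both endpoints use during the current time window.
// The seed travels in the link-address; rotating windows keep any single
// hashtag short-lived. Purely illustrative.
std::string currentHashtag(uint64_t linkSeed,
                           std::chrono::minutes window = std::chrono::minutes(60)) {
    using namespace std::chrono;
    uint64_t epoch = static_cast<uint64_t>(
        duration_cast<minutes>(system_clock::now().time_since_epoch()).count() /
        window.count());

    std::seed_seq seq{linkSeed, epoch};
    std::mt19937_64 rng(seq);

    static const char alphabet[] = "abcdefghijklmnopqrstuvwxyz";
    std::uniform_int_distribution<int> pick(0, 25);
    std::string tag = "#";
    for (int i = 0; i < 12; ++i) tag += alphabet[pick(rng)];
    return tag;
}
```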
Execution of actions is how messages are ultimately sent and received. However, since all data is being sent through an application tunnel, the actual messages must be encoded into valid application content before being transmitted. Thus, in addition to consuming actions from the user model, the transport also consumes content from the encoding (see below). The transport provides a specification about what content (if any) is suitable to include in the execution of an action and the encoding provides content that satisfies this specification. E.g. the email transport receiving a send-email action for a message with an attachment would, in turn, request a body of natural language text and an image from the encodings to support execution of the action.
4.1.3 Encodings. The encoding component is the most straightforward: it encodes messages into application content according to a specification or, in reverse, decodes messages from application content. As with the user model and transport, the encoding and transport can operate over any agreed set of content. However, the intention is to support generic types of content used across many applications: i.e. common image and video formats, natural language text, and base64 strings. The transport can specify more complex parameters for content, such as the size of an image, or the length of a body of text. Within this context, arbitrarily complex encodings can also be constructed, e.g. an image encoding that takes a genre parameter for what type of image content to return.
In addition to encoding and decoding messages, the encoding is also responsible for providing requested content even when there is no message to send. E.g. if the transport is instructed to send an email with an attachment but there is no message to send, the encoding is still required to provide an image to attach, and said image simply will not have any data encoded in it.
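A minimal sketch of the encoding role follows; encodeBytes and decodeBytes are named later in Section 4.2.1, but the parameter and return types here are our assumptions:

```cpp
#include <cstdint>
#include <map>
#include <string>
#include <vector>

// Parameters the transport supplies for a piece of content, e.g.
// {"mimeType","image/jpeg"}, {"maxWidth","1024"}, {"genre","flowers"}.
using ContentParams = std::map<std::string, std::string>;

struct Encoding {
    virtual ~Encoding() = default;
    // Covert bytes this encoding can carry in one piece of content produced
    // under the given parameters (used for fragmentation and batching).
    virtual size_t capacity(const ContentParams& params) const = 0;
    // Produce valid application content carrying `covert` (which may be empty:
    // the encoding must still return plausible cover content).
    virtual std::vector<uint8_t> encodeBytes(const ContentParams& params,
                                             const std::vector<uint8_t>& covert) = 0;
    // Recover covert bytes (possibly none) from received content.
    virtual std::vector<uint8_t> decodeBytes(const std::vector<uint8_t>& content) = 0;
};
```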
4.1.4 Expressibility of Existing Circumvention Channels. The benefits of the decomposed approach are clear: re-usability and elimination of redundant development. However, a valid concern would be whether adopting the decomposed framework restricts the potential circumvention channels that can be built. To assess this, we surveyed 8 recent circumvention channels and assessed if-and-how they would be suitable to decompose. We also note that in many cases an existing channel does not already contain representative functionalities from all three component types but would be stronger if it did.
Camoufler [27]. Camoufler communicates over digital instant messenger platforms - transporting messages through the text and attachment features of WhatsApp, Signal, Telegram, Slack, and Skype. In a decomposition, each of these applications would be its own transport but they could share a generic “instant messenger user model.” The camoufler system as-published does not perform any encoding of data, but the authors explicitly note a need for steganography if the application does not provide end-to-end encryption (E2EE) and the censor can access or influence the service. Hence, decomposing could immediately resolve this by enabling text and multimedia encodings to be seamlessly added in for applicable cases. Further, we believe a more realistic user model could be necessary in cases where the censor can profile user behavior and the user app is performing significant data transfer.
Collage [5]. Collage communicates over image-posting websites - encoding messages into images and then posting those images on particular image hosting services and microblogs. Collage also uses the concept of tasks to specify both when and where an image should be posted or searched for in order to both retain realistic application usage and enable receiving to find sender content. The image generation of the collage system naturally decomposed to an image-based encoding. The task-based posting and polling system is more complex, and would best decompose into both a user model and transport component, or potentially multiple such components. The user model timeline fits with the “when and where” task scheme, and the transport would simply need to implement taking the encoded content and posting it to a particular service. We also note a decomposition of Collage particularly shows the expressibility of our decomposition: e.g. Collage stipulates a connection between a task (post an image with “#flowers”) and the content for it (the image posted should contain flowers). This connection is provided for by the user model and transport’s ability to specify arbitrary parameters, like an image genre, to the encoding for each content generation request.
CovertCast [23]. CovertCast communicates over video streaming services - encoding messages into valid video content formats and publishing them via livestream services like YouTube. This system neatly decomposes into an encoding piece that generates video data and a transport component that interfaces with YouTube. The “encoding” used is focused on bandwidth not covertness and could easily be detected if inspected, thus there would be a benefit to having alternative video encodings to use in scenarios with a stronger adversary. Inversely, the encoding could benefit from additional transports for other livestreaming services where YouTube may be blocked. Finally, a user model could improve covertness, particularly to prevent detectable behavior patterns in streaming clients.
FreeWave [18]. FreeWave communicates over audio streaming services - encoding messages into audio data via frequency manipulation and streaming it between endpoints via Skype. This system decomposes into an encoding component to produce the audio data and a transport component to control Skype and inject/extract audio content. Decomposing to an abstracted and reusable encoding component could make it easier to handle fixed and variable length audio codecs (a noted challenge for the authors). Decomposing
would also have enabled easier adaptation to other audio streaming applications (e.g. see Protozoa WebRTC transport below). Finally, a user model would again improve covertness against censors that can employ behavioral profiling.
Format Transforming Encryption (FTE) [9]. FTE was originally employed in a protocol-mimicry channel. However, we identify that the underlying mechanism of encoding messages into arbitrary regex-defined (or otherwise rankable) languages is a valid steganographic technique for many types of content (particularly machine-generated content). A number of specialized FTE instances could be built as encodings for use with various transports (e.g. see Camoufler transports above).
Protozoa and Stegozoa [3, 15]. Protozoa and Stegozoa both communicate over WebRTC video connections. Protozoa encodes messages in a high-bandwidth manner but is easily detected if the WebRTC video content can be inspected; Stegozoa encodes messages using a steganographic encoding that reduces bandwidth but provides security against content inspection. These would be decomposed as a shared WebRTC-based transport component and two separate encoding components. It is clear the authors took this modular approach in developing both systems, so we argue the advantage of Raceboat’s decomposition is in pushing for a more flexible modularization that would enable these encoding components to be effortlessly used for various other transports with video application content (e.g. see CovertCast YouTube transport above). Similarly, a WebRTC transport could be used with other encodings that might provide situational advantages in bandwidth or security. Finally, a user model would again improve covertness against censors that can employ behavioral profiling.
Raven [32]. Raven communicates over email services - it encrypts messages using GPG and then sends them to the recipient. The main innovation in Raven is the introduction of a strict timeline of email (send-time, size) tuples to produce realistic behavior that is independent of (and thus unable to be violated by) the user app demands to send and receive messages. This timeline is based on sampling a sophisticated GAN model of real human email usage. Raven would be decomposed into an email client transport, a GPG-based encoder (or simply a GPG-formatter), and a user model that generates the timeline of sending actions. Decomposing would be beneficial by enabling Raven to swap in more sophisticated encodings (i.e. if the censor can inspect email content then GPG emails will likely be blocked or a source of suspicion).
4.2 Implementation
As with the "unified" channels described above (see Section 3.4), decomposed channels are also implemented as plugins, where each plugin provides one or more components (see Fig. 7). Components are dynamically assembled into a composition at runtime. This enables extremely lightweight mixing-and-matching of components, e.g. the user or calling program can specify a new channel simply by modifying a JSON file to switch which components are used. Furthermore, the same component can be used simultaneously by multiple compositions - we explicitly designed component instantiation to enable resource-intensive pieces (like a generative AI model) to only be loaded once in these cases.
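Conceptually, assembling a composition amounts to looking up one user model, one transport, and one or more encodings by name (as listed in the JSON manifest) and handing them to a manager, while caching instances so a component shared by several compositions is only loaded once. A hypothetical sketch:

```cpp
#include <map>
#include <memory>
#include <string>
#include <vector>

// Hypothetical sketch of runtime composition assembly; not Raceboat's code.
struct Component { virtual ~Component() = default; };

class ComponentRegistry {
public:
    std::shared_ptr<Component> get(const std::string& name) {
        auto it = cache_.find(name);
        if (it != cache_.end()) return it->second;   // reuse the shared instance
        auto instance = loadPluginComponent(name);    // dlopen + factory in reality
        cache_[name] = instance;
        return instance;
    }
private:
    std::shared_ptr<Component> loadPluginComponent(const std::string& /*name*/) {
        // Stand-in for dynamically loading the plugin that provides this component.
        return std::make_shared<Component>();
    }
    std::map<std::string, std::shared_ptr<Component>> cache_;
};

struct Composition {
    std::shared_ptr<Component> userModel;
    std::shared_ptr<Component> transport;
    std::vector<std::shared_ptr<Component>> encodings;
};

// Component names would come from the user-edited JSON manifest.
Composition assemble(ComponentRegistry& reg, const std::string& userModel,
                     const std::string& transport,
                     const std::vector<std::string>& encodings) {
    Composition c{reg.get(userModel), reg.get(transport), {}};
    for (const auto& e : encodings) c.encodings.push_back(reg.get(e));
    return c;
}
```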

4.2.1 Composition Manager and Execution Flow. Internally, each composition is run by a manager which coordinates: fetching and scheduling a timeline of actions from the user model; requesting content from the encoding(s) for actions based on parameters the transport provides; passing encoded content to the transport; and finally pushing the scheduled actions to the transport for execution.
The call-paths for sending and receiving in a composition are shown in Fig. 8. These paths abstract-out the role of the manager in making each of these API calls, handling callbacks, and passing data between components. The manager has two inputs that drive its behavior: the timeline of actions provided by the user model ((1), getTimeline) and the queue of user app messages to-be-sent ((3), sendPackage). The action timeline dictates when the manager makes calls to doAction in the transport. Actions are intended to drive all transport behavior, so they include not only actions like "post an image" (to send messages) but also "view the newest post with this hashtag" (to receive messages) and "post an innocuous comment" (to be a more convincing user).
In cases where content is required for an action (e.g. posting an image), the enqueued user app messages (if any) are involved. First, the manager transforms an action into a set of content parameters via the transport ((2), getActionParams). These parameters can include details specified by the user model (carried over from the action) and by the transport, which most importantly is a MIME-type field to state what type of content is required. An action can also require multiple pieces of content of different types (e.g. an image and text). A composition can contain multiple encodings (e.g. a text encoding and an image encoding) and the manager selects from among the encodings for a given piece of content based on the MIME-type field of the parameters. Once selected, the manager passes the parameters and enqueued user app message bytes to the encoding ((4), encodeBytes). Fragmentation and batching of user app messages is handled by the manager based on the covert data capacity for a piece of content reported by the encoding. If a message is too large then it is fragmented over multiple pieces of content, potentially spanning multiple actions.

Conversely, messages that are smaller than the capacity are batched together into a single piece of content. If there are no messages to encode, the encoding is expected to produce a valid piece of content, and it will simply lack any covert data. When the encoding finishes it passes the encoded content back to the manager, which passes it to the transport for use with a particular action ((5), `enqueueContent`).
When the timestamp of an action arrives, the manager calls the transport to execute it ((6), `doAction`). The action has already been checked for content requirements, and if any content is required then it has already been enqueued in the transport (see Section 4.2.2 below for complexities). Thus, the transport should execute the action immediately, preserving high fidelity to the user model timeline. The interaction between transport and “application” is entirely internal to the transport: this could be requests against a remote server API (e.g. using an email client library) or manipulation of a locally running application (e.g. injection of data into a browser). At any time, the transport can also issue an event reflecting some application-level change (e.g. a service outage or a new email received) that is passed to the manager and then on to the user model ((7), `onEvent`); this enables arbitrarily complex, reactive, user models for cases where that type of covertness is necessary.
Receiving data follows much the same path, except typically there is no content required from the encoding. Rather, the transport executes an action ((8), `doAction`) that involves receiving content. These pieces of received content are passed to the manager which again passes them to an encoding based on MIME-type for decoding ((9), `decodeBytes`). The content is decoded into user app message data and a header (for handling fragmentation) and passed back to the manager to be reassembled and pushed to the user app as one or more new received messages ((10), `receiveEncPkg`).
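Pulling the numbered calls together, the manager’s send path can be sketched roughly as below. The call names (getTimeline, getActionParams, encodeBytes, enqueueContent, doAction) come from the description above, but the surrounding types, signatures, and the single-encoding simplification are our assumptions; this builds on the earlier Action, UserModel, ContentParams, and Encoding sketches.

```cpp
#include <cstdint>
#include <deque>
#include <thread>
#include <vector>

// Minimal stand-in for the transport component interface used below.
struct Transport {
    virtual ~Transport() = default;
    virtual std::vector<ContentParams> getActionParams(const Action& a) = 0;  // (2)
    virtual void enqueueContent(const Action& a, std::vector<uint8_t> c) = 0; // (5)
    virtual void doAction(const Action& a) = 0;                               // (6)/(8)
};

// One pass over the user model timeline: encode queued covert bytes into
// content for actions that need it, then execute each action on schedule.
// sendQueue is filled by the user app via sendPackage (3).
void runSendPath(UserModel& userModel, Transport& transport, Encoding& encoding,
                 std::deque<std::vector<uint8_t>>& sendQueue) {
    for (const Action& action : userModel.getTimeline()) {            // (1) getTimeline
        for (const ContentParams& params : transport.getActionParams(action)) {
            // Batch queued messages up to the encoding's covert capacity;
            // MIME-type-based encoding selection and fragmentation across
            // actions are elided in this sketch.
            std::vector<uint8_t> covert;
            size_t budget = encoding.capacity(params);
            while (!sendQueue.empty() &&
                   covert.size() + sendQueue.front().size() <= budget) {
                auto& msg = sendQueue.front();
                covert.insert(covert.end(), msg.begin(), msg.end());
                sendQueue.pop_front();
            }
            transport.enqueueContent(action,
                                     encoding.encodeBytes(params, covert)); // (4),(5)
        }
        std::this_thread::sleep_until(action.when); // honor the user model timeline
        transport.doAction(action);                 // (6) doAction
    }
}
```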
4.2.2 Scheduling. A critical challenge to composition usage is handling potentially conflicting timings between when user models dictate actions should occur and how long encodings take to generate content for those actions. Failing to properly engineer these steps could disrupt the security and/or performance of a composition in several ways.
If encodings are too slow then the transport could either be forced to execute actions without necessary content or wait and violate the user model timeline of action occurrences. A potential heuristic to minimize this problem would be eagerly encoding content as soon as new message data exists to encode. However, this heuristic has two flaws: 1) when new messages arrive for encoding within an existing sequence of actions, we ideally want to send the message on the soonest possible action, but we need a way to know if the soonest action is too soon for the encoding to complete in time; 2) if we eagerly encode as soon as the first new message arrives then we will miss the opportunity to batch subsequent messages into the same content for the soonest action. For some compositions and use cases this sort of batching can be critical for usable performance, and we do not want to force the user app to understand these details when it decides whether and how to concatenate or fragment its messages.
Relatedly, we give the option for the user model to update its timeline in case, for security reasons, the user model needs to adjust its near-future actions based on events received from the transport. When an update occurs, actions must potentially be removed, which cascades into the need to re-encode messages originally encoded for those actions into new content for new actions.
We overcome these challenges by using an estimated max-encoding-time for each encoding component. The manager then uses this as a basis for estimating how soon before an action will execute that encodings must be called to generate content for the action.
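A sketch of that deadline computation (field and function names hypothetical):

```cpp
#include <chrono>

// Latest moment the manager can start encoding content for an action and
// still expect it to be ready when the action is scheduled to execute.
// maxEncodingTime is the estimate reported for the chosen encoding component.
std::chrono::system_clock::time_point
encodeDeadline(std::chrono::system_clock::time_point actionTime,
               std::chrono::milliseconds maxEncodingTime) {
    return actionTime - maxEncodingTime;
}

// Deferring encoding until the deadline lets messages that arrive in the
// meantime still be batched into the same piece of content.
bool shouldEncodeNow(std::chrono::system_clock::time_point now,
                     std::chrono::system_clock::time_point actionTime,
                     std::chrono::milliseconds maxEncodingTime) {
    return now >= encodeDeadline(actionTime, maxEncodingTime);
}
```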
This way the transport is never delayed waiting for content, but neither are messages that could have been batched instead put into separate actions, wasting bandwidth.

4.2.3 Intercomponent Data Structures. There are naturally several points at which components need to produce data to be consumed by other components. The structure of these data is often obvious when working through a single example, but quickly becomes varied when the components involved change in their character: e.g. an email transport can essentially take arbitrary content for attachments, but an imageboard transport must take images (and possibly images of specific size and type if server-side conversions are to be avoided).

4.3 Component and Channel Implementations

In collaboration with other performer teams on the DARPA RACE program we have developed an initial set of components (see Table 1) designed to act as a combination of real-world circumvention channel components and exemplars for aiding independent development. We believe some of these components have novel value on their own, but leave this information to independent publications for brevity.

Table 1: Initial set of implemented components.

<table>
<thead>
<tr>
<th>User Model</th>
<th>Transport</th>
<th>Encoding</th>
</tr>
</thead>
<tbody>
<tr>
<td>FileScript</td>
<td>RedisClient</td>
<td>NOOP</td>
</tr>
<tr>
<td>OnDemand</td>
<td>EmailClient</td>
<td>Base64</td>
</tr>
<tr>
<td></td>
<td>S3Bucket</td>
<td>JEL Image</td>
</tr>
</tbody>
</table>
We also have a suite of non-decomposed channels that have been wrapped to support the Raceboat channel APIs. These include wrappers around several higher-bandwidth direct channels (Obfs [35], Snowflake [7], and Balboa [26]), as well as seven lower-bandwidth indirect channels suitable for signaling use (using a variety of social media or email services and varying image-based steganographic methods).
5 EVALUATION
We consider the design and development of the Raceboat framework to be our main contribution, not the performance or security of any individual channel or composition that it can run. Therefore, we focus our evaluation on demonstrating the flexible capabilities described in the above design. We use a set of channels, some constructed as compositions from components and some built as monolithic channels, to demonstrate Raceboat’s capabilities, and briefly describe them below for context and to demonstrate the range of channels Raceboat can enable to interoperate.
Obfs. We use a version of Obfs wrapped in a Raceboat-compatible wrapper written in Go; it imports the standard Obfs Go module. Obfs is a direct channel, so it is likely unusable for many signaling use cases but is well suited to higher-bandwidth communications.
Snowflake. We use a version of Snowflake wrapped in a Raceboat-compatible wrapper written in Go. It imports the standard Snowflake module which, like Obfs, is direct and useful for non-signaling use cases.
S3Bucket. This is a transport based on reading and writing to publicly-permissioned AWS S3 objects. It is inspired by CloudTransport, but uses permissions to enable public put/get without list, enabling secret addressing as well as public addressing. Network traffic appears as generic HTTPS traffic to an S3 region, not associated with the specific bucket. A null encoding is used for this threat model, but arbitrary content encodings can be used since S3 will accept any type of object data.
Email+Base64. This channel is a composition of an email transport and user-model with a simple base64 encoder. The composition sends and receives by encoding data into the body of the email and sending emails to particular recipients. It is essentially equivalent to Raven or SWEET but written within the decomposed framework.
Email+JEL. This channel is a composition of the email transport and user-model (above) combined with jpeg steganographic encoding based on the JEL technique [6]. It encodes data into jpegs that are sent as attachments instead of the body of emails.
Flickr+JEL. This channel uses the above JEL encoder, but rather than transporting images via email it uploads them to Flickr with pseudorandomly chosen hashtags. Receivers then poll these hashtags to check for new covert messages. Upload and polling rates are controlled to avoid Flickr API limits.
Tumblr+JEL. This channel uses the JEL encoder and a similar pseudorandom hashtag scheme but uploads and polls on the Tumblr service rather than Flickr.
5.1 Bridge Distribution Evaluation
One of the primary use cases identified for Raceboat is facilitating bridge distribution. Recent research on the Lox system [30] shows that the development of more elaborate bridge distribution protocols may be necessary for preventing adversary exhaustion of bridges via enumeration. Therefore we decided to demonstrate the flexibility and performance of Raceboat by empirically evaluating the latency for completing the equivalent of a Lox bridge fetch and invitation acceptance protocol. This consists of a single 346B request and a 1.3KB response that bundles together a Lox invitation redemption and bridge request from client-to-server and a Lox response and bridge address from server-to-client. We evaluate this sequence across a combination of channels and compositions used as upstream and downstream links to demonstrate the ability to run arbitrary combinations of channels. We mark with an asterisk (*) channels that are likely inappropriate for this use case, usually due to using direct IP connections that would be enumerated and blocked by a censor, but include them to demonstrate compatibility in the Raceboat framework.
All connections were run on a laptop with 6-core 2.6GHz CPU and 32GB of RAM. Where channels required accounts with application services (e.g. email channels requiring email addresses) we created temporary accounts for testing.
Table 2: Latency of a Lox-based Bridge Request + Invitation Redemption (346B request + 1.3KB response) using different combinations of upstream and downstream channels. * indicates direct channels likely unsuitable for signaling; N/A indicates technical incompatibility of underlying channel software.
<table>
<thead>
<tr>
<th>Upstream \ Downstream</th>
<th>obfs*</th>
<th>snowflake*</th>
<th>S3Bucket+NOOP</th>
<th>Email+Base64</th>
<th>Email+JEL</th>
<th>Flickr+JEL</th>
<th>Tumblr+JEL</th>
</tr>
</thead>
<tbody>
<tr>
<td>obfs*</td>
<td>1.47s</td>
<td>0.96s</td>
<td>2.54s</td>
<td>11.28s</td>
<td>14.11s</td>
<td>89.4s</td>
<td>69.34s</td>
</tr>
<tr>
<td>snowflake*</td>
<td>0.75s</td>
<td>0.47s</td>
<td>2.52s</td>
<td>11.34s</td>
<td>15.17s</td>
<td>66.25s</td>
<td>66.14s</td>
</tr>
<tr>
<td>S3Bucket+NOOP</td>
<td>3.15s</td>
<td>2.99s</td>
<td>6.58s</td>
<td>24.54s</td>
<td>26.73s</td>
<td>90.10s</td>
<td>71.42s</td>
</tr>
<tr>
<td>Email+Base64</td>
<td>11.93s</td>
<td>6.7s</td>
<td>14.51s</td>
<td>16.54s</td>
<td>21.48s</td>
<td>89.51s</td>
<td>52.13s</td>
</tr>
<tr>
<td>Email+JEL</td>
<td>9.81s</td>
<td>10.46s</td>
<td>18.56s</td>
<td>19.3s</td>
<td>20.50s</td>
<td>89.51s</td>
<td>55.13s</td>
</tr>
<tr>
<td>Flickr+JEL</td>
<td>62.59s</td>
<td>55.61s</td>
<td>65.13s</td>
<td>47.91s</td>
<td>54.90s</td>
<td>90.58s</td>
<td>N/A</td>
</tr>
<tr>
<td>Tumblr+JEL</td>
<td>32.91s</td>
<td>27.59s</td>
<td>32.88s</td>
<td>47.24s</td>
<td>49.73s</td>
<td>N/A</td>
<td>80.94s</td>
</tr>
</tbody>
</table>
5.2 Results
The results shown in Table 2 demonstrate that Raceboat is capable of flexibly using a variety of very different channels to achieve a core signaling use-case. Purely in terms of performance some channels clearly make stronger cases for use than others. However, the point of Raceboat is that different channels will be suitable for different threat scenarios: snowflake and obfs are (expectedly) the fastest but are also not actually usable in most signaling use cases. Further, the very existence of multiple seamlessly-swappable channels is a compelling strength because it forces a censor to split attention and resources blocking many different channels before benefits can be gained.
6 RELATED WORKS
6.1 Pluggable Transports
Pluggable Transports (PT) [25] is the current de facto standard interface for exposing circumvention channels to user apps. There are multiple active versions of the PT specification, including the original subprocess-oriented version and the newer V2.0 suited for inclusion as a library. There is direct overlap in the effect of the PT and Raceboat interfaces: both provide a simple and uniform way to use a circumvention channel with socket-like semantics. However, PTs are designed to be implemented on the basis of individual and independent channels - even if some PT libraries implement multiple channels, a given connection can only use one at a time. For example, the obfs4proxy [1] software provides both obfs4 and meek [14], but there is neither an interface nor internal logic to use both for a given connection. Moreover, the single-channel orientation of PT means there are no semantics around which to build a fully expressive multichannel library like Raceboat: i.e. there is no existing formalism around a channel providing anything other than bidirectional connectivity, and no uniform way to generate new links on channels based on a desired set of properties. We address the topic of compatibility with the PT interface in Section 7.2.
6.2 Turbo Tunnel
Raceboat incorporates some concepts already expressed by the Turbo Tunnel [13] design, namely automatically applying fragmentation and ordering to user app messages. However, Turbo Tunnel presents itself as a design pattern to be applied to individual channels. This inherently assumes continuing the "one channel at a time" paradigm in which the user app is expected to use a single channel for a given bidirectional communication task. Some of Turbo Tunnel’s suggestions, like using sessions to seamlessly shift among short-lived proxies, are reminiscent of Raceboat’s desired channel agility; the difference is that Turbo Tunnel applies these goals at the level of each channel independently, while Raceboat aims to enhance communication by flexibly shifting between channels.
Implementing Turbo Tunnel’s approach inside a channel does not impact Raceboat’s use of that channel, so we consider Turbo Tunnel to be orthogonal to our work, operating at the channel layer. We believe Turbo Tunnel’s suggestions mostly apply to the transport component of our decomposed channel design. Furthermore some of its approaches, like packetizing and providing ordering, are actually automatically implemented by the Raceboat manager and so do not need to be implemented by each individual transport.
6.3 Marionette
Marionette [10] is spiritually similar to Raceboat’s channel decomposition. However, the two are fundamentally different: Marionette explicitly rejects application tunneling in favor of a more sophisticated version of protocol mimicry. The main shared features are an emphasis on building a modular framework with developer use and plugin re-use as major goals. Marionette does not engage with the topic of flexibly shifting between channels or specifying multiple channels simultaneously, focusing instead on performing its protocol mimicry on the fly. If the advanced protocol mimicry were still wanted by a user, it could be wrapped into a usable Raceboat channel.
7 DISCUSSION
7.1 Other Signaling Channel Use Cases
We have referred to bridge requests as the exemplary case for signaling channel usage throughout this paper. However, we believe there are a number of other immediate use cases for them.
Recent bridge distribution schemes [8, 30] introduce nontrivial amounts of latency-tolerant "control traffic" between censored clients and uncensored infrastructure. Reporting bridge blockages or other censorship telemetry from the client is a similar case. In these cases signaling channels are beneficial if there is either a significant chance the client knows no reachable bridges or if there is a cost to using a bridge (increasing likelihood of discovery) that is only justified for servicing actual user requests.
Signaling channels can also have roles outside of large-scale circumvention infrastructures. For example, we have built in a bootstrapping mode in which a publicly addressed channel is used for a single upstream message, which then bootstraps secretly addressed links in each direction (or a single bidirectional one). This combines the conceptual elements of bridge distribution and bridge use; it is very similar to Snowflake’s use of a domain-fronted broker mediating connection to ephemeral proxies, but abstracted to any suitable combination of channels Raceboat supports.
Finally, signaling channels are potentially suited for any other latency-tolerant uses. Asynchronous messaging akin to social media can be accomplished, as prior systems have shown [20]. Higher latencies also do not always imply low bandwidth; e.g. steganographic videos uploaded to 3rd-party streaming sites can involve lengthy encoding and upload delays but provide megabytes of data transfer at a time without using a direct connection to a bridge. Use cases for such channels could include distributing static censored content to users, pushing content from users, or even distributing software updates for circumvention tools.
### 7.2 Pluggable Transport Compatibility
There are two sides to the Pluggable Transports interface: the user app and the channel (or “transport” in PT parlance). Raceboat already demonstrates the ability to wrap the channel interface to allow a PT-compatible channel to be used by Raceboat (see use of Obfs4 and Snowflake).
The user app interface is different in that the PT interface specifies a subset of Raceboat functionality. A PT connection is equivalent to Raceboat in continuous connection mode with a single channel for send and receive and static link addresses. Therefore, Raceboat can be wrapped in a PT-compliant wrapper for use by existing PT-based user apps. However, that eschews most of the benefits of the Raceboat framework: the continuous connection mode is a poor fit for short-lived signaling uses; the restriction to bidirectional channels removes flexibility and security gains from including unidirectional channels.
### 7.3 Security of Composed Channels
We have focused on Raceboat as an intermediary capability that includes dynamically creating compositions from modular components. We have detailed the interfaces of those components and which areas of security each is responsible for, but have not addressed the actual security of a composition. Ultimately we consider this out-of-scope of this work, aside from ensuring our framework does not weaken the security provided by each component. E.g., Raceboat never generates its own actions for a transport to execute: only the user model component can do so. Therefore, Raceboat cannot violate behavioral independence unless the user model does so. Similarly, Raceboat never provides content for transport actions, only the encoding does.
We do not guarantee the security of any given component, and therefore of any given composition; the varying landscape of threats and the frequent tradeoffs between security and performance would make such guarantees overly restrictive. E.g. we include a ‘NOOP’ encoding component that simply returns the bytestream given to it; this is useful in cases where a transport is assumed secure from adversarial inspection but is a gross security flaw otherwise. We recommend newly developed components are designed and evaluated as they are now: with specific threat models and evaluations. Insofar as the authors plan to maintain a library of components and compositions, the relevant threat models for each will be clearly and prominently documented to avoid misunderstandings that could endanger users.
### 7.4 Value of User Models
Related to threat models, a reasonable question is whether the user model component is necessary or, if not, brings any value along with its added complexity. Current reports on censors do not show the use of application behavior profiling to block circumvention channels [36]. Despite this, we argue including user models is valuable for several reasons: first, behavioral profiling is not yet in use, but a censor could begin using it in the near future (particularly on a smaller or more focused scale than whole-of-nation). We argue it is better to have proactively designed user models now than suddenly require a major reworking of components, almost certainly breaking backwards compatibility, to incorporate them later.
Additionally, in the case of transports that interact with application servers, e.g. email or social media transports, user model-style rate limitations may be necessary. Most such services have denial-of-service, spam, and/or bot detection protections; Raceboat transports do not aim to act maliciously with respect to the application server, but if interaction is driven based on the user app then the transport could still fall afoul of these protections. In these cases, even if a realistic user model is not necessary, a throttling user model may be necessary to avoid automatic blocking by the server.
### 8 CONCLUSION
We have presented Raceboat, a framework for flexibly developing and using censorship-circumventing signaling channels. We have provided a formalized definition and implementation for circumvention channel link establishment that enables a flexible mix-and-match approach to upstream and downstream communications, including the concepts of public vs. secret addresses and indirect vs. direct channels. We have designed an expressive modular decomposition of application tunneling channels into orthogonal components to enable faster development and greater research reuse. We have implemented both of these innovations in a framework and shown its capability to use both new and existing circumvention channels, including existing channels adapted via Pluggable Transports compatibility, to conduct useful signaling channel tasks, e.g. supporting use of a cutting-edge bridge distribution approach.
### ACKNOWLEDGMENTS
This material is based upon work supported by the AFRL-RI and DARPA under Contract No. FA8750-19-C-0501. Any opinions, findings and conclusions or recommendations expressed in this material are those of the author(s) and do not necessarily reflect the views of the AFRL-RI and/or DARPA. “A” (Approved for Public Release, Distribution Unlimited).
Stefan Hildenbrand
Generation of Test Cases
Programming a tool for the generation of test cases from finite automata
Semester Thesis
Summer Semester 2005
ETH Zürich, October 10th, 2005
Supervisor: Diana Senn
Professor: David Basin
Zusammenfassung
We focus the development on use in the context of network protocols; therefore the tool offers the possibility to define custom input and output symbols.
Abstract
We develop a tool for test case generation from finite state machines. The tool allows the user to specify a finite state machine and then generates the test cases using the Wp-method. Other methods can be easily added.
We focus the development on network protocols therefore the tool offers the possibility to define input and output symbols.
## Contents
### I. Introduction and Theory
1. About Finite State Machines
1.1. Finite State Machines
1.2. Mealy Machines
1.3. FSMs and Computer Science
2. About Testing
2.1. Testing
2.2. Testing in Computer Science
2.3. Testing Using FSMs
3. Generating a Test Suite From a FSM
3.1. Some Notations and Definitions
3.2. The Wp-METHOD
3.3. Finding the Identification Sets
### II. Development
4. Requirements
4.1. Purpose of the Tool
4.2. Interfaces
4.2.1. Graphical Interface
4.2.2. Machine Interface
4.3. Functions
4.4. Other Requirements
5. Evaluation
5.1. Web Research
5.2. Exorciser
5.3. JFLAP
6. Design
6.1. Modelling - Capturing the Essence
6.2. Test Case Generation - Combining the Algorithms
6.3. File Format - Presenting the Results
6.3.1. File Format for the Automaton
6.3.2. File Format for the Alphabet
6.3.3. File Format for the Test Cases
7. Implementation
7.1. Adaptation of JFLAP
7.1.1. Extending JFLAP for Mealy Machines
7.1.2. Adding More Editor Capabilities to JFLAP
7.1.3. XML Output
7.2. Implementation of the Test Case Generation Algorithms
7.2.1. Transforming the Graphical Representation to the Transition Table View
7.2.2. Generating Minimal Distinguishing Sequences Using P-Tables
7.2.3. Finding the Cover Sets by a Broad Search in the Automaton
7.2.4. Constructing the Multiple-experiment tree
7.2.5. Combining the Found Sequences to Test Cases
7.2.6. Preparing the Test Cases for Output
### III. Results
8. Manual
8.1. The Automaton Editor
8.2. The Form Editor
8.3. The Symbol Editor
9. Developer Guide
9.1. Adding Other Algorithms
9.2. Good to Know
10. Example
10.1. Drawing the Automaton
10.2. Defining Alphabet and Symbols
10.3. Starting the test case generation process
10.4. Looking behind the scene
10.5. Large Example
### IV. Conclusions
11. Summary
12. Conclusions
13. Future Work
### V. Appendices
A. Task Description
B. Software Requirements Document
C. Schedule
D. CD contents
E. Readme
# List of Figures
1. a very simple Mealy Machine
2. an old blueprint of the Wright Flyer
3. example automaton
4. Multiple-experiment trees (MET) for example automaton
5. Screenshot of Exorciser: editing a FSM
6. Screenshot of JFLAP: editing a FSM
7. Screenshot of JFLAP: offering different types of FSMs
8. XML-Output produced by JFLAP
9. File format for automaton
10. File format for alphabets
11. File format for test suite output
12. Screenshot: Editing a finite automaton
13. Screenshot: Editing a transition
14. Screenshot: View Showing the Form Editor
15. Screenshot: View Showing the Symbol Editor
16. Output produced by TCGTool
17. Example 2: Automaton
18. Example 2: Extract from the alphabet file
19. Example 2: Extract from the output file
Part I.
Introduction and Theory
This part covers the basics needed for this project such as some theory about finite state machines and the most important terms used in this project.
1. About Finite State Machines
1.1. Finite State Machines
In the world of computer science, basically everything can be represented as a so-called finite state machine (FSM). Such FSMs are composed of states, transitions and actions. A state stores information about the past, i.e. it reflects the input changes from the system start to the present moment. A transition indicates a state change and is described by a condition that needs to be fulfilled to enable the transition. An action is a description of an activity that is to be performed at a given moment.
1.2. Mealy Machines
Since FSMs are quite important to computer science, there has been a lot of research on that topic. Different types of FSMs are known. In this thesis, we concentrate on so-called Mealy Machines. This type of FSM extends the base type with an output function.
Formally a Mealy Machine can be described as a 7-tuple $A = (Q, \Sigma, \Omega, \delta, \lambda, q_0, F)$:
- $Q$ is a finite set of states, $|Q| < \infty$.
- $\Sigma$ is the input alphabet. $|\Sigma| < \infty$.
- $\Omega$ is the output alphabet. $|\Omega| < \infty$.
- $\delta$ is the transition function. $\delta : Q \times \Sigma \rightarrow Q$.
- $\lambda$ is the output function $\lambda : Q \times \Sigma \rightarrow \Omega$.
- $q_0 \in Q$ is the initial state.
- $F \subseteq Q$ is a (finite) set of possible accepting states. If the FSM stops in a state in $F$ after reading input $w \in \Sigma^*$, then $w$ is part of the language $L(A)$.
Figure 1: a very simple Mealy Machine, which ignores every third input.
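To make the definition concrete, $\delta$ and $\lambda$ can be stored directly as lookup tables. The following Java sketch is purely illustrative (all class and method names are hypothetical and are not taken from the tool developed later):

```java
import java.util.HashMap;
import java.util.Map;
import java.util.Set;

// Minimal Mealy Machine: delta and lambda stored as nested maps keyed by (state, input).
class MealyMachine {
    private final Map<String, Map<Character, String>> delta = new HashMap<>();     // (q, a) -> q'
    private final Map<String, Map<Character, Character>> lambda = new HashMap<>();  // (q, a) -> output
    private final String initialState;
    private final Set<String> acceptingStates;

    MealyMachine(String initialState, Set<String> acceptingStates) {
        this.initialState = initialState;
        this.acceptingStates = acceptingStates;
    }

    void addTransition(String from, char input, String to, char output) {
        delta.computeIfAbsent(from, k -> new HashMap<>()).put(input, to);
        lambda.computeIfAbsent(from, k -> new HashMap<>()).put(input, output);
    }

    // Runs the machine on an input word and returns the produced output word.
    String run(String word) {
        StringBuilder out = new StringBuilder();
        String q = initialState;
        for (char a : word.toCharArray()) {
            out.append(lambda.get(q).get(a));
            q = delta.get(q).get(a);
        }
        return out.toString();
    }

    // A word is part of L(A) if the machine stops in an accepting state after reading it.
    boolean accepts(String word) {
        String q = initialState;
        for (char a : word.toCharArray()) {
            q = delta.get(q).get(a);
        }
        return acceptingStates.contains(q);
    }
}
```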
1.3. FSMs and Computer Science
In computer science, FSMs are used to model a certain behaviour of software, hardware or other processes, for example a TCP network protocol. A device (meaning a computer or a firewall or the like) reacts usually on inputs from its environment and changes its state accordingly. Quite commonly it produces an output, too. Exactly this behaviour is reflected in a Mealy-Machine. Therefore a network protocol (e.g. TCP) can be represented as a FSM. For humans this graphical representation is usually easier to understand than the source code of the actual implementation.
2. About Testing
2.1. Testing
Testing is a process used to investigate the properties of a certain implementation under test (IUT), especially the correctness, completeness and quality. In order to be able to test, one needs to know what correct is. This means one needs a complete specification of the IUT, i.e. a so called model. Usually, testing does not guarantee complete correctness, since there can always be cases, which were not covered by the set of test cases (i.e. test suite) and sometimes even the model is not complete, giving another source of errors.
2.2. Testing in Computer Science
In computer science, testing plays a much bigger role than in other engineering disciplines. During the construction of an airplane, the engineers can calculate most of the properties the airplane is going to have, because they have their blueprints, the physical laws and mathematics to help them. Surely, they do a test flight before delivering the airplane to the customer, but this is just the last step.
As opposed to this approach, developing computer devices usually goes through many test flights. On one hand it is easier and cheaper to run a test build of software than to fly a prototype of an airplane. On the other hand, in computer science rarely any calculations can be done on the model, since there are usually no blueprints. Although there exist so-called formal methods which can be seen as a form of blueprints, coming up with them is often omitted. Bugs in software are widely accepted; a bug in an airplane is not.
2.3. Testing Using FSMs
One possible model of a computer device is an FSM. The expected behaviour is reflected in a FSM. To test the IUT, one checks whether the IUT gets to the same state as the FSM does, after reading the same input sequence. In this case, both the model and the IUT must be FSMs. By using Mealy Machines, one can test whether the IUT produces the same output sequence as the model does, when reading the same input sequence. This allows testing more types of IUT.
Usually a FSM reflecting a computer device has many states and many more transitions. Since testing is a very time consuming and therefore expensive process, one is eager to reduce the number of test cases that need to be run against the IUT. There exist several algorithms to extract a test suite from a FSM. The algorithm used in this thesis is explained in the next section.
3. Generating a Test Suite From a FSM
This section is not intended as a complete review of the known methods to generate test cases from FSMs; that is covered in [FvBK+91]. Nor is this section supposed to go into deep detail on the presented method, but it should give the basics needed to understand the so-called partial W-method (Wp-method) used in our tool.
My supervisor decided on this method because the W- and Wp-methods are the most generally applicable among these methods; the Wp-method was preferred because it yields smaller test suites than the original W-method.
3.1. Some Notations and Definitions
First we introduce some notations and definitions later used to present the algorithm. The same definitions are used in [FvBK+91].
\( M_i \xrightarrow{x/y} M_j \) means that the FSM \( M \) in state \( M_i \) fed with input \( x \) responds with an output \( y \) and makes the transition to the state \( M_j \).
\( M_i \xrightarrow{p} M_j \) means that the FSM \( M \) is originally in state \( M_i \) and goes to the state \( M_j \) when the input sequence \( p \) is applied.
\(M_i|p\) represents the output sequence produced by \(M\) in state \(M_i\) when the input sequence \(p\) is applied.
**V1.V2** The concatenation of two sets \(V1\) and \(V2\) of input sequences is a set of input sequences defined as follows: \(V1.V2 \equiv \{v1.v2 \mid v1 \in V1, v2 \in V2\}\) where \(v1.v2\) stands for the concatenation of the two sequences \(v1\) and \(v2\).
**\(V\)-equivalence of states** Given a set \(V\) of input sequences, two states \(Si\) and \(Ik\) are \(V\)-equivalent (written as “\(Si \approx_V Ik\)” ) if \(S\) in \(Si\) and \(I\) in \(Ik\) respond with identical output sequences to each input sequence in \(V\).
**Equivalence of states** Two states \(Si\) and \(Ik\) are equivalent (written as “\(Si \approx Ik\)” ) if they are \(V\)-equivalent for any set \(V\).
**Equivalence of FSMs** Two FSMs \(S\) and \(I\) are equivalent if their initial states \(So\) and \(Io\) are equivalent.
**minimal FSM** A FSM \(M\) is minimal if the number of states in \(M\) is less than or equal to the number of states for any machine \(M'\) which is equivalent to \(M\).
**state cover set \(Q\)** Let \(Q\) be a set of input sequences. \(Q\) is a state cover set of \(S\) if for each state \(Si\) of \(S\) there is an input sequence \(p_i \in Q\) such that \(So \xrightarrow{p_i} Si\). The empty input sequence \((\epsilon)\) belongs to \(Q\).
**transition cover set \(P\)** Let \(P\) be a set of input sequences. \(P\) is a transition cover set of \(S\) if for each transition \(Si \xrightarrow{x/y} Sj\) there are sequences \(p\) and \(p.x\) in \(P\) such that \(So \xrightarrow{p} Si\) and \(So \xrightarrow{p.x} Sj\). The empty input sequence \((\epsilon)\) is a member of \(P\). By definition, each transition cover set \(P\) contains a subset which is also a state cover set.
**identification set \(Wi\)** A set of input sequences \(Wi\) is an identification set of state \(Si\) if and only if for each state \(Sj\) in \(S\) (with \(i \neq j\)) there exists an input sequence \(p\) of \(Wi\) such that \(Si|p \neq Sj|p\) and no subset of \(Wi\) has this property.
**characterisation set \(W\)** The union of all identification sets \(Wi\) is a characterisation set. A characterisation set consists of input sequences that can distinguish between the behaviours of every pair of states in a minimal automaton.
### 3.2. The Wp-Method
The Wp-method as described in [FvBK+91] is an improvement of the original W-method as described in [Cho78]. Using the Wp-method reduces the length of the test suite.
The Wp-method makes some assumptions about the specification \(S\) and the implementation \(I\). The specification \(S\) should be minimal. This is a necessary (and sufficient) condition for the existence of a characterisation set \(W\). In order to guarantee the error-detection power of this method, \(S\) and \(I\) are assumed to be completely specified and deterministic. Moreover the following explanation assumes that the number of states in \(S\) and \(I\) are equal. But the more general case when the implementation has more states than the specification can also be handled by this method. This extension is explained in [FvBK+91].
Like several other methods, the Wp-method follows a two-phase approach:
The first phase checks that all the states defined by the specification are identifiable in the implementation, and also checks for each state $I_k$ that it can be identified by the identification set $W_k$. At the same time, the transitions leading from the initial state to these states are checked for correct output and state transfer. This happens by applying the following test sequences to the IUT: $Q.W$ where $Q$ is a state cover set and $W$ is a set of input sequences including at least all the identification sets $W_i$ of all states.
The second phase checks all transitions which were not checked during the first phase. This is done by applying the sequences of the transition cover set $P$ which are not contained in $Q$ concatenated with the corresponding $W_i$:
$$R \otimes W = \bigcup_{p \in R} \{p\}.W_j$$
where $R = P - Q$ and $W$ is the set of all identification sets. $W_j$ is the identification set of $S_j$ in $W$ and $S_j$ is reached by $p$, meaning $S_0 \overset{p}{\rightarrow} S_j$.
If the implementation $I$ passes the tests of both phases, it is equivalent to the specification $S$. More details and a proof of this assertion can be found in [FvBK+91]. A detailed example can be found in Section 10.
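As a rough illustration of how the two phases translate into concrete input sequences, the following Java sketch builds the phase-1 set $Q.W$ and the phase-2 set $R \otimes W$ from precomputed cover and identification sets. This is a simplified sketch under the assumptions stated above; the names, the representation of input sequences as strings, and the precomputed map from access sequences to identification sets are all hypothetical rather than taken from the tool.

```java
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

class WpSequences {
    // Phase 1: Q.W -- every state cover sequence concatenated with every sequence in W.
    static Set<String> phaseOne(Set<String> stateCoverQ, Set<String> characterisationW) {
        Set<String> result = new LinkedHashSet<>();
        for (String q : stateCoverQ)
            for (String w : characterisationW)
                result.add(q + w);
        return result;
    }

    // Phase 2: R (x) W -- for each p in R = P - Q, concatenate p with the identification
    // set W_j of the state S_j reached from the initial state by p.
    static Set<String> phaseTwo(Set<String> transitionCoverP, Set<String> stateCoverQ,
                                Map<String, Set<String>> idSetOfStateReachedBy) {
        Set<String> result = new LinkedHashSet<>();
        for (String p : transitionCoverP) {
            if (stateCoverQ.contains(p)) continue;            // R = P - Q
            for (String w : idSetOfStateReachedBy.get(p))
                result.add(p + w);
        }
        return result;
    }
}
```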
3.3. Finding the Identification Sets
For the presented method, it is necessary to have the cover sets and identification sets. Finding the state and transition cover sets is basically a broad search in the automaton, but finding the identification sets is not that easy. Our tool uses the algorithm for the so called “Multiple Preset Diagnosing Experiment” as presented in [Gil62]. The author of this reference distinguishes between diagnosing and homing, preset and adaptive as well as simple and multiple experiments:
**The diagnosing problem:** It is known that a given machine $M$, whose transition table is available, is in one of the states $\sigma_{i_1}, \sigma_{i_2}, \ldots, \sigma_{i_m}$. Find this state.
**Preset experiments:** the applied input sequence is completely determined in advance
**Adaptive experiments:** the applied input sequence is composed of two or more subsequences, each subsequence (except the first) determined on the basis of responses resulting from preceding subsequences.
**Simple experiments:** only one copy of the machine is required
**Multiple experiments:** more than one copy of the machine is required
For our needs, we must solve the diagnosing problem. Since this tool is supposed to generate a static test suite, we use the “preset” algorithm, which does not depend on the output the IUT produces.
I decided to use the “multiple” algorithm, because we can have virtual copies of the IUT with the help of the reset transition which must be implemented anyway (this is a requirement for the Wp-method). Additionally, the “multiple preset diagnosing experiment” is more powerful than the simple one, meaning that it can solve some problems the simple one cannot.
This algorithm has one shortcoming though, as described in [Gil62]:
Figure 3: example automaton
Figure 4: Multiple-experiment trees (MET) for the example automaton: (a) MET using ‘a’ as first input, (b) MET using ‘b’ as first input
It may be noted that, although the design procedure for the multiple experiment can minimize the length of the input sequence applied to each copy, it generally does not minimize the total length of the experiment or its multiplicity.
The selection criterion for the distinguishing sequences (take one of the shortest) applied in the different steps is ambiguous. Sometimes a distinguishing sequence applied in a later step could also separate other paths of the diagnosing tree. As a result, some of the input sequences in the identification sets are superfluous, so the power of this algorithm comes at the cost of the requirement that the tool produce minimal test suites.
This shortcoming is best described in an example taken from [FvBK+91] and presented in Figure 3.
Obviously the identification sets for this automaton are either \{\{a\}, \{a, b\}, \{b\}\} or \{\{a\}, \{c\}, \{b\}\}. But none of these can be found by the algorithm presented in [Gil62]. The diagnosing tree looks either like Figure 4(a) yielding \{\{a\}, \{a, b\}, \{a, b\}\} or like Figure 4(b), yielding \{\{a, b\}, \{a, b\}, \{b\}\} as identification sets.
Part II.
Development
In this part the development of the tool is presented: discussing the requirements, doing web research for a similar tool which could be extended, and the programming work.
4. Requirements
The first step of the development of this tool was to collect the requirements it should fulfil.
4.1. Purpose of the Tool
This tool is supposed to allow the user to draw a graphical representation of a FSM and to translate it into a machine-readable representation. From this machine-readable representation the tool should generate so-called abstract test cases. An abstract test case consists of an input sequence to be fed to the IUT and an output sequence, which can then be compared to the output sequence the IUT produced. ‘Abstract’ indicates that usually these test cases cannot be fed directly to the IUT, since the model contains variables in the input sequences which must be instantiated with concrete values. E.g. a model of the TCP-protocol does not contain the actual IP-addresses of sender and receiver but most likely imposes some restrictions on them. Another tool must then be used to instantiate the variables with IP-addresses fitting the restrictions.
4.2. Interfaces
The tool consists of two completely different interfaces. One interface to the user and one to other tools.
4.2.1. Graphical Interface
The graphical interface lets the user construct arbitrary Mealy Machines. This interface needs three views.
Automaton view The Automaton view allows the user to construct the actual automaton. This means to draw states and the transitions between these states. The transitions consist of an input symbol and an output symbol. Different input symbols can lead to the same state and produce the same output.
The symbol is referenced by its name. The actual meaning of the symbol is not of interest here, but must be defined in the Symbol view for later use in test case generation.
Form view The Form view allows the user to edit the base form of the symbols of the input alphabet the FSM should listen to. This view allows to add, edit and delete the fields a symbol can have. A field of a symbol consists of a name and a data type.
Symbol view The Symbol view allows the user to create, edit and remove symbols of the alphabet. This view should basically implement a mask of the defined form, allowing the user to fill the fields with values.
4.2.2. Machine Interface
The machine interface is used in different ways. One purpose of this interface is to allow communication with other tools, e.g. to instantiate the test cases generated by this tool. The other purpose is to be able to save and reload the work.
test cases The tool produces a structured output which can be used by other tools (such as fwtest [Zau]).
FSM The tool allows also to save and load the constructed FSM, the form and the symbols.
4.3. Functions
By defining the interfaces, the functions are quite obvious:
- construction of Mealy Machines, including an alphabet consisting of user defined symbols based on a user defined format
- translation of the graphical representation into a machine readable form and vice versa
- generation of abstract test cases from the machine readable form
4.4. Other Requirements
There are a few other requirements which do not fit the above distinction:
- the tool should not impose high requirements on the environment it is run in
- the tool should be released under some form of free license
5. Evaluation
After knowing what the tool should be capable to do, I started looking for other tools which do similar tasks. The goal was to find a tool which was easy to adapt but also powerful enough to satisfy our needs.
5.1. Web Research
Since FSMs are a widely researched topic in computer science there exist lots of tools covering all kinds of tasks in this area. But most of the hits I found by googling took the opposite approach to the problem: there are lots of toolboxes, libraries and other software out there to display a static graphical representation of a given automaton, but I found it rather difficult to find software that also offered to construct or edit an automaton in a graphical and interactive way.
In computer science, FSMs are basic knowledge, so looking for some interactive teaching utilities brought two tools into my scope, Exorciser and JFLAP, which are discussed later in this section.
One page I found quite helpful and would like to mention here was the link page of the Grail+ Project (http://www.csd.uwo.ca/research/grail/links.html). Lots of packages and toolboxes concerning FSMs and the like can be found there. A reference to the JFLAP project is listed there as well. Although the descriptions are not always up to date, it is an alternative to crawling through the many hits Google produces.
5.2. Exorciser
The Exorciser is an interactive teaching software covering different topics in theoretical computer science such as finite state machines, regular languages and the like. Different theses have improved this tool since its first presentation in 2002. Offering a pleasing graphical interface and being developed at ETH too, this seemed to me a very promising foundation for my tool.
One of the focuses of the Exorciser was to make the tool easy to extend with new exercises covering other topics in the field of theoretical computer science. But this extensibility only worked for completely new exercises; trying to adapt the FSM part to Mealy Machines seemed rather difficult. Missing comments in the source and an out-of-date report of the semester thesis [HL01], which added the FSM part to the Exorciser framework, made work very hard. The initial FSM implementation has been changed in the meantime, but the changes are not well documented, such that several classes mentioned in [HL01] do not even exist anymore. Additionally, in most parts of the project, transition inputs were restricted to a single character, which is not convenient for our project. Changing this restriction seemed like heavy work too.
These difficulties encouraged me to look for another software I could use as foundation for my tool.
5.3. JFLAP
JFLAP is a package of graphical tools which can be used as an aid in learning the basic concepts of Formal Languages and Automata Theory. This software lets the user create and edit different types of finite state machines and then offers a variety of tasks on these FSMs, such as converting non-deterministic automata into deterministic ones.
The structure of the program allows to add new types of FSMs by simply implementing an interface. By adding a new type of transition, different kinds of transitions (such as input-output pairs needed for Mealy Machines) can easily be added. The source code is well commented and the software may be reused under the usual restrictions.
With this tool I found the foundation of a graphical representation of FSM which seemed quite simple to extend to the needs of this tool. Providing a graphical user interface and an obvious way to add other types of FSMs together with a simple XML-based way to save and load existing FSMs made this software my choice.
Figure 5: Screenshot of Exorciser: editing a FSM
Figure 6: Screenshot of JFLAP: editing a FSM
Figure 7: Screenshot of JFLAP: offering different types of FSMs
<!-- Created with JFLAP 4.0b14. -->
<structure>
<type>fa</type>
<!-- The list of states. -->
<state id="0">
<x>371.0</x>
<y>196.0</y>
</state>
<state id="1">
<x>157.0</x>
<y>204.0</y>
</state>
<!-- The list of transitions. -->
<transition>
<from>0</from>
<to>1</to>
<read>a</read>
</transition>
<transition>
<from>1</from>
<to>0</to>
<read>a</read>
</transition>
</structure>
Figure 8: XML-Output produced by JFLAP
6. Design
As pointed out before, there are two basic actions our tool has to perform:
1. **Modelling.** Let the user draw an automaton representing a sequence of actions of interest.
2. **Test Case Generation.** Generate test cases for the given automaton.
6.1. Modelling - Capturing the Essence
The first step in modelling is to capture the Essence of the thing one wants to model. The keyword is *Abstraction*. The main purpose of this tool will be to generate test cases for network protocols. Such network protocols can become very complex. Especially the input and output behaviour of the participating hosts can differ from protocol to protocol. So called *packets* are exchanged between the hosts, but the format of TCP-packets has not much in common with the format of IP-packets.
So we need to be able to define the input and output format. But on the other side, for test case generation, the actual meaning of the symbols is not important at all. Therefore one should separate these things.
For complete modelling, we need to define
1. the actual control flow, meaning the automaton with its transitions
2. the form of the input and output symbols
3. the input and output symbols themselves
The abstraction of the actual meaning of the symbols is quite easy. In the automaton, the symbols are referred with a key unique for each symbol. During test case generation, this key is sufficient. When it comes to using the test cases, these keys can be filled with meaningful information by using the symbol definitions.
For the format of the symbols, which consists of several fields, it seems to be enough to have a name (used to identify a field) and a data type for each field. How the actual test packet is constructed from this field must be determined otherwise. Most likely not all fields of the actual test packet are of interest. Reviewing some of the most important network protocols, I decided to go with the following data types: Integer, String, Character, Boolean, and Variable. The special type of *Variable* is introduced to distinguish a defined String from a placeholder which has to be replaced with an actual value.
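A minimal Java sketch of how such a field definition might be represented (hypothetical names; the actual classes in the tool may differ):

```java
// Supported data types for a symbol field, including the placeholder type Variable.
enum FieldType { INTEGER, STRING, CHARACTER, BOOLEAN, VARIABLE }

// A field of a symbol: a name used for identification plus its data type.
class FieldDefinition {
    final String name;
    final FieldType type;

    FieldDefinition(String name, FieldType type) {
        this.name = name;
        this.type = type;
    }
}
```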
6.2. Test Case Generation - Combining the Algorithms
The Generation of Test Cases following the Wp-method is performed through several steps. We need to get from a graphical representation of an automaton to a test suite.
1. **Cover sets.** We need the state and transition cover sets. These sets can be found by performing a broad search in the automaton.
2. **Minimal distinguishing sequences.** Since we want to apply the algorithm to find the identification sets as presented in [Gil62] we first need to find the minimal distinguishing sequences. I proceeded as suggested in Section 4.4 of [Gil62].
3. **Identification sets.** From the minimal distinguishing sequences, the identification sets are constructed using the multiple-experiment trees of [Gil62].
4. **Generate test cases for the states.** Now knowing all necessary sets we can perform phase 1 of the Wp-method.
5. **Generate test cases for the transitions.** The next step is then to perform phase 2 of the Wp-method.
6. **Completing the test cases.** So far we generated input sequences needed to test the automaton. Now we feed these sequences to the modelled automaton and note the expected output for each input sequence. This gives a complete test case consisting of an input sequence and an output sequence which has to be compared to the output the IUT generates (see the sketch after this list).
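Step 6 is the simplest to state in code: each generated input sequence is replayed on the model and paired with the output it produces. A minimal, self-contained Java sketch (hypothetical names; the model is passed in as a function, e.g. the run method of an illustrative Mealy Machine implementation):

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.function.Function;

class TestCaseCompletion {
    // Pairs each generated input sequence with the output the model produces for it,
    // yielding complete test cases (input sequence -> expected output sequence).
    static Map<String, String> complete(List<String> inputSequences,
                                        Function<String, String> runModel) {
        Map<String, String> testCases = new LinkedHashMap<>();
        for (String input : inputSequences) {
            testCases.put(input, runModel.apply(input));
        }
        return testCases;
    }
}
```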
### 6.3. File Format - Presenting the Results
#### 6.3.1. File Format for the Automaton
The file format for saving the automaton must be some kind of structured file. Therefore an XML file seems suitable. The following information has to be written to the file:
- type of the automaton (in our case always Mealy Machine, but could be subject to change)
- reference to the alphabet this automaton is using
- list of states
- list of transitions
For each state we need to save:
- id
- position
- flags for initial and final
For each transition we need to save:
- start and end state
- input and output symbol
These elements can be directly transformed into an XML file. An example is shown in Figure 9.
Figure 9: File format for automaton
6.3.2. File Format for the Alphabet
The same list of important fields can be collected for alphabets:
- name of the alphabet
- list of fields each symbol must have
- list of symbols
For each field we need to know:
- name
- data type
For each symbol we must save:
- name
- the values for each field defined in the alphabet
This list of elements can likewise be quickly transformed into an XML file, as the example in Figure 10 shows.
6.3.3. File Format for the Test Cases
The output is then dumped to a file in the format as shown in Figure 11:
- one test case per row
- each step of the automaton is enclosed in ()
- input and output symbol are separated by /
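For illustration, a single test case for a machine whose symbols are keyed a, b, x and y (hypothetical names) would then appear in the output file as one row such as:

```
(a/x)(b/y)(a/x)
```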
7. Implementation
7.1. Adaptation of JFLAP
7.1.1. Extending JFLAP for Mealy Machines
The design of JFLAP includes an interface called ‘Automaton’ that all different types of automata extend. It was easy to adapt the Turing Machine implementation, which is part of the JFLAP package, to represent a Mealy Machine.
7.1.2. Adding More Editor Capabilities to JFLAP
The editor of JFLAP is built up from several panes which are collected in an environment. These elements are predefined in the Java base library. To add the possibility to edit the alphabet and its symbols needed for our tool, I just had to add new panes to the environment. The editor panes and the objects they present to the user are connected using the Observer-pattern [var], so changes in the alphabet or its symbols are immediately represented in the editor views.
7.1.3. XML Output
For this step JFLAP was perfectly designed, too. The generation of XML output is kind of modularized in JFLAP such that transducers for new types of automaton and new types of objects (like the additional alphabet I added to a mealy machine) can easily be added. Several helper functions predefined in an abstract super class take care of most of the XML specific details.
7.2. Implementation of the Test Case Generation Algorithms
7.2.1. Transforming the Graphical Representation to the Transition Table View
The rather unhandy distinction between states and transitions in JFLAP had to be translated to a format easier to handle. The states in the design of JFLAP do not know anything about the transitions around them. This is fine for graphical representation, but for actually working with the automaton, this seemed not the right way. The obvious choice was transition tables, since they are the starting point in [Gil62], too.
I implemented the transition table as an array of so called transition table entries. Each of these entries represented a state and contained the next state and the output symbol for each symbol in the alphabet. This information could be gathered from the JFLAP-view of the automaton by inspecting all transitions and filling in the entries step by step.
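A minimal Java sketch of such a transition table entry (hypothetical names, not the actual implementation):

```java
import java.util.HashMap;
import java.util.Map;

// One row of the transition table: for a given state, the successor state and
// output symbol for every input symbol of the alphabet.
class TransitionTableEntry {
    final String state;
    final Map<String, String> nextState = new HashMap<>();    // input symbol -> next state
    final Map<String, String> outputSymbol = new HashMap<>(); // input symbol -> output symbol

    TransitionTableEntry(String state) {
        this.state = state;
    }

    void addTransition(String input, String next, String output) {
        nextState.put(input, next);
        outputSymbol.put(input, output);
    }
}
```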
7.2.2. Generating Minimal Distinguishing Sequences Using P-Tables
For constructing the P-Tables as described in [Gil62] I had to come up with another object type. I decided to use a top-down approach:
- A P-Table has to know what equivalence classes it contains. Also it knows in which equivalence class all states are. This is kept in the P-Table because otherwise each row would have to be updated if a state gets into a new equivalence class.
- A *P-Table-Entry* then represents an equivalence class. It knows which states it contains.
- A *P-Table-Row* finally represents one state. It knows about the next states on each symbol.
Now implementing the algorithm of Gill was not difficult any more: take the P-Table (which knows the annotations), and for each P-Table-Entry in that P-Table, distribute the P-Table-Rows into new P-Table-Entries and add them to the original P-Table. When all Entries are inspected, restart until each Entry contains only one row.
During the work on this algorithm, I noticed that it can be applied incrementally and we do not need to save the intermediate steps. All we need to do is to generate the minimal distinguishing sequences (MDS) after we completed a step. A MDS between two states can be constructed by prepending a symbol which causes the automaton to go into two distinguishable states (meaning they are not in the same equivalence group) to the MDS of these two states.
7.2.3. Finding the Cover Sets by a Broad Search in the Automaton
For this step, the JFLAP representation is as good as the transition table view. We are looking for the shortest path from the initial state to all states and to all transitions.
I implemented this with a queue serving as todo list. This list is initialised with the initial state of the automaton. As long as there are states in the todo list, each state reachable from the first one in the list by one transition and not yet visited is enqueued in the todo list. During this loop all states and transitions are visited. When first visiting a state or transition I record the sequence that led to that object.
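The broad search described above can be sketched in Java as follows; input sequences are represented as strings of single-character input symbols, and all names are hypothetical rather than taken from the implementation:

```java
import java.util.ArrayDeque;
import java.util.HashMap;
import java.util.Map;
import java.util.Queue;
import java.util.Set;

class CoverSetSearch {
    // delta: state -> (input symbol -> next state). Fills the state cover set Q and the
    // transition cover set P with input sequences; the empty sequence is included in both.
    static void findCoverSets(Map<String, Map<String, String>> delta, String initialState,
                              Set<String> stateCoverQ, Set<String> transitionCoverP) {
        Map<String, String> pathTo = new HashMap<>(); // state -> shortest access sequence
        pathTo.put(initialState, "");
        stateCoverQ.add("");
        transitionCoverP.add("");

        Queue<String> todo = new ArrayDeque<>();
        todo.add(initialState);
        while (!todo.isEmpty()) {
            String state = todo.remove();
            String prefix = pathTo.get(state);
            for (Map.Entry<String, String> t : delta.getOrDefault(state, Map.of()).entrySet()) {
                String input = t.getKey();
                String next = t.getValue();
                transitionCoverP.add(prefix + input);      // this sequence covers the transition
                if (!pathTo.containsKey(next)) {           // first visit of the target state
                    pathTo.put(next, prefix + input);
                    stateCoverQ.add(prefix + input);
                    todo.add(next);
                }
            }
        }
    }
}
```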
7.2.4. Constructing the Multiple-experiment tree
The construction of the Multiple-experiment trees (MET) is recursive. We initialise the procedure by creating a MET with all states in the admissible set. An admissible set contains the states the automaton could be in. During the process, the admissible set becomes smaller, since we narrow the set of possibilities.
The constructor looks for the shortest MDS between the states in the admissible set. The output produced when feeding the found MDS to the automaton in all states of the admissible set is then compared, and for each subset with the same output a new MET is constructed with the smaller subset. The construction ends if there is just one element in the admissible set.
By using a static variable of the Java programming language, each MET can access the same variable. Therefore the identification sets can be constructed incrementally. Each time the tree branches, the used MDS is added to the identification sets of all states in the admissible set. When the recursion ends, the identification sets can be found in that static variable.
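A compressed Java sketch of this recursive construction is shown below. The lookup of a shortest minimal distinguishing sequence and the simulation of the model are assumed to be available from the earlier steps (hidden here behind a hypothetical oracle interface), and the accumulated identification sets are kept in a static variable, mirroring the approach described above:

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;

class MultipleExperimentTree {
    // Accumulated identification sets, shared by all nodes of the tree.
    static final Map<String, Set<String>> identificationSets = new HashMap<>();

    // admissible: the states the machine could currently be in.
    static void build(Set<String> admissible, MealyOracle oracle) {
        if (admissible.size() <= 1) return;               // recursion ends: state identified
        String mds = oracle.shortestMds(admissible);      // distinguishes at least two states
        Map<String, Set<String>> byOutput = new HashMap<>();
        for (String state : admissible) {
            identificationSets.computeIfAbsent(state, k -> new LinkedHashSet<>()).add(mds);
            byOutput.computeIfAbsent(oracle.outputOf(state, mds), k -> new HashSet<>()).add(state);
        }
        if (byOutput.size() == 1) return;                 // safety guard; should not happen
        for (Set<String> subset : byOutput.values())      // one child MET per output class
            build(subset, oracle);
    }

    // Pieces assumed to exist from the earlier steps (P-tables and the transition table).
    interface MealyOracle {
        String shortestMds(Set<String> states);
        String outputOf(String state, String inputSequence);
    }
}
```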
7.2.5. Combining the Found Sequences to Test Cases
The combination of the found sequences to test cases is a simple loop through all elements
which have to be concatenated as described in \cite{FvBK91}.
7.2.6. Preparing the Test Cases for Output
After having constructed the test cases, the found input sequences are fed to the automaton and the expected output together with the input sequence is dumped to a file in the format presented earlier.
Part III.
Results
This part presents the results this thesis acquired: a short description of the tool developed and some examples what it can do.
The tool comes with an integrated help function, which I reused from JFLAP. It is a simple HTML-Browser which opens an HTML-document associated with the actual view shown in the application window. I adapted the help pages to reflect the changes I made to JFLAP. The following is the description that also appears on the corresponding HTML-page in the tool.
8.1. The Automaton Editor

Figure 12: Screenshot: Editing a finite automaton
The Editor Pane
The editor pane has three main components. On the top is a detachable tool bar (the section with the four iconic buttons), and on the bottom right is the canvas where the automaton is drawn. If you defined an alphabet for the automaton, a list of the symbols you defined is on the bottom left.
The automaton is edited through clicks on the canvas. The current action taken in response to those clicks depends on the currently selected tool, which is indicated by a darker background on the icon.
To select another tool, click on it, or use its shortcut key. You can get the shortcut key, as well as a short description of the tool, by holding the cursor over the tool button. After a short time this should display a tool-tip with information.
The Tools
The Attribute Tool (the “A” key)
This tool is used to modify existing states and transitions. To move states, drag them with this tool. Dragging transitions will move transitions. Clicking on a transition edits a transition. A user may right-click (control click on Macs that have only one mouse button) on a state to bring up a pop-up menu to define the state as final, to define the state as initial, to set its label (an auxiliary description for a state), to clear its label (if it has one), or to clear all labels of all states.
The State Tool (the “S” key)
While the state tool is selected, a click on the canvas will create a new state centred at the point that was clicked.
The Transition Tool (the “T” key)
By dragging from one state to another, a transition from the first state clicked to the second state dragged to will be created. Looping transitions from a state to itself are handled the same. Once two states are joined with the tool, the user will then be asked for certain parameters of the transitions.
The Delete Tool (the “D” key)
This will erase any transition or state it is clicked on. If a state is deleted, those transitions incident on or emanating from a state will be deleted as well.
Right-clicking in a blank portion of the canvas will bring up a different pop-up menu. One option of this menu is to hide or show labels; if labels are hidden, the user may hold the mouse cursor over a state to bring up a tool-tip with the label for that state. The other option is the graph layout algorithm, which will rearrange the states of the automaton. As a nota bene, this popup menu is available in all views with an automaton; however, as one might expect the user may use the graph layout algorithm only where JFLAP allows the moving of states.
Editing Transitions
Only one transition may be edited at a time. While a transition is being edited, fields appear that allow the user to change parameters regarding that particular transition. Each field corresponds to a particular parameter of that transition. In order to find out which is which during runtime, hold the mouse cursor over one of the fields; a tool tip will pop up to tell what it is.
A Mealy Machine’s transition editor will have two fields, one for the input symbol and one for the output symbol.
To stop editing, take some other action with the tool, or click in an empty space on the canvas, or press return/enter. To cancel the editing of a transition and to revert that transition to its state before the edit, press escape. Pressing shift-return/enter will stop editing the transition, and begin the process of editing a new one between the start and end states of the transition that we just stopped editing.
8.2. The Form Editor
The Form Pane
The Form Pane consists of several input fields. The top one holds the name of the actual alphabet. The ones below represent the fields of this alphabet. Each field has a name and a
data type.
To add a new field click on the “add field”-button at the bottom. To delete fields, tick them and click on the “delete fields”-button at the bottom.
8.3. The Symbol Editor
The Symbol Pane
The Symbol Pane has two main components. On the left is a list of the defined symbols. On the right is a mask to create new and edit defined symbols.
To create a new symbol fill in the fields on the right side (Name on top, the other fields according to the form you previously defined in the Form Editor). When you are finished, click on “save”.
To edit a symbol, click on its name on the left side and alter the fields in the editor, when finished, click on “save”. To delete a symbol, load it in the editor (by clicking its name on the left side) and click on “delete”.
9. Developer Guide
This section should give some hints where to start when extending this tool. More topics are covered in Section 7: Implementation. Section 10.4 might give some information, too.
9.1. Adding Other Algorithms
The package tcg contains the classes used to generate the test cases. It has an abstract class called TestCaseGenerator which new classes implementing another test case generation method should extend. The extending class must implement the method generateTestCases, which is supposed to return a set of input sequences that form a test suite. The other methods of the abstract class then take care of the output for these sequences and dump the complete test suite to a file.
The class TCGwithTT can be used as a foundation for new algorithms, too. It extends the TestCaseGenerator class by offering a transition table view of the automaton.
To add the new test case generator to the menu of the GUI, one has to add a corresponding line to gui.menu.MenuBarCreator#getTcgMenu.
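A hypothetical skeleton of such an extension might look like the following; only the names TestCaseGenerator and generateTestCases are taken from the tool, while the constructor requirements and the element type of the returned set would have to be taken from the existing sources.

```java
package tcg;

import java.util.HashSet;
import java.util.Set;

// Hypothetical skeleton of a new test case generator. Only the names
// TestCaseGenerator and generateTestCases come from the tool; everything else
// is illustrative.
public class MyMethodGenerator extends TestCaseGenerator {

    public Set generateTestCases() {
        Set testSuite = new HashSet();
        // build the input sequences of the new algorithm here, one String per
        // test case with the symbol names joined by the ":" delimiter
        return testSuite;
    }
}
```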
As another option, the XML-file produced by the tool can be used as a starting point for test case generation. This could be an interface to another application, e.g. if one wants to escape the Java environment.
9.2. Good to Know
The delimiter for separating the symbol names in input sequences is defined in tcg.model.MealyMachineModel#DELIMITER. It is set to “:” by default.
The data type “set” is not implemented. This is just a placeholder for further development.
10. Example
To illustrate the whole process, we go step by step through a simple example. The example is taken from [FvBK+91]. The same example was used before and is illustrated in Figure 3.
10.1. Drawing the Automaton
The user interface of JFLAP is quite instructive. To draw our example automaton, we select the state tool and click three times in the white space of the editor window. Each time we click, a state is created at this position. We distribute the three states over the pane to have enough space for the transitions.
Next, we add the transitions between the states. We choose the transition tool, then click and hold on the state the first transition starts from. We drag the mouse (while still keeping the button down) to the end state of this transition. Then we release the button and an input field for the transition symbols appears. In the left field, we enter the input symbol; in the right field, we enter the output symbol. If we want to use an alphabet with defined symbols, we have to define them first (see below). After having defined the symbols, we can use the buttons on the left in the Editor view to add symbols to the transitions.
To create transitions leading to the same state, just click on that state. Finally we need to define the initial state. This happens by selecting the Attribute tool and right clicking on the desired initial state. A popup menu appears. One of the offered menu items is “initial”. We select that item and a large triangle is added to the drawing, indicating the initial state.
10.2. Defining Alphabet and Symbols
For the actual test case generation, the alphabet and symbol functionality is not needed; the unique keys used in the automaton are sufficient. The process of defining an alphabet and symbols for an automaton is largely self-explanatory and is described in the online help.
10.3. Starting the Test Case Generation Process
When we are finished with drawing our automaton, we select the item “Wp method” from the “test case generation” menu. Everything now runs in the background. When test case generation is finished, we are prompted for a file name where the test suite is saved to.
10.4. Looking Behind the Scenes
Since we are interested in the details of this tool, we take a look behind the scenes and show what is going on before we can save the output to a file.
Using the debug mode of Eclipse, we inspect the key steps in the process:
```java
public Set generateTestCases() {
    generateCoverSets();
    generateIdentificationSets();
    result.addAll(generatePhase1());
    result.addAll(generatePhase2());
    return result;
}
```
After the first method call above, the variable state cover set contains [a, c, c] and the variable transition cover set contains [c:b, a:b, a:a, a:c, a, c, c:a, c:c, b]. This result is different from what [FvBK+91] says. But a closer look shows that the tool just used a instead of b in the state cover set, which is completely correct, since both a and b take the automaton to the same state when starting from the initial state. This different choice is then also reflected in the transition cover set, where the first b in the multiple input sequences is replaced with a.
The next step abstracts the whole process of building up the P-Tables and the multiple-experiment tree. After the execution of that line, the variable identification sets reads: [a, b, a, b, [b]]. This is obviously different from [a, b, a, [b]] as suggested by [FvBK+91], and there is no way to make it look the same. The reason for this problem is the shortcoming of the algorithm used, as described in Section 3.3. The identification set of one state is a little too large. The consequences of this problem will become apparent in a few steps.
After some intermediate steps, the Wp-method takes control. The test sequences generated in phase 1 are [c:b, a:b, a:a, a:c:a, b]. [FvBK+91] comes to [c:b, b:b, b:a, a, a:b, c:b:a, a:a:b]. Here too, b is replaced with a and vice versa. This should be the same test suite. But now we can see what influence the larger identification set has. The tool produced two more test cases than necessary, because it used more test cases to be sure that the automaton is in the correct state. The output produced by this example is shown in Figure 16.
I injected the state and transition cover sets from [FvBK+91] into the code to see whether the same test suite would really be produced, and it was. Of course, the larger identification set caused the same problem. Injecting the shorter identification set finally allowed the tool to generate exactly the same test suite as in [FvBK+91].
(a/a)(a/a)(a/a)
(b/b)(b/b)(a/b)(a/a)(a/b)
(b/b)(a/b)(a/a)(a/b)(a/a)(b/b)(a/b)(a/a)(a/b)
(b/b)(a/b)(b/b)(a/a)(a/a)
(b/b)(a/b)(a/a)(a/b)
(b/b)(a/b)(a/a)(b/b)(a/b)(a/a)(a/a)
(b/b)(a/b)(a/a)(a/b)(a/a)(a/a)(a/a)
(b/b)(a/b)(a/a)(a/b)(a/a)
(b/b)(a/b)(a/a)
(a/a)(a/a)
(b/b)(a/b)(b/b)(a/b)(a/a)(a/b)
(b/b)(a/b)(a/a)(a/b)(b/b)(a/b)(a/a)(a/a)
(a/a)(b/b)(a/b)(a/a)(a/b)(a/b)
Figure 16: Output produced by TCGTool
10.5. Large Example
After this simple and understandable example follows a larger example. This is about TCP connection handling. It shows the results produced by TCGTool when operating on a larger automaton. This example also includes an alphabet. Figure 17 shows a simplified view of the automaton, Figure 18 shows an extract from the XML-file used to store the alphabet and the symbols and Figure 19 shows an extract of the generated test case file.
Figure 17: Example 2: Automaton
Figure 18: Example 2: Extract from the alphabet file
Figure 19: Example 2: Extract from the output file
Part IV.
Conclusions
Finally, a summary and some concluding notes on this work, as well as some comments about further work to be done on this topic.
11. Summary
In this semester thesis, we implemented a tool for test case generation. This tool allows the user to draw a graphical representation of a Mealy Machine and then constructs input sequences with the expected output sequences.
We used JFLAP [Rod], a tool for interactive teaching of FSMs, as a foundation and extended it with the functionality to handle Mealy Machines. This graphical representation of Mealy Machines is then the starting point for the test case generation.
Test case generation can be done in several ways. In this semester thesis we implemented one way to do so. We focused on a general method which can be used in most cases. The used algorithms from [Gil62] for generating identification sets for each state work for every minimal FSM. The supervisor chose the Wp-method from [FvBK+91] for generating test cases. This algorithm is applicable in more cases than similar algorithms.
The test case generation step is independent from the other parts of the tool. This modular design allows other test case generation algorithms to be added to the tool.
The implemented tool especially supports everything needed to test implementations of network protocols. The possibility to define the form and content of the used input and output symbols gives great flexibility without making the test case generation process more difficult, since only the unique name of each symbol is used in the algorithms. The generated abstract test cases then have to be instantiated with the content defined.
Experiments with the tool showed that the used algorithms are general but not in every case optimal. The tool produces a few more test cases than needed for sufficient testing. This problem is inherent in the used algorithm and cannot be easily solved.
12. Conclusions
Looking back at this semester thesis, I recognize the importance of a well-chosen foundation and a complete theoretical background. Today, building something up from scratch is in most cases neither necessary nor affordable any more. But finding the right foundation is not that easy, since there are so many possible candidates out there. I had already started to extend the Exorciser tool when I noticed that I was only producing a mess in the source code. I then started again to look for another piece of software and came across JFLAP, which is now the foundation for this tool.
When I started reading [FvBK+91], I did not expect many problems. The cover and identification sets were always presented as requirements and I found no word about the difficulty of generating them. I really hit the wall when I recognized that there is actually much more behind this test case generation than just the simple steps described in [FvBK+91]. I crawled through several referenced papers and finally ended up with [Gil62], a book dating back to 1962. All papers about these test case generation methods just assumed that one knows the identification sets and in most cases did not even give a hint where to find out about that topic. It then took me about a week to get into that topic and to really understand the presentation in [Gil62], before I could go back to the code and continue the actual work.
Enthusiastic about having mastered that problem, I overlooked the small paragraph in [Gil62] about the shortcomings of that method. When I finally had the code working, that problem became apparent. But I was not able to find an alternative to this method and had to accept that shortcoming.
But all in all I think this was an important experience for me. The work included everything from web research to reading and understanding quite difficult theoretical papers, from implementing algorithms to extending existing source code. Not to forget that writing a report recalled the skill of expressing myself in words.
Sometimes I came with nothing to the weekly meetings with my supervisor. Sometimes I impressed her so much with my progress that she forgot to tell me some quite important things.
13. Future Work
This tool is a small part in a whole framework of tools to test firewalls. I solved the problem of abstract test case generation but it is just one step on the way down to a complete method to test firewalls.
The following list is not complete, but shows some tasks to improve this tool and points out the direction for getting further on the way to a method to test firewalls.
- *other test case generation algorithms*: The tool is designed to ease the extension with other test case generation algorithms. Different algorithms may solve some problems more efficiently than the chosen combination of methods.
- *implementing the algorithm more efficiently*: Currently the algorithm for generating the test cases is implemented in Java, which probably is not the best choice from the performance viewpoint. Starting from the XML output of the automaton, it is possible to attack the same problem with another implementation, for example in a functional language.
- *instantiating the abstract test cases*: As mentioned before, the test cases generated by this tool cannot be used directly. The test cases have to be instantiated with additional information; e.g. testing an implementation of the TCP protocol requires complete TCP packets with IP addresses, sequence numbers and so on, which is not provided by this tool.
- *allowing input and output alphabet to be different*: The current implementation works with a single alphabet containing the input and output symbols of the automaton. This is no restriction for network protocols, since usually what can go in must also be allowed to go out and vice versa. But for other applications it is probably desirable to have distinct alphabets for input and output.
- *adding other data types in the alphabet*: The offered data types cover the most basic data types used in computer science. Other data types can be added. Especially the notion of “set”, which is suggested in the GUI but not implemented, could be a useful extension.
- *implementing special symbols*: I noticed that automata representing network protocols can become quite large. A few special symbols could improve readability of those automata: > for a packet that gets forwarded and - for a packet that gets dropped. With these symbols one could combine multiple transitions that indicate that the incoming packet gets forwarded by one single transition with multiple input symbols. The other special symbol is other, abstracting all input symbols that do not have a transition from that state. This makes drawing completely defined automata much easier.
- *more comfort in file handling*: Sometimes I really struggled with that file dialogue and the different file types the tool produced. A sophisticated file dialogue helping to distinguish all the files would be nice to have.
Part V.
Appendices
Additional documents to this thesis.
A. Task Description
The following is the document describing the tasks for this semester thesis.
1 Introduction
We live in a world where all company networks are connected to the Internet. Nobody can control the Internet, therefore a company has to protect its data from unauthorised access through the Internet. This is done by firewalls, whose analogue in the physical world are locks. Everybody understands that doors need to be locked to prevent unauthorised access. It is the same in the digital world: unauthorised access to a company's network should be prevented, and this can be done by one or several firewalls.
Using the analogy of the door lock again, everybody understands that it is not enough to have a door lock. Only if the lock is locked properly and only authorised people have a key to unlock it do we have what we want. It is the same in the digital world. It is not enough to have a firewall. We can only be satisfied if the firewall is doing what we expect from it. And to find out whether a firewall satisfies our expectations (stated by a policy) we need to test it.
Figure 1: Automaton for Simple Call Establishment in H.323
2 Motivation
When testing firewalls, one of many things that needs to be tested is the correct stateful handling of various protocols by the firewall. To do this, one needs a specification of the protocol which is to be tested. Such a specification can be written as a finite automata, see figures 1 and 2 for examples. From such automata, test cases can then be generated, using different methods [1, 2, 4, 5, 6, 7], and run against the firewall.
Some explanations:
i / o on transitions means input and output respectively.
The outputs are the reaction of a firewall to the given inputs. This is either accepting (forwarding) a packet or dropping it.
“x1 = 2-5” means x1 ∈ 2..6
Figure 2: Automaton for tcp
3 Assignment
3.1 Objectives
The goal of this project is to implement a tool which converts a graphical representation of an automaton into abstract test cases. These abstract test cases will then be instantiated with test tuples to generate concrete test cases [8], which then can be fed to fwttest [9].
3.2 Tasks
- Define criteria the tool has to satisfy (together with the supervisor)
- Evaluate tools for the graphical specification of automata, e.g. [3].
- Adapt the best suited tool to our needs
- Write a converter (for the tool chosen) between graphical and textual specifications of finite automata
- Generate abstract test cases from a textual representation of a finite automaton using an algorithm given by the supervisor
The whole software written during this thesis should rely on open source software (if possible) and should be modular and extendable. In particular, it should be easy to later extend the software with other test generation algorithms.
3.3 Deliverables
- At the beginning of the semester thesis an agreement must be signed which allows the supervisor of this thesis, his project partners and ETH Zurich to use and distribute the software written during the thesis.
- At the end of the second week, a detailed time schedule of the semester thesis must be given and discussed with the supervisor.
- At the end of the semester thesis a presentation of 20 minutes must be given during an Infsec group seminar. It should give an overview as well as the most important details of the work.
- The final report may be written in English or German. It must contain an abstract written in both English and German, this assignment and the schedule. It should include an introduction, an analysis of related work, and a complete documentation of all used software tools. Three copies of the final report must be delivered to the supervisor.
- Software and configuration scripts developed during the thesis must be delivered to the supervisor on a CD-ROM.
16th August 2005
Prof. D. Basin
References
B. Software Requirements Document
The following is the document written at the beginning of this thesis, fixing the requirements for the tool.
Contents
1 Introduction
1.1 Purpose of this Document
1.2 Purpose of the Software Product
1.3 Explanation of Terms
1.4 Overview of the Rest of this Document
2 Description of the Software Product
2.1 Objectives and Operating Environment of the Product
2.2 Implemented Functions
2.3 Information on the Expected Users
2.4 Dependencies and Prerequisites
3 Specific Requirements
3.1 User Interface (UI)
3.2 Interfaces (IF)
3.3 Functionality (FUN)
1 Introduction
1.1 Purpose of this Document
This document describes the software tool “Generation of Test Cases”, which is being developed during the semester thesis of Stefan Hildenbrand at ETH Zurich. It is addressed both to the developers of the software and to the users who will work with the software.
1.2 Purpose of the Software Product
1.3 Explanation of Terms
Finite automaton  A deterministic finite automaton (DFA) is a model of behaviour consisting of states, state transitions and actions. A state stores the information about the past, i.e. it reflects the changes of the input from system start up to the current point in time. A state transition indicates a change of the state of the DFA and is described by logical conditions that must be fulfilled to enable the transition. An action is the output of the DFA that occurs in a certain situation.
A special type of DFA is the Mealy machine. A Mealy machine is formally described as a 7-tuple: \( A = (Q, \Sigma, \Omega, \delta, \lambda, q_0, F) \).
- \( Q \) is a finite set of states (\(|Q| < \infty\)).
- \( \Sigma \) is the input alphabet, \(|\Sigma| < \infty\).
- \( \Omega \) is the output alphabet, \(|\Omega| < \infty\).
- \( \delta \) is the transition function \( \delta : Q \times \Sigma \rightarrow Q \).
- \( \lambda \) is the output function \( \lambda : Q \times \Sigma \rightarrow \Omega \).
- \( q_0 \in Q \) is the initial state.
- \( F \subseteq Q \) is a (finite) set of possible accepting states. If the automaton, after reading the input word \( w \in \Sigma^* \), halts in a state from \( F \), then \( w \) belongs to the language \( L(A) \).
In this thesis, all DFAs that are not defined further are Mealy machines.
Model  The model denotes the requirements on the implementation that are given by the design or the policy.
Implementation under test  The implementation under test (IUT) denotes an existing implementation of the model whose correct operation is to be verified.
1.4 Overview of the Rest of this Document
The rest of this document gives a detailed description of all requirements the tool has to fulfil. Section 2 gives the general description of the software; this comprises a general product overview, an overview of the main functions of the software, requirements on the users, and dependencies / limitations that arise for the software. Section 3 then goes into the specific requirements in detail.
2 Description of the Software Product
2.1 Objectives and Operating Environment of the Product
The tool forms the link between the user and further tools that are able to apply the abstract test cases to the corresponding IUT. The output format of this tool should be kept as generic as possible so that this purpose can be fulfilled.
For its deployment, however, the demands on existing software or hardware should be as low as possible. A Java environment may be assumed.
2.2 Implemented Functions
- the user can create arbitrary finite automata consisting of states and transitions
- the user can define an arbitrary format for input and output, each consisting of an arbitrary number of fields with an associated data type
- for the transitions, constraints can be defined according to the data types of the fields of the input alphabet, e.g. the number in field 4 must lie between 3 and 12
- the constraints on the individual fields can be combined into arbitrary Boolean formulas
- the defined constraints can be referenced by an arbitrary name, which is then written at the transition. These names serve as the alphabet of the DFA.
- the format for input and output as well as the DFA can be saved in a format that is easy to read for further tools
2.3 Information on the Expected Users
2.4 Dependencies and Prerequisites
3 Specific Requirements
3.1 User Interface (UI)
TG_UI_01 The tool offers the user a graphical interface where the DFA can be drawn with the mouse and the necessary tools (create state, create transition).
TG_UI_02 The interface offers a button that leads to the transition format. Here the user can define the format for input and output in the form of two tables (entering the names of the individual fields and choosing their data types).
3.2 Interfaces (IF)
TG_IF_01 The format for the transitions and the drawn DFAs can be saved and loaded again.
TG_IF_02 The tool offers an interface in the form of a structured file so that further tools can process the drawn DFA.
TG_IF_03 The tool produces an output of test cases suitable for fwtest.
3.3 Functionality (FUN)
TG_FUN_01 The drawn DFAs can be laid out automatically on the user's request.
TG_FUN_02 The tool contains an algorithm for the generation of test cases and, on the user's request, produces a corresponding output from the current DFA.
C. Schedule
The following is the schedule used for the work on this project.
D. CD contents
The following is the description of the CD contents that also appears in the root directory of the CD.
****************************
* *
* TCGTOOL *
* automated test case generation *
* from finite state machines *
****************************
CD CONTENTS
on this CD-ROM the following content can be found:
/this file
/doc
The report of this semester thesis, the presentation slides used in an intern talk and additional documentation.
/doc/src
The tex source files for the documentation and the slides. Call ”make” in this directory to compile the documentation.
/src
The Java source files for TCGTool. Call ”make” in this directory to compile the tool.
/bin
A jar package of TCGTool, already compiled, just start it using your Java Runtime Environment.
/opt
Additional files and packages used for TCGTool.
/opt/jflap – the tool used as foundation for TCGTool
/opt/java – the Java 1.4.2 JRE for Windows and Linux/x86
/opt/latex – some additional packages and files necessary to compile the documentation.
E. Readme
The following is the Readme that comes with the tool.
*******************************************************************************
* TCGTOOL *
* automated test case generation *
* from finite state machines *
*******************************************************************************
This tool was developed as part of a semester thesis at
the Swiss Federal Institute of Technology.
The report of this thesis can be found at:
http://www.infsec.ethz.ch/people/dsenn/SA_StefanHildenbrand_05.pdf
The tool comes with an online help. All information can
be found in the report and the online help.
Building TCGTool
Compiling and running TCGTool requires Java 1.4. In order to
build TCGTool and the JAR, a Makefile has been provided.
To build TCGTool with gnumake, enter
make
from the command line. This will create the compiled '.class'
Java files, as well as the TCGTool.jar executable jar file.
The script used to create the TCGTool.jar executable requires
Python 2.2.
To run from the class files:
java TCGTool
To run as a JAR:
java -jar TCGTool.jar
Alternatively on OS X and on a correctly configured Windows
machine, you can double click the TCGTool.jar file and it
should just work.
References
7 Algorithms for Massive Data Problems
Massive Data, Sampling
This chapter deals with massive data problems where the input data (a graph, a matrix or some other object) is too large to be stored in random access memory. One model for such problems is the streaming model, where the data can be seen only once. In the streaming model, the natural technique to deal with the massive data is sampling. Sampling is done “on the fly”. As each piece of data is seen, based on a coin toss, one decides whether to include the data in the sample. Typically, the probability of including the data point in the sample may depend on its value. Models allowing multiple passes through the data are also useful; but the number of passes needs to be small. We always assume that random access memory (RAM) is limited, so the entire data cannot be stored in RAM.
To introduce the basic flavor of sampling on the fly, consider the following primitive. From a stream of \( n \) positive real numbers \( a_1, a_2, \ldots, a_n \), draw a sample element \( a_i \) so that the probability of picking an element is proportional to its value. It is easy to see that the following sampling method works. Upon seeing \( a_1, a_2, \ldots, a_i \), keep track of the sum \( a = a_1 + a_2 + \cdots + a_i \) and a sample \( a_j, j \leq i \), drawn with probability proportional to its value. On seeing \( a_{i+1} \), replace the current sample by \( a_{i+1} \) with probability \( \frac{a_{i+1}}{a+a_{i+1}} \) and update \( a \).
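The following is a minimal sketch of this primitive; the stream is represented as an array purely for illustration.

```java
import java.util.Random;

// Sketch of the on-the-fly sampling primitive described above: after reading
// the whole stream, each a_i has been kept as the sample with probability
// proportional to its value.
public class WeightedStreamSample {

    public static double sample(double[] stream, Random rng) {
        double total = 0.0;            // running sum a = a_1 + ... + a_i
        double sample = Double.NaN;
        for (double a : stream) {
            total += a;
            // replace the current sample by a with probability a / (a_1 + ... + a_i)
            if (rng.nextDouble() < a / total) {
                sample = a;
            }
        }
        return sample;
    }

    public static void main(String[] args) {
        double[] stream = {1.0, 3.0, 2.0, 4.0};
        System.out.println(sample(stream, new Random()));
    }
}
```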
7.1 Frequency Moments of Data Streams
An important class of problems concerns the frequency moments of data streams. Here a data stream \( a_1, a_2, \ldots, a_n \) of length \( n \) consists of symbols \( a_i \) from an alphabet of \( m \) possible symbols which for convenience we denote as \( \{1, 2, \ldots, m\} \). Throughout this section, \( n, m \), and \( a_i \) will have these meanings and \( s \) (for symbol) will denote a generic element of \( \{1, 2, \ldots, m\} \). The frequency \( f_s \) of the symbol \( s \) is the number of occurrences of \( s \) in the stream. For a nonnegative integer \( p \), the \( p^{th} \) frequency moment of the stream is
\[
\sum_{s=1}^{m} f_s^p.
\]
Note that the \( p = 0 \) frequency moment corresponds to the number of distinct symbols occurring in the stream. The first frequency moment is just \( n \), the length of the string. The second frequency moment, \( \sum_s f_s^2 \), is useful in computing the variance of the stream.
\[
\frac{1}{m} \sum_{s=1}^{m} \left( f_s - \frac{n}{m} \right)^2 = \frac{1}{m} \sum_{s=1}^{m} \left( f_s^2 - 2 \frac{n}{m} f_s + \left( \frac{n}{m} \right)^2 \right) = \frac{1}{m} \sum_{s=1}^{m} f_s^2 - \frac{n^2}{m^2}
\]
In the limit as $p$ becomes large, \( \left( \sum_{s=1}^{m} f_{s}^{p} \right)^{1/p} \) is the frequency of the most frequent element(s).
We will describe sampling based algorithms to compute these quantities for streaming data shortly. But first a note on the motivation for these various problems. The identity and frequency of the most frequent item or more generally, items whose frequency exceeds a fraction of $n$, is clearly important in many applications. If the items are packets on a network with source and destination addresses, the high frequency items identify the heavy bandwidth users. If the data is purchase records in a supermarket, the high frequency items are the best-selling items. Determining the number of distinct symbols is the abstract version of determining such things as the number of accounts, web users, or credit card holders. The second moment and variance are useful in networking as well as in database and other applications. Large amounts of network log data are generated by routers that can record the source address, destination address, and the number of packets for all the messages passing through them. This massive data cannot be easily sorted or aggregated into totals for each source/destination. But it is important to know if some popular source-destination pairs have a lot of traffic for which the variance is the natural measure.
7.1.1 Number of Distinct Elements in a Data Stream
Consider a sequence $a_1, a_2, \ldots, a_n$ of $n$ elements, each $a_i$ an integer in the range 1 to $m$ where $n$ and $m$ are very large. Suppose we wish to determine the number of distinct $a_i$ in the sequence. Each $a_i$ might represent a credit card number extracted from a sequence of credit card transactions and we wish to determine how many distinct credit card accounts there are. The model is a data stream where symbols are seen one at a time. We first show that any deterministic algorithm that determines the number of distinct elements exactly must use at least $m$ bits of memory.
**Lower bound on memory for exact deterministic algorithm**
Suppose we have seen the first $k \geq m$ symbols. The set of distinct symbols seen so far could be any of the $2^m$ subsets of \( \{1, 2, \ldots, m\} \). Each subset must result in a different state for our algorithm and hence $m$ bits of memory are required. To see this, suppose first that two different size subsets of distinct symbols lead to the same internal state. Then our algorithm would produce the same count of distinct symbols for both inputs, clearly an error for one of the input sequences. If two sequences with the same number of distinct elements but different subsets lead to the same state, then appending a symbol that appeared in one sequence but not the other yields two subsets of different sizes that still lead to the same state, which by the first argument causes an error.
**Algorithm for the Number of distinct elements**
Let \( a_1, a_2, \ldots, a_n \) be a sequence of elements where each \( a_i \in \{1, 2, \ldots, m\} \). The number of distinct elements can be estimated with \( O(\log m) \) space. Let \( S \subseteq \{1, 2, \ldots, m\} \) be the set of elements that appear in the sequence. Suppose that the elements of \( S \) were selected uniformly at random from \( \{1, 2, \ldots, m\} \). Let \( \text{min} \) denote the minimum element of \( S \). Knowing the minimum element of \( S \) allows us to estimate the size of \( S \). The elements of \( S \) partition the set \( \{1, 2, \ldots, m\} \) into \(|S| + 1\) subsets each of size approximately \( \frac{m}{|S| + 1} \). See Figure 7.1. Thus, the minimum element of \( S \) should have value close to \( \frac{m}{|S| + 1} \). Solving \( \text{min} = \frac{m}{|S| + 1} \) yields \( |S| = \frac{m}{\text{min}} - 1 \). Since we can determine \( \text{min} \), this gives us an estimate of \(|S|\).
The above analysis required that the elements of \( S \) were picked uniformly at random from \( \{1, 2, \ldots, m\} \). This is generally not the case when we have a sequence \( a_1, a_2, \ldots, a_n \) of elements from \( \{1, 2, \ldots, m\} \). Clearly if the elements of \( S \) were obtained by selecting the \(|S| \) smallest elements of \( \{1, 2, \ldots, m\} \), the above technique would give the wrong answer. If the elements are not picked uniformly at random, can we estimate the number of distinct elements? The way to solve this problem is to use a hash function \( h \) where
\[
h : \{1, 2, \ldots, m\} \to \{0, 1, 2, \ldots, M - 1\}.
\]
To count the number of distinct elements in the input, count the number of elements in the mapped set \( \{h(a_1), h(a_2), \ldots\} \). The point is that \( \{h(a_1), h(a_2), \ldots\} \) behaves like a random subset, so the above heuristic argument using the minimum to estimate the number of elements applies. If we needed \( h(a_1), h(a_2), \ldots \) to be completely independent, the space needed to store the hash function would be too high. Fortunately, only 2-way independence is needed. We recall the formal definition of 2-way independence below. But first recall that a hash function is always chosen at random from a family of hash functions and phrases like “probability of collision” refer to the probability in the choice of hash function.
**Universal Hash Functions**
A set of hash functions
$$H = \{h \mid h : \{1, 2, \ldots, m\} \to \{0, 1, 2, \ldots, M - 1\}\}$$
is 2-universal if for all $x$ and $y$ in $\{1, 2, \ldots, m\}$, $x \neq y$, and for all $z$ and $w$ in $\{0, 1, 2, \ldots, M - 1\}$
$$\Pr(h(x) = z \text{ and } h(y) = w) = \frac{1}{M^2}$$
for a randomly chosen $h$. The concept of a 2-universal family of hash functions is that given $x$, $h(x)$ is equally likely to be any element of $\{0, 1, 2, \ldots, M - 1\}$ and for $x \neq y$, $h(x)$ and $h(y)$ are independent.
We now give an example of a 2-universal family of hash functions. For simplicity let $M$ be a prime. For each pair of integers $a$ and $b$ in the range $[0, M-1]$, define a hash function
$$h_{ab}(x) = ax + b \mod (M)$$
To store the hash function $h_{ab}$, store the two integers $a$ and $b$. This requires only $O(\log M)$ space. To see that the family is 2-universal note that $h(x) = z$ and $h(y) = w$ if and only if
$$\begin{pmatrix} x & 1 \\ y & 1 \end{pmatrix} \begin{pmatrix} a \\ b \end{pmatrix} = \begin{pmatrix} z \\ w \end{pmatrix} \mod (M)$$
If $x \neq y$, the matrix $\begin{pmatrix} x & 1 \\ y & 1 \end{pmatrix}$ is invertible modulo $M$ and there is only one solution for $a$ and $b$. Thus, for $a$ and $b$ chosen uniformly at random, the probability of the equation holding is exactly $\frac{1}{M^2}$.
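A minimal sketch of the resulting estimator, assuming the symbols are positive integers and \( M \) is a prime larger than \( m \): it uses the family \( h_{ab} \) just described and reports \( M/\text{min} \), the quantity analysed in Lemma 7.1 below.

```java
import java.util.Random;

// Sketch of the distinct-element estimator: hash every symbol with a randomly
// chosen h_ab(x) = (ax + b) mod M from the 2-universal family above, track the
// minimum hash value, and report M/min. M is assumed prime and larger than m.
public class DistinctElements {

    public static double estimate(int[] stream, long M, Random rng) {
        long a = (long) (rng.nextDouble() * M);   // a, b chosen uniformly from {0, ..., M-1}
        long b = (long) (rng.nextDouble() * M);
        long min = M;
        for (int x : stream) {
            long h = (a * x + b) % M;
            if (h < min) {
                min = h;
            }
        }
        return (double) M / Math.max(min, 1);     // the estimate M/min, guarding against min = 0
    }

    public static void main(String[] args) {
        int[] stream = {3, 1, 4, 1, 5, 9, 2, 6, 5, 3, 5};  // 7 distinct symbols
        long M = 2_147_483_647L;                           // 2^31 - 1, a prime
        // Lemma 7.1 only guarantees a constant-factor estimate with probability 2/3
        System.out.println(estimate(stream, M, new Random()));
    }
}
```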
**Analysis of the distinct element counting algorithm**
Let $b_1, b_2, \ldots, b_d$ be the distinct values that appear in the input. Then $S = \{h(b_1), h(b_2), \ldots, h(b_d)\}$ is a set of $d$ random and 2-way independent values from the set $\{0, 1, 2, \ldots, M - 1\}$. We now show that $\frac{M}{\min}$ is a good estimate for $d$, the number of distinct elements in the input, where $\min = \min(S)$.
**Lemma 7.1** Assume $M > 100d$. With probability at least $\frac{2}{3}$, $\frac{d}{6} \leq \frac{M}{\min} \leq 6d$, where $\min$ is the smallest element of $S$.
**Proof:** First, we show that $\Pr\left(\frac{M}{\min} > 6d\right) < \frac{1}{6}$.
$$\Pr\left(\frac{M}{\min} > 6d\right) = \Pr\left(\min < \frac{M}{6d}\right) = \Pr\left(\exists k, h(b_k) < \frac{M}{6d}\right)$$
$$\leq \left(\sum_{i=1}^{d} \Pr(h(b_i) \leq M/6d)\right) \leq d/(6d) = 1/6.$$
Finally, we show that \( \Pr\left(\frac{M}{\min} < \frac{d}{6}\right) < \frac{1}{6} \). Note that
\[
\Pr\left(\frac{M}{\min} < \frac{d}{6}\right) = \Pr\left(\min > \frac{6M}{d}\right) = \Pr\left(\forall k,\ h(b_k) > \frac{6M}{d}\right).
\]
For \( i = 1, 2, \ldots, d \) define the indicator variable
\[
y_i = \begin{cases} 0 & \text{if } h(b_i) > \frac{6M}{d} \\ 1 & \text{otherwise} \end{cases}
\]
and let \( y = \sum_{i=1}^{d} y_i \). Now \( \Pr(y_i = 1) \geq \frac{6}{d} \), \( E(y_i) \geq \frac{6}{d} \), and \( E(y) \geq 6 \). For 2-way independent random variables, the variance of their sum is the sum of their variances, so \( \text{Var}(y) = d\,\text{Var}(y_1) \). Further, since \( y_1 \) is 0 or 1, it is easy to see that \( \text{Var}(y_1) = E\left[(y_1 - E(y_1))^2\right] = E(y_1^2) - E^2(y_1) = E(y_1) - E^2(y_1) \leq E(y_1) \). Thus \( \text{Var}(y) \leq E(y) \).
Now by the Chebyshev inequality,
\[
\Pr\left(\frac{M}{\min} < \frac{d}{6}\right) = \Pr\left(\min > \frac{6M}{d}\right) = \Pr\left(\forall k,\ h(b_k) > \frac{6M}{d}\right) = \Pr(y = 0) \leq \Pr\left(|y - E(y)| \geq E(y)\right) \leq \frac{\text{Var}(y)}{E^2(y)} \leq \frac{1}{E(y)} \leq \frac{1}{6}.
\]
Since \( \frac{M}{\min} > 6d \) with probability at most \( \frac{1}{6} \) and \( \frac{M}{\min} < \frac{d}{6} \) with probability at most \( \frac{1}{6} \), \( \frac{d}{6} \leq \frac{M}{\min} \leq 6d \) holds with probability at least \( \frac{2}{3} \).
### 7.1.2 Counting the Number of Occurrences of a Given Element.
To count the number of occurrences of an element in a stream requires at most \( \log n \) space where \( n \) is the length of the stream. Clearly, for any length stream that occurs in practice, we can afford \( \log n \) space. For this reason, the following material may never be used in practice, but the technique is interesting and may give insight into how to solve some other problem.
Consider a string of 0’s and 1’s of length \( n \) in which we wish to count the number of occurrences of 1’s. Clearly if we had \( \log n \) bits of memory we could keep track of the exact number of 1’s. However, we can approximate the number with only \( \log \log n \) bits.
Let \( m \) be the number of 1’s that occur in the sequence. Keep a value \( k \) such that \( 2^k \) is approximately the number of occurrences \( m \). Storing \( k \) requires only \( \log \log n \) bits of memory. The algorithm works as follows. Start with \( k=0 \). For each occurrence of a 1, add one to \( k \) with probability \( 1/2^k \). At the end of the string, the quantity \( 2^k - 1 \) is the estimate of \( m \). To obtain a coin that comes down heads with probability \( 1/2^k \), flip a fair coin, one that comes down heads with probability \( 1/2 \), \( k \) times and report heads if the fair coin comes down heads in all \( k \) flips.
Given \( k \), on average it will take \( 2^k \) ones before \( k \) is incremented. Thus, the expected number of 1’s to produce the current value of \( k \) is \( 1 + 2 + 4 + \cdots + 2^{k-1} = 2^k - 1 \).
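A sketch of this probabilistic counter follows; only \( k \) would actually need to be stored, and the bit stream is an array purely for illustration.

```java
import java.util.Random;

// Sketch of the probabilistic counter described above: k is incremented on a
// 1 with probability 1/2^k, and 2^k - 1 estimates the number of 1's seen.
public class ApproximateCounter {

    public static long estimateOnes(int[] bits, Random rng) {
        int k = 0;
        for (int bit : bits) {
            if (bit == 1 && headsKTimes(k, rng)) {
                k++;
            }
        }
        return (1L << k) - 1;   // 2^k - 1
    }

    // a coin that comes up heads with probability 1/2^k: k fair flips, all heads
    private static boolean headsKTimes(int k, Random rng) {
        for (int i = 0; i < k; i++) {
            if (!rng.nextBoolean()) return false;
        }
        return true;
    }

    public static void main(String[] args) {
        Random rng = new Random();
        int[] bits = new int[100_000];
        for (int i = 0; i < bits.length; i++) bits[i] = rng.nextInt(2);
        System.out.println(estimateOnes(bits, rng));   // roughly 50,000 in expectation
    }
}
```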
7.1.3 Counting Frequent Elements
The Majority and Frequent Algorithms
First consider the very simple problem of \( n \) people voting. There are \( m \) candidates, \( \{1, 2, \ldots, m\} \). We want to determine if one candidate gets a majority vote and if so who. Formally, we are given a stream of integers \( a_1, a_2, \ldots, a_n \), each \( a_i \) belonging to \( \{1, 2, \ldots, m\} \), and want to determine whether there is some \( s \in \{1, 2, \ldots, m\} \) which occurs more than \( n/2 \) times and if so which \( s \). It is easy to see that solving the problem exactly with a deterministic algorithm on read-once streaming data requires \( \Omega(n) \) space. Suppose \( n \) is even, the first \( n/2 \) items are all distinct, and the last \( n/2 \) items are identical. After reading the first \( n/2 \) items, we need to remember exactly which elements of \( \{1, 2, \ldots, m\} \) have occurred. If the contents of the memory are the same for two different sets of elements occurring in the first half of the stream, then a mistake would occur if the second half of the stream consists solely of an element that is in one set but not in the other. Thus, \( \log_2 \left( \binom{m}{n/2} \right) \) bits of memory, which if \( m > n \) is \( \Omega(n) \), are needed.
The following is a simple low-space algorithm that always finds the majority vote if there is one. If there is no majority vote, the output may be arbitrary. That is, there may be “false positives”, but no “false negatives”.
Majority Algorithm
Store \( a_1 \) and initialize a counter to one. For each subsequent \( a_i \), if \( a_i \) is the same as the currently stored item, increment the counter by one. If it differs, decrement the counter by one provided the counter is nonzero. If the counter is zero, then store \( a_i \) and set the counter to one.
To analyze the algorithm, it is convenient to view the decrement counter step as “eliminating” two items, the new one and the one that caused the last increment in the counter. It is easy to see that if there is a majority element \( s \), it must be stored at the end. If not, each occurrence of \( s \) was eliminated; but each such elimination also causes another item to be eliminated and so for a majority item not to be stored at the end, we must have eliminated more than \( n \) items, a contradiction.
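A sketch of the Majority Algorithm follows; the stream is an array of candidate numbers for illustration, and the returned item is only meaningful when a strict majority exists.

```java
// Sketch of the Majority Algorithm described above: a single stored item and
// one counter; if some element has a strict majority it is the one stored at
// the end (the result is arbitrary when no majority exists).
public class MajorityVote {

    public static int majorityCandidate(int[] stream) {
        int stored = stream[0];
        int counter = 1;
        for (int i = 1; i < stream.length; i++) {
            if (stream[i] == stored) {
                counter++;                 // same as the stored item
            } else if (counter > 0) {
                counter--;                 // "eliminate" the new item and one stored occurrence
            } else {
                stored = stream[i];        // counter is zero: adopt the new item
                counter = 1;
            }
        }
        return stored;
    }

    public static void main(String[] args) {
        System.out.println(majorityCandidate(new int[]{2, 7, 2, 2, 5, 2, 2})); // prints 2
    }
}
```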
Next we modify the above algorithm so that not just the majority, but also items with frequency above some threshold are detected. We will also ensure that there are no false positives as well as no false negatives. Indeed the algorithm below will find the frequency (number of occurrences) of each element of \( \{1, 2, \ldots, m\} \) to within an additive term of \( \frac{n}{k+1} \) using \( O(k \log n) \) space by keeping \( k \) counters instead of just one counter.
Algorithm Frequent
Maintain a list of items being counted. Initially the list is empty. For each item, if it is the same as some item on the list, increment its counter by one. If it differs from all the items on the list, then if there are less than \( k \) items on the list, add the item to the list with its counter set to one. If there are already \( k \) items on the list decrement each of the current counters by one. Delete an element from the list if its count becomes zero.
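A sketch of Algorithm Frequent with the \( k \) counters kept in a map follows; Theorem 7.2 below bounds how far each final counter can fall below the true count.

```java
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

// Sketch of Algorithm Frequent with k counters: every symbol's final count
// undercounts its true frequency by at most n/(k+1) (Theorem 7.2).
public class FrequentItems {

    public static Map<Integer, Integer> frequent(int[] stream, int k) {
        Map<Integer, Integer> counters = new HashMap<>();
        for (int item : stream) {
            if (counters.containsKey(item)) {
                counters.merge(item, 1, Integer::sum);        // item already on the list
            } else if (counters.size() < k) {
                counters.put(item, 1);                        // room left: add it with count one
            } else {
                // list full: decrement every counter, dropping those that reach zero
                Iterator<Map.Entry<Integer, Integer>> it = counters.entrySet().iterator();
                while (it.hasNext()) {
                    Map.Entry<Integer, Integer> e = it.next();
                    if (e.getValue() == 1) it.remove();
                    else e.setValue(e.getValue() - 1);
                }
            }
        }
        return counters;
    }

    public static void main(String[] args) {
        int[] stream = {1, 2, 1, 3, 1, 4, 1, 5, 1, 1};
        System.out.println(frequent(stream, 2));   // 1 survives with a large count
    }
}
```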
**Theorem 7.2** At the end of Algorithm Frequent, for each \( s \in \{1, 2, \ldots, m\} \), its counter on the list is at least the number of occurrences of \( s \) in the stream minus \( n/(k+1) \). In particular, if some \( s \) does not occur on the list, its counter is zero and the theorem asserts that it occurs fewer than \( n/(k+1) \) times in the stream.
**Proof:** View each decrement counter step as eliminating some items. An item is eliminated if it is the current \( a_i \) being read and there are already \( k \) symbols different from it on the list in which case it and \( k \) other items are simultaneously eliminated. Thus, the elimination of each occurrence of an \( s \in \{1, 2, \ldots, m\} \) is really the elimination of \( k + 1 \) items. Thus, no more than \( n/(k+1) \) occurrences of any symbol can be eliminated. Now, it is clear that if an item is not eliminated, then it must still be on the list at the end. This proves the theorem.
Theorem 7.2 implies that we can compute the true relative frequency, the number of occurrences divided by \( n \), of every \( s \in \{1, 2, \ldots, m\} \) to within an additive term of \( \frac{1}{k+1} \).
7.1.4 The Second Moment
This section focuses on computing the second moment of a stream with symbols from \( \{1, 2, \ldots, m\} \). Let \( f_s \) denote the number of occurrences of symbol \( s \) in the stream. The second moment of the stream is given by \( \sum_{s=1}^{m} f_s^2 \). To calculate the second moment, for each symbol \( s, 1 \leq s \leq m \), independently set a random variable \( x_s \) to \( \pm 1 \) with probability 1/2. Maintain a sum by adding \( x_s \) to the sum each time the symbol \( s \) occurs in the stream. At the end of the stream, the sum will equal \( \sum_{s=1}^{m} x_s f_s \). The expected value of the sum will be zero where the expectation is over the choice of the \( \pm 1 \) value for the \( x_s \).
\[
E\left( \sum_{s=1}^{m} x_s f_s \right) = 0.
\]
Although the expected value of the sum is zero, its actual value is a random variable and the expected value of the square of the sum is given by
\[
E\left( \sum_{s=1}^{m} x_s f_s \right)^2 = E\left( \sum_{s=1}^{m} x_s^2 f_s^2 \right) + 2E\left( \sum_{s < t} x_s x_t f_s f_t \right) = \sum_{s=1}^{m} f_s^2.
\]
The last equality follows since $E(x_s x_t) = E(x_s)E(x_t) = 0$ for $s \neq t$. Thus
$$a = \left( \sum_{s=1}^{m} x_s f_s \right)^2$$
is an estimator of $\sum_{s=1}^{m} f_s^2$. One difficulty, which we will come back to, is that to store all the $x_i$ requires space $m$ and we want to do the calculation in $\log m$ space.
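A sketch of this estimator follows; for clarity it stores the full vector of \( \pm 1 \) values explicitly, which is exactly the space-\( m \) difficulty just mentioned, and it averages several independent estimates to reduce the variance bounded below.

```java
import java.util.Random;

// Sketch of the second-moment estimator just described. For clarity the ±1
// values x_s are stored explicitly (the space-m issue noted above); the text
// goes on to replace them by a 4-way independent pseudo-random vector.
public class SecondMoment {

    public static double estimate(int[] stream, int m, Random rng) {
        int[] x = new int[m + 1];                    // symbols are 1..m
        for (int s = 1; s <= m; s++) {
            x[s] = rng.nextBoolean() ? 1 : -1;       // x_s = ±1 with probability 1/2
        }
        long sum = 0;                                // running value of sum_s x_s f_s
        for (int symbol : stream) {
            sum += x[symbol];
        }
        return (double) sum * sum;                   // a = (sum_s x_s f_s)^2
    }

    public static void main(String[] args) {
        int[] stream = {1, 2, 2, 3, 3, 3};           // f = (1, 2, 3), true second moment 14
        Random rng = new Random();
        double avg = 0;
        int trials = 1000;                           // averaging reduces the variance
        for (int i = 0; i < trials; i++) avg += estimate(stream, 3, rng);
        System.out.println(avg / trials);            // should be close to 14
    }
}
```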
How good this estimator is depends on its variance.
$$\text{Var}(a) \leq E \left( \sum_{s=1}^{m} x_s f_s \right)^4 = E \left( \sum_{1 \leq s,t,u,v \leq m} x_s x_t x_u x_v f_s f_t f_u f_v \right)$$
The first inequality is because the variance is at most the second moment and the second equality is by expansion. In the second sum, since the $x_s$ are independent, if any one of $s$, $u$, $t$, or $v$ is distinct from the others, then the expectation of the whole term is zero. Thus, we need to deal only with terms of the form $x_s^2 x_t^2$ for $t \neq s$ and terms of the form $x_s^4$. Note that this does not need the full power of mutual independence of all the $x_s$, it only needs 4-way independence, that any four of the $x_s$’s are mutually independent. I.e., for any distinct $s$, $t$, $u$, and $v$ in $\{1, 2, \ldots, m\}$ and any $a$, $b$, $c$, and $d$ in $\{-1, +1\}$
$$\Pr(x_s = a, \ x_t = b, \ x_u = c, \ x_v = d) = \frac{1}{16}.$$
Each term in the above sum has four indices, $s, t, u, v$, and there are $\binom{4}{2}$ ways of choosing two indices that have the same $x$ value. Thus,
$$\text{Var}(a) \leq \binom{4}{2} E \left( \sum_{s=1}^{m} \sum_{t=s+1}^{m} x_s^2 x_t^2 f_s^2 f_t^2 \right) + E \left( \sum_{s=1}^{m} x_s^4 f_s^4 \right)$$
$$= 6 \sum_{s=1}^{m} \sum_{t=s+1}^{m} f_s^2 f_t^2 + \sum_{s=1}^{m} f_s^4$$
$$\leq 3 \left( \sum_{s=1}^{m} f_s^2 \right)^2.$$
The only drawback with the algorithm we have described so far is that we need to store the vector $x$ in memory so that we can do the running sums. This is too space-expensive. We need to do the problem in space dependent upon the logarithm of the size of the alphabet $m$, not $m$ itself.
In the next section, we will see that the computation can be done in $O(\log m)$ space by using a pseudo-random vector $x$ instead of a truly random one. This pseudo-randomness and limited independence has deep connections, so we will go into the connections as well.
**Error-correcting codes, polynomial interpolation, and limited-way independence**
Consider the problem of generating a random $m$-vector $x$ of $\pm 1$’s so that any subset of four coordinates is mutually independent. An $m$-dimensional vector may be generated from a truly random “seed” of only $O(\log m)$ mutually independent bits. Thus, we need only store the $\log m$ bits and can generate any of the $m$ coordinates when needed. Thus the 4-way independent random $m$-vector can be stored using only $\log m$ bits. The first fact needed for this is that for any $k$, there is a finite field $F$ with exactly $2^k$ elements, each of which can be represented with $k$ bits and arithmetic operations in the field can be carried out in $O(k^2)$ time. Here, $k$ will be the ceiling of $\log_2 m$. We also assume another basic fact about polynomial interpolation; a polynomial of degree at most three is uniquely determined by its value over any field $F$ at four points. More precisely, for any four distinct points $a_1, a_2, a_3, a_4 \in F$ and any four possibly not distinct values $b_1, b_2, b_3, b_4 \in F$, there is a unique polynomial $f(x) = f_0 + f_1 x + f_2 x^2 + f_3 x^3$ of degree at most three, so that with computations done over $F$, $f(a_1) = b_1, f(a_2) = b_2, f(a_3) = b_3$, and $f(a_4) = b_4$.
The definition of the pseudo-random $\pm 1$ vector $x$ with 4-way independence is simple. Choose four elements $f_0, f_1, f_2, f_3$ at random from $F$ and form the polynomial $f(s) = f_0 + f_1 s + f_2 s^2 + f_3 s^3$. This polynomial represents $x$ as follows. For $s = 1, 2, \ldots, m$, $x_s$ is the leading bit of the $k$-bit representation of $f(s)$. Thus, the $m$-dimensional vector $x$ requires only $O(k)$ bits where $k = \lceil \log m \rceil$.
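A minimal sketch of this construction, assuming the field $F = GF(2^8)$ (so $m \leq 255$); a larger alphabet would need a larger field $GF(2^k)$. The irreducible polynomial and the helper names are illustrative choices, not from the text:

```python
# Four random coefficients over GF(2^8) act as the O(log m)-bit seed; x_s is
# the leading bit of f(s) = f0 + f1*s + f2*s^2 + f3*s^3 evaluated in the field.
import random

IRRED = 0x11B  # x^8 + x^4 + x^3 + x + 1, irreducible over GF(2)

def gf_mult(a, b):
    """Multiply two GF(2^8) elements (carry-less multiply, reduced mod IRRED)."""
    result = 0
    while b:
        if b & 1:
            result ^= a
        a <<= 1
        if a & 0x100:          # degree reached 8: reduce
            a ^= IRRED
        b >>= 1
    return result

def make_seed():
    """The seed: four random field elements f0, f1, f2, f3."""
    return [random.randrange(256) for _ in range(4)]

def x_value(seed, s):
    """Return x_s in {-1, +1}, the leading bit of f(s); field addition is XOR."""
    f0, f1, f2, f3 = seed
    s2 = gf_mult(s, s)
    s3 = gf_mult(s2, s)
    val = f0 ^ gf_mult(f1, s) ^ gf_mult(f2, s2) ^ gf_mult(f3, s3)
    return 1 if val & 0x80 else -1
```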
**Lemma 7.3** The $x$ defined above has 4-way independence.
**Proof:** Assume that the elements of $F$ are represented in binary using $\pm 1$ instead of the traditional 0 and 1. Let $s, t, u,$ and $v$ be any four coordinates of $x$ and let $\alpha, \beta, \gamma, \delta \in \{-1, 1\}$. There are exactly $2^{k-1}$ elements of $F$ whose leading bit is $\alpha$ and similarly for $\beta, \gamma,$ and $\delta$. So, there are exactly $2^{4(k-1)}$ 4-tuples of elements $b_1, b_2, b_3, b_4 \in F$ so that the leading bit of $b_1$ is $\alpha$, the leading bit of $b_2$ is $\beta$, the leading bit of $b_3$ is $\gamma$, and the leading bit of $b_4$ is $\delta$. For each such $b_1, b_2, b_3, b_4$, there is precisely one polynomial $f$ so that $f(s) = b_1, f(t) = b_2, f(u) = b_3$, and $f(v) = b_4$. The probability that $x_s = \alpha, x_t = \beta, x_u = \gamma,$ and $x_v = \delta$ is precisely
$$\frac{2^{4(k-1)}}{\text{total number of } f} = \frac{2^{4(k-1)}}{2^{4k}} = \frac{1}{16},$$
as asserted.
The variance can be reduced by a factor of \( r \) by taking the average of \( r \) independent trials. With \( r \) independent trials the variance of the average would be at most \( \frac{3}{r} E^2(a) \), so to achieve relative error \( \varepsilon \) in the estimate of \( \sum_{s=1}^{m} f_s^2 \), \( O(1/\varepsilon^2) \) independent trials suffice. If \( \varepsilon \) is \( \Omega(1) \), then \( r \) is \( O(1) \), so the number of trials \( r \) is not the problem; the problem is the space \( m \) needed to store the \( x_s \).
Lemma 7.3 describes how to get one vector \( x \) with 4-way independence. However, we need \( r = O(1/\varepsilon^2) \) vectors, and the vectors must be mutually independent. But this is easy: just choose \( r \) independent polynomials at the outset.
To implement the algorithm with low space, store only the polynomials in memory. This requires \( 4k = O(\log m) \) bits per polynomial for a total of \( O((\log m)/\varepsilon^2) \) bits. When a symbol \( s \) in the stream is read, evaluate each polynomial at \( s \) to obtain the corresponding value of \( x_s \) and update the running sums. \( x_s \) is just the leading bit of the polynomial evaluated at \( s \); this calculation takes \( O(\log m) \) time. Thus, we repeatedly compute the \( x_s \) from the “seeds”, namely the coefficients of the polynomials.
This idea of polynomial interpolation is also used in other contexts. Error-correcting codes is an important example. Say we wish to transmit \( n \) bits over a channel which may introduce noise. One can introduce redundancy into the transmission so that some channel errors can be corrected. A simple way to do this is to view the \( n \) bits to be transmitted as coefficients of a polynomial \( f(x) \) of degree \( n - 1 \). Now transmit \( f \) evaluated at points 1, 2, 3, \ldots, \( n + m \). At the receiving end, any \( n \) correct values will suffice to reconstruct the polynomial and the true message. So up to \( m \) errors can be tolerated. But even if the number of errors is at most \( m \), it is not a simple matter to know which values are corrupted. We do not elaborate on this here.
### 7.2 Matrix Algorithms using sampling
How does one deal with a large matrix? An obvious suggestion is to take a sample of the matrix. Uniform sampling does not work in general. For example, if only a small fraction of the matrix entries are large, uniform sampling may miss them. So the sampling probabilities need to take into account the magnitude of the entries. It turns out that sampling the rows/columns of a matrix with probabilities proportional to the length squared of the row/column is a good idea in many contexts. We present two examples here; matrix multiplication and the sketch of a matrix.
#### 7.2.1 Matrix Multiplication Using Sampling
Suppose \( A \) is an \( m \times n \) matrix and \( B \) is an \( n \times p \) matrix and the product \( AB \) is desired. We show how to use sampling to get an approximate product faster than the traditional
multiplication. Let $A(:, k)$ denote the $k^{th}$ column of $A$; it is an $m \times 1$ matrix. Let $B(k, :)$ be the $k^{th}$ row of $B$; it is a $1 \times p$ matrix. It is easy to see that
$$AB = \sum_{k=1}^{n} A(:, k)B(k, :).$$
Note that for each value of $k$, $A(:, k)B(k, :)$ is an $m \times p$ matrix each element of which is a single product of elements of $A$ and $B$. An obvious use of sampling suggests itself. Sample some values for $k$ and compute $A(:, k)B(k, :)$ for the sampled $k$'s and use their suitably scaled sum as the estimate of $AB$. It turns out that nonuniform sampling probabilities are useful. Define a random variable $z$ that takes on values in $\{1, 2, \ldots, n\}$. Let $p_k$ denote the probability that $z$ assumes the value $k$. The $p_k$ are nonnegative and sum to one. Define an associated random matrix variable that has value
$$X = \frac{1}{p_k} A(:, k) B(k, :)$$
with probability $p_k$. Let $E(X)$ denote the entry-wise expectation.
$$E(X) = \sum_{k=1}^{n} \text{Prob}(z = k) \frac{1}{p_k} A(:, k) B(k, :) = \sum_{k=1}^{n} A(:, k)B(k, :) = AB.$$
This explains the scaling by $\frac{1}{p_k}$ in $X$.
Define the variance of $X$ as the sum of the variances of all its entries.
$$\text{Var}(X) = \sum_{i=1}^{m} \sum_{j=1}^{p} \text{Var}(x_{ij}) \leq \sum_{ij} E(x_{ij}^2) \leq \sum_{ij} \sum_{k} p_k \frac{1}{p_k^2} a_{ik}^2 b_{kj}^2.$$
We can simplify the last term by exchanging the order of summations to get
$$\text{Var}(X) \leq \sum_{k} \frac{1}{p_k} \sum_{i} a_{ik}^2 \sum_{j} b_{kj}^2 = \sum_{k} \frac{1}{p_k} |A(:, k)|^2 |B(k, :)|^2.$$
What is the best choice of $p_k$? It is the one which minimizes the variance. In the above calculation, we discarded $\sum_{ij} E^2(x_{ij})$, but this term is just $\sum_{ij} (AB)_{ij}^2$ since $E(X) = AB$ and is independent of $p_k$. So we should choose $p_k$ to minimize $\sum_{k} \frac{1}{p_k} |A(:, k)|^2 |B(k, :)|^2$. It can be seen by calculus that the minimizing $p_k$ are proportional to $|A(:, k)||B(k, :)|$. In the important special case when $B = A^T$, pick columns of $A$ with probabilities proportional to the squared length of the columns.
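One way to see the minimizing choice: minimize $\sum_k c_k/p_k$ subject to $\sum_k p_k = 1$, where $c_k = |A(:,k)|^2 |B(k,:)|^2$. Setting up a Lagrange multiplier $\lambda$ gives
\[
\frac{\partial}{\partial p_k}\left(\sum_j \frac{c_j}{p_j} + \lambda \sum_j p_j\right) = -\frac{c_k}{p_k^2} + \lambda = 0
\quad\Longrightarrow\quad
p_k \propto \sqrt{c_k} = |A(:,k)|\,|B(k,:)|.
\]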
Such sampling has been widely used and it goes under the name of “length squared sampling”. We will use it here. Even in the general case when $B$ is not $A^T$, it simplifies the bounds. If $p_k$ is proportional to $|A(:, k)|^2$, i.e., $p_k = \frac{|A(:, k)|^2}{||A||_F^2}$, then
\[
\text{Var}(X) \leq ||A||_F^2 \sum_k |B(k,:)|^2 = ||A||_F^2 \, ||B||_F^2.
\]

Figure 7.2: Approximate matrix multiplication using sampling. The product of $A$ ($m \times n$) and $B$ ($n \times p$) is approximated by the product of an $m \times s$ matrix of sampled columns of $A$ and an $s \times p$ matrix of the corresponding scaled rows of $B$.
To reduce the variance, do \( s \) independent trials. Each trial \( i, i = 1, 2, \ldots, s \) yields a matrix \( X_i \) distributed as the \( X \) defined above. We take \( \frac{1}{s} \sum_{i=1}^s X_i \) as our estimate of \( AB \). Since the variance of a sum of independent random variables is the sum of the variances, the variance of \( \frac{1}{s} \sum_{i=1}^s X_i \) is \( \frac{1}{s} \text{Var}(X) \) and so is at most \( \frac{1}{s} ||A||_F^2 ||B||_F^2 \).
To implement this, suppose \( k_1, k_2, \ldots, k_s \) are the \( k \)'s chosen in each trial. It is easy to see that
\[
\frac{1}{s} \sum_{i=1}^s X_i = \frac{1}{s} \left( \frac{A(:,k_1) B(k_1,:)}{p_{k_1}} + \frac{A(:,k_2) B(k_2,:)}{p_{k_2}} + \cdots + \frac{A(:,k_s) B(k_s,:)}{p_{k_s}} \right) = C \hat{B},
\]
where \( C \) is the \( m \times s \) matrix of the chosen columns of \( A \) and \( \hat{B} \) is an \( s \times p \) matrix with the corresponding rows of \( B \) scaled, namely, \( \hat{B} \) has rows \( B(k_1,:)/(sp_{k_1}), B(k_2,:)/(sp_{k_2}), \ldots, B(k_s,:)/(sp_{k_s}) \). This is represented in Figure 7.2.
We summarize our discussion in Lemma 7.4.
**Lemma 7.4** Suppose \( A \) is an \( m \times n \) matrix and \( B \) is an \( n \times p \) matrix. The product \( AB \) can be estimated by \( C \hat{B} \), where, \( C \) is an \( m \times s \) matrix consisting of \( s \) columns of \( A \) picked according to length-squared distribution and \( \hat{B} \) is the \( s \times p \) matrix consisting of the corresponding rows of \( B \) scaled as above. The error is bounded by:
\[
E \left( ||AB - C \hat{B}||_F^2 \right) \leq \frac{||A||_F^2 ||B||_F^2}{s}.
\]
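A minimal numpy sketch of this estimate under the length-squared column distribution of Lemma 7.4; the function name and the use of numpy are illustrative assumptions, not from the text:

```python
# Approximate A @ B from s sampled columns of A and corresponding scaled rows of B.
import numpy as np

def approx_matmul(A, B, s, rng=None):
    rng = rng or np.random.default_rng()
    n = A.shape[1]
    col_norms = np.sum(A**2, axis=0)            # |A(:,k)|^2 for each column k
    p = col_norms / col_norms.sum()             # length-squared probabilities
    ks = rng.choice(n, size=s, p=p)             # s independent column indices
    C = A[:, ks]                                 # m x s sampled columns of A
    B_hat = B[ks, :] / (s * p[ks])[:, None]      # s x p corresponding scaled rows of B
    return C @ B_hat                             # estimate of A @ B
```

By Lemma 7.4, the expected squared Frobenius-norm error of this estimate is at most $||A||_F^2 ||B||_F^2 / s$.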
### 7.2.2 Sketch of a Large Matrix
The main result of this section will be that for any matrix, a sample of columns and rows, each picked according to length squared distribution is a sufficient sketch of the matrix. Let \( A \) be an \( m \times n \) matrix. Pick \( s \) columns of \( A \) according to length squared distribution. Let \( C \) be the \( m \times s \) matrix containing the picked columns. Similarly, pick \( r \)
rows of $A$ according to length squared distribution on the rows of $A$. Let $R$ be the $r \times n$ matrix of the picked rows. From $C$ and $R$, we can find a matrix $U$ so that $A \approx CUR$. The schematic diagram is given in Figure 7.3.
The proof makes crucial use of the fact that the sampling of rows and columns is with probability proportional to the squared length. One may recall that the top $k$ singular vectors of the SVD of $A$, give a similar picture; but the SVD takes more time to compute, requires all of $A$ to be stored in RAM, and does not have the property that the rows and columns are directly from $A$. The last property - that the approximation involves actual rows/columns of the matrix rather than linear combinations - is called an interpolative approximation and is useful in many contexts. However, the SVD does yield the best 2-norm approximation. Error bounds for the approximation $CUR$ are weaker.
We briefly touch upon two motivations for such a sketch. Suppose $A$ is the document-term matrix of a large collection of documents. We are to “read” the collection at the outset and store a sketch so that later, when a query represented by a vector with one entry per term arrives, we can find its similarity to each document in the collection. Similarity is defined by the dot product. From Figure 7.3 it is clear that the matrix-vector product of a query with the right hand side can be done in time $O(ns + sr + rm)$, which would be linear in $n$ and $m$ if $s$ and $r$ are $O(1)$. To bound errors for this process, we need to show that the difference between $A$ and the sketch of $A$ has small 2-norm. Recall that the 2-norm $||A||_2$ of a matrix $A$ is $\max_{||x|| = 1} |Ax|$. The fact that the sketch is an interpolative approximation means that our approximation essentially consists of a subset of documents and a subset of terms, which may be thought of as a representative set of documents and terms.
A second motivation comes from recommendation systems. Here $A$ would be a customer-product matrix whose $(i, j)^{th}$ entry is the preference of customer $i$ for product $j$. The objective is to collect a few sample entries of $A$ and based on them, get an approximation to $A$ so that we can make future recommendations. A few sampled rows of $A$ (all preferences of a few customers) and a few sampled columns (all customers’ preferences for a few products) give a good approximation to $A$ provided that the samples are drawn according to the length-squared distribution.
It remains now to describe how to find $U$ from $C$ and $R$. Throughout the rest of this section, we assume that $RR^T$ is invertible; this case conveys the essential ideas. Note that since $r$ in general will be much smaller than $n$ and $m$, unless the matrix $A$ is degenerate, the $r$ rows in the sample $R$ are likely to be linearly independent, giving us invertibility of $RR^T$.
We begin with some intuition. Write $A$ as $AI$, where, $I$ is the $n \times n$ identity matrix. Pretend for the moment that we approximate the product $AI$ by sampling $s$ columns of $A$ according to length-squared. Then, as in the last section, write $AI \approx CW$, where, $W$ consists of a scaled version of the $s$ rows of $I$ corresponding to the $s$ columns of $A$ that
were picked. Lemma 7.4 bounds the error $||A - CW||_F^2$ by $||A||_F^2 ||I||_F^2 / s = ||A||_F^2 \, n / s$. But clearly, we would like the error to be a small fraction of $||A||_F^2$, which would require $s \geq n$; this is of no use, since it would pick as many or more columns than the whole of $A$.

Figure 7.3: Schematic diagram of the approximation of $A$ by a sample of $s$ columns and $r$ rows.
We modify the intuition. We assumed that $RR^T$ is invertible. Then it is easy to see (Lemma 7.8) that $R^T(RR^T)^{-1} R$ (we denote $R^T(RR^T)^{-1} R$ by $P$ for convenience) acts as the identity matrix on the space $V$ spanned by the rows of $R$. Let’s use this identity-like matrix $P$ instead of $I$ in the above discussion. Using the fact that $R$ is picked according to length squared sampling, we will show the following proposition later.
**Proposition 7.5** $A \approx AP$ and the error $E(||A - AP||_2^2)$ is at most $||A||_F^2 / r$.
We then use Lemma 7.4 to argue that instead of doing the multiplication $AP$, we can use the sampled columns of $A$ and the corresponding rows of $P$. The sampled $s$ columns of $A$ form $C$. We have to take the corresponding $s$ rows of $P = R^T(RR^T)^{-1} R$, which is the same as taking the corresponding $s$ rows of $R^T$, and multiplying this by $(RR^T)^{-1} R$. It is easy to check that this leads to an expression of the form $CUR$. Further, by Lemma 7.4, the error is bounded by
$$E(||AP - CUR||_2^2) \leq E(||AP - CUR||_F^2) \leq \frac{||A||_F^2 \, ||P||_F^2}{s} \leq \frac{r}{s} ||A||_F^2, \tag{7.2}$$
since we will show later that:
**Proposition 7.6** $||P||_F^2 \leq r$.
Putting (7.2) and Proposition 7.5 together (and using $||A - CUR||_2 \leq ||A - AP||_2 + ||AP - CUR||_2$, which implies that $||A - CUR||_2^2 \leq 2||A - AP||_2^2 + 2||AP - CUR||_2^2$), we get the main result:
**Theorem 7.7** Suppose $A$ is any $m \times n$ matrix and $r$ and $s$ are positive integers. Suppose $C$ is an $m \times s$ matrix of $s$ columns of $A$ picked according to length squared sampling and
similarly $R$ is a matrix of $r$ rows of $A$ picked according to length squared sampling. Then, we can find from $C, R$ an $s \times r$ matrix $U$ so that
$$E\left(\|A - CUR\|_2^2\right) \leq \|A\|_F^2 \left(\frac{2}{r} + \frac{2r}{s}\right).$$
Choosing $s = r^2$, the bound becomes $O(1/r)\|A\|_F^2$, and if we want the bound to be at most $\varepsilon\|A\|_F^2$ for some small $\varepsilon > 0$, it suffices to choose $r \in \Omega(1/\varepsilon)$.
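A minimal numpy sketch of the whole construction under these assumptions (the rows of $R$ are scaled so that $R^TR$ estimates $A^TA$, and a pseudo-inverse guards against a singular $RR^T$); the helper names are illustrative, not from the text:

```python
# CUR sketch: C holds s sampled columns of A, R holds r sampled (scaled) rows,
# and U is built from the sampled rows of P = R^T (R R^T)^{-1} R.
import numpy as np

def length_squared_indices(M, count, axis, rng):
    """Sample `count` row (axis=0) or column (axis=1) indices with prob ~ squared length."""
    norms = np.sum(M**2, axis=1 - axis)
    p = norms / norms.sum()
    return rng.choice(len(p), size=count, p=p), p

def cur_sketch(A, s, r, rng=None):
    rng = rng or np.random.default_rng()
    rows, prow = length_squared_indices(A, r, axis=0, rng=rng)
    R = A[rows, :] / np.sqrt(r * prow[rows])[:, None]   # scaled rows: R^T R estimates A^T A
    cols, pcol = length_squared_indices(A, s, axis=1, rng=rng)
    C = A[:, cols]                                       # sampled columns of A, unscaled
    W = R[:, cols].T / (s * pcol[cols])[:, None]         # s x r: scaled sampled rows of R^T
    U = W @ np.linalg.pinv(R @ R.T)                      # s x r
    return C, U, R

# Usage: C, U, R = cur_sketch(A, s=r*r, r=r); A_approx = C @ U @ R
```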
We now briefly look at the time needed to compute $U$. The only involved step in computing $U$ is to find $(RR^T)^{-1}$. But note that $RR^T$ is an $r \times r$ matrix and since $r$ is much smaller than $n$ and $m$, this is fast. Now we prove all the claims used in the discussion above.
**Lemma 7.8** If $RR^T$ is invertible, then $R^T(RR^T)^{-1}R$ acts as the identity matrix on the row space of $R$. I.e., for every vector $x$ of the form $x = R^Ty$ (this defines the row space of $R$), we have $R^T(RR^T)^{-1}Rx = x$.
**Proof**: For $x = R^Ty$, since $RR^T$ is invertible
$$R^T (RR^T)^{-1} R x = R^T (RR^T)^{-1} R R^T y = R^T y = x.$$
Now we prove Proposition 7.5. First suppose $x \in V$. Then we can write $x = R^Ty$, and so $Px = R^T (RR^T)^{-1} R R^T y = R^T y = x$, so for $x \in V$, we have $(A - AP)x = 0$. So, it suffices to consider $x \in V^\perp$. For such $x$, we have $(A - AP)x = Ax$ and we have
$$|(A - AP)x|^2 = |Ax|^2 = x^T A^T A x = x^T (A^T A - R^T R) x \leq ||A^T A - R^T R||_2 |x|^2,$$
so we get $||A - AP||_2^2 \leq ||A^T A - R^T R||_2$, so it suffices to prove that $||A^T A - R^T R||_2 \leq ||A||_F^2 / r$, which follows directly from Lemma 7.4, since we can think of $R^T R$ as a way of estimating $A^T A$ by picking (according to length-squared distribution) columns of $A^T$, i.e., rows of $A$. This proves Proposition 7.5.
Proposition 7.6 is easy to see: since by Lemma 7.8, $P$ is the identity on the space $V$ spanned by the rows of $R$, $||P||_F^2$ is the sum of its singular values squared, which is at most $r$ as claimed.
### 7.3 Sketches of Documents
Suppose one wished to store all the web pages from the WWW. Since there are billions of web pages, one might store just a sketch of each page where a sketch is a few hundred bits that capture sufficient information to do whatever task one had in mind. A web page or a document is a sequence. We begin this section by showing how to sample a set and then how to convert the problem of sampling a sequence into a problem of sampling a set.
Consider subsets of size 1000 of the integers from 1 to $10^6$. Suppose one wished to compute the resemblance of two subsets $A$ and $B$ by the formula
$$\text{resemblance} (A, B) = \frac{|A \cap B|}{|A \cup B|}$$
Suppose that instead of using the sets $A$ and $B$, one sampled the sets and compared random subsets of size ten. How accurate would the estimate be? One way to sample would be to select ten elements uniformly at random from $A$ and $B$. However, this method is unlikely to produce overlapping samples. Another way would be to select the ten smallest elements from each of $A$ and $B$. If the sets $A$ and $B$ overlapped significantly one might expect the sets of ten smallest elements from each of $A$ and $B$ to also overlap. One difficulty that might arise is that the small integers might be used for some special purpose and appear in essentially all sets and thus distort the results. To overcome this potential problem, rename all elements using a random permutation.
Suppose two subsets of size 1000 overlapped by 900 elements. What would the overlap be of the 10 smallest elements from each subset? One would expect the nine smallest elements from the 900 common elements to be in each of the two subsets for an overlap of 90%. The resemblance($A, B$) for the size ten sample would be $9/11=0.81$.
Another method would be to select the elements equal to zero mod $m$ for some integer $m$. If one samples mod $m$, the size of the sample is not fixed but grows in proportion to the size of the underlying set. Sampling mod $m$ also allows us to handle containment.
In another version of the problem one has a sequence rather than a set. Here one converts the sequence into a set by replacing the sequence by the set of all short subsequences of some length $k$. Corresponding to each sequence is a set of length $k$ subsequences. If $k$ is sufficiently large, then two sequences are highly unlikely to give rise to the same set of subsequences. Thus, we have converted the problem of sampling a sequence to that of sampling a set. Instead of storing all the subsequences, we need only store a small subset of the set of length $k$ subsequences.
Suppose you wish to be able to determine if two web pages are minor modifications of one another or to determine if one is a fragment of the other. Extract the sequence of words occurring on the page. Then define the set of subsequences of $k$ consecutive words from the sequence. Let $S(D)$ be the set of all subsequences of length $k$ occurring in document $D$. Define resemblance of $A$ and $B$ by
$$\text{resemblance} (A, B) = \frac{|S(A) \cap S(B)|}{|S(A) \cup S(B)|}$$
And define containment as
$$\text{containment} (A, B) = \frac{|S(A) \cap S(B)|}{|S(A)|}$$
Let $W$ be a set of subsequences. Define min($W$) to be the $s$ smallest elements in $W$ and define mod($W$) as the set of elements of $W$ that are zero mod $m$.
Let $\pi$ be a random permutation of all length $k$ subsequences. Define $F(A)$ to be the $s$ smallest elements of $S(A)$ in the ordering defined by $\pi$, and $V(A)$ to be the set of elements of $S(A)$ whose images under $\pi$ are zero mod $m$.
Then
$$\frac{|F(A) \cap F(B)|}{|F(A) \cup F(B)|}$$
and
$$\frac{|V(A) \cap V(B)|}{|V(A) \cup V(B)|}$$
are unbiased estimates of the resemblance of $A$ and $B$. The value
$$\frac{|V(A) \cap V(B)|}{|V(A)|}$$
is an unbiased estimate of the containment of $A$ in $B$.
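A minimal sketch of the min-based estimate for two documents, using $k$-word shingles; the SHA-1-based hash below is only a stand-in for the random permutation $\pi$, and the names are illustrative:

```python
# Estimate resemblance(A, B) = |F(A) intersect F(B)| / |F(A) union F(B)| from sketches.
import hashlib

def shingles(text, k=5):
    words = text.split()
    return {" ".join(words[i:i + k]) for i in range(max(len(words) - k + 1, 0))}

def h(shingle):
    # stand-in for the random permutation pi of all length-k subsequences
    return int(hashlib.sha1(shingle.encode()).hexdigest()[:16], 16)

def F(subseqs, s=100):
    return set(sorted(h(x) for x in subseqs)[:s])

def estimated_resemblance(doc_a, doc_b, k=5, s=100):
    fa = F(shingles(doc_a, k), s)
    fb = F(shingles(doc_b, k), s)
    return len(fa & fb) / len(fa | fb)
```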
XML-based Available-to-Promise Logic for Small and Medium Sized Enterprises
Joerg-Michael Friedrich
Information Systems Research Network
University of Erlangen-Nuremberg
Äußerer Laufer Platz 13-15, 90403 Nuremberg, Germany
friedrich@forwin.de
Jochen Speyerer
Information Systems Research Network
University of Erlangen-Nuremberg
Äußerer Laufer Platz 13-15, 90403 Nuremberg, Germany
speyerer@forwin.de
Abstract
At the Information Systems Research Network (FORWIN), Nuremberg, Germany we have prototypically implemented a lean and flexible available-to-promise application which is integrated with a framework of software components fulfilling the functions of supply chain management (SCM). This project is to demonstrate that it is possible to implement cost-effective and flexible software tailored to the needs of small businesses which can provide reliable information about product availability. To suit a large variety of companies, the way in which the component influences decisions or automates processes can be adjusted through different parameters, such as timeout, substitution, automatic processing or prioritization of suppliers.
In order to integrate all sorts of existing MRP or legacy systems along the supply chain the information flow is organized through a transaction-based exchange of standardized XML documents via the Internet.
1 Introduction
In order to provide customers with reliable information about product availability it is necessary to combine sophisticated calculation methods with fast and accurate information sharing along the whole supply chain (SC). One of the most essential features of supply chain management (SCM) is an available-to-promise (ATP) check [1]. The vendors of SCM standard software packages like i2 Technologies, Manugistics, or SAP have included ATP functionality in their products. Unfortunately, these applications are not really suitable for smaller businesses since they are very complex and expensive [2]. Due to their very restricted financial resources, small and medium sized enterprises (SME) have specific demands on business information systems. They need cost-effective and scalable solutions which fit their special needs. Rather than having complex software for advanced planning and scheduling, SME require simple tools to improve information flow within the SC and collaboration with their partners.
At the Information Systems Research Network (FORWIN), Nuremberg, Germany we are currently developing a software framework for SCM which is tailored to the needs of SME. In order to keep developing cost low while being adaptive to individual aspects of the user’s company and industry the framework is based upon the componentware [3] approach. The basic idea of componentware is to mask individual functions in components that can work independently, or as part of a larger application. Like a construction set the different functions of SCM are being encapsulated in such modules. They can then be arranged and combined flexibly in order to implement only the functionality the user really needs [4]. In prior projects a prototype of a manufacturing resource planning (MRP II) system called CW-PPS (Componentware – Production Planning and Scheduling, see [5] and [6]) using the componentware approach was developed and implemented for several SME in different industries [7]. The success of this concept led us to the idea to extend the focus from MRP II to SCM, and to work on a prototype of CW-SCM (Componentware – Supply Chain Management). In autumn 2000, we published a report explaining the general architecture and defining first core components of CW-SCM [8]. Single prototypes of these modules are being developed one of which is the ATP component.
In the following we discuss the concept of ATP. Then, the software framework of CW-SCM and the information exchange between participating companies are described. Subsequently, the prototype of the ATP software is explained and an exemplary usage scenario given. Before terminating with an outlook for perspective developments of CW-SCM, the technologies used for implementation are discussed.
2 Available-to-Promise
A possible and often referred to definition of ATP can be found in the APICS dictionary. It defines ATP as “… the uncommitted portion of a company’s inventory and planned production, maintained in the master schedule to support customer order promising. The ATP quantity is the uncommitted inventory balance in the first period and is normally calculated for each period in which a master production schedule (MPS) receipt is scheduled. In the first period, ATP includes on-hand inventory less customer orders that are due and overdue” [9]. This rather narrow definition reduces ATP to a calculable quantity. Nevertheless, the aim of ATP is to determine if an incoming order can be promised for a specific delivery date. Since the volume of units available depends on the method of calculation used, the three basic ways of finding the right ATP are described in the following (see [10]).
2.1.1 Discrete ATP
The main step in calculating the discrete ATP quantity is to subtract promised deliveries for the period the ATP is calculated for, and for all following periods for which no master production quantity has been scheduled from the MPS. The MPS, thereby, specifies the items the company anticipates manufacturing each period. In addition to this step, two exceptions have to be considered. In the first period calculated, a possible beginning inventory has to be added to the ATP. The other exception occurs if no MPS is planned for a period. In this case the ATP is always zero. By computing the discrete ATP no forecast data is taken into account.
2.1.2 Cumulative ATP without lookahead
The cumulative ATP without lookahead is calculated as the sum of the ATP quantity of the preceding period and the MPS, minus the promised deliveries for the period under consideration. The difference between this method and the discrete ATP method is that the ATP can include quantities already included in the ATP of other periods.
2.1.3 Cumulative ATP with lookahead
The cumulative ATP with lookahead overcomes the drawback of the cumulative ATP without lookahead where items produced in one period, but promised for delivery in a future period are excluded from the ATP quantities of other periods. Therefore, the algorithm to compute the cumulative ATP with lookahead can be described as follows [10]:
\[
\text{ATP}_j = \text{ATP}_{j-1} + \text{MPS}_j - B_j - \sum_{i > j} \left( B_i - \text{MPS}_i \right),
\]
where the lookahead sum runs over the following periods $i$ only until the cumulative MPS again exceeds the cumulative promised orders, i.e. until $\sum_i \text{MPS}_i > \sum_i B_i$, and $j > 1$.
In this equation, $B_j$ denotes the promised, but not yet shipped, orders from customers in period $j$. Table 1 illustrates the three discussed methods for calculating the ATP.
<table>
<thead>
<tr><th>Period</th><th>1</th><th>2</th><th>3</th><th>4</th><th>5</th></tr>
</thead>
<tbody>
<tr><td>Beginning Inventory</td><td>10</td><td></td><td></td><td></td><td></td></tr>
<tr><td>MPS</td><td></td><td>169</td><td>169</td><td>22</td><td>0</td></tr>
<tr><td>B</td><td></td><td>110</td><td>80</td><td>5</td><td>15</td></tr>
<tr><td>Discrete ATP</td><td>10</td><td>69</td><td>89</td><td>2</td><td>0</td></tr>
<tr><td>Cumulative ATP without lookahead</td><td>10</td><td>69</td><td>158</td><td>175</td><td>160</td></tr>
<tr><td>Cumulative ATP with lookahead</td><td>10</td><td>69</td><td>158</td><td>160</td><td>160</td></tr>
</tbody>
</table>
Table 1. Calculation methods for ATP [10]
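As a hedged illustration of the two cumulative variants, the following sketch uses the period data of Table 1 (period 1 carries only the beginning inventory); the exact boundary conventions follow [10], and this is only one plausible reading of them:

```python
def cumulative_atp(mps, booked, inventory):
    """Cumulative ATP without lookahead: running balance of MPS minus promised orders."""
    atp, balance = [], inventory
    for m, b in zip(mps, booked):
        balance += m - b
        atp.append(balance)
    return atp

def cumulative_atp_lookahead(mps, booked, inventory):
    """Cumulative ATP with lookahead: promised orders of later periods without an
    MPS receipt are already charged against the current period's supply."""
    n, atp = len(mps), []
    for t in range(n):
        horizon = t
        while horizon + 1 < n and mps[horizon + 1] == 0:
            horizon += 1                     # demand of MPS-free periods consumes period t
        atp.append(inventory + sum(mps[:t + 1]) - sum(booked[:horizon + 1]))
    return atp

mps    = [0, 169, 169, 22, 0]                # periods 1..5 as in Table 1
booked = [0, 110,  80,  5, 15]
print(cumulative_atp(mps, booked, inventory=10))            # [10, 69, 158, 175, 160]
print(cumulative_atp_lookahead(mps, booked, inventory=10))  # [10, 69, 158, 160, 160]
```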
In the course of the development of SCM practices throughout the last years it seems reasonable to broaden the definition of ATP from being a strictly numerical value. In our comprehension ATP is not limited to the computation of available quantities. It also includes the ability to provide information about possible delivery dates at any time, as well as the fast communication of possible delays during production or shipment. Therefore, an event driven technology is required, which considers all partners along the SC with the ability to provide information about delivery dates, bottlenecks, or shipments virtually in real time. In this respect Buxmann and Koenig describe ATP as a “… functionality for multi-level checks of the resource and product availability, which permits an integrated, up-to-date availability test along a SC. The ATP can be used to determine a delivery date or the effects of a desired date while making use of company internal and external areas and at the same time taking account of costs, such as additional transport costs” [11].
3 CW-SCM a framework for SCM
3.1 Preliminary considerations
In designing a suitable framework to enable information sharing throughout the SC, several different aspects must be considered. First of all, we decided that the framework should follow a distributed approach in contrast to the commonly used central approach of standard software for SCM [12]. The central and the distributed approaches represent two different ways in which information among partners of a SC can be shared and calculations can be done. The difference between those two lies in the location of data storage and how calculations take place. The basic concept of the central approach is to extract all relevant data from the enterprise resource planning (ERP) systems of connected companies. This data is then stored in a central database, or even kept in memory, and the SCM software does all the calculations. SAP’s and i2’s SCM products are examples that utilize this approach. While this concept allows very fast calculations, especially through the use of technologies like SAP’s ‘liveCache’ [13], and of sophisticated planning methods, it is not really suitable for SME. The central approach does not only imply the use of powerful and therefore expensive hardware and software, but it also requires the participating companies to set up and maintain a common database. In the distributed approach, however, every company uses its own ERP system to perform calculations and planning tasks. A server coordinates the communication between the partners of the SC and acts as a central hub. Figure 1 illustrates these two basic concepts.

Naturally, the results of the distributed approach can not equal the ones of the centralized approach. In a way they are even contradictory to the goal of SCM of finding a global optimum for all partners since the participating companies still aim for their local optima. Yet, the companies will have higher profits than before because the information their local systems calculate with is more up-to-date and precise. The use of the distributed approach also has several advantages over the central approach. First, it is not necessary to buy and maintain the respective hardware and software for a complex SCM system or to set up a common database for the SC. It is also favorable that each company uses its own ERP system to perform calculations because they then only have to share the results of calculations and therefore do not need to disclose sensitive, internal information. Companies can thus control the amount of data they divulge. For these reasons, the CW-SCM framework relies on a distributed architecture.
The time horizon in which information is exchanged between different applications has also a major impact on the architecture. Three different concepts can be distinguished: real-time, fixed interval updating, and transaction-based. Real-time systems always perform calculations using the most recent data. This is without doubt the preferable solution, as it provides the fastest and most accurate results. Unfortunately, it is not feasible for SME because of the expensive hardware and software that is required. Another approach would be to exchange information only at fixed intervals, e.g. every two hours or once a day. While a daily exchange of information might be sufficient for certain applications, it is not suitable for availability checks. Neither the required accuracy of data, nor the goal of providing fast answers can be accomplished with this concept. The best compromise in respect of costs and speed can be seen in the transaction-based approach. Every time an ATP check needs to be performed, the necessary information is exchanged and the calculation takes place. Therefore our framework is transaction-based.
The integration of different information systems such as MRP II, ERP or legacy systems along the SC is the third key issue in forming a suitable framework for SME. Granted that individual enterprises often apply diverse information systems it is essential to create an environment which is open to any of them. Therefore CW-SCM shares information between the business partners through messages. These rely on the extensible Markup Language (XML) [14]. XML is platform-independent and offers the opportunity to exchange data in well-structured documents which are readable for any recipient regardless of the software he uses (see also section 5.2).
The architecture of CW-SCM is based on a client/server concept. CW-SCM consists of a server and several clients installed with each participant. The clients operate as the system’s frontend. They send the necessary information to the server. The services provided by the CW-SCM server include authentication, sending, and receiving of information, storing, and routing of documents. For a reliable connection between the clients and the server, which is independent of the location, the transmission control protocol / Internet protocol (TCP/IP) is used. The CW-SCM client/server software provides the necessary framework for the components of CW-SCM. Its main purpose is to enable communication throughout the SC. The CW-SCM server can be seen as a central hub. Each registered partner of the SC connects through the CW-SCM client to the server. Each time a successful connection to the server is established, the client identifies itself with a username and password. Once a client is authenticated, the server software and the connected clients can start to send and receive information. To ensure a uniform and flexible format of the information, XML is used to structure the data. After the server receives an XML document, it stores it and routes it to one or several clients. The CW-SCM clients receive and store these documents and hand them to the appropriate component for further processing. The CW-SCM clients therefore act as an interface between the installed components of the CW-SCM system, e.g. the ATP component, and the server. Figure 2 illustrates this concept.
**Figure 2. Overview of the CW-SCM architecture**
The database shown in Figure 2 is used in several ways. Beyond the purpose of authentication, it holds general data, like the name or address of each registered partner of the SC used for the routing process. The database also stores component specific data. For the ATP component, for example, these are tables that allow the correlation of products with vendors.
### 3.2 Information exchange via XML documents
#### 3.2.1 General parts of CW-SCM XML documents
To ensure the processing of XML documents from different components without failure, certain tags are required for all documents regardless of the component they originate from. This not only ensures the integrity of the documents, but also allows the routing of documents to their associated components and the storage according to their types.
```xml
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!DOCTYPE CW_SCM SYSTEM "http://localhost:8080/inquiry.dtd">
<CW_SCM>
<COMPONENT>ATP</COMPONENT>
<DOC_TYPE>inquiry</DOC_TYPE>
<SENDER>
<ID>123456</ID>
<NAMING>Company ABC</NAMING>
<CONTACT>Mr. Miller</CONTACT>
<DATE_SENT>2024-01-01</DATE_SENT>
</SENDER>
<RECIPIENT/>
...
</CW_SCM>
```
**Figure 3. General parts of CW-SCM documents**
Figure 3 shows an example of the general parts of an XML document for CW-SCM.
The XML and document type definition (DTD) declarations, as well as the root element are mandatory parts to ensure that the document is well formed and valid. The COMPONENT and DOC_TYPE elements are also required. While the COMPONENT element specifies the CW-SCM component that the document is associated with, the DOC_TYPE tag specifies the purpose of the document within this component. The elements between the SENDER tags identify the generating company and also hold information about the date and time the document was sent. The information about the recipient of the document plays a special role, since it may or may not be empty. If data about a certain recipient is provided, the server simply routes the document to the specified company. An empty element tag indicates that the document is meant for one or more addressees. The handling of this instance is described in the next section.
#### 3.2.2 Routing of CW-SCM XML documents
The CW-SCM server accomplishes the routing of the documents. Therefore, it is necessary that the server keeps track of the connected companies and the IP addresses associated with them. Since all SME are not necessarily assigned a static IP address, e.g. in the case of a dial-up connection, this cannot be achieved with a static table. In fact, the server dynamically stores the IP address of each company only for the time it is connected. As described in the previous section there are two possible scenarios. In the first case, in which information about a recipient is provided in the document, the server can look up the IP address of the company and send the document to it. The other scenario can, for example, occur during an inquiry for an ATP check. Consider the case in which several suppliers are producing the same product for a customer. Rather than sending the same document with different recipients from the CW-SCM client to the server, the server queries the database to find all the companies that sell the specified product and subsequently sends the document to them. This method helps to reduce the traffic and allows the flexible integration of other companies, as only a single database needs to be maintained.
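A hypothetical sketch of the server-side routing decision, using Python's ElementTree; the element names follow the example in Figure 3, while PRODUCT_ID and the lookup and send helpers are assumptions made for illustration:

```python
# Route a received CW-SCM document either to the named recipient or to every
# supplier of the requested product (empty RECIPIENT tag).
import xml.etree.ElementTree as ET

def route(xml_text, lookup_suppliers, send_to):
    doc = ET.fromstring(xml_text)
    component = doc.findtext("COMPONENT")          # e.g. "ATP"
    recipient = doc.find("RECIPIENT")
    if recipient is not None and len(recipient):   # recipient data provided
        send_to(recipient.findtext("ID"), xml_text)
    else:                                          # empty tag: server picks the addressees
        product = doc.findtext(".//PRODUCT_ID")    # assumed element name
        for supplier_id in lookup_suppliers(component, product):
            send_to(supplier_id, xml_text)
```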
#### 3.2.3 Storage of the XML documents
The documents sent and received are stored on both the client and the server side. The storage on the side of the clients is necessary for further processing, while the server stores the documents to enable a later delivery in case a company is not connected and for archival purposes. The possibility to specify a certain amount of time after which documents on the server get deleted is conceivable. The logic of the storage process for the clients and the server is similar. Each installed component has a designated folder under each of the three main directories: sent, received, and unprocessed. Furthermore, every component can maintain several sub-directories in its folders to reflect different document types. A received document is first stored in the unprocessed folder. After the task it is associated with is accomplished, the document is moved to the received folder. Therefore, the unprocessed folders are temporary storage spaces and should not contain files over a longer period of time. One difference between the directory structure of the clients and the server is that the server can generate and maintain this structure for every member of the SC. In addition, the server can generate an unconnected directory for each client and component to temporarily store received documents if a client is not connected at that time. Clients check these folders each time they connect to the server.
Figure 4 illustrates the directory structure generated by the CW-SCM server. The described directory structure and the unique filename for each document ensure the integrity of the data.
4 The ATP component of CW-SCM
As one of the first modules for CW-SCM, a prototype of an ATP component is being developed. Because of the chosen architecture of the underlying framework an optimization of the ATP check, in the sense of an improvement of the ATP result, heavily depends on the used ERP software. Nevertheless, the component helps the planner to simplify and speed up the decision-making process. Therefore, the developed ATP component could also be seen as a decision-support tool. To suit a wide variety of companies, the way in which the component influences decisions or automates processes can be adjusted by different parameters.
4.1 Main window
Figure 5 shows the main window of the ATP component. The table in the middle of the window contains all received, but not yet processed documents. To display these documents, the component checks its unprocessed folder each time it is started, or after the CW-SCM client informs it that a new document has been received. Depending on the type of the document, the user can display or answer it, as well as move it to a different folder, or even delete it. After the user has answered an unprocessed document, it is automatically moved to the appropriate directory in the received folder. Other menus give the user the possibility of displaying the sent and received documents instead of the unprocessed view, to configure the parameters of the ATP component, or to create and send a new document.
Figure 5. Main window of the ATP component
The functionality of the component depends on whether a company sends or receives a document and on the specific type of the document. The following sections describe and classify the different possible documents for the ATP component and their functions.
4.2 XML documents for ATP
At present, there are eight XML documents available to cover the different aspects and stages of an ATP check.
<table>
<thead>
<tr>
<th>Doc_Type</th>
<th>From</th>
<th>To</th>
<th>Doc_Type Answer</th>
</tr>
</thead>
<tbody>
<tr>
<td>Inquiry</td>
<td>Customer</td>
<td>Supplier(s)</td>
<td>Answer Inquiry</td>
</tr>
<tr>
<td>Answer Inquiry</td>
<td>Supplier(s)</td>
<td>Customer</td>
<td>New Inquiry, Order, -</td>
</tr>
<tr>
<td>Order</td>
<td>Customer</td>
<td>Supplier</td>
<td>Answ. Confirmation</td>
</tr>
<tr>
<td>Confirmation</td>
<td>Supplier</td>
<td>Customer</td>
<td>-</td>
</tr>
<tr>
<td>On Time</td>
<td>Customer</td>
<td>Supplier</td>
<td>Answer On Time</td>
</tr>
<tr>
<td>Answer On Time</td>
<td>Supplier</td>
<td>Customer</td>
<td>-</td>
</tr>
<tr>
<td>Shipping</td>
<td>Supplier</td>
<td>Customer</td>
<td>-</td>
</tr>
<tr>
<td>Delay</td>
<td>Supplier</td>
<td>Customer</td>
<td>-</td>
</tr>
</tbody>
</table>
Table 2. ATP documents within CW-SCM
The type of document thereby specifies the function of the sending and receiving company and also whether an answer might be expected or not. Table 2 gives an overview of the existing documents for the ATP component
and classifies them according to the relationship between the involved partners of the SC.
4.2.1 Inquiry
With the inquiry document, a potential buyer has the possibility to initiate an availability check and to request information about order terms and conditions. The customer specifies the desired product and quantity, as well as the price the company is willing to pay, the currency, and the desired delivery date. The customer can also omit information about the supplier. In this case the CW-SCM server looks up suppliers that sell the desired product and routes the inquiry document to them. Once a company has received the inquiry, it can perform an ATP check accordingly and generate an answer inquiry document.
4.2.2 Answer inquiry
The answer inquiry document contains information about the results of an ATP check. It specifies the conditions under which a supplier is willing to accept an order. The provided information can differ from the data of the inquiry document. The supplier is able to offer a substitute, change the available quantity, specify another possible delivery date, or change the requested price. Furthermore, the document contains the shipping costs and the total offer. To avoid misunderstandings among the partners of the SC, the involved companies have to agree upon the terms of delivery before they use the CW-SCM system, for example upon the transfer of perils. This is often accomplished by the use of Incoterms [15], such as free on board, or delivered ex ship, even if the shipment is not international.
4.2.3 Order
This document is used to place a new order. It is assumed that all conditions, like delivery date, available quantity, or costs were already negotiated between the involved companies. An order document contains the product number and description, the costs for the product and shipment, the currency, and the desired delivery date.
4.2.4 Confirmation
The confirmation document is used to verify and confirm the information of an arrived order. The document repeats the details of the order and must contain an order number that the supplier assigns. This not only makes it possible for the customer to ensure that the order was transmitted and received correctly, but also enables both companies to use the order number for further reference.
4.2.5 On time
The on time document provides customers with the capability of requesting a status report about an order already placed. This enables the customer to initiate a check rather than wait for a delay notice from the supplier. The only necessary information is the order number, but the customer can also provide the ordered quantity and the desired delivery date.
4.2.6 Answer on time
This document represents the answer to an on time document and gives information about the status of an order. The document contains the order number and the details of the order, such as the original quantity and delivery date. Furthermore, it states the estimated delivery date and the estimated quantity for that date. In the event of a partial delivery, the document contains the estimated delivery dates and quantities for every planned shipment. While the order number would be sufficient to provide the answer, the repetition of the order details enable the ATP component to simply compare the data in the document rather than searching and loading the associated on time document.
4.2.7 Shipping
The XML document with the DOC_TYPE shipping can be used to inform a customer that its order has been shipped. It provides data about the quantity ordered and shipped. It also contains the desired delivery date and the actual shipping date, as well as a tracking number.
4.2.8 Delay
The delay document informs a customer about an unexpected delay during the processing of the order, i.e. a backlog. The document contains information about the initial number of units the customer wanted. It can also hold information about the date and quantity of a possible partial delivery. Furthermore, it contains a new estimated delivery date for the rest of the order.
4.3 Possible parameters for the ATP component
Besides the already discussed documents, covering the different aspects and stages of an ATP check, the planner can specify some parameters to adjust the software according to the needs and policy of each company. The ATP component thereby offers the possibility to set parameters either globally or locally. The global parameters can be specified from the main window of the component in the configuration menu (see (4) in Figure 5). Normally, these parameters are valid for every transaction within the ATP component, but the software also offers the possibility of adjusting the parameters for each document. These local parameters overwrite the global settings only for a single transaction. This gives the user the opportunity to adjust the parameters to fit most transactions, while still allowing the company to adapt to exceptions.
4.3.1 Timeout
The timeout parameter influences the time needed to find a result of the ATP check. The basic idea of this parameter is that the user can specify a certain time for transactions.
The functionality thereby depends on the type of document. For inquiry documents, the timeout specifies the time range in which the sender accepts answers. After a company has sent a new inquiry document to the server, the ATP component accepts answers until the specified timeout is reached. Answers received after the deadline will not be taken into account. Suppliers use the timeout parameter for the answer inquiry documents. It gives companies the possibility to limit the duration of reservations. After the specified timeout is reached, the company can free products and resources reserved for a potential order, so that they are available for other ATP checks.
4.3.2 Substitution
The CW-SCM ATP component offers two different parameters regarding a possible product substitution. First, the company can specify whether the system should accept answers to inquiry documents in situations where a supplier offers a substitute. The second set of parameters defines the way in which the software reacts if the suppliers cannot satisfy an initial inquiry.
The user can specify possible substitutes for each product. The ATP component then automatically generates and sends new inquiry documents for all possible substitutes. The software thereby automatically disables the processing of A, B, or C parts (see next section), since it is possible that the suppliers offer several different substitutes for a potential order. For specific products, where a substitution is not possible, the software can start a new inquiry process to find out a possible delivery date.
4.3.3 Processing of A, B, and C parts
To simplify the order process, the ATP component offers functions to classify the different products according to their value. While category A reflects the most expensive parts or products, category C represents the least expensive items, e.g. screws or small plastic parts. Category B contains products with prices between the two other categories [16]. The user has the possibility of assigning each product to one of these categories, and to define processing rules for them. If the parameter for a category is set to automated processing, the ATP component generates and sends orders for products of this category automatically, provided the price, date, and volume of the offer match the initial inquiry. Where price and date match and the desired quantity can be reached by combining several offers, the software also generates an order automatically. Otherwise the ATP component generates and displays a suggestion and waits for a decision by the user. When a new document is created, the software shows the defined parameters to enable the user to change the global values for specific transactions.
4.3.4 Prioritization of suppliers
Under certain circumstances it may be desirable for a company to express a preference for one supplier over another, even if this appears to be the more expensive solution. This may be the case if a customer has agreed to buy a certain quantity of products from a supplier over the year. The user can therefore specify a chain of preferred suppliers for each product and define a threshold for each of them. The threshold is a percentage value indicating how much higher the preferred supplier's price per unit may be, compared to other offers, before the prioritization is dismissed for a transaction. The user can also temporarily disable the prioritization when creating a new inquiry document.
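The threshold rule can be read as a simple price comparison, sketched below with hypothetical names. For example, with a 5% threshold, a preferred offer of 24.90 USD against a best competing offer of 24.00 USD would still be accepted (limit 25.20 USD).

```java
public class SupplierPrioritization {
    /**
     * Keep the preferred supplier as long as its unit price exceeds the best
     * competing offer by no more than the configured percentage threshold.
     */
    public boolean keepPreferred(double preferredUnitPrice,
                                 double bestCompetingUnitPrice,
                                 double thresholdPercent) {
        double allowedMaximum = bestCompetingUnitPrice * (1.0 + thresholdPercent / 100.0);
        return preferredUnitPrice <= allowedMaximum;
    }
}
```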
4.4 Example
To clarify the functionality of the CW-SCM ATP component and the influence of the described parameters, an example is illustrated in Figure 6. The documents shown here only contain a small portion of the information that could be found in the actual XML documents. In this scenario customer A creates a new inquiry (1) for 400 units of product 48F5. The company needs the supply on or before 2001-06-04 and is willing to pay 24.00 USD per unit. As mentioned earlier, the company can set the different parameters for this specific document only. Otherwise, the software assumes the globally defined values for the parameters.
The document created is then sent to the CW-SCM server (2) and the timeout starts. Since the inquiry document does not contain data about a certain receiver, the server looks up the possible suppliers of the desired product in its database (3). After gathering the necessary information, the server sends copies of the inquiry document to the three possible suppliers B, C, and D (4). At the suppliers' side, the CW-SCM clients receive the documents and hand them to the ATP components, which display the inquiry document in the unprocessed table view. The suppliers then feed the provided information into their local ERP systems to perform an ATP check (5). At this stage of development of the ATP prototype, the process of entering the data from the inquiry document into the local planning system and the triggering of the ATP check are performed manually.
After the local ERP systems have returned their results, the planners generate the answer inquiry documents (6). In this example, supplier B is able to deliver 300 units of the desired product for the specified price. Supplier D can beat the price and meet the date customer A asked for, but the company is only able to deliver 280 units. As shown in Figure 6, supplier C does not provide an answer. This can have several reasons: the supplier may not be interested in the potential order, may simply be unable to deliver the desired product before the given date, or may not be connected to the CW-SCM system at the time. In the next step the suppliers send their answer inquiry documents back to the server (7), which forwards them to the CW-SCM client of customer A (8). After the timeout is reached, the ATP component checks whether and how many answers to the initial inquiry were received. In our case, the ATP component finds the answers of suppliers B and D. It first checks whether one of the received answers meets all criteria. As this is not the case, the software then tries to combine the answers from the suppliers to match the initial inquiry. Under the assumption that customer A has not specified any prioritizations, the ATP component would suggest an order of 280 units from company D and the remaining 120 units from company B, since this is the least expensive solution. In the final step, the component generates new order documents for suppliers B (9) and D (10). If the component is configured for automated processing, the software sends the documents to the server. Otherwise, the ATP component waits for the decision of the user.
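The combination step can be pictured as a least-cost allocation over the received offers, filling the requested quantity from the cheapest unit price first. The sketch below mirrors the example; the unit price assumed for supplier D is an illustration only, since the text states merely that D beats the requested price.

```java
import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

class Offer {
    final String supplier;
    final int quantity;
    final double unitPrice;
    Offer(String supplier, int quantity, double unitPrice) {
        this.supplier = supplier;
        this.quantity = quantity;
        this.unitPrice = unitPrice;
    }
}

public class OfferCombiner {
    /** Greedy least-cost allocation of the requested quantity over the received offers. */
    static void suggestOrder(List<Offer> offers, int requested) {
        offers.sort(Comparator.comparingDouble(o -> o.unitPrice));
        int remaining = requested;
        for (Offer offer : offers) {
            if (remaining <= 0) break;
            int take = Math.min(offer.quantity, remaining);
            System.out.println(offer.supplier + ": " + take + " units at " + offer.unitPrice);
            remaining -= take;
        }
        if (remaining > 0) {
            System.out.println("Inquiry cannot be satisfied; " + remaining + " units missing");
        }
    }

    public static void main(String[] args) {
        List<Offer> offers = new ArrayList<Offer>();
        offers.add(new Offer("B", 300, 24.00));
        offers.add(new Offer("D", 280, 23.50)); // assumed price for illustration
        suggestOrder(offers, 400);              // prints D: 280 units, then B: 120 units
    }
}
```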
4.5 Further extensions
4.5.1 Automated execution
The transfer of data from the ATP component into the local planning systems of the SC partners and vice versa is, at the moment, done manually. Not only is this a possible source of error, it is also a drawback with regard to the time it takes to perform an availability check. The implementation of interfaces between the ATP module and different MRP systems therefore has high priority in the next stage of development. These adapters should automate the transfer of the necessary data between the ATP component and the planning systems and trigger the appropriate functions, so that human intervention is no longer needed.
4.5.2 Connection to external marketplaces
In order to stay abreast of changes in the development of virtual marketplaces and private exchanges, another possible extension to the CW-SCM system would be the ability to connect the software to these trading platforms. In doing so, the companies of the SC that use the CW-SCM application can appear as a virtual enterprise (see e.g. [17]), which gives them the opportunity to present their products to a broader audience. This requires interfaces that extract and convert the data and coordinate the information flows between CW-SCM and external marketplaces. Existing tools can simplify the mapping of the required information between the CW-SCM format and the formats of other systems. SAP, for example, offers the SAP Business Connector, which provides functionality for automatic conversion of data from an XML-based format into the SAP format and vice versa [18].
4.5.3 Security
Another important aspect of possible extensions of the prototype is the security of the software and the data. Since the partners in the SC transmit critical information via CW-SCM, it is important that this data is protected, not only against loss, but also against the eyes of others, especially competitors. At present the application uses unsecured Internet connections to send and receive the various documents. To overcome this vulnerability, the software should use secure, encrypted communication channels. This can be realized with the Java Secure Socket Extension (JSSE) package, which includes classes and methods implementing Java versions of the secure sockets layer (SSL) and transport layer security (TLS) protocols, as well as data encryption and client authentication [19].
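A minimal sketch of opening a TLS-protected client connection with JSSE is shown below; the host name and port are placeholders, and certificate and trust-store handling are omitted.

```java
import java.io.OutputStreamWriter;
import java.io.Writer;
import javax.net.ssl.SSLSocket;
import javax.net.ssl.SSLSocketFactory;

public class SecureClient {
    public static void main(String[] args) throws Exception {
        SSLSocketFactory factory = (SSLSocketFactory) SSLSocketFactory.getDefault();
        try (SSLSocket socket = (SSLSocket) factory.createSocket("cwscm.example.org", 8443)) {
            socket.startHandshake(); // negotiate SSL/TLS before any document is transmitted
            Writer out = new OutputStreamWriter(socket.getOutputStream(), "UTF-8");
            out.write("<inquiry>...</inquiry>\n"); // the XML document would go here
            out.flush();
        }
    }
}
```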

4.6 System requirements
Since one goal of the CW-SCM project is to develop a prototype that fits into the heterogeneous infrastructure and the limited IT resources of SME, there are no special hardware requirements for the system. The computer running the CW-SCM server needs a permanent connection to the Internet. As already mentioned, the server offers temporary storage of documents for clients that are not connected, for later delivery. A constant Internet connection is recommended for all clients to achieve fast responses, but it is not necessary for the system to work properly.
5 Technologies used
5.1 Java
For the implementation of the prototype, the Java programming language was used. The most important advantage Java offers over alternatives such as C++ is its platform independence. The software necessary to run the CW-SCM application comprises the Java 2 runtime environment (JRE), Standard Edition, in version 1.3.0, the Java API for XML processing (JAXP) package in version 1.0.1, and the Xalan XSLT processor version 1.2. All these programs are available for most operating systems and can be freely downloaded from the Internet.
5.2 XML
Instead of sending information between the partners of the SC in plain text format, the software uses the XML standard. Structuring information with tags may seem excessive, since the documents then carry a redundant overhead of data, but the use of XML offers several advantages that justify this approach. First of all, it is simple for the software to check whether a document contains all necessary data: the application can not only check whether a document is well formed; by using a DTD, it can also test the validity of a document. Another advantage of XML is that data is separated from formatting instructions. With a cascading style sheet (CSS) or extensible style sheet language transformations (XSLT), a document can be transformed into and represented in a wide variety of formats, e.g. hypertext markup language (HTML) or portable document format (PDF), without recoding. Finally, XML adopted Unicode, a character encoding standard that supports most languages by providing a unique number for every character [20]. This, and the fact that XML is a very strict standard, ensures a platform- and software-independent way to communicate, no matter the language.
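A sketch of how an incoming document could be validated against its DTD with the JAXP package is given below; the file name is a placeholder.

```java
import java.io.File;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import org.w3c.dom.Document;
import org.xml.sax.ErrorHandler;
import org.xml.sax.SAXParseException;

public class DocumentCheck {
    public static void main(String[] args) throws Exception {
        DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
        factory.setValidating(true); // validate against the DTD referenced by the document
        DocumentBuilder builder = factory.newDocumentBuilder();
        builder.setErrorHandler(new ErrorHandler() {
            public void warning(SAXParseException e) { System.err.println("warning: " + e.getMessage()); }
            public void error(SAXParseException e) throws SAXParseException { throw e; }
            public void fatalError(SAXParseException e) throws SAXParseException { throw e; }
        });
        Document document = builder.parse(new File("inquiry.xml"));
        System.out.println("Root element: " + document.getDocumentElement().getTagName());
    }
}
```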
5.3 Databases
Microsoft Access 2000 is used to store information about the companies of the SC and their products, on both the client and the server side. The software executes common structured query language (SQL) statements to communicate with the databases. The connections are established using the Java database connectivity - open database connectivity (JDBC-ODBC) bridge, a driver that translates JDBC calls into ODBC-conformant calls [21]. This ensures platform and software independence, since users can install any ODBC-compliant database without changing the code and recompiling the software.
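The sketch below shows what such a query over the JDBC-ODBC bridge might look like; the data source name, table, and column names are placeholders, and the bridge driver is only available in older Java runtimes (it was removed in Java 8).

```java
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class ProductLookup {
    public static void main(String[] args) throws Exception {
        Class.forName("sun.jdbc.odbc.JdbcOdbcDriver"); // bridge driver shipped with older JREs
        try (Connection con = DriverManager.getConnection("jdbc:odbc:cwscm");
             Statement stmt = con.createStatement();
             ResultSet rs = stmt.executeQuery(
                     "SELECT supplier_id FROM suppliers WHERE product_id = '48F5'")) {
            while (rs.next()) {
                System.out.println(rs.getString("supplier_id"));
            }
        }
    }
}
```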
6 Conclusion
In this article we have shown that it is possible to design lean and flexible software solutions tailored to the special needs of SME. This can be achieved by implementing the componentware approach. The ATP component, one of the first modules of CW-SCM, has been developed to make availability checks for SME possible and to simplify and shorten the process of inquiring and ordering. The goal of this component is to present a cost-effective alternative with limited functionality to highly sophisticated and expensive existing solutions. Yet the above-mentioned extensions, especially the integration of CW-SCM with different ERP systems, must soon be approached in order to obtain a competitive ATP logic.
In order to prove the practical relevance of our approach we are negotiating with several pilot enterprises willing to use CW-SCM in day-to-day business. Only by implementing CW-SCM in different industries can we find out whether it provides satisfactory results and is able to compete with the products of other software vendors.
Regarding the progress of developing more components for the CW-SCM framework, we are working on further prototypes simultaneously. In addition to ATP, a module for vendor-managed inventory (VMI) (see [22]) has also been developed, which allows the users of CW-SCM to intensify their relationships with customers and suppliers. Other components for monitoring and controlling, e-procurement, and the configuration of an SC are currently being developed.
7 References
Observably Deterministic Concurrent Strategies and Intensional Full Abstraction for Parallel-or*
Simon Castellan¹, Pierre Clairambault¹, and Glynn Winskel²
1 Univ Lyon, CNRS, ENS de Lyon, UCB Lyon 1, LIP
first.last@ens-lyon.fr
2 Computer Laboratory, University of Cambridge
Glynn.Winskel@cl.cam.ac.uk
Abstract
Although Plotkin's parallel-or is inherently deterministic, it has a non-deterministic interpretation in games based on (prime) event structures – in which an event has a unique causal history – because they do not directly support disjunctive causality. General event structures can express disjunctive causality and have a more permissive notion of determinism, but do not support hiding. We show that (structures equivalent to) deterministic general event structures do support hiding, and construct a new category of games based on them with a deterministic interpretation of aPCF_{por}, an affine variant of PCF extended with parallel-or. We then exploit this deterministic interpretation to give a relaxed notion of determinism (observable determinism) on the plain event structures model. Putting this together with our previously introduced concurrent notions of well-bracketing and innocence, we obtain an intensionally fully abstract model of aPCF_{por}.
1998 ACM Subject Classification F.3.2 Semantics of Programming Languages
Keywords and phrases Game semantics, parallel-or, concurrent games, event structures, full abstraction
1 Introduction
Plotkin's parallel-or is, in a sense, the most well-understood of programming language primitives. Recall that it is a primitive por : B → B → B defined by the three equations:
\[
\mathsf{por}\ \mathtt{tt}\ \bot = \mathtt{tt} \qquad \mathsf{por}\ \bot\ \mathtt{tt} = \mathtt{tt} \qquad \mathsf{por}\ \mathtt{ff}\ \mathtt{ff} = \mathtt{ff}
\]
As Plotkin famously proved [16], it is the primitive to be added to the paradigmatic purely functional higher-order programming language PCF in order to get a perfect match (full abstraction) with respect to Scott domains. Since Plotkin's result, full abstraction has been the gold standard for semanticists. Indeed, whereas an adequate model is always sound to reason about program equivalence; a fully abstract model is also complete: two programs are observationally equivalent if and only if they have the same denotation. Though since Plotkin's paper fully abstract models of various programming languages have been proposed (often through game semantics, with e.g. state [3, 1], control [5, 14], concurrency [11], ...); they are usually quite a bit more complicated than plain Scott domains. Parallel-or is well-understood, in that it has a simple input-output (extensional) behaviour, and its presence in PCF reduces observational equivalence to input-output behaviour.
* This work was partially supported by French LABEX MILYON (ANR-10-LABX-0070), and by the ERC Advanced Grant ECSYM.
But is it really so well-understood? While parallel-or is simple extensionally, how is it best understood intensionally? What is the operational behaviour of \( \text{por} \, M, N \)? Any programmer will sense the subtleties in implementing such a primitive. It immediately spawns two threads, starting evaluating \( M \) and \( N \) in parallel. If both terminate with \( \text{ff} \), it terminates with \( \text{ff} \) as well after both have finished. But it suffices that one of the threads returns \( \text{tt} \), for parallel-or to return \( \text{tt} \) immediately, discarding the other. If both return \( \text{tt} \) then one of the threads will be discarded, but which one depends on the scheduler. From an input-output perspective it does not matter which one is selected as they race to produce the same result, but the race nonetheless happens from an operational viewpoint; and a sufficiently intensional semantics will show it. Such behaviour is useful in practice, for instance to speed up deciding the existence of a “good” branch in a search tree by spawning a thread for each branch to explore.
From a game semantics perspective, understanding this combination of racy concurrency and deterministic extensional behaviour has been an unexpected challenge. In earlier work, we dealt with deterministic parallelism using games and strategies based on event structures [17], showing an intensional full abstraction result for a parallel implementation of \( \text{PCF} \) [8]. We also showed how our tools supported shared memory concurrency [6]. Given that, it is no surprise that the racy behaviour mentioned above can be represented with event structures, yielding an adequate model of e.g. \( \text{PCF} \) plus parallel-or. But game semantics based on event structures is causal: each event comes with a unique causal history. Dependency is conjunctive: an event can only occur after all its dependencies. This feature is key for the notion of concurrent innocence leading to our definability result [8] and in fact for the very construction of our basic category of games [7]. This has the unfortunate consequence of forcing the strategy for parallel-or to be non-deterministic: the race in the operational behaviour of \( \text{por} \, \mathtt{tt} \, \mathtt{tt} \) yields a choice between two events competing to return \( \mathtt{tt} \), each depending on one argument of \( \text{por} \). But interpreting \( \text{por} \) through non-determinism really is a workaround, as appears through the resulting failure of full abstraction.
Constructing a deterministic intensional model for parallel-or has proved demanding. Plays-based models [11] fail as they inline the non-deterministic choice of the scheduler, and cannot express even pure parallelism deterministically. We saw above that plain event structure games do not work either. Giving a deterministic model for parallel-or involves modifying the latter to allow disjunctive causality: a given event may have several distinct causal histories, and an event occurrence does not carry information on its specific causal history. Alternatives to event structures allowing this have existed for a long time: general event structures [19]. However, we will see that they do not, in general, support hiding – which is required to build a category of strategies. We will show that they do support it modulo some further conditions, making them adequate to model parallel-or deterministically.
Beyond the historical twist of giving a fully abstract games model for a language with parallel-or (as game semantics was originally driven by the full abstraction problem for \( \text{PCF} \) without parallel-or [13, 2]), a treatment of disjunctive causality is indispensable in a complete game semantics framework for concurrency. The following example illustrates how mundane and widespread it is: the causal history of a packet arriving from the network is simply its route. Clearly, introducing an event for each route is best avoided if possible – especially since all further events depending on it will be hereditarily duplicated as well. Deterministic models for disjunctive causality serve two purposes: they allow for (1) a more general, less intensional notion of determinism, and (2) a coarser equivalence relation between strategies that abstracts away benign races. These issues, that we address in this paper, have become recurrent obstacles in our research programme; and the historical importance of parallel-or in semantics made it the perfect candidate to test and showcase the solution.
Contributions. Concretely, we build an intensionally fully abstract games model of \( \text{aPCF}_{\text{por}} \), an affine version of PCF with parallel-or. Our tools are designed with the extension in the presence of symmetry [8] to full PCF\(_{\text{por}}\) in mind; but presenting them in an affine case allows us to focus on the issues pertaining to parallel-or and disjunctive causality, orthogonal to symmetry. Presenting everything at once in conference format is not reasonable.
Full abstraction for \( \text{aPCF}_{\text{por}} \) has two facets: on the one hand, we need to import the conditions of innocence and well-bracketing from [8], which rely on conjunctive causality. On the other hand, we need a disjunctive notion of determinism. Consequently our model will involve: (1) the standard category \( \text{CG} \) of concurrent games on event structures, and (2) the main novelty of our paper, a new category \( \text{Disj} \) supporting disjunctive determinism. Glueing the two together, we can import in \( \text{CG} \) the notion of determinism from \( \text{Disj} \), thereafter dubbed observational determinism, and prove intensional full abstraction.
Related work. Game semantics is a branch of denotational semantics. Originally driven by the full abstraction problem for PCF, it has grown in the past 25 years into a powerful and versatile methodology to construct compositionally intensional representations of program execution. This lead on the one hand to many full abstraction results (particularly striking in the presence of state as the fully abstract models are then effectively presentable [3, 1]), and on the other hand to applications, ranging from program analysis (model-checking or equivalence checking) to compilation and hardware synthesis [10].
Within game semantics, the present paper is part of a line of work on so-called “truly concurrent” game semantics first pushed by Melliès and colleagues [4, 15], and which advocates the use of causal structures such as event structures and asynchronous transition systems in interactive semantics of programming languages. This line of work has seen a lot of activity in the past five years, prompted in part by a new category of games and strategies on event structures generalizing all prior work, introduced by Rideau and the last author [17]; but also the presheaf-based framework for concurrent games by Hirschowitz and colleagues [12].
Finally, our account of disjunctive causality relies heavily on determinism for hiding to work. The third author and Marc de Visme have developed event structures with disjunctive causality (edc) [9], supporting both disjunctive causality and hiding in a non-deterministic setting. Links between these two approaches are being explored; at the very least, extra axioms would have to be imposed on edcs in order to mimic the work here.
Outline. In Section 2 we introduce \( \text{aPCF}_{\text{por}} \) and give its (non-deterministic) event structure games model \( \text{CG} \). In Section 3 we introduce the deterministic model \( \text{Disj} \) for parallel-or. Finally in Section 4 we glue the two, generalize the conditions of [8] and prove full abstraction.
(call-by-name) operational semantics, also standard, yield an evaluation relation $M \Downarrow v$ between closed terms and values (booleans or abstractions). We write $M \Downarrow$ when there is some $v$ such that $M \Downarrow v$, and $M \not\Downarrow$ otherwise. Two terms $\Gamma \vdash M, N : A$ are observationally equivalent ($M \equiv_{obs} N$) iff for all contexts $C[\_]$ such that $C[M], C[N]$ are closed terms of type $\mathbb{B}$, $C[M] \Downarrow \Leftrightarrow C[N] \Downarrow$. Recall that an interpretation $[-]$ in some model $\mathcal{M}$ is fully abstract if for all $M, N$, $M \equiv_{obs} N$ iff $\llbracket M \rrbracket = \llbracket N \rrbracket$ – it is intensionally fully abstract if $\mathcal{M}$ quotiented by the semantic equivalent of $\equiv_{obs}$ is fully abstract.
In [16], Plotkin proved that Scott domains are fully abstract not for PCF, but for its extension with the so-called parallel-or operation:
\[
\frac{M \Downarrow \mathtt{tt}}{\mathsf{por}\ M\ N \Downarrow \mathtt{tt}}
\qquad
\frac{N \Downarrow \mathtt{tt}}{\mathsf{por}\ M\ N \Downarrow \mathtt{tt}}
\qquad
\frac{M \Downarrow \mathtt{ff} \qquad N \Downarrow \mathtt{ff}}{\mathsf{por}\ M\ N \Downarrow \mathtt{ff}}
\]
The combinator por is not sequential: it is not the case that it evaluates one of its arguments first. In particular, $\mathsf{por}\ \bot\ \mathtt{tt} \Downarrow \mathtt{tt}$ and $\mathsf{por}\ \mathtt{tt}\ \bot \Downarrow \mathtt{tt}$.
### 2.2 Strategies as event structures
As usual in game semantics, computation is a dialogue between the program and its environment. The moves, or events, are either due to the program (Player, +) or its environment (Opponent, −). They are either variable calls (Questions, $Q$) or returns (Answers, $A$). Unlike traditional game semantics, in our line of work such dialogues are partially ordered.
Figure 1 presents two concurrent strategies, both playing on (the game for) $\mathbb{B}_1 \rightarrow \mathbb{B}_2 \rightarrow \mathbb{B}_3$, where subscripts are for disambiguation. The diagrams are read from top to bottom. The first event in both strategies is an Opponent question on $\mathbb{B}_3$, initiating the computation. Then Player (the program) starts evaluating in parallel its two arguments ($q_1$ and $q_2$). These may return $\mathtt{tt}$ or $\mathtt{ff}$, the wiggly line indicating that they cannot reply both. Depending on these, Player may eventually answer $q_3$. In both diagrams, answering $\mathtt{ff}$ requires both arguments to evaluate to $\mathtt{ff}$; however they differ as to the events that trigger an answer $\mathtt{tt}$. These diagrams will be made more formal later, but we invite the readers to examine them and convince themselves that the first diagram represents a parallel implementation of the left or (diverging if its first argument diverges), whereas the second diagram represents parallel or.
**Event structures.** Such diagrams are formalized as event structures. We use here event structures with binary conflict, whereas those of [17, 7] have a more general set of consistent sets. Binary conflict is sufficient for our purposes, and preserved by all operations we need.
**Definition 1.** A (prime) event structure (with binary conflict, es for short) is $(E, \leq_E, \#_E)$ where $E$ is a set of events, $\leq_E$ is a partial order on $E$ called causality and $\#_E$ is an irreflexive symmetric binary relation called conflict, such that:
- every event has a finite causal history: $[e] = \{e' \in E \mid e' \leq_E e\}$ is finite for all $e \in E$;
- conflict is inherited along causality: if $e \,\#_E\, e'$ and $e' \leq_E e''$, then $e \,\#_E\, e''$.
We write $\mathcal{C}(E)$ for the configurations of $E$: the finite subsets of $E$ that are down-closed (for $\leq_E$) and conflict-free.
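To fix intuitions, the following small sketch encodes this definition directly; the representation (integer events, an explicit map of causes, an explicit set of conflict pairs) is ours and not taken from the paper, and the conflict relation is assumed to be already closed under inheritance.

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class EventStructure {
    // e -> all events strictly below e in the causal order
    private final Map<Integer, Set<Integer>> causes = new HashMap<>();
    // unordered conflict pairs, assumed closed under inheritance along causality
    private final Set<Set<Integer>> conflicts = new HashSet<>();

    public void addCause(int cause, int event) {
        causes.computeIfAbsent(event, k -> new HashSet<>()).add(cause);
    }

    public void addConflict(int e1, int e2) {
        Set<Integer> pair = new HashSet<>();
        pair.add(e1); pair.add(e2);
        conflicts.add(pair);
    }

    /** A configuration is a (finite) down-closed, conflict-free set of events. */
    public boolean isConfiguration(Set<Integer> x) {
        for (int e : x) {
            for (int c : causes.getOrDefault(e, new HashSet<>())) {
                if (!x.contains(c)) return false;       // not down-closed
            }
        }
        for (int e1 : x) {
            for (int e2 : x) {
                Set<Integer> pair = new HashSet<>();
                pair.add(e1); pair.add(e2);
                if (e1 != e2 && conflicts.contains(pair)) return false; // not conflict-free
            }
        }
        return true;
    }
}
```

For instance, the two conflicting positive answers of the parallel-or strategy of Figure 1 would be recorded with addConflict, so that no configuration contains them both.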
Definition 2. An arena is a triple \((A, \text{pol}_A, \lambda_A)\) where \(A\) is an event structure, \(\text{pol}_A : A \to \{-, +\}\) and \(\lambda_A : A \to \{\text{Q}, A\}\) are labelings for polarity and Questions/Answers, such that:
- The order \(\leq_A\) is forest-shaped: for \(a_1, a_2 \leq a \in A\), either \(a_1 \leq a_2\) or \(a_2 \leq a_1\).
- The relation \(\vdash_A\) is alternating: if \(a_1 \vdash a_2\), then \(\text{pol}_A(a_1) \neq \text{pol}_A(a_2)\).
- If \(\lambda_A(a_2) = A\), then there is \(a_1 \vdash a_2\), and \(\lambda_A(a_1) = \text{Q}\).
- \(A\) is race-free: if \(a_1 \approx a_2\), then \(\text{pol}_A(a_1) = \text{pol}_A(a_2)\).
An arena \(A\) is negative if all its minimal events have negative polarity.
Conflict aside, our arenas resemble those of [13]: the justification relation traditionally denoted by \(\vdash_A\) is simply \(\vdash\). Basic arenas include the empty arena \(\emptyset\), and the arena \([B]\) displayed in Figure 2, often written \(B\) by abuse of notation, for booleans.
We mention here some constructions on arenas. The dual \(A^\perp\) of \(A\) is obtained by taking \(\text{pol}_{A^\perp} = -\text{pol}_A\), and leaving the rest unchanged. The simple parallel composition \(A \parallel B\) is obtained as having events the tagged disjoint union \(\{1\} \times A \cup \{2\} \times B\), and all components inherited. The product \(A \,\&\, B\) of negative \(A\) and \(B\) is obtained as \(A \parallel B\), with all events of \(A\) in conflict with events of \(B\). As types will be denoted by negative arenas, we need a negative arena to interpret \(\rightarrow\). This is done by setting \(A\) to depend on (negative) minimal events of \(B\). If \(B\) has at most one minimal event, this is easy:
Definition 3. Let $A, B$ be negative arenas. Assume that $B$ is well-opened, i.e. $\min(B)$ has at most one event. If $B = 1$, then $A \rightarrow B = 1$. Otherwise, $\min(B) = \{b_0\}$. We define $A \rightarrow B$ as $A^\perp \parallel B$, with the additional causal dependency $(2, b_0) \leq (1, a)$ for all $a \in A$.
Defining $A \rightarrow B$ for well-opened $B$ is sufficient to interpret $\text{aPCF}_{\text{por}}$ types, but would be insufficient in the presence of a tensor type; with e.g. $B \rightarrow (B \otimes B)$. The complication here is due to a disjunctive causality at the level of types: the left $B$ is caused by either of the occurrences of $B$ on the right. As for parallel-or, the inability of event structures to express that can be worked around by introducing two conflicting copies of the left $B$, one for each causal justification – this is done in [6], and is reminiscent of the standard arena construction of [13]. This works well for some purposes, but the informed reader may see why this threatens definability for a concurrent language with tensor: indeed a counter-strategy can then behave differently depending on the cause of an occurrence of the left $B$.
We avoid the issue here, and only build $\rightarrow$ for $\text{aPCF}_{\text{por}}$ types. Interestingly the issue vanishes in Section 3: Disj supports disjunctive causality in both arenas and strategies.
Prestrategies on arenas. Strategies are certain event structures labeled by arenas. More formally, the labeling function is required to be a map of event structures:
Definition 4. A prestrategy on arena $A$ is a function on events $\sigma : S \rightarrow A$ s.t. $\sigma$ is a map of event structures: it preserves configurations ($\forall x \in \mathcal{C}(S), x \in \mathcal{C}(A)$) and is locally injective ($\forall s_1, s_2 \in x \in \mathcal{C}(S), \sigma s_1 = \sigma s_2 \implies s_1 = s_2$).
Event structures and their maps form a category $\mathcal{E}$. Figure 1 displays such prestrategies $\sigma : S \rightarrow A$ – the event structure drawn is $S$, and events are annotated by their image in $A$ through $\sigma$. Strategies, introduced in the next section, will be subject to further conditions.
2.3 An interpretation of $\text{aPCF}_{\text{por}}$ as strategies
Following the methodology of denotational semantics, giving an interpretation of $\text{aPCF}_{\text{por}}$ consists in constructing a category out of certain prestrategies, with enough structure to interpret $\text{aPCF}_{\text{por}}$. For lack of space we can only sketch part of the construction, a more detailed account of the construction, originally from [17], can be found in [7].
A prestrategy from $A$ to $B$ is a prestrategy $\sigma : S \rightarrow A^\perp \parallel B$. Given also $\tau : T \rightarrow B^\perp \parallel C$, we wish to compose them. As usual in game semantics, this involves (1) parallel interaction where the strategies freely communicate, and (2) hiding. We focus on (1) first.
Interaction. The interaction of $\sigma : S \rightarrow A^\perp \parallel B$ and $\tau : T \rightarrow B^\perp \parallel C$ is an event structure $T \oplus S$, labeled by $A \parallel B \parallel C$ via $\tau \oplus \sigma : T \oplus S \rightarrow A \parallel B \parallel C$. The omitted definition [7] of $T \oplus S$ follows the lines of the third author’s event structure semantics for parallel composition in CCS [20]. It is uniquely determined (up to iso) as a pullback [7] in $\mathcal{E}$.
Hiding. Composition should yield a prestrategy from $A$ to $C$. Accordingly events of $T \oplus S$ that map to $A$ or $C$ are available to the outside world, and called visible. On the other hand, those mapping to $B$ are private synchronization events, dubbed invisible; that we wish to hide. The proposition below formalizes that event structures are expressive enough for that.
Proposition 5. Event structures support hiding: for $E$ an event structure and $V$ any subset of events, then there exists an event structure $E \downarrow V$ having $V$ as events, and as configurations exactly those $e \cap V$ for $e \in E(E)$.
The components of $E \downarrow V$ (causality, conflict) are simply inherited from $E$.
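At the level of configuration sets, the projection of Proposition 5 is just an intersection with the visible events; the sketch below (with names of our choosing) computes it. A later proposition (Proposition 10) shows that the analogous operation on a general configuration family need not yield a configuration family again.

```java
import java.util.HashSet;
import java.util.Set;

public class Hiding {
    /** The configurations after hiding: { x ∩ V | x a configuration of E }. */
    public static Set<Set<Integer>> project(Set<Set<Integer>> configurationsOfE, Set<Integer> visible) {
        Set<Set<Integer>> result = new HashSet<>();
        for (Set<Integer> x : configurationsOfE) {
            Set<Integer> projected = new HashSet<>(x);
            projected.retainAll(visible);
            result.add(projected);
        }
        return result;
    }
}
```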
For $\sigma$ and $\tau$ as above, we first form $T \circ S = (T \oplus S) \downarrow V$ with $V$ the visible events. The composition $\tau \circ \sigma : T \circ S \rightarrow A^\perp \parallel C$ is simply the restriction of $\tau \oplus \sigma$; and is a prestrategy.
A category. Composition is associative up to isomorphism of prestrategies, where $\sigma_1 : S_1 \rightarrow A$ and $\sigma_2 : S_2 \rightarrow A$ are isomorphic (written $\sigma_1 \cong \sigma_2$) if there is an iso between $S_1$ and $S_2$ in $\mathcal{E}$ making the obvious triangle commute. Finally, for any arena $A$ there is a copycat prestrategy $a_A : C_A \rightarrow A^\perp \parallel A$, which has events and immediate conflict those of $A^\perp \parallel A$, and with causality that of $A^\perp \parallel A$ where additionally each positive event is set to depend on its negative counterpart on the other side. Details can be found in [17, 7].
Copycat is idempotent, but is not an identity in general. Rideau and Winskel [17] characterise the prestrategies for which it acts as an identity as the strategies:
Theorem 6. For $\sigma : S \rightarrow A^\perp \parallel B$, $a_B \circ \sigma \circ a_A \cong \sigma$ iff $\sigma$ is a strategy: it is receptive (for $x \in \mathcal{C}(S)$, if $\sigma x$ extends by a negative move $a$ in $A^\perp \parallel B$, there is a unique $s$ with $x \cup \{s\} \in \mathcal{C}(S)$ and $\sigma s = a$) and courteous (if $s_1 \rightarrow s_2$ and $\text{pol}(s_1) = +$ or $\text{pol}(s_2) = -$, then $\sigma s_1 \rightarrow_{A^\perp \parallel B} \sigma s_2$).
We mention (see [7]) that $CG$ is a compact closed category, i.e. fit to interpret the linear $\lambda$-calculus. As $\text{aPCF}_{\text{por}}$ is affine, we work in a derived category of negative strategies.
Negative arenas and strategies. Negative arenas were defined earlier. We also have:
Definition 7. A strategy $\sigma : S \rightarrow A^\perp \parallel B$ is negative iff $\forall s \in \text{min}(S)$, $\text{pol}(s) = -$.
There is a subcategory $CG_-$ of $CG$ with negative arenas as objects, and negative strategies as morphisms. The negativity assumption on strategies ensures that $1$ is terminal (the only negative strategy on $A^\perp \parallel 1$, for $A$ negative, is the empty strategy $e_A$), which allows us to interpret weakening. The tensor product is defined as $A \otimes B = A \parallel B$ on arenas, and as the obvious relabelling $\sigma_1 \otimes \sigma_2 : S_1 \parallel S_2 \rightarrow (A_1 \parallel A_2)^\perp \parallel B_1 \parallel B_2$ on strategies, for $\sigma_1 : S_1 \rightarrow A_1^\perp \parallel B_1$ and $\sigma_2 : S_2 \rightarrow A_2^\perp \parallel B_2$. Besides, $CG_-$ has products: for $\sigma : S \rightarrow A^\perp \parallel B$ and $\tau : T \rightarrow A^\perp \parallel C$ their pairing $\langle \sigma, \tau \rangle : S \,\&\, T \rightarrow A^\perp \parallel (B \,\&\, C)$ is the obvious labeling, and we have projections $C_A \rightarrow (A \,\&\, B)^\perp \parallel A$ and $C_B \rightarrow (A \,\&\, B)^\perp \parallel B$ derived from copycat. For well-opened $C$, the negative strategies on $A^\perp \parallel B^\perp \parallel C$ and $A^\perp \parallel B \rightarrow C$ are exactly the same. Because $A \rightarrow B$ is only defined for well-opened $B$, $CG_-$ is not monoidal closed; but well-opened arenas form an exponential ideal:
Figure 3: the strategy $\mathsf{if}$ on $\mathbb{B}_1^\perp \parallel (\mathbb{B}_2 \,\&\, \mathbb{B}_3)^\perp \parallel \mathbb{B}_4$ (diagram omitted).
Proposition 8. $CG_-$ is a symmetric monoidal category with products, with $1$ terminal. For $A$ and well-opened $B$, $A \rightarrow B$ is an exponential object; and is well-opened.
Following standard lines, we interpret $\text{aPCF}_{\text{por}}$ in $CG_-$: contexts $\Gamma = x_1 : A_1, \ldots, x_n : A_n$ are interpreted as $[\Gamma] = [A_1] \parallel \cdots \parallel [A_n]$. Types are interpreted as well-opened arenas, each type construction by its arena counterpart. Terms $\Gamma \vdash M : A$ are interpreted as negative
strategies $\llbracket M \rrbracket \in \text{CG}_{-}([\Gamma], [A])$. Divergence $\bot$ is interpreted as the strategy (also written $\bot$) with no positive moves, closed by receptivity. Constants $\mathsf{tt}, \mathsf{ff}$ are the obvious strategies on $\mathbb{B}$. For $\Gamma \vdash M : \mathbb{B}$, $\Delta \vdash N_1, N_2 : \mathbb{B}$, $[\mathsf{if}\ M\ \mathsf{then}\ N_1\ \mathsf{else}\ N_2] = \mathsf{if} \circ (\llbracket M \rrbracket \otimes \langle \llbracket N_1 \rrbracket, \llbracket N_2 \rrbracket \rangle)$, where $\mathsf{if}$ is given in Figure 3. Finally, parallel-or is the strategy on the right of Figure 1.
The interpretation $[\cdot]$ is adequate: for all $\vdash M : \mathbb{B}$, $M \Downarrow$ iff $\llbracket M \rrbracket \neq \bot$. However, we will be better equipped to prove that in Section 4. For now, the desired induction invariant for soundness $M \Downarrow v \Rightarrow \llbracket M \rrbracket = [v]$ fails, as the model keeps track of too much intensional information: $\llbracket \mathsf{por}\ \mathsf{tt}\ \mathsf{tt} \rrbracket$ has two conflicting $\mathsf{tt}^+$ events, and is therefore not isomorphic to $\llbracket \mathsf{tt} \rrbracket$.
### 2.4 Disjunctive causality and observational determinism
We have seen that $\text{CG}_{-}$ is an adequate model of aPCF$_{\mathsf{por}}$. However, it is not intensionally fully abstract: strategies distinguish more than terms of aPCF$_{\mathsf{por}}$. In fact, as in [6] one can interpret e.g. linearly used boolean references in $\text{CG}_{-}$; these can obviously distinguish more than the input-output behaviour of terms. We can remove these by requiring conditions of visibility, well-bracketing and innocence as in [8]; and we will do so in Section 4.2.
One condition of [8] is however inadequate: determinism. Indeed, the strategy of Figure 1 is non-deterministic, and no deterministic strategy can implement parallel-or (indeed, all we used two properties of event structures: that the corresponding category $\mathcal{E}$ has pullbacks...
(for interactions), and then that event structures support hiding. We will show very soon that the first criterion holds. However, we run into an issue for the second:
**Proposition 10.** Configuration families do not support hiding. There is a cf $A$ and $V \subseteq |A|$ such that the set $\{ x \cap V \mid x \in A \}$ is not a configuration family.
**Proof.** Set $|A| = \{a, b, c, d\}$, and configurations those specified by: $(d$ is caused either by $a$, or by $b$ and $c$ together). With $V = \{b, c, d\}$, the obtained hidden set fails completeness: it contains $\{b\}, \{d\}, \{b, c, d\}$ but not $\{b, d\}$. □
It is part for this reason that prime event structures are used when building categories of games and strategies – as indeed, hiding is crucial. Removing either coincidence-freeness or completeness loses the correspondence with general event structures [21], and leads to various pathologies further down the road – for instance we lose the key lemma:
**Lemma 11.** Let $A$ a set of configurations on $|A|$ satisfying completeness. Then, it is coincidence-free iff for any $x, y \in A$ s.t. $x \subseteq y$, there is a covering chain in $A$: $x \prec \ldots \prec y$.
## 3 Disjunctive deterministic games
In this section we introduce the main contribution of the paper, a category $\text{Disj}$ of disjunctive deterministic strategies supporting the interpretation of parallel-or. This relies on a notion of deterministic configuration families, and the observation that those do support hiding.
### 3.1 Deterministic configuration families
First we adjoin cfs with polarities, and define determinism (for Player).
**Definition 12.** A cf with polarities (cfp) (resp. partial polarities (cfpp)) is a cf $A$ with $\text{pol}_A : |A| \rightarrow \{-, +\}$ (resp. $\text{pol}_A : |A| \rightarrow \{-, 0, +\}$).
A cfpp $A$ is deterministic (dcfpp, resp. dcfp for total polarities) iff for all $x \subseteq^{0+}_A y$ and $x \subseteq z$, we have $y \cup z \in A$ – we write $x \subseteq^{0+}_A y$ (resp. $x \subseteq^{-}_A y$, $x \subseteq^{+}_A y$) to mean that $x \subseteq y$ and $\text{pol}_A(y \setminus x) \subseteq \{0, +\}$ (resp. $\text{pol}_A(y \setminus x) \subseteq \{-\}$, $\text{pol}_A(y \setminus x) \subseteq \{+\}$).
This means that only Opponent makes irreversible choices regarding the evolution of the play. We show that dcfpps support hiding of neutral events. In the sequel, given a dcfpp $A$, $V$ always denotes the set of non-neutral events in $|A|$. We write $A_\downarrow$ for the candidate cf on events $V$ with configurations those of the form $x_\downarrow = x \cap V$ for $x \in A$. The following key lemma is proved by successive applications of determinism and completeness.
**Lemma 13.** For $x \in A_\downarrow$ and $y \in A$ such that $x \subseteq y$, for any $x' \in A$ a witness for $x$ (i.e. $x'_\downarrow = x$), there is $y' \supseteq y$ in $A$ such that $x' \subseteq y'$.
From Lemma 13 it follows that for \(x, y \in A\), if \(x_\downarrow \uparrow y_\downarrow\) in \(A_\downarrow\), then \(x \uparrow y\) in \(A\). Using that observation together with Lemma 13 and determinism, it is straightforward to prove:
\[\text{Proposition 14.} \text{ For } A \text{ a dcfpp, } A\downarrow \text{ is a dcfp.}\]
### 3.2 Deterministic disjunctive strategies and composition
A pregame is simply a cfpp – for \(A, B\) cfps, \(A^\perp\) has the same events and configurations as \(A\) but inverted polarity. We will also write \(B^0\) for the cfpp with the same configurations as \(B\) but polarity set as globally 0. The cf \(A \parallel B\) has events the tagged disjoint union, and configurations \(x_A \parallel x_B\) for \(x_A \in A, x_B \in B\); \(A \& B\) has the same events as \(A \parallel B\), but only those configurations with one side empty.
Unlike in Definition 4, disjunctive prestrategies are simply substructures of the (pre)games.
\[\text{Definition 15.} \text{ A prestrategy on } A \text{ is a dcfp } \sigma \text{ on } |A|, \text{ such that } \sigma \subseteq A \text{ – written } \sigma : A.\]
\[\text{Example 16.} \text{ On events (with polarities) } \{q^-, \mathtt{tt}^+, \mathtt{ff}^+\}, \mathcal{C}(\mathbb{B}) \text{ is a cfpp – that by abuse of notation we still write } \mathbb{B}. \text{ Then, Figure 5 denotes a prestrategy on } \mathbb{B}^\perp \parallel \mathbb{B}^\perp \parallel \mathbb{B}.\]
Though disjunctive prestrategies are not primarily defined as maps, it will be helpful in composing them that they can be. Given two cfps \(A\) and \(B\), a map from \(A\) to \(B\) is a function on events \(f : |A| \rightarrow |B|\) which preserves configurations and is locally injective. Configuration families and their maps form a category \(\text{Fam}\). Note that if \(A\) is an event structure, \(\mathcal{C}(A)\) is a configuration family on \(|A|\). The definition of maps of cfps is compatible with that of maps of event structures, making \(\mathcal{C}(-) : \mathcal{E} \rightarrow \text{Fam}\) a full and faithful functor. Finally, a prestrategy \(\sigma : A\) on \(A\) can be regarded as an identity-on-events \(\text{Fam}\)-morphism \(\sigma \rightarrow A\).
Like \(\mathcal{E}\), \(\text{Fam}\) has pullbacks. The interaction of \(\sigma\) on \(A^\perp \parallel B\) and \(\tau\) on \(B^\perp \parallel C\) is the pullback of identity-on-events maps \(\sigma \parallel C \rightarrow A \parallel B \parallel C\) and \(A \parallel \tau \rightarrow A \parallel B \parallel C\).
\[\text{Proposition 17.} \text{ The pullback above is (up to iso) the cf } \tau \circledast \sigma \text{ with events } |A| \parallel |B| \parallel |C| \text{ and configurations those } x \in (\sigma \parallel C) \cap (A \parallel \tau) \text{ that are secured: for distinct } p_1, p_2 \in x \text{ there is } x \supseteq y \in (\sigma \parallel C) \cap (A \parallel \tau) \text{ s.t. } p_1 \in y \Leftrightarrow p_2 \notin y.\]
Equivalently, \(x \in (\sigma \parallel C) \cap (A \parallel \tau)\) is secured iff it has a covering chain, i.e. a sequence:
\[\emptyset = x_0 \subseteq x_1 \subseteq \ldots \subseteq x_n = x \text{ where for all } 0 \leq i \leq n, x_i \in (\sigma \parallel C) \cap (A \parallel \tau).\]
We regard the interaction \(\tau \circledast \sigma\) as a cfpp by setting as polarities those of \(A^\perp \parallel B^0 \parallel C\). To use Proposition 14 and finish defining composition, \(\tau \circledast \sigma\) needs to be deterministic; a sufficient condition for that is that \(\sigma\) and \(\tau\) are receptive. A prestrategy \(\sigma\) on \(A\) is receptive iff for all \(x \in \sigma\), if \(x \cup \{a^-\} \in A\) then \(x \cup \{a^-\} \in \sigma\).
\[\text{Proposition 18.} \text{ If } \sigma \text{ and } \tau \text{ are receptive prestrategies, then } \tau \circledast \sigma \text{ is a deterministic cfpp. Then, } \tau \circ \sigma = (\tau \circledast \sigma)_\downarrow \text{ is a receptive prestrategy on } A^\perp \parallel C.\]
Hence, composition is well-defined on receptive prestrategies – it is also associative. To get a category, we now prove the copycat strategy to be an identity.
### 3.3 Copycat and the compact closed category \(\text{Disj}\)
We cannot replicate in pregames the definition of copycat sketched in Section 2.3. However, as observed in [18, 7], if \(A\) is an arena, the configurations of \(C_A\) are those configurations of \(A^\perp \parallel A\), necessarily of the form \(x_l \parallel x_r\), such that every positive event of \(x_r\) (w.r.t. \(A\)) is already in \(x_l\), and every positive event in \(x_l\) (w.r.t. \(A^\perp\)) is already in \(x_r\). In other words, \(x_r \supseteq^{-} x_l \cap x_r \subseteq^{+} x_l\), written \(x_r \sqsubseteq_A x_l\) in [18, 7] and referred to as the “Scott order”.
Accordingly, on a pregame \( A \), given \( x,y \in A \) we write \( x \sqsubseteq_A y \) iff \( x \cap y \in A \) and the relation above holds. The candidate prestrategy copycat \( a_A \) comprises all \( x \parallel y \in A^\perp \parallel A \) s.t. \( y \sqsubseteq_A x \). Indeed \( a_A \) is a configuration family and is receptive; but prestrategies must be deterministic. It turns out that as in [22], \( a_A \) is only deterministic when \( A \) is race-free:
**Proposition 19.** Let \( A \) be a pregame. Then, \( a_A \) is a prestrategy iff \( A \) is race-free: for all \( x,y,z \in A \) such that \( x \subseteq^{-}_A y \) and \( x \subseteq^{+}_A z \), we have \( y \uparrow_A z \).
We aim to reproduce Theorem 6 in this new setting, and characterise the prestrategies left invariant under composition with copycat. However, there is a new subtlety here: for arbitrary \( A \), \( a_A \) might not even be idempotent!
**Example 20.** Consider the cfp on events \( A = \{ \ominus_1, \ominus_2, \ominus_3 \} \) given on the right. This is a race-free pregame, so by Proposition 19, \( a_A \circ a_A \) is a prestrategy on \( A^1 \parallel A \). However, it is distinct from \( a_A \). Indeed, the following configuration of \( A^1 \parallel A^0 \parallel A \) belongs to \( a_A \circ a_A \):
(Configuration diagram omitted: the occurrence of event 3 is justified by event 2 in the left copy, but by event 1 in the right copy.)
After hiding, there is a change in the causal history of \( \ominus_3 \), not authorized by \( a_A \) but authorised by \( A \): it is caused by \( \ominus_2 \) on the left, but \( \ominus_1 \) on the right. This issue comes from the fact that in \( A \), the same event can be caused either by a positive or a negative move. Such behaviour will be banned in games:
**Proposition 21.** If \( A \) is a race-free pregame, the following are equivalent: (1) \( a_A \) is idempotent, (2) \( \sqsubseteq_A \) is a partial order, (3) \( A \) is co-race-free: for all \( x,y,z \in A \) with \( x \supseteq^- y, x \supseteq^+ z \), we have \( y \cap z \in A \).
A game will be a race-free, co-race-free pregame. Copycat on any game is an idempotent prestrategy – strategies are those prestrategies that compose well with copycat. We prove:
**Theorem 22.** Let \( \sigma \) be a receptive prestrategy on \( A^\perp \parallel B \). Then, \( a_B \circ \sigma \circ a_A = \sigma \) iff \( \sigma \) is courteous: for any \( x \prec x \cup \{ a_1 \} \prec x \cup \{ a_1, a_2 \} \) in \( \sigma \) with \( \text{pol}(a_1) = + \) or \( \text{pol}(a_2) = - \), if \( x \cup \{ a_2 \} \in A^\perp \parallel B \), then \( x \cup \{ a_2 \} \in \sigma \) as well. In this case, \( \sigma \) is a strategy, written: \( \sigma : A^\perp \parallel B \).
It follows that there is a category \( \text{Disj} \) with games as objects, and strategies \( \sigma : A^\perp \parallel B \) as morphisms from \( A \) to \( B \). It is fairly easy to show that just as \( \text{CG} \), this category is compact closed. But unlike \( \text{CG} \), \( \text{Disj} \) supports a deterministic interpretation of parallel-or: indeed the set of configurations of \( \mathbb{B}^\perp \parallel \mathbb{B}^\perp \parallel \mathbb{B} \) denoted by the diagram of Figure 5 is a strategy.
### 3.4 An SMCC and deterministic interpretation of \( \text{aPCF}_{\text{por}} \)
As for \( \text{CG} \), \( \text{Disj} \) lacks structure to interpret \( \text{aPCF}_{\text{por}} \): the family of bottom strategies \( e_A : A^\perp \parallel 1 \) (where again 1 is the empty game) fails naturality. As before, we hence restrict \( \text{Disj} \) to a subcategory of negative games and strategies.
**Definition 23.** A cfp \( A \) is negative if any non-empty \( x \in A \) includes a negative event.
Copycat on negative \( A \) is negative and negative strategies are stable under composition; so there is a subcategory \( \text{Disj}_- \) of \( \text{Disj} \) with negative games as objects and negative strategies as morphisms, inheriting a symmetric monoidal structure from \( \text{Disj} \). Moreover 1 is terminal, and \( \text{Disj}_- \) has products given by \( A \& B \).
To prove the monoidal closure, as in $CG$, we have to deal with the fact that for $A$ and $B$ negative, $A^\perp \parallel B$ is not necessarily negative, so we define:
Definition 24. Let $A$ and $B$ be two negative games. The game $A \rightarrow B$ has the same events (and polarity) as $A^\perp \parallel B$, but non-empty configurations those $x_A \parallel x_B \in A^\perp \parallel B$ such that $x_B$ is non-empty (and hence includes a negative event).
Recall from Definition 3 that the arrow arena $A \rightarrow B$ was only defined for $B$ well-opened (i.e. with at most one minimal event). This was to avoid constructing arenas for types like $B \rightarrow (B \otimes B)$ (invalid for $\text{aPCF}_{\text{por}}$, but valid in an extension with a tensor type), where the left hand side $B$ can be opened because of either of the right hand side occurrences of $B$. In Disj, when constructing $A \rightarrow B$ there is no well-openness condition on $B$: exploiting disjunctive causality, we can express that $A$ is opened only after some event of $B$ – it does not matter which one. The negative strategies in $A^\perp \parallel B^\perp \parallel C$ and $A^\perp \parallel (B \rightarrow C)$ are the same, from which (using the compact closed structure of Disj) it follows that $\text{Disj}_-$ is monoidal closed, instead of only having an exponential ideal. Like $CG_-$, it supports an adequate interpretation of $\text{aPCF}_{\text{por}}$. Unlike $CG_-$, its strategies are deterministic.
### 4 Glueing $CG_-$ and Disj, and full abstraction
By moving from $CG_-$ to Disj, we gain determinism. However, Disj is not intensionally fully abstract – there are some strategies that distinguish observationally equivalent terms of $\text{aPCF}_{\text{por}}$. For instance, one has a strategy on $(B \rightarrow B \rightarrow B) \rightarrow B$ that calls its argument, feeds it $tt$ as first argument, and $tt$ as second argument only if the first argument has been evaluated; it then copies the final result to toplevel. Doing so, it distinguishes $\lambda xy.\ \text{if } x \text{ then } (\text{if } y \text{ then } tt \text{ else } \bot) \text{ else } \bot$ and $\lambda xy.\ \text{if } y \text{ then } (\text{if } x \text{ then } tt \text{ else } \bot) \text{ else } \bot$.
Getting rid of such strategies is the responsibility of the concept of innocence in Hyland-Ong games [13]. But the $P$-views of innocent strategies carry precisely the causal information that we have lost when moving from $CG_-$ to Disj! This causal information was crucial in [8] to give concurrent versions of well-bracketing and innocence, so as to capture the behaviour of parallel PCF strategies. So, the deterministic account of $\text{aPCF}_{\text{por}}$ is not enough; the causal model (where innocence and well-bracketing are defined) and the deterministic model should be related, to establish in what sense the causal model is already “observably” deterministic.
### 4.1 A glued games model
For any arena $A$ (Definition 2), $\mathcal{C}(A)$ is a game – it is race-free and co-race-free. Moreover, arena constructions and game constructions match: for any $A, B$, $\mathcal{C}(A^\perp) = \mathcal{C}(A)^\perp$, $\mathcal{C}(A \parallel B) = \mathcal{C}(A) \parallel \mathcal{C}(B)$; for negative $A, B$ we have $\mathcal{C}(A \& B) = \mathcal{C}(A) \& \mathcal{C}(B)$, and if $B$ is well-opened, $\mathcal{C}(A \rightarrow B) = \mathcal{C}(A) \rightarrow \mathcal{C}(B)$. Therefore, we will just take the objects of our glued games model to be arenas. The strategies, though, will be strategies in both categories.
Definition 25. Let $A$ be an arena. A strategy $\sigma : S \rightarrow A$ is observationally deterministic (odet) if (1) the display of $\sigma$, $O(\sigma) = \{ \sigma x \mid x \in \mathcal{C}(S) \}$, is a disjunctive deterministic strategy on $\mathcal{C}(A)$ (in Disj). It follows then that $\sigma$ is also a $\text{Fam}$-morphism $\sigma : \mathcal{C}(S) \rightarrow O(\sigma)$. We also ask that it has (2) the configuration extension property: for any $x \in \mathcal{C}(S)$, if $\sigma x \subseteq y \in O(\sigma)$, then there is $x \subseteq x' \in \mathcal{C}(S)$ such that $\sigma x' = y$.
The configuration extension property ensures that two causal realizations of the same displayed configuration have bisimilar futures. The display operation yields a coarser equivalence on strategies: odet strategies $\sigma, \tau : A$ are display-equivalent, written $\sigma \approx \tau$, if $O(\sigma) = O(\tau)$. This coarser equivalence is key to ensure soundness of our interpretation, as the interpretations in $CG_-$ of $\text{por}\ tt$ and $tt$ are not isomorphic, but only display-equivalent.
Theorem 26. There is a compact closed category $\text{Odet}$ with arenas as objects and odet strategies up to $\approx$ as morphisms; with a symmetric monoidal subcategory $\text{Odet}_-$ of negative arenas and negative strategies with products and $1$ terminal. It has well-opened arenas as an exponential ideal. Finally, $\text{Odet}_-$ supports an adequate interpretation of $\text{aPCF}_{\text{por}}$.
Proof. For odet $\sigma$ and $\tau$, $O(\tau \circ \sigma) = O(\tau) \circ O(\sigma)$: $\subseteq$ is direct, $\supseteq$ exploits the extension property for $\sigma$ and $\tau$. So, $O(\tau \circ \sigma)$ is a deterministic disjunctive strategy, and it has the extension property – hence $\tau \circ \sigma$ is odet. In general, $O$ links the structure of $\text{CG}$ to that of $\text{Disj}$, forming $\text{Odet}$. We sketch adequacy.
Soundness. For all $M \Downarrow v$, $[M] = [v]$ (by induction on the evaluation derivations).
Adequacy. We prove by induction on the size of $M$ that if $M \not\Downarrow$, then $[M]$ is bottom. The size is an adequate measure because, as $\text{aPCF}_{\text{por}}$ is affine, substitution is non-copying and each induction step is on a strictly smaller term.
As a result, interpretations in $\text{CG}_-$ and $\text{Disj}$ are also adequate. We have accommodated the causal representation of programs permitted by $\text{CG}$ and the determinism of $\text{Disj}$. Now, it remains to import the causal conditions on strategies of [8], and prove full abstraction.
### 4.2 Conditions
We now recall innocence and well-bracketing, introduced in [8]. Formulated for deterministic strategies (in the sense of $\text{CG}_-$), those would not suffice to prove full abstraction for a genuinely non-deterministic language. For that, further conditions are needed to ensure the locality of conflicts – those will appear in the first author’s forthcoming PhD thesis. However, we will show in Lemma 31 that for $\text{aPCF}_{\text{por}}$, distinguishable strategies can be distinguished via deterministic (in the sense of $\text{CG}_-$) contexts, so these simpler conditions will suffice.
All our conditions rely crucially on the notion of grounded causal chain.
Definition 27. A grounded causal chain (gcc) in $S$ is a sequence $\rho = \rho_0 \rightarrow \ldots \rightarrow \rho_n$ where $\rho_0$ is minimal. The set of gccs of $S$ is written $\text{gcc}(S)$.
By courtesy, gccs of strategies on arenas are always alternating. They give a notion of thread of a concurrent strategy. Two gccs $\rho, \rho' \in \text{gcc}(S)$ are forking when $\rho \cup \rho'$ is consistent in $S$, and there is $k \in \mathbb{N}$ s.t. $\rho_i = \rho'_i$ for $i < k$ and $\{\rho_i\}_{i \geq k}$ and $\{\rho'_i\}_{i \geq k}$ are non-empty and disjoint. They are negatively forking when $\text{pol}(\rho_{k+1}) = \text{pol}(\rho'_{k+1}) = -$, positively forking otherwise. Two forking gccs $\rho, \rho'$ are joined at $s \in S$ when $\rho_\omega \rightarrow s$ and $\rho'_\omega \rightarrow s$ ($\rho_\omega$ refers to the last event of $\rho$) as shown in this picture:
(Diagram: the two gccs share the prefix \( \rho_0 \rightarrow \ldots \rightarrow \rho_k \), fork into \( \rho_{k+1} \rightarrow \ldots \rightarrow \rho_\omega \) and \( \rho'_{k+1} \rightarrow \ldots \rightarrow \rho'_\omega \), and are joined at \( s \).)
Innocence. Innocence enforces independence between threads forked by Opponent.
Definition 28. Let $\sigma : S \rightarrow A$ be a strategy on an arena $A$. It is innocent when it is:
Visible: The image $\sigma \rho$ of a gcc (regarding $\rho \in \text{gcc}(S)$ as a set) is a configuration of $A$.
Preinnocent: Two negatively forking gccs are never joined.
Preinnocence is a locality condition, forcing Player to deal independently with threads forked by Opponent – in the sequential case, it coincides with Hyland-Ong innocence. In the concurrent case though, it is not by itself stable under composition; that is where visibility comes in. Together they are preserved under composition and under all the operations on strategies involved in the interpretation of $\text{aPCF}_{\text{por}}$ in $\text{CG}_-$ (and hence $\text{Odet}_-$).
Well-bracketing. Traditionally, *well-bracketing* in game semantics rules out strategies that behave like a control operator, manipulating the call stack. It exploits the question/answer labelling, reminiscent of function calls and returns. In arenas from \( \text{aPCF}_{\text{por}} \)-types, answers to the same question are always conflicting, so every consistent set has at most one answer to any question. A consistent set \(X\) is *complete* when it has exactly one answer to any question. A question is *pending* in a set \(X\) if it has no answer in \(X\) and is maximal in \(X\).
**Definition 29.** A strategy \(\sigma : S \rightarrow A\) is *well-bracketed* when
1. if \(a \in S\) answers \(q \in S\), then \(q\) is pending in \([a]\) and
2. for \(\rho, \rho' \in \text{gcc}(S)\) forking at \(\rho_k = \rho'_k\), and joined, the segments \(\rho_{> k}\) and \(\rho'_{> k}\) must be complete.
The affinity condition of [8] comes for free here, thanks to the conflict in \(B\). Well-bracketing is proved stable under composition and other operations in [8]. An \( \text{aPCF}_{\text{por}} \)-strategy is a negative, innocent, well-bracketed and odet strategy.
**Theorem 30.** There is a symmetric monoidal category \(\text{PorStrat}\) of negative arenas and \( \text{aPCF}_{\text{por}} \)-strategies, with products, a terminal object and an exponential ideal comprising \(B\). Moreover, the interpretation of \( \text{aPCF}_{\text{por}} \) in \(\text{Odet}_-\) factors through \(\text{PorStrat} \subseteq \text{Odet}_-\).
From now on, all strategies will be assumed to be \( \text{aPCF}_{\text{por}} \)-strategies.
### 4.3 Intensional full abstraction
Two strategies \(\sigma, \tau : A\) are *observationally equivalent* \((\sigma \simeq_{\text{obs}} \tau)\) when for all strategies \(\alpha : A^\perp \parallel B\), \(\alpha \odot \sigma \approx \alpha \odot \tau\). In fact, there is no need to quantify over all strategies. A *path-strategy* is a strategy \(\sigma : S \rightarrow A\) such that there exists a configuration \(x \in \mathcal{C}(S)\) that contains all the positive moves of \(S\). Any distinguishing strategy yields by restriction a distinguishing path-strategy – this would fail without affinity, as the restriction would fail uniformity [8], and enforcing uniformity would make it non-deterministic.
**Lemma 31.** Two distinguishable strategies can be distinguished by a path-strategy.
**Lemma 32.** Every path-strategy can be defined by a term of \( \text{aPCF} \) up to \(\simeq_{\text{obs}}\).
**Proof.** As in [8] since path-strategies are deterministic – the only difference is the necessity to distribute the context among the subterms to ensure affine typing. \(\Box\)
A corollary is that in an affine setting, \( \text{por} \) adds no distinguishing power: two \( \text{aPCF}_{\text{por}} \)-terms are observationally equivalent if and only if they cannot be distinguished by a context from affine \( \text{PCF} \) without \( \text{por} \). Intensional full abstraction follows by standard techniques.
**Theorem 33.** The interpretation of \( \text{aPCF}_{\text{por}} \) into \(\text{PorStrat}\) is intensionally fully abstract: it preserves and reflects observational equivalence (see Section 2.1).
### 5 Conclusion
This leaves many avenues for further investigation. On the semantic front, we plan among other applications to exploit \text{Odet} to model non-interfering concurrent languages. On the foundational front we need to understand better how the present approach relates to the treatment of disjunctive causality in edcs [9].
**Acknowledgments.** We are grateful to Tamás Kispéter for interesting discussions about strategies as general event structures.
References
1. Samson Abramsky, Kohei Honda, and Guy McCusker. A fully abstract game semantics for general references. In LICS 1998.
2. Samson Abramsky, Radha Jagadeesan, and Pasquale Malacaria. Full abstraction for PCF. Information and Computation, 163(2), 2000.
3. Samson Abramsky and Guy McCusker. Linearity, sharing and state: a fully abstract game semantics for Idealized Algol with active expressions.
4. Samson Abramsky and Paul-André Melliès. Concurrent games and full completeness. In LICS 1999.
6. Simon Castellan and Pierre Clairambault. Causality vs. interleavings in concurrent game semantics. In CONCUR 2016.
7. Simon Castellan, Pierre Clairambault, Silvain Rideau, and Glynn Winskel. Games and strategies as event structures. Logical Methods in Computer Science, 2017.
8. Simon Castellan, Pierre Clairambault, and Glynn Winskel. The parallel intensionally fully abstract games model of PCF. In LICS 2015.
10. Dan R. Ghica. Applications of game semantics: From program analysis to hardware synthesis. In LICS 2009.
12. Tom Hirschowitz. Full abstraction for fair testing in CCS (expanded version). Logical Methods in Computer Science.
15. Paul-André Melliès and Samuel Mimram. Asynchronous games: Innocence without alternation. In CONCUR 2007, Lisbon, Portugal, September 3-8, 2007, volume 4703 of Lecture Notes in Computer Science.
18. Glynn Winskel. Strategies as profunctors. In FOSSACS 2013, Held as Part of ETAPS 2013.
20. Glynn Winskel. Event structure semantics for CCS and related languages. In ICALP 1982.
Nesl User's Manual
(For Nesl Version 3.1)
Guy E. Blelloch Jonathan C. Hardwick
Jay Sipelstein Marco Zagha
August 20, 1995
CMU-CS-95-169
School of Computer Science
Carnegie Mellon University
Pittsburgh, PA 15213
Abstract
This manual is a supplement to the language definition of Nesl version 3.1. It describes how to use the Nesl system interactively and covers features for accessing on-line help, debugging, profiling, executing programs on remote machines, using Nesl with GNU Emacs, and installing and customizing the Nesl system.
This research was sponsored in part by the Wright Laboratory, Aeronautical Systems Center, Air Force Materiel Command, USAF, and the Advanced Research Projects Agency (ARPA) under grant number F33615-93-1-1330 and contract number F19628-91-C-0168. It was also supported in part by an NSF Young Investigator Award and by Finmeccanica.
The views and conclusions contained in this document are those of the authors and should not be interpreted as necessarily representing the official policies or endorsements, either expressed or implied, of Wright Laboratory or the U. S. Government.
### Keywords:
Parallel programming languages, collection-oriented languages, remote execution, programming environments, supercomputers, nested parallelism
# Contents
1 Introduction
  1.1 System requirements
  1.2 Other sources of information
  1.3 Conventions
2 Using NESL
  2.1 Starting NESL
  2.2 How NESL evaluates expressions
  2.3 Top-level expressions and commands
  2.4 Errors
  2.5 The init file
  2.6 Exiting NESL
  2.7 Variable and Function Redefinition
3 Top-level Commands
  3.1 On-line help
  3.2 Loading NESL files
  3.3 Customizing output
  3.4 Monitoring execution
  3.5 Configurations and remote execution
  3.6 Background execution
  3.7 Checking interpreter status
  3.8 Saving NESL state
4 Editor Support
  4.1 Using NESL with GNU Emacs
  4.2 Using NESL with other editors
5 Installing NESL
  5.1 Getting the files
  5.2 Structure of NESL distribution
  5.3 Building NESL
  5.4 Building Stand-alone NESL
  5.5 Machine configurations
6 Bugs
  6.1 Current bugs
  6.2 How to report bugs
Bibliography
A NESL Examples
  A.1 Example code in distribution
  A.2 Sample session
    A.2.1 Scalar operations
    A.2.2 Vector operations
    A.2.3 An example: string searching
1 Introduction
This manual is a supplement to the language definition of Nesl version 3.1 [1], and assumes that the reader is familiar with the basics of the language. It describes how to use the Nesl system interactively and covers features for accessing on-line help, debugging, profiling, executing programs on remote machines, using Nesl with GNU Emacs, and installing and customizing the Nesl system.
Nesl 3.1 is available via anonymous FTP and runs on serial workstations, Connection Machines CM-2 and CM-5, Cray Y-MP (8, C90, J90, EL) vector machines, the MasPar, and most machines supporting MPI such as the IBM SP-1 and SP-2 and the Intel Paragon. The normal mode of operation is for the interactive front end to run on a user's workstation and for the computational back end to run on a remote supercomputer. The Nesl 3.1 system has the following features:
- Remote execution
- Profiling
- Tracing
- On-line documentation
- Background execution
1.1 System requirements
Nesl assumes the machine on which it is running uses some variant of the Unix operating system. Building Nesl requires a C compiler, lex and yacc, and Common Lisp (GNU, Allegro, CMU, or Lucid). For the graphics functions it also requires the X11 library.
There is also a stand-alone version of Nesl only requiring Common Lisp. This version implements all of Nesl, but has no support for remote evaluation or graphics. See Section 5.4 for more on this version.
1.2 Other sources of information
There are several additional sources of information if you want to learn more about Nesl:
- A World Wide Web home page for the SCANDAL project, which always contains the latest information on Nesl. The URL is
http://www.cs.cmu.edu/~scandal
If you have any problems accessing the page send e-mail to nesl-bugs@cs.cmu.edu. This page contains links to many Nesl related web pages including an on-line demo and tutorial, html manual, and many examples.
- Our FTP site (see Section 5.1). The WWW home page contains a link to this site.
- A mailing list used to discuss Nesl and announce new patches and releases. If you want to be added to this list send e-mail to nesl-request@cs.cmu.edu.
- Papers on the implementation [2], uses [4], and teaching [3] of Nesl. These can be obtained from the FTP site, or viewed directly from the WWW home page.
1.3 Conventions
Within this document, interactions with the NESL system are shown in a **typewriter font**, and command arguments are shown in an **italic font**. Enumerated choices are shown within curly braces {on, off}, and optional arguments are shown within square brackets [var].
2 Using NESL
2.1 Starting NESL
To start NESL, type `runnesl`. This should load a dumped Common Lisp image containing the NESL system. If `runnesl` isn’t on your PATH, look for `bin/runnesl` in the NESL distribution tree at your site. See Section 5 for instructions on installing NESL.
2.2 How NESL evaluates expressions
The NESL system is interactive: the current implementation is built on top of Common Lisp and implements a similar read-eval-print loop. Expressions are typed at the NESL prompt and terminated with a semicolon and a carriage return. For example:
```
<Nesl> 2 + 3;
Compiling...Writing...Loading...Running...
Exiting...Reading...
it = 5 : int
```
Expressions are compiled dynamically into an intermediate language called VCODE, which is then interpreted by a subprocess. The phases of executing an expression are:
- **Compiling**: Compiles the expression, and any uncompiled functions needed to evaluate it, into VCODE.
- **Writing**: Writes the compiled VCODE program out to a file.
- **Loading**: Starts up a subprocess for the VCODE interpreter and loads the VCODE program.
- **Running**: Subprocess executes the VCODE program.
- **Exiting**: Subprocess writes the results to a file.
- **Reading**: Reads the results back into the NESL system.
This setup makes it relatively easy to run code on remote machines, since the VCODE interpreter can be run remotely, communicating with the NESL system through a shared file system or through calls to `rsh` and `rcp`. This is how the CM-2, CM-5, Cray, Maspar, and MPI implementations work.
2.3 Top-level expressions and commands
At the NESL prompt you can type a NESL top-level expression, as defined by the language, or a top-level command, which is used to control or examine various aspects of the environment. The top-level commands are summarized in Figure 1 and most are described in Section 3.
A top-level expression is one of
NESL top-level forms:
function <name> <pattern> [: <typespec>] = <exp>; -- Function Definition
datatype <name> <typeexp>; -- Record Definition
<pattern> = <exp>; -- Top level Assignment
<exp>; -- Any NESL expression
Top-level Commands:
help; OR ? -- Print this message.
load [<exp>]; -- Load a file. If no arg, reload last file.
describe <funcname>; -- Describe a NESL function.
apropos <name>; -- List functions which contain <name>.
set arg_check {on,off}; -- Set the argument check switch.
set trace <funcname> <n>; -- Trace a NESL function.
0=off, 1=fname, 2=argvs, 3=varx, 4=vals
set profile <funcname> {on,off}; -- Set profiling for function <funcname>.
set print_length <n>; -- Set maximum sequence print length.
set verbose {on,off}; -- Set the verbose switch.
set editor <pathname>; -- Set the editor.
show status; -- List settings of current environment.
show bugs; -- List the known bugs.
show code <funcname>; -- Show the code for a function.
dump vcode <filename> <exp>; -- Dump VCODE for <exp> to file <filename>.
dump world [<filename>]; -- Dump current NESL environment to a file.
dump info [<filename>]; -- Dump info for bug reports (default=stdout).
edit [<filename>]; -- Edit & load a NESL file (default=last file).
<pattern> |= <exp>; -- Assign to a file variable.
exit; -- Exit NESL.
lisp; or ^D -- Go to the Common Lisp interpreter.
Commands for running VCODE on remote machines:
defconfig <name> <args>; -- Define a new configuration.
set config <config>; -- Set the current configuration to <config>.
set memory_size <n>; -- Set the memory size of the current configuration.
show config; -- List the properties of the current configuration.
show configs; -- List the available configurations.
<name> &= <exp>; -- Execute exp in the background.
get <name>; -- Get a background result.
Figure 1: Top-level commands (screendump obtained by typing ?).
toplevel ::= function name pattern [: typedef] = exp ; function definition
datatype name typedef ; datatype definition
pattern = exp ; variable binding
exp ; expression
where exp is any expression and pattern can either be a single variable name or a parenthesized pattern of variable names (the square brackets indicate that the typedef in a function definition is optional). A full syntax for each of these is given in Appendix A of the Nesl language definition [1]. Some examples of top-level expressions include:
function double(a) = 2*a;
function add3(a,b,c) = a + b + c;
datatype complex(float,float);
foo = double(3) + add3(1,2,3);
foo;
Expressions that are not assigned to a user defined variable are assigned to the variable it.
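Since `it` is an ordinary variable, it can be referred to in subsequent expressions. A short illustrative session (Compiling/Running messages omitted for brevity; the exact transcript is a sketch) might look like this:

```
<Nesl> 2 + 3;
it = 5 : int
<Nesl> it * 10;
it = 50 : int
```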
If you hit Return before an expression is completed, either for readability or by mistake, a “>” is printed at the beginning of each new line until the expression is completed. For example:
<Nesl> 2
> +
> 3;
Compiling..Writing..Loading..Running..
Exiting..Reading..
it = 5 : int
If you get lost, instead of hitting Ctrl-C try typing a few semicolons to end the expression.
For an example Nesl session showing many features of the language, see Appendix A.
2.4 Errors
In Nesl most errors result in an error message being printed, and the system returns you to the Nesl prompt.
<Nesl> nosuchfunc(2);
Error at top level.
NOSUCHFUNC is undefined.
<Nesl>
Some errors, however, may cause you to abort out of Nesl and back to the Common Lisp prompt. The only case where this is supposed to happen is if you hit Ctrl-C. If it happens in any other situation, please report it as a bug (see Section 6.2). When it does happen, you can return to Nesl by getting back to the top level of your Common Lisp system and then typing (nesl).
Running out of memory: The VCODE interpreter uses a fixed amount of memory for storing data. The default value depends on the configuration used (see Section 5.5), but is normally at least 1048576 (2^20) 64-bit words. It can be changed with the command set memory_size n (see Section 3.5).
If your program requires too much memory, you will get the following error,
Compiling..Writing..Loading..Running..
compacting vector memory...done
vinterp: ran out of vector memory. Rerun using larger `-m argument'.
Reading..
Error at top level.
Error: Error while running VCODE.
Before allocating more memory using set memory_size (see Section 3.5), think about why you are running out of memory. Your algorithm might require more memory than your machine can possibly supply. Or your algorithm might have a bug and be recursing infinitely. In any case, the memory size probably should not be set to more than your total physical memory. So if you have 16 Megabytes on your workstation, don't set memory_size to anything more than 2097152 (2 Megawords). If you want to find out more about the memory system used by NESL and the meaning of the compacting vector memory message, see the paper on the implementation of NESL [2].
Parse errors: Common syntax errors include functions with no arguments, mismatched parentheses, and empty vectors without types. Sometimes errors are a bit cryptic, for example:
function foo = sqrt(2.0); ⇒ = is missing its left argument.
Most semantic errors (such as type mismatches) produce more informative error messages.
Garbage collection: The NESL system will occasionally pause because of a garbage collection by the underlying Common Lisp. This does not affect the operation of NESL programs, and in particular has no effect on the running times of programs.
2.5 The init file
When NESL is started, it loads the file .nesl (if it exists) from your home directory. This file should be in the same format as any NESL file—it can contain definitions as well as top-level commands. It is typically used to modify environment defaults such as the preferred configuration (set config), memory size (set memory_size), editor (set editor), and maximum print length for sequences returned at the top level (set print_length). These commands are all described later in this manual.
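For example, a .nesl file along the following lines sets these defaults at startup (the values are illustrative, and the local configuration is assumed to be defined in config.nesl):

```
set config local;
set memory_size 2097152;
set editor "emacs";
set print_length 20;
```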
2.6 Exiting NESL
lisp;
Exits NESL to Common Lisp. Ctrl-D can also be used. To get back to NESL type (nesl).
exit;
Exits both NESL and Common Lisp.
2.7 Variable and Function Redefinition
In most functional languages, when you define a variable with the same name as an existing variable, the new definition shadows the old definition but will not affect any previous references to the old variable. For example:
a = 22;
function foo(b) = a + b;
a = 1.0;
Now a is redefined to be 1.0, but foo would still refer to the value 22.
For the sake of convenience, Nesl adds the feature that when you define a variable at the top level and then later redefine it with the same name and type, the system changes all previous references to the variable to the new value (note that function names are variables, so the same is true with function definitions). This allows the user to redefine a variable or function without having to reload everything that depends on it. It is important to realize that previous references to the variable are not redefined if the new value is of a different type, including the redefinition of a function to have a new type (since such a redefinition would lead to type inconsistencies), and that redefining only happens at the top level. The system warns the user when defining a variable with an existing name but a new type. For example:
```
<Nesl> x=2;
x = 2 : int
<Nesl> x=0.0;
Redefining X with a new type. Old calls will not be modified.
x = 0.0 : float
```
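When the type is unchanged, previous references are updated as described above. An illustrative session (the values are made up, and the transcript is a sketch of the expected behaviour) might look like this:

```
<Nesl> a = 22;
a = 22 : int
<Nesl> function foo(b) = a + b;
foo = fn : int -> int
<Nesl> foo(1);
it = 23 : int
<Nesl> a = 7;
a = 7 : int
<Nesl> foo(1);
it = 8 : int
```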
3 Top-level Commands
The top-level commands are used for controlling and examining the Nesl environment. They are not part of the Nesl language and therefore are not found in the language definition. Top-level commands can be used either at the <Nesl> prompt, or at the top level within a file—they cannot appear within an expression.
3.1 On-line help
```
help;
```
Prints a list of all the top-level commands, as shown in Figure 1. The command ? (with or without a terminating semicolon) has the same effect.
```
describe funname;
```
This gives a description of function funname, including the documentation from the manual [1].
```
apropos name;
```
This prints the names of all the Nesl functions and variables that contain the string name in either their name or their documentation string.
```
show code funname;
```
This displays the Nesl code for function funname. Code cannot be shown for primitive functions.
3.2 Loading Nesl files
```
load [exp];
```
This loads a Nesl file into the current environment. If present, the expression exp must evaluate
to a string (sequence of characters) and be a valid filename. If the filename ends with the suffix ".nesl", the suffix can be omitted. If exp is omitted, it defaults to the last file that was loaded, or edited using edit (see Section 4.2). Files are loaded relative to the current directory. NESL files can contain any NESL top-level expressions or top-level commands; a file can therefore load other files, or set various environment variables, such as the memory size (see Section 3.5).
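For instance, assuming a file myprog.nesl in the current directory (the filename is hypothetical), either of the following loads it:

```
<Nesl> load "myprog.nesl";
<Nesl> load "myprog";
```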
3.3 Customizing output
set verbose {on,off};
This turns the verbose mode on or off. When verbose mode is off, the NESL system no longer prints
Compiling..Writing..Loading..Running..
Exiting..Reading..
when evaluating an expression at the top level. Note that this command is local to a file, so that putting it in your .nesl file only turns verbose mode off while that file is being loaded. Verbose mode is often useful when debugging a new configuration (see Section 5.5).
set print_length n;
This sets the maximum print length for sequences returned at the top level. Only n elements of a sequence are printed on the screen, followed by "...". The default value for print_length is 100. The print length applies to each level of a nested sequence. For example:
<Nesl> set print_length 3;
<Nesl> x = [[1:10], [1:10], [1:10], [1:10]];
x = [[1, 2, 3,...], [1, 2, 3,...], [1, 2, 3,...], ...] : [[int]]
pattern |= exp;
By typing a |= exp; at the top level, the expression exp is assigned to the file variable a. The pattern can be any variable pattern. This construct is useful for evaluating expressions with a large return value, because the user does not have to wait for the result to be read back into NESL—only the type is returned. You can use file variables in expressions just like any other variables. Here is an example:
<Nesl> a |= index(10000);
a : [int]
<Nesl> sum(a);
it = 49995000 : int
<Nesl> function foo(n) = take(a,n);
foo = fn : int -> [int]
<Nesl> foo(10);
it = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] : [int]
File variables are stored in the temp_dir specified by the configuration (/tmp by default). This means that if you switch your configuration to a new configuration with a different temp_dir, the VCODE interpreter won't be able to find the variable and will give a runtime error message. Files created in this process are not removed by NESL.
3.4 Monitoring execution
set arg_check {on,off};
This turns argument checking on or off. Argument checking is on by default and includes bounds checking, divide by zero checking, and range checking. Runtime errors detected by argument checking print a message of the form:
<Nesl> let a = [2,3,4] in a[5];
Compiling..Writing..Loading..Running..
RUNTIME ERROR: Sequence reference (a[i]) out of bounds.
Exiting..Reading..
Argument checking takes time, so it can be turned off to generate faster code.
set trace funname n;
This sets the tracing level for any non-primitive function. Tracing is used for debugging and prints out a message each time the function is entered and exited. The argument n specifies the level of tracing. The choices are:
1 Print the function name when entering and exiting the function.
2 Print the function name along with the values of its arguments and the result.
3 Print the function name, and the variable names for each binding in the outermost let statement when it is assigned. This can be used to help locate a runtime error.
4 A combination of 2 and 3, plus it prints the value of each binding in the outermost let statement.
Tracing can be turned off for the function by specifying a trace level of 0. An example use is:
<Nesl> function norm(a,b) = sqrt(a^2 + b^2);
norm = fn : (float, float) -> float
<Nesl> set trace norm 2;
<Nesl> set trace sqrt 1;
<Nesl> norm(3.0,4.0);
Compiling..Writing..Loading..Running..
Entering NORM
A = 3.0000000e0
B = 4.0000000e0
Entering SQRT
Leaving SQRT
Leaving NORM
RESULT = 5.0000000e0
Exiting..Reading..
5.0 : float
NESL primitives cannot be traced. If you would like to trace a primitive, you can create a stub function that calls the primitive, and then replace calls to the primitive with calls to the stub. For example:
```
<Nesl> set trace sin 4;
SIN is a primitive, you cannot trace it.
<Nesl> function my_sin (x) = sin(x);
<Nesl> set trace my_sin 4;
```
When tracing prints values, it truncates sequences and only prints the first few elements. It also only prints nested sequences down to a fixed depth. The truncation of sequences for tracing is different from the print length set by `set print_length`, and is controlled by the two variables `trace_string_length` and `trace_string_depth`. They are ordinary NESL variables and should be set at top level using `var = val`.
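For instance, to shorten what tracing prints, one might set (the values are illustrative, and both variables are assumed to be integer-valued):

```
<Nesl> trace_string_length = 5;
trace_string_length = 5 : int
<Nesl> trace_string_depth = 2;
trace_string_depth = 2 : int
```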
When tracing functions that are called in parallel, NESL will only print an “entering foo” message once even though the function is being entered many times in parallel. It will, however, print any values (arguments, results, or values of `let` statements) for each parallel call. For example:
```
<Nesl> function foo(a) = a + 3;
foo = fn : int -> int
<Nesl> set trace foo 2;
<Nesl> {foo(a): a in [2,3,4]};
Compiling..Writing..Loading..Running..
Entering FOO
A = 2
A = 3
A = 4
Leaving FOO
RESULT = 5
RESULT = 6
RESULT = 7
Exiting..Reading..
it = [5, 6, 7] : [int]
```
```
set trace off;
```
Turns tracing off for all functions.
```
set profile funname {on,off};
```
When profiling is turned on for a function, the time taken for each expression on the right of a `let` binding in that function is printed. The profiling also works on functions that are called in parallel; it prints the total time taken across all parallel calls.
In the example below, suppose we would like to profile a function that scrambles the order of elements in a vector. We could first use the `time` function to measure the total running time:
function scramble (vec) =
let
n = #vec;
rands = {rand(j): j in dist(n, n)};
random_permutation = rank(rands)
in
vec->random_permutation;
<Nesl> time(scramble([100000:200000]));
it = (100000, 0.042053647356) : int, float
To find out where the time is going, we turn on profiling as shown below. The timings indicate that the rank function is consuming most of the running time.
<Nesl> set profile scramble on;
Turning timing on for each let binding of SCRAMBLE.
<Nesl> #scramble([100000:200000]);
5.106242e-5 seconds for expression:
#vec
5.976757e-3 seconds for expression:
{rand(j): j in dist(n, n)}
3.046853e-2 seconds for expression:
rank(rands)
3.965526e-3 seconds for expression:
vec -> random_permutation
To get more accurate profiling, we turn argument checking off. Note that some functions are much faster with argument checking off.
<Nesl> set arg_check off;
<Nesl> #scramble([100000:200000]);
5.183748e-5 seconds for expression:
#vec
3.150965e-3 seconds for expression:
{rand(j): j in dist(n, n)}
3.034669e-2 seconds for expression:
rank(rands)
8.495138e-4 seconds for expression:
vec -> random_permutation
It should be noted that timing will give erroneous numbers when nested. This means that if you profile a recursive function, the times for the let bindings that do not make recursive calls will be accurate, but the time for the recursive call itself will be meaningless. It should also be noted that to get reasonably accurate timings, the function should be profiled a few times, since the time taken by an expression can vary depending on the system load.
Redefining monitored functions: When you redefine a function with a new type, old calls to the function are still traced or profiled but the new version will not be monitored.
```
set profile off;
```
Turns profiling off for all functions.
### 3.5 Configurations and remote execution
In NESL it is possible to evaluate any top-level expression on a remote machine. The remote machine can be another workstation, a Cray Y-MP, a CM-2 or CM-5, a Maspar, or one of the supported MPI machines. To run expressions on a remote machine, configurations first need to be set up using `defconfig` (see Section 5.5). Assuming that the configuration files have already been set up, this section describes the top-level commands used for remote execution.
```
show configs;
```
This displays a list of currently-available machine configurations. Note that the same physical machine might be in multiple configurations, depending on the turnaround time requested for a job, how many processors are used, etc.
```
<Nesl> show configs;
The current machine configurations are: LOCAL CRAY CM5
To use one type: set config <config>;
```
```
set config config;
```
This causes all future NESL expressions (until the next `set config` command) to be evaluated on the machine configuration `config`. (In order to use NESL graphics, you must first give the remote machine access to your X server by executing the command `xhost + remote.machine` in a local shell.)
```
<Nesl> set config cray;
[...]
<Nesl> 2 + 3;
Compiling..Writing..Loading on PSC Cray C90 (mario).. Running..
Exiting..Reading..
it = 5 : int
```
```
show config;
```
This displays parameters of the current machine configuration. At a minimum, it displays the name of the configuration, the default memory size, the path to the VCODE interpreter, the path to the X11 graphics interface program `xneslplot` (see Section 5.5), and the directory used by the NESL system for temporary files. Remote configurations may also include the name of the remote machine, the `rsh` command used to start up the VCODE interpreter, and the name of the script used to submit batch jobs.
set memory_size n;
This sets the amount of memory that the VCODE interpreter allocates for data storage in the current configuration. In the standard configurations, it defaults to 1048576 (2^20) 64-bit words. See Section 2.4 for what happens when you run out of memory.
### 3.6 Background execution
NESL allows background execution of expressions. This is most useful when evaluating expressions that might take a long time to complete. It is also useful on supercomputers that allow more machine resources (memory, processors, runtime) to be used for batch jobs than for interactive jobs.
```nesl
name &= exp [,mem := n] [,max_time := n] [,config := config];
```
This evaluates the expression `exp` in the background and assigns the result to the background variable `name`. The NESL prompt returns as soon as the job has been submitted. For example:
```nesl
<Nesl> a &= sum([0:100000]);
Compiling..Writing..Submitting..
[1] 12782
background a : int
<Nesl>
```
The result is retrieved using the `get` command described below. The `&=` command has three optional arguments (an example combining them appears after the list):
- **mem**: This specifies the amount of memory the job will need. It uses the same units as used by the memory_size field of a configuration, and defaults to the value specified in the current configuration.
- **max_time**: This specifies the maximum amount of time (in seconds) that the job will run. It is normally used when submitting jobs on supercomputers, since it can be used as a safety cap in case the job goes into an infinite loop.
- **config**: This is used to specify a configuration other than the current configuration on which to run the job.
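Combining these options, a background job might be submitted as follows (the numeric values and the cray configuration are illustrative):

```
<Nesl> b &= sum([0:10000000]), mem := 4194304, max_time := 3600, config := cray;
```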
```nesl
get name;
```
This is used to retrieve a background variable. If the job is not completed, the message "Variable
waiting for result” will be printed. If the job is completed, all output generated during execution will be printed, and if there was no error the result will be assigned to the variable name. The status of background jobs can be examined with the show status command described below.
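Continuing the earlier example, once the job has completed the result can be fetched; the transcript below is illustrative of the expected behaviour:

```
<Nesl> get a;
a = 4999950000 : int
```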
3.7 Checking interpreter status
show status;
This command will report the current print_length and whether verbose mode and argument checking are turned on. It will also list all the functions that are being traced or profiled, and all the outstanding background variables and whether the corresponding jobs have completed.
<Nesl> show status;
verbose on
arg_check on
print_length = 100
traced functions:
sort
background variables:
X (done)
Y (waiting on PSC Cray C90 (mario))
3.8 Saving Nesl state
dump world [filename];
This dumps an executable Common Lisp image containing all of the current Nesl environment to filename. If filename is not specified it defaults to bin/runnesl relative to the Nesl distribution tree. The Nesl image can then be run directly rather than entering Common Lisp and loading the Nesl source files. Note that the image will typically be very large (from 3 to 35 Megabytes) and might take a long time to dump. Also, since it contains everything defined in the current session, you might want to start a fresh Nesl before executing dump world.
dump vcode filename exp;
In normal operation the Nesl system deletes the intermediate VCODE files after they have been read. This command writes a permanent copy of the VCODE program that evaluates exp to the file filename. It is normally used for reporting bugs (see Section 6.2). It can also be used for creating stand-alone VCODE applications, which can be executed by the VCODE interpreter outside of the Nesl system. This is useful for improving the startup time and memory usage of applications once they have been debugged in the Nesl system.
4 Editor Support
4.1 Using Nesl with GNU Emacs
Within the top-level Nesl directory there should be a subdirectory named emacs which contains the files nesl-mode.el and nesl.el, written by Tom Sheffler. If you use M-x load to load these files into your GNU Emacs, all files whose names end with .nesl will from then on be edited in nesl-mode (see the comments at the top of nesl-mode.el for how to load these automatically). This mode adds the following functions to GNU Emacs:
TAB Adjust indentation of current line.
C-M-x Evaluate the function containing or after point, and send it as input to the NESL process (nesl-send-defun).
C-M-a Move to the beginning of current or preceding function (beginning-of-nesl-function).
C-M-e Move to the end of current or following function (end-of-nesl-function).
C-c t Insert function type for the function containing or after the point (nesl-insert-function-type).
For nesl-mode to find the end of a function, the function needs to be terminated with a dollar sign ($) instead of a semicolon (;). In NESL, the dollar sign and semicolon can be used interchangeably to mark the end of a function definition. C-M-x only works if it can find the end of the function.
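For example, a definition intended to be sent to NESL with C-M-x can be written as:

```
function double(a) = 2*a $
```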
The following variables can be set by the user:
Variable                 Default      Documentation
nesl-process             "nesl"       String name of the inferior NESL process.
nesl-mode-hook           nil          Function hook called on entry to nesl-mode.
inferior-nesl-program    "runnesl"    Program to execute on M-x run-nesl.
nesl-indent-level        4            Indentation to be used inside NESL blocks.
A NESL subprocess can be started with M-x run-nesl.
4.2 Using NESL with other editors
set editor command;
This sets the command line that gets invoked when using edit. The argument command must be a string. When you run edit, it will prepend the string to the filename specified (with a space) and run it as a shell command. The command can therefore include flags. For example, `set editor "xterm -e vi";` will set up the editor so that it will invoke vi within a new xterm.
edit [filename];
This starts the editor (which must have previously been set with set editor) on the NESL file filename. If no filename is specified, it defaults to the last file loaded. If the filename ends with the suffix ".nesl", the suffix can be omitted. When you exit from the editor, you will be asked if you want to load the file you just edited.
5 Installing NESL
5.1 Getting the files
FTP to nesl.scandal.cs.cmu.edu (currently 128.2.198.40), login as anonymous, enter your e-mail address as the password, cd code/nesl, and get nesl.tar.Z. Depending on your FTP client you may need to set the transfer mode to binary first. Finally, uncompress and untar the file.
% ftp nesl.scandal.cs.cmu.edu
Name: anonymous
Password: me@my.site.name
ftp> cd /afs/cs/project/scandal/public/code/nesl
ftp> binary
ftp> get nesl.tar.Z
ftp> quit
% uncompress nesl.tar.Z
% tar -xf nesl.tar
(If you have GNU gzip, nesl.tar.gz is also available, and should be more compact.)
The system requires about 3.5 Megabytes of disk space uncompressed and without binaries. Some of this can be removed if you are not going to be using all of the parallel machines (see the cvl directories in the next section), or if you don’t need to keep the manuals. The biggest use of space will be for the dumped Common Lisp image you will make after building NESL; this will occupy from 3 to 35 Megabytes, depending on the version of Lisp you use and the machine on which you are running (see the discussion of building the system in Section 5.3).
5.2 Structure of NESL distribution
The nesl distribution unpacks into the following directory tree. The files in slanted font will be created during the build process.
BUILD            Directions on how to build NESL
COPYRIGHT
Makefile
README
bin/
  runnesl        The NESL executable
  vinterp.*      The VCODE interpreter, for various architectures
  xneslplot      X11 interface used for NESL graphics
  foreground--*  Scripts for executing VCODE from NESL in the foreground
  background--*  Scripts for executing VCODE from NESL in the background
  config.nesl    Definitions of configurations
CVL/
  cm2/           Source code for the CM-2 version of CVL
  cm5/           Source code for the CM-5 version of CVL
  cray/          Source code for the CRAY version of CVL
  mpi/           Source code for the MPI version of CVL
  serial/        Source code for the serial version of CVL
DOC/
  cvl.ps         The CVL manual
  manual.ps      The NESL manual
  user.ps        This user's guide
  vcode-ref.ps   The VCODE manual
EMACS/           NESL editing mode for GNU Emacs
examples/        Collection of NESL examples (see Appendix A)
include/         cvl.h include file
LIB/
  libcvl.a       The CVL library
neslseqsrc/      Source code for stand-alone NESL
neslsrc/         Source code for NESL
5.3 Building NESL
Once you have unpacked the NESL distribution, the following steps should be sufficient to build a version of NESL to run on your local workstation:
1. Run make from the top-level NESL directory. This builds CVL, VCODE, and xneslplot, leaving vinterp.serial and xneslplot in the bin directory.
2. Start a Common Lisp (either GNU, Allegro, CMU, or Lucid) in the top-level NESL directory, and enter (load "neslsrc/build.lisp").
3. Follow the instructions for dumping an executable version of NESL. This will create a file bin/runnesl, which can be executed directly to start NESL.
4. The simplest test of the system is to enter 1+1; which should exercise all the phases of the system as explained in Section 2.2. For a more complete test, try load "neslsrc/test"; followed by testall(0); which runs through a series of test functions.
The rest of this section discusses what can be changed if the above procedure does not work or if you don't want to create a dumped NESL Lisp image. The next section discusses how to set up configurations for remote execution.
The C compiler: The default C compiler and optimization level is gcc -O2. This can be changed by setting the variable CC at the top of the Makefile.
Making CVL, VCODE and xneslplot separately: The top-level Makefile recursively calls make in the subdirectories cvl/serial, vcode, and utils, and then moves the result to either the lib or bin directory. These builds can also be done by hand if necessary.
Compiling CVL and VCODE for a Connection Machine, Cray, or Maspar: To build a version of the VCODE interpreter for a CM-2, CM-5, Cray or Maspar, you will need to make cm2, make cm5, make cray or make maspar.
Compiling CVL and VCODE for MPI: To build a version of the VCODE interpreter using MPI, you will need to do some customization. Read the cm5.mpi, paragon.mpi and spi.mpi entries in the top-level Makefile, and modify one of them (and the appropriate mpiCC and mpi-dir variables) to match your MPI installation. Note that only the MPICH implementation of MPI from Argonne/Mississippi State is directly supported by this release, and that ANSI C is required. See the Makefile and README files in the vcode and cvl/mpi subdirectories for further details. A comparison of the CM-2, CM-5 and MPI versions is given in [5].
Compiling for multiple serial architectures: If you want versions of Nesl for multiple serial architectures, you will need copies of vinterp.serial, xneslplot and runnesl for each architecture. If you are using a shared file system, before building a version for a new machine, you should run make clean from the top-level directory to clean up any old object files. Before dumping the executable image of Nesl (runnesl), you should make sure that the configuration points to the correct versions of vinterp.serial and xneslplot (see the next section for an explanation of configurations).
Compiling for Linux: To compile Nesl for Linux, do a make linux. The Linux version assumes you have GNU Common Lisp, the GNU C compiler, flex and bison.
Avoiding dumping Nesl: Because Common Lisp images can be quite large, the Nesl executable (runnesl) may require a comparatively large amount of space: between 3 and 35 Megabytes (the exact amount depends on which version of Common Lisp you are using, and can be as big as 35 Megabytes for Lucid Common Lisp). If this is too much space for your liking, then the variable *nesl-path* should be set in the file neslsrc/load.lisp, and the user should start up Common Lisp and load neslsrc/load.lisp each time they want to run Nesl. This will load in the compiled files and be much faster than doing a build, but not as fast as starting up a dumped executable.
Portable pseudorandom numbers: For users who want a pseudorandom number generator that is portable across parallel machines, we supply hooks in the MPI and CM-5 code to use the additive lagged-Fibonacci generator described in [6]. This is available to US residents via FTP from ftp://ftp.super.org/pub/mascagni/lfibrng6a.tar.Z. See the appropriate CVL Makefile for instructions on how to enable the hooks.
What can be deleted?: After building working versions of the three binaries vinterp.serial, xneslplot and runnesl, the only files that are necessary for running Nesl are all in the bin directory: anything else can be deleted, if so desired. If you run make clean in the top-level directory, it will remove all unnecessary object files.
5.4 Building Stand-alone Nesl
This release of Nesl includes an experimental stand-alone version of the language that does not use VCODE or CVL. This version runs inside a Common Lisp environment and is limited in several respects:
1. no graphics
2. no remote execution
3. no tracing
4. no profiling
5. no spawn function
6. significantly slower on large problems
However, this version is much easier to build and is a good way for a new user to experiment with Nesl.
To build the stand-alone Nesl, first cd to the top-level Nesl directory and then start up Common Lisp. Load the Nesl files into Lisp with (load "neslseqsrc/load.lisp"). To start up Nesl, type (nesl).
5.5 Machine configurations
The top-level Nesl command defconfig is used to define new configurations. Machine configurations are mostly used for remote execution, but can also be used to define aspects of the environment for local execution, such as the amount of memory allocated by the VCODE process, or a VCODE interpreter file other than the default. This section describes defconfig and outlines how remote execution is implemented. The mechanism for remote execution was designed to be quite flexible; we hope that you will be able to adapt it to the idiosyncrasies of your environment.
Machine configurations for your local site should be defined in the config.nesl file in the top-level directory. This file includes several example configurations. Users can also specify their own configurations by using defconfig either from the interpreter or within a file (it is common to put defconfigs within the .nesl init file). The syntax for defconfig is:
```
defconfig name [, memory_size := n] [, interp_file := str] [, temp_dir := str]
    [, rsh_command := str] [, machine_name := str] [, plot_file := str]
    [, foreground_command := str] [, background_command := str]
    [, max_time := n] [, arguments := str];
```
This command takes several optional arguments, described below; they can appear in any order (an example follows the list). In its simplest form, defconfig name, it defines a configuration with all the default settings. In the following, nesl_path refers to the pathname of the Nesl distribution.
- **memory_size**: This specifies the memory size used by the VCODE interpreter. The default is 1048576 (2^20) double-precision floating-point numbers (64 bits each on most machines).
- **interp_file**: The executable file for the interpreter for this configuration. The default is nesl_path/bin/vinterp.
- **temp_dir**: This is the directory to which the VCODE source file and the output from the VCODE interpreter are written each time an expression is executed. If the remote machine shares a file system with the local machine, this should be a shared directory so that both Nesl (running locally) and the VCODE interpreter (running remotely) can access it. The default is /tmp/.
- **rsh_command**: This is the command used to initiate remote execution. If both the local and remote machine support rsh and the user name is the same locally as remotely, then this can simply be rsh machinename. If the remote user name is different, then rsh -l username machinename should work. Of course, the user needs to set up .rhosts appropriately at the remote host (see your local manual page for rsh(1), and note that some sites may restrict its use because of security considerations). If your system does not support rsh but supports some other command for remote procedure calls, it may be possible to substitute that. The default for rsh_command is the empty string, specifying that the VCODE interpreter should be executed locally.
- **machine_name**: This is used to specify the remote machine's name. It is only used to print messages for the user, so it need not be the "official" name. The default is the empty string.
- **plot_file**: The Nesl graphics routines work by starting a subprocess from within the Vcode interpreter using the spawn function. This subprocess is then passed commands from the interpreter through a pipe to its standard input, and translates them into X11 calls. The file that is used to invoke the subprocess is specified by plot_file. The default is nesl_path/bin/xneslplot.
- **foreground_command**: The shell script used for foreground execution. The script is first searched for on the user's path, and then in *nesl_path*/bin. The default is foreground-unix.
- **background_command**: The shell script used for background execution. It is searched for as above. The default is background-unix.
- **max_time**: The maximum number of seconds allowed for background jobs. This can be overridden with the max_time option of the & command.
- **arguments**: This string is passed directly to the shell scripts specified by foreground_command and background_command. It can be used for various purposes, such as specifying the number of processors for the CM-2 or CM-5 implementations. The default is the empty string.
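For example, a configuration that runs the interpreter locally with a larger memory size could be defined roughly as follows. The name and values are purely illustrative, not defaults from the distribution; see the examples in config.nesl for the exact string syntax used at your site.

```
defconfig big_local, memory_size := 4194304,
                     temp_dir := "/tmp/",
                     machine_name := "local workstation";
```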
Remote execution works as follows: After Nesl writes out the vcode file, it starts up a subprocess by executing one of the background-* or foreground-* scripts from the bin directory. The script to be used is specified by the foreground_command and background_command in the configuration definition. Nesl passes these scripts the following 7 arguments:
1. **rsh_command**: This is passed directly from the configuration. The scripts are set up so that if this argument is the empty string, the interpreter will run locally.
2. **interp_file**: Passed directly from the configuration.
3. **memory_size**: Passed directly from the configuration.
4. **temp_dir**: Passed directly from the configuration.
5. **job_ident**: A unique job identifier (used to generate file names).
6. **max_time**: Passed directly from the configuration.
7. **arguments**: Passed directly from the configuration.
Nesl always writes the Vcode program to the file temp_dir/job_ident.code. Vcode, in turn, writes the result to the file temp_dir/job_ident.output, where Nesl expects to find it. In background mode, two additional files, temp_dir/job_ident.err and temp_dir/job_ident.check, are created. The err file stores all output generated during the execution of the Vcode interpreter, including any errors. The check file is written after the Vcode interpreter has completed, so that Nesl can determine when it can read the result. If the job was successful, all of these files are deleted after being read.
It might be necessary to create new background and foreground scripts for your local site. Looking at the existing scripts should help in defining new ones.
6 Bugs
6.1 Current bugs
show bugs;
This displays a list of known bugs, and possible workarounds, in the current release of the NESL system.
6.2 How to report bugs
If you find a bug not listed by show bugs, please send a bug report to nesl-bugs@cs.cmu.edu. You can help us identify and correct the bug by first finding the smallest example demonstrating the bug and by including the following information in your bug report. Include the NESL source and, in addition, the VCODE output from the NESL compiler using the dump vcode command. Use the dump info command to generate a description of your Lisp and hardware platform. If you found the bug on a parallel machine, does your local serial configuration exhibit the same problem?
We will try to respond to your bug report promptly, but can make no guarantees – NESL is a research tool rather than a production system.
If you make any improvements to the system, please send them to us so we can incorporate them in future versions.
Acknowledgements
Margaret Reid-Miller and Girija Narlikar provided useful comments on this manual. Tom Sheffler implemented the GNU Emacs \texttt{nesl-mode}. Martin Santavy suggested several of the features included in the \texttt{NESL} system.
References
A NESL Examples
A.1 Example code in distribution
The subdirectory examples contains the following examples of NESL code:
- **adaptive-integration** Adaptive integration of single-variable functions.
- **awerbuch-shiloach** Algorithm for finding the connected components of a graph.
- **convex-hull** The QuickHull algorithm for finding convex hulls. Includes a graphics demo.
- **hash-table** An implementation of a parallel hash table.
- **line-fit** Least-squares fit of a line to a set of points.
- **micro-shell** A micro shell that keeps track of current directory and executes commands.
- **nas-cg** The NAS conjugate gradient benchmark.
- **median** Recursively finds the \( k \)th largest element of a vector.
- **primes** Work-efficient parallel implementation of the prime sieve algorithm.
- **separator** Geometric separator code. Includes a graphics demo of it running on an airfoil.
- **sort** Various sorts: quicksort, Batcher's bitonic sort and Batcher's odd-even mergesort.
- **spectral** Spectral separator code. Includes a graphics demo using everyone's favorite airfoil.
- **string-search** Fast string search algorithm.
The class notes [3] contain other examples.
A.2 Sample session
This transcript of a NESL session shows many of the language features.
A.2.1 Scalar operations
```
<Nesl> 2 * (3 + 4);
Compiling..Writing..Loading..Running..
Exiting..Reading..
it = 14 : int
<Nesl> set verbose off; % turns off verbose compiler messages %
<Nesl> (2.2 + 1.1) / 5.0;
it = 0.66 : float
<Nesl> t or f;
it = T : bool
<Nesl> 'a < 'd; % that's a backquote, not a quote %
it = T : bool
```
<Nesl> 3;
it = 3 : int
<Nesl> 1.6 + 7; % these aren't the same type %
Error at top level.
For function + in expression
1.6 + 7
inferred argument types don't match function specification.
Argument types: float, int
Function types: a, a :: (a in number)
<Nesl> 1.6 + float(7);
it = 8.6 : float
<Nesl> sin(.6);
it = 0.56464247395035 : float
<Nesl> a = 4;
a = 4 : int
<Nesl> a + 5;
it = 9 : int
<Nesl> if (4 < 5) then 11 else 12;
it = 11 : int
<Nesl> let a = 3 * 4 % the '>' is a prompt for you to enter more %
in a + (a * 5);
it = 72 : int
<Nesl> let a = 3 * 4;
> b = a + 5
> in a + b;
it = 29 : int
<Nesl> function fact(i) = % you can define functions at top level %
> if (i == 1)
> then 1
> else i * fact(i-1);
fact = fn : int -> int
<Nesl> fact(5);
it = 120 : int
<Nesl> function circarea(r) = pi * r * r; % pi is predefined %
circarea = fn : float -> float
<Nesl> circarea(3.0);
it = 28.2743338823081 : float
<Nesl> (2, 'a);
it = (2, 'a) : int, char
<Nesl> function div_rem(a, b) = (a / b, rem(a, b));
div_rem = fn : (int, int) -> (int, int)
<Nesl> div_rem (20, 6);
it = (3, 2) : int, int
A.2.2 Vector operations
<Nesl> [2, 5, 1, 3];
it = [2, 5, 1, 3] : [int]
<Nesl> "this is a vector";
it = "this is a vector" : [char]
<Nesl> [(2, 3.4), (8, 8.9)]; % a vector of tuples %
it = [(2, 3.4), (8, 8.9)] : [(int, float)]
<Nesl> ["this", "is", "a", "nested", "vector"];
it = ["this", "is", "a", "nested", "vector"] : [[char]]
<Nesl> [2, 3.0, 4]; % vectors must have homogeneous elements %
Error at top level.
For function make_sequence in expression
[2, 3.0]
inferred argument types don't match function specification.
Argument types: [int], float
Function types: [a], a :: (a in any)
<Nesl> {a + 1: a in [2, 3, 4]};
it = [3, 4, 5] : [int]
<Nesl> let a = [2, 3, 4] in {a + 1: a};
it = [3, 4, 5] : [int]
<Nesl> {a + b: a in [2, 3, 4]; b in [4, 5, 6]};
it = [6, 8, 10] : [int]
<Nesl> let a = [2, 3, 4]; b = [4, 5, 6] in {a + b: a; b};
it = [6, 8, 10] : [int]
<Nesl> {a == b: a in "this"; b in "that"};
it = [T, T, F, F] : [bool]
<Nesl> {fact(a): a in [1, 2, 3, 4, 5]};
it = [1, 2, 6, 24, 120] : [int]
<Nesl> {div_rem(100, a): a in [5, 6, 7, 8]};
it = [(20, 0), (16, 4), (14, 2), (12, 4)] : [(int, int)]
<Nesl> sum([2, 3, 4]);
it = 9 : int
<Nesl> dist(5, 10);
it = [5, 5, 5, 5, 5, 5, 5, 5, 5, 5] : [int]
<Nesl> [2:50:3];
it = [2, 5, 8, 11, 14, 17, 20, 23, 26, 29, 32, 35, 38, 41, 44, 47] : [int]
<Nesl> "big" ++ " boy";
it = "big boy" : [char]
<Nesl> {x in "wombat" | x <= 'm};
it = "mba" : [char]
<Nesl> {sum(a): a in [[2, 3, 4], [1], [7, 8, 9]]};
it = [9, 1, 24] : [int]
<Nesl> bottop("testing"); % split sequence into two parts %
it = ["test", "ing"] : [[char]]
<Nesl> partition("break into words", [5, 5, 6]);
it = ["break", " into", " words"] : [[char]]
<Nesl> function my_sum(a) =
> if (#a == 1) then a[0]
> else
> let res = {my_sum(x): x in bottop(a)}
> in res[0] + res[1];
my_sum = fn : [a] -> a :: (a in number)
<Nesl> my_sum([7, 2, 6]);
it = 15 : int
A.2.3 An example: string searching
The algorithm shown here is explained in [3]. The example illustrates the way in which Nesl functions can be developed “from the inside out”, using the interactive system to test each new addition.
<Nesl> teststr = "string small strap asop string";
teststr = "string small strap asop string" : [char]
<Nesl> candidates = [0:#teststr-5];
candidates = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24] : [int]
<Nesl> {a == 's: a in teststr -> candidates};
it = [T, F, F, F, F, F, F, T, F, F, F, F, F, T, F, F, F, F, F, F, T, F, F, F, T] : [bool]
<Nesl> candidates = {c in candidates;
> a in teststr -> candidates | a == 's};
candidates = [0, 7, 13, 20, 24] : [int]
<Nesl> candidates = {c in candidates;
> a in teststr -> {candidates+1:candidates}
> | a == 't};
candidates = [0, 13, 24] : [int]
<Nesl> candidates = {c in candidates;
> a in teststr -> {candidates+2:candidates}
> | a == 'r};
candidates = [0, 13, 24] : [int]
<Nesl> candidates = {c in candidates;
> a in teststr -> {candidates+3:candidates}
> | a == 'i};
candidates = [0, 24] : [int]
<Nesl> candidates = {c in candidates;
> a in teststr -> {candidates+4:candidates}
> | a == 'n};
candidates = [0, 24] : [int]
<Nesl> function next_cands(cands, w, s, i) =
> if (i == #w) then cands
> else
> let letter = w[i];
> next_chars = s -> {cands + i: cands};
> new_cands = {c in cands; l in next_chars | l == letter}
> in next_cands(new_cands, w, s, i + 1);
next_cands = fn : ([int], [a], [a], int) -> [int] :: (a in ordinal)
<Nesl> function string_search(w, s) =
> next_cands([0:#s - (#w - 1)], w, s, 0);
string_search = fn : ([a], [a]) -> [int] :: (a in ordinal)
<Nesl> longstr =
> "This course will be a hands on class on programming parallel
algorithms. It will introduce several parallel data structures and a
variety of parallel algorithms, and then look at how they can be
programmed. The class will stress the clean and concise expression of
parallel algorithms and will present the opportunity to program
non-trivial parallel algorithms and run them on a few different
parallel machines. The course should be appropriate for graduate
students in all areas and for advanced undergraduates. The
prerequisite is an algorithms class. Undergraduates also require
permission of the instructor.";
longstr =
"This course will be a hands on class on programming parallel
algorithms. It will introduce several parallel data structures and a variety of parallel algorithms, and then look at how they can be programmed. The class will stress the clean and concise expression of parallel algorithms and will present the opportunity to program non-trivial parallel algorithms and run them on a few different parallel machines. The course should be appropriate for graduate students in all areas and for advanced undergraduates. The prerequisite is an algorithms class. Undergraduates also require permission of the instructor."
: [char]
<Nesl> string_search("will", longstr);
it = [12, 77, 219, 291] : [int]
<Nesl> string_search("student", longstr);
it = [461] : [int]
Index
> continuation character, 5
|=, 8
&=, 13
apropos, 7
argument checking, 9
background execution, 13, 14
bug reporting, 21
bugs, 21
building Nesl, 17
Common Lisp, 2, 3, 5, 6, 14
config.nesl, 16, 19
configurations, 12, 19
Ctrl-C, 5
Ctrl-D, 6
Cvl, 16, 17
defconfig, 12, 19
describe, 7
dump info, 21
dump vcode, 14, 21
dump world, 14
dump, 6
dump variables, 2
FTP, 2, 15
garbage collection, 6
get, 13
gnu-emacs, 14, 16
errors, 5
exit, 6
file variables, 8
init file, 6, 8, 19
Linux, 18
lisp, 6
load, 7
mailing lists, 2, 21
memory size, 5, 6, 13, 19
Nesl distribution, 16
Nesl system requirements, 2, 16
nesl-bugs mailing list, 21
nesl-request mailing list, 2
.nesl init file, 6, 8, 19
nested sequences, 8, 10
out of memory error, 5
patches, 2
print length, 6, 8
profiling, 10, 12, 14
redefining, 6, 8
remote execution, 12, 20
runnesl, 3, 14
runtime errors, 9
set arg.check, 9
set config, 6, 12
set editor, 6, 15
set memory.size, 5, 6, 13, 19
set print.length, 6, 8
set profile, 10, 12
set trace, 9, 10
set verbose, 8
set memory.size, 6
shell scripts, 20
show bugs, 21
show code, 7
show config, 12
show configs, 12
show status, 14
stand-alone Nesl, 2, 18
starting Nesl, 3
status, 14
temp.dir, 8
timing, 10
toplevel commands, 3
toplevel expressions, 3
trace_string_depth, 10
trace_string_length, 10
tracing, 9, 10, 14
Vcode, 3, 14
Vcode interpreter, 5, 12, 13, 19
verbose mode, 8
World Wide Web, 2
xneslplot, 12, 16, 17, 20
Recall: Multithreaded Stack Example
- Consider the following code blocks:
```plaintext
proc A() {
B();
}
proc B() {
while(TRUE) {
yield();
}
}
```
- Suppose we have 2 threads:
- Threads S and T
Thread S's switch returns to Thread T's switch (and vice versa)
Recall: Use of Timer Interrupt to Return Control
- Solution to our dispatcher problem
- Use the timer interrupt to force scheduling decisions
- Timer Interrupt routine:
```plaintext
TimerInterrupt() {
DoPeriodicHouseKeeping();
run_new_thread();
}
```
Hardware context switch support in x86
- Syscall/Intr (U → K)
- PL 3 → 0;
- TSS ← EFLAGS, CS:EIP;
- SS:ESP ← k-thread stack (TSS PL 0);
- push (old) SS:ESP onto (new) k-stack
- push (old) eflags, cs:eip, <err>
- CS:EIP ← <k target handler>
- Then
- Handler then saves other regs, etc
- Does all its works, possibly choosing other threads, changing PTBR (CR3)
- kernel thread has set up user GPRs
- iret (K → U)
- PL 0 → 3;
- EFLAGS, CS:EIP ← popped off k-stack
- SS:ESP ← popped off k-stack
Recall: Fix banking problem with Locks!
- Identify critical sections (atomic instruction sequences) and add locking:
```
Deposit acctId, amount {
Lock.acquire() // Wait if someone else in critical section!
acct = GetAccount(acctId);
acct.balance += amount;
StoreAccount(acct);
Lock.release() // Release someone into critical section
}
```
- Must use SAME lock with all of the methods (Withdraw, etc…)
Recall: Red-Black tree example
- Here, the Lock is associated with the root of the tree
- Restricts parallelism but makes sure that tree always consistent
- No races at the operation level
- Threads exchange information through a consistent data structure
- Could you make it faster with one lock per node? Perhaps, but must be careful!
- Need to define invariants that are always true despite many simultaneous threads
Producer-Consumer with a Bounded Buffer
- **Problem Definition**
- Producer(s) put things into a shared buffer
- Consumer(s) take them out
- Need synchronization to coordinate producer/consumer
- Don’t want producer and consumer to have to work in lockstep, so put a fixed-size buffer between them
- Need to synchronize access to this buffer
- Producer needs to wait if buffer is full
- Consumer needs to wait if buffer is empty
- Example 1: GCC compiler
- `cpp | cc1 | cc2 | as | ld`
- Example 2: Coke machine
- Producer can put limited number of Cokes in machine
- Consumer can’t take Cokes out if machine is empty
- Others: Web servers, Routers, ….
Circular Buffer Data Structure (sequential case)
```c
typedef struct buf {
int write_index;
int read_index;
<type> *entries[BUFSIZE];
} buf_t;
```
- Insert: write & bump write ptr (enqueue)
- Remove: read & bump read ptr (dequeue)
- How to tell if Full (on insert) Empty (on remove)?
- And what do you do if it is?
- What needs to be atomic?
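For reference, here is a minimal sequential sketch of the enqueue/dequeue logic described above (not from the slides). It uses the common convention of leaving one slot unused so that full and empty can be distinguished; other conventions (e.g., a separate count field) work too:

```c
#include <stddef.h>

#define BUFSIZE 16                 /* illustrative size */

typedef struct buf {
    int  write_index;
    int  read_index;
    void *entries[BUFSIZE];
} buf_t;

static int is_empty(buf_t *b) { return b->read_index == b->write_index; }
static int is_full(buf_t *b)  { return (b->write_index + 1) % BUFSIZE == b->read_index; }

/* Insert: write & bump write ptr (enqueue); returns 0 on success, -1 if full */
static int enqueue(buf_t *b, void *item) {
    if (is_full(b)) return -1;
    b->entries[b->write_index] = item;
    b->write_index = (b->write_index + 1) % BUFSIZE;
    return 0;
}

/* Remove: read & bump read ptr (dequeue); returns NULL if empty */
static void *dequeue(buf_t *b) {
    if (is_empty(b)) return NULL;
    void *item = b->entries[b->read_index];
    b->read_index = (b->read_index + 1) % BUFSIZE;
    return item;
}
```

In the concurrent versions on the following slides, the index updates and the full/empty checks are exactly the pieces that must be made atomic.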
Circular Buffer – first cut
```c
mutex buf_lock = <initially unlocked>

Producer(item) {
  acquire(&buf_lock);
  while (buffer full) {}   // Wait for a free slot
  enqueue(item);
  release(&buf_lock);
}

Consumer() {
  acquire(&buf_lock);
  while (buffer empty) {}  // Wait for arrival
  item = dequeue();
  release(&buf_lock);
  return item;
}
```
Will we ever come out of the wait loop?
Circular Buffer – 2nd cut
```c
mutex buf_lock = <initially unlocked>
Producer(item) {
acquire(&buf_lock);
while (buffer full) {
release(&buf_lock);
acquire(&buf_lock);
}
enqueue(item);
release(&buf_lock);
}
Consumer() {
acquire(&buf_lock);
while (buffer empty) {
release(&buf_lock);
acquire(&buf_lock);
}
item = dequeue();
release(&buf_lock);
return item
}
```
What happens when one is waiting for the other?
- Multiple cores?
- Single core?
Higher-level Primitives than Locks
- What is right abstraction for synchronizing threads that share memory?
- Want as high a level primitive as possible
- Good primitives and practices important!
- Since execution is not entirely sequential, really hard to find bugs, since they happen rarely
- UNIX is pretty stable now, but up until about the mid-80s (10 years after it started), systems running UNIX would crash every week or so – concurrency bugs
- Synchronization is a way of coordinating multiple concurrent activities that are using shared state
- This lecture and the next presents some ways of structuring sharing
Recall: Semaphores
- Semaphores are a kind of generalized lock
- First defined by Dijkstra in late 60s
- Main synchronization primitive used in original UNIX
- Definition: a Semaphore has a non-negative integer value and supports the following two operations:
- Down() or P(): an atomic operation that waits for semaphore to become positive, then decrements it by 1
» Think of this as the wait() operation
- Up() or V(): an atomic operation that increments the semaphore by 1, waking up a waiting P, if any
» Think of this as the signal() operation
- Note that P() stands for "proberen" (to test) and V() stands for "verhogen" (to increment) in Dutch
Semaphores Like Integers Except…
- Semaphores are like integers, except:
- No negative values
- Only operations allowed are P and V – can’t read or write value, except initially
- Operations must be atomic
» Two P’s together can’t decrement value below zero
» Thread going to sleep in P won’t miss wakeup from V – even if both happen at same time
- POSIX adds ability to read value, but technically not part of proper interface!
- Semaphore from railway analogy
- Here is a semaphore initialized to 2 for resource control:
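As a concrete rendering (POSIX, not from the slides) of a semaphore initialized to 2 that limits how many threads use a resource at once:

```c
#include <semaphore.h>
#include <pthread.h>
#include <stdio.h>

static sem_t track;   /* counting semaphore: at most 2 threads in the resource at once */

static void *worker(void *arg) {
    sem_wait(&track);                                  /* P(): wait until a unit is available */
    printf("thread %ld using resource\n", (long)arg);
    sem_post(&track);                                  /* V(): release the unit */
    return NULL;
}

int main(void) {
    pthread_t t[4];
    sem_init(&track, 0, 2);                            /* initial value 2: two units available */
    for (long i = 0; i < 4; i++)
        pthread_create(&t[i], NULL, worker, (void *)i);
    for (int i = 0; i < 4; i++)
        pthread_join(t[i], NULL);
    sem_destroy(&track);
    return 0;
}
```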
Two Uses of Semaphores
Mutual Exclusion (initial value = 1)
- Also called “Binary Semaphore” or “mutex”.
- Can be used for mutual exclusion, just like a lock:
```
semaP(&mysem);
// Critical section goes here
semaV(&mysem);
```
Scheduling Constraints (initial value = 0)
- Allow thread 1 to wait for a signal from thread 2
- thread 2 schedules thread 1 when a given event occurs
- Example: suppose you had to implement ThreadJoin which must wait for thread to terminate:
```
Initial value of semaphore = 0
ThreadJoin {
semaP(&mysem);
}
ThreadFinish {
semaV(&mysem);
}
```
Revisit Bounded Buffer: Correctness constraints for solution
- **Correctness Constraints:**
- Consumer must wait for producer to fill buffers, if none full (scheduling constraint)
- Producer must wait for consumer to empty buffers, if all full (scheduling constraint)
- Only one thread can manipulate buffer queue at a time (mutual exclusion)
- **Remember why we need mutual exclusion**
- Because computers are stupid
- Imagine if in real life: the delivery person is filling the machine and somebody comes up and tries to stick their money into the machine
- **General rule of thumb:** *Use a separate semaphore for each constraint*
- Semaphore fullBuffers; // consumer's constraint
- Semaphore emptyBuffers; // producer's constraint
- Semaphore mutex; // mutual exclusion
**Full Solution to Bounded Buffer (coke machine)**
```c
Semaphore fullSlots = 0;          // Initially, no coke
Semaphore emptySlots = bufSize;   // Initially, num empty slots
Semaphore mutex = 1;              // No one using machine

Producer(item) {
  semaP(&emptySlots);   // Wait until space
  semaP(&mutex);        // Wait until machine free
  Enqueue(item);
  semaV(&mutex);
  semaV(&fullSlots);    // Tell consumers there is more coke
}

Consumer() {
  semaP(&fullSlots);    // Check if there's a coke
  semaP(&mutex);        // Wait until machine free
  item = Dequeue();
  semaV(&mutex);
  semaV(&emptySlots);   // Tell producer there's an empty slot
  return item;
}
```
**Discussion about Solution**
- **Why asymmetry?**
- Producer does: `semaP(&emptyBuffer), semaV(&fullBuffer)`
- Consumer does: `semaP(&fullBuffer), semaV(&emptyBuffer)`
- **Is order of P's important?**
- Decrease # of empty slots
- Increase # of occupied slots
- **Is order of V's important?**
- Decrease # of occupied slots
- Increase # of empty slots
- **What if we have 2 producers or 2 consumers?**
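The bounded-buffer solution above can also be written with POSIX primitives. This sketch (not from the slides) uses two counting semaphores for the scheduling constraints and a pthread mutex for mutual exclusion, with an int buffer standing in for the coke machine:

```c
#include <semaphore.h>
#include <pthread.h>

#define BUFSIZE 8                        /* illustrative size, not from the slides */

static int buffer[BUFSIZE];
static int write_idx = 0, read_idx = 0;

static sem_t fullSlots;                   /* counts filled slots (init 0) */
static sem_t emptySlots;                  /* counts empty slots  (init BUFSIZE) */
static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

void producer(int item) {
    sem_wait(&emptySlots);                /* semaP(&emptySlots): wait until space */
    pthread_mutex_lock(&mutex);           /* semaP(&mutex): wait until buffer free */
    buffer[write_idx] = item;             /* Enqueue(item) */
    write_idx = (write_idx + 1) % BUFSIZE;
    pthread_mutex_unlock(&mutex);         /* semaV(&mutex) */
    sem_post(&fullSlots);                 /* semaV(&fullSlots): one more item */
}

int consumer(void) {
    sem_wait(&fullSlots);                 /* semaP(&fullSlots): wait for an item */
    pthread_mutex_lock(&mutex);           /* semaP(&mutex) */
    int item = buffer[read_idx];          /* item = Dequeue() */
    read_idx = (read_idx + 1) % BUFSIZE;
    pthread_mutex_unlock(&mutex);         /* semaV(&mutex) */
    sem_post(&emptySlots);                /* semaV(&emptySlots): one more empty slot */
    return item;
}

void init_buffer(void) {                  /* call once before starting threads */
    sem_init(&fullSlots, 0, 0);
    sem_init(&emptySlots, 0, BUFSIZE);
}
```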
**Administrivia**
- **Midterm 1:** October 1st, 5-7PM (Three weeks from tomorrow!)
- We understand that this partially conflicts with CS170, but those of you in CS170 can start that exam after 7PM (according to CS170 staff)
- Video Proctored, No curve, Use of computer to answer questions
- More details as we get closer to exam
- **Midterm Review:** Tuesday September 29th, 7-9pm
- Details TBA
Where are we going with synchronization?
- We are going to implement various higher-level synchronization primitives using atomic operations
- Everything is pretty painful if only atomic primitives are load and store
- Need to provide primitives useful at user-level
Motivating Example: “Too Much Milk”
- Great thing about OS’s – analogy between problems in OS and problems in real life
- Help you understand real life problems better
- But, computers are much stupider than people
- Example: People need to coordinate:
<table>
<thead>
<tr>
<th>Time</th>
<th>Person A</th>
<th>Person B</th>
</tr>
</thead>
<tbody>
<tr>
<td>3:00</td>
<td>Look in Fridge. Out of milk</td>
<td></td>
</tr>
<tr>
<td>3:05</td>
<td>Leave for store</td>
<td></td>
</tr>
<tr>
<td>3:10</td>
<td>Arrive at store</td>
<td>Look in Fridge. Out of milk</td>
</tr>
<tr>
<td>3:15</td>
<td>Buy milk</td>
<td>Leave for store</td>
</tr>
<tr>
<td>3:20</td>
<td>Arrive home, put milk away</td>
<td>Arrive at store</td>
</tr>
<tr>
<td>3:25</td>
<td>Buy milk</td>
<td></td>
</tr>
<tr>
<td>3:30</td>
<td>Arrive home, put milk away</td>
<td></td>
</tr>
</tbody>
</table>
Recall: What is a lock?
- **Lock**: prevents someone from doing something
- Lock before entering critical section and before accessing shared data
- Unlock when leaving, after accessing shared data
- **Wait** if locked
- Important idea: all synchronization involves waiting
- For example: fix the milk problem by putting a key on the refrigerator
- Lock it and take key if you are going to go buy milk
- Fixes too much: roommate angry if only wants OJ
Too Much Milk: Correctness Properties
- Need to be careful about correctness of concurrent programs, since non-deterministic
- Impulse is to start coding first, then when it doesn’t work, pull hair out
- Instead, think first, then code
- Always write down behavior first
- What are the correctness properties for the “Too much milk” problem???
- Never more than one person buys
- Someone buys if needed
- First attempt: Restrict ourselves to use only atomic load and store operations as building blocks
- Of Course – We don’t know how to make a lock yet
- Let’s see if we can answer this question!
Too Much Milk: Solution #1
• Use a note to avoid buying too much milk:
– Leave a note before buying (kind of “lock”)
– Remove note after buying (kind of “unlock”)
– Don't buy if note (wait)
• Suppose a computer tries this (remember, only memory read/write are atomic):
if (noMilk)
if (noNote)
leave Note;
buy milk;
remove note;
• Result?
– Still too much milk but only occasionally!
– Thread can get context switched after checking milk and note but before buying milk!
• Solution makes problem worse since fails intermittently
– Makes it really hard to debug...
– Must work despite what the dispatcher does!
Too Much Milk: Solution #1
• Use a note to avoid buying too much milk:
– Leave a note before buying (kind of “lock”)
– Remove note after buying (kind of “unlock”)
– Don't buy if note (wait)
• Suppose a computer tries this (remember, only memory read/write are atomic):
Thread A
if (noMilk)
if (noNote)
leave Note;
buy milk;
remove Note;
Thread B
if (noMilk)
if (noNote)
leave Note;
buy Milk;
remove Note;
• What happens here?
– Well, with humans, probably nothing bad
– With computers: both threads can check the milk and the note before either leaves its note, so both may buy – still too much milk!
Too Much Milk: Solution #1½
• Clearly the Note is not quite blocking enough
– Let’s try to fix this by placing note first
• Another try at previous solution:
leave Note;
if (noMilk)
if (noNote)
buy milk;
remove Note;
• What happens here?
– Well, with human, probably nothing bad
– With computer: no one ever buys milk
Too Much Milk Solution #2
• How about labeled notes?
– Now we can leave note before checking
• Algorithm looks like this:
Thread A:
leave note A;
if (noNote B) {
   if (noMilk) {
      buy Milk;
   }
}
remove note A;

Thread B:
leave note B;
if (noNote A) {
   if (noMilk) {
      buy Milk;
   }
}
remove note B;
• Does this work?
• Possible for neither thread to buy milk
– Context switches at exactly the wrong times can lead each to think that
the other is going to buy
• Really insidious:
– Extremely unlikely this would happen, but will at worse possible time
– Probably something like this in UNIX
Too Much Milk Solution #2: problem!
• I’m not getting milk, You’re getting milk
• This kind of lockup is called “starvation!”
Too Much Milk Solution #3
• Here is a possible two-note solution:
Thread A:
leave note A;
while (note B) {     // X
   do nothing;
}
if (noMilk) {
   buy milk;
}
remove note A;

Thread B:
leave note B;
if (noNote A) {      // Y
   if (noMilk) {
      buy milk;
   }
}
remove note B;
• Does this work? Yes. Both can guarantee that:
– It is safe to buy, or
– Other will buy, ok to quit
• At X:
– If no note B, safe for A to buy,
– Otherwise wait to find out what will happen
• At Y:
– If no note A, safe for B to buy
– Otherwise, A is either buying or waiting for B to quit
Case 1
• "leave note A" happens before "if (noNote A)"
– Thread B sees note A, so it skips buying and removes note B
– Thread A spins in "while (note B)" until note B is removed (wait for note B to be removed), then checks the milk and buys if needed
Case 2
• "if (noNote A)" happens before "leave note A"
– Thread B sees no note A, so it buys milk if needed and removes note B
– Thread A then falls out of its wait loop; the milk is already there, so A does not buy
This Generalizes to n Threads...
- Leslie Lamport’s “Bakery Algorithm” (1974)
"A New Solution of Dijkstra's Concurrent Programming Problem," Leslie Lamport, Massachusetts Computer Associates, Inc. (Communications of the ACM, 1974). From the abstract: "A simple solution to the mutual exclusion problem is presented which allows the system to continue to operate despite the failure of any individual component."
Solution #3 discussion
- Our solution protects a single “Critical-Section” piece of code for each thread:
```
if (noMilk) {
buy milk;
}
```
- Solution #3 works, but it’s really unsatisfactory
- Really complex – even for this simple an example
- Hard to convince yourself that this really works
- A’s code is different from B’s – what if lots of threads?
- Code would have to be slightly different for each thread
- While A is waiting, it is consuming CPU time
- This is called “busy-waiting”
- There’s got to be a better way!
- Have hardware provide higher-level primitives than atomic load & store
- Build even higher-level programming abstractions on this hardware support
Too Much Milk: Solution #4?
- Recall our target lock interface:
- acquire(&milklock) – wait until lock is free, then grab
- release(&milklock) – Unlock, waking up anyone waiting
- These must be atomic operations – if two threads are waiting for the lock and both see it’s free, only one succeeds to grab the lock
- Then, our milk problem is easy:
```
acquire(&milklock);
if (nomilk)
buy milk;
release(&milklock);
```
Back to: How to Implement Locks?
- **Lock**: prevents someone from doing something
- Lock before entering critical section and before accessing shared data
- Unlock when leaving, after accessing shared data
- Wait if locked
» Important idea: all synchronization involves waiting
» Should *sleep* if waiting for a long time
- Atomic Load/Store: get solution like Milk #3
- Pretty complex and error prone
- Hardware Lock instruction
- Is this a good idea?
- What about putting a task to sleep?
» What is the interface between the hardware and scheduler?
- Complexity?
» Done in the Intel 432
» Each feature makes HW more complex and slow
Naïve use of Interrupt Enable/Disable
- How can we build multi-instruction atomic operations?
- Recall: dispatcher gets control in two ways.
» Internal: Thread does something to relinquish the CPU
» External: Interrupts cause dispatcher to take CPU
- On a uniprocessor, can avoid context-switching by:
» Avoiding internal events (although virtual memory tricky)
» Preventing external events by disabling interrupts
- Consequently, naïve Implementation of locks:
LockAcquire { disable Ints; }
LockRelease { enable Ints; }
- Problems with this approach:
- Can’t let user do this! Consider following:
LockAcquire();
While(TRUE) {;}
- Real-Time system—no guarantees on timing!
» Critical Sections might be arbitrarily long
- What happens with I/O or other important events?
» “Reactor about to meltdown. Help?”
Better Implementation of Locks by Disabling Interrupts
- Key idea: maintain a lock variable and impose mutual exclusion only during operations on that variable
```
int value = FREE;
Acquire() {
disable interrupts;
if (value == BUSY) {
put thread on wait queue;
Go to sleep();
// Enable interrupts?
} else {
value = BUSY;
}
enable interrupts;
}
Release() {
disable interrupts;
if (anyone on wait queue) {
take thread off wait queue
Place on ready queue;
} else {
value = FREE;
}
enable interrupts;
}
```
New Lock Implementation: Discussion
- Why do we need to disable interrupts at all?
- Avoid interruption between checking and setting lock value
- Otherwise two threads could think that they both have lock
```
Acquire() {
  disable interrupts;
  if (value == BUSY) {
    put thread on wait queue;
    Go to sleep();
    // Enable interrupts?
  } else {
    value = BUSY;
  }
  enable interrupts;
}
```
- Note: unlike previous solution, the critical section (inside Acquire()) is very short
- User of lock can take as long as they like in their own critical section: doesn’t impact global machine behavior
- Critical interrupts taken in time!
Interrupt Re-enable in Going to Sleep
- What about re-enabling ints when going to sleep?
```c
Acquire() {
disable interrupts;
if (value == BUSY) {
put thread on wait queue;
Go to sleep();
} else {
value = BUSY;
}
enable interrupts;
}
```
- Before putting thread on the wait queue?
- Release can check the queue and not wake up thread
- After putting the thread on the wait queue?
- Release puts the thread on the ready queue, but the thread still thinks it needs to go to sleep
- Misses wakeup and still holds lock (deadlock!)
How to Re-enable After Sleep()?
- In scheduler, since interrupts are disabled when you call `sleep`:
- Responsibility of the next thread to re-enable ints
- When the sleeping thread wakes up, returns to acquire and re-enables interrupts
```c
Thread A                          Thread B
disable ints
sleep
    (context switch) -------->
                                  sleep return
                                  enable ints
                                  . . .
                                  disable ints
                                  sleep
         <-------- (context switch)
sleep return
enable ints
```
In-Kernel Lock: Simulation
```c
INIT:
int value = 0;

Acquire() {
  disable interrupts;
  if (value == 1) {
    put thread on wait-queue;
    go to sleep();   //??
  } else {
    value = 1;
  }
  enable interrupts;
}

Release() {
  disable interrupts;
  if anyone on wait-queue {
    take thread off wait-queue
    Place on ready-queue;
  } else {
    value = 0;
  }
  enable interrupts;
}
```
User code being simulated:
```c
lock.Acquire();
... critical section ...
lock.Release();
```
(Figure: the simulation walks two threads through this code. Thread A calls Acquire() first, finds value == 0, sets value = 1, and becomes the owner; Thread B then calls Acquire(), finds value == 1, is put on the wait queue, and sleeps until A's Release() moves it to the ready queue.)
Recall: Multithreaded Server
- **Bounded** pool of worker threads
- Allocated in advance: no thread creation overhead
- Queue of pending requests
Simple Performance Model
- Given that the overhead of a critical section is X
- User->Kernel Context Switch
- Acquire Lock
- Kernel->User Context Switch
- <perform exclusive work>
- User->Kernel Context Switch
- Release Lock
- Kernel->User Context Switch
- Even if everything else is infinitely fast, with any number of threads and cores
- What is the maximum rate of operations that involve this overhead?
More Practical Motivation
Back to Jeff Dean's "Numbers everyone should know"
- $X = 1 \text{ms} \Rightarrow 1,000 \text{ ops/sec}$
Uncontended Many-Lock Case
What if sys overhead is $Y$, even when the lock is free?
What if the OS can only handle one lock operation at a time?
Recall: Basic cost of a system call
- Min System call ~ 25x cost of function call
- Scheduling could be many times more
- Streamline system processing as much as possible
- Other optimizations seek to process as much of the call in user space as possible (eg, Linux vDSO)
Atomic Read-Modify-Write Instructions
- Problems with previous solution:
- Can't give lock implementation to users
- Doesn't work well on multiprocessor
- Disabling interrupts on all processors requires messages and would be very time consuming
- Alternative: atomic instruction sequences
- These instructions read a value and write a new value atomically
- Hardware is responsible for implementing this correctly
- on both uniprocessors (not too hard)
- and multiprocessors (requires help from cache coherence protocol)
- Unlike disabling interrupts, can be used on both uniprocessors and multiprocessors
Examples of Read-Modify-Write
- **test&set (&address)** { /* most architectures */
    result = M[address];    // return result from "address" and
    M[address] = 1;         // set value at "address" to 1
    return result;
  }
- **swap (&address, register)** { /* x86 */
    temp = M[address];      // swap register's value with
    M[address] = register;  //   value at "address"
    register = temp;
  }
- **compare&swap (&address, reg1, reg2)** { /* 68000 */
    if (reg1 == M[address]) {
      M[address] = reg2;    // If memory still == reg1, then put reg2 => memory
      return success;
    } else {
      return failure;       // Otherwise do not change memory
    }
  }
- **load-linked & store-conditional (&address)** { /* R4000, alpha */
    loop:
      ll r1, M[address];
      movi r2, 1;           // Can do arbitrary computation
      sc r2, M[address];
      beqz r2, loop;        // Retry if the store-conditional failed
  }
Using Compare&Swap for queues
- **compare&swap (&address, reg1, reg2)**
/* 68000 */
if (reg1 == M[address]) {
M[address] = reg2;
return success;
} else {
return failure;
}
Here is an atomic add to linked-list function:
```c
addToQueue(&object) {
  do {
    ld r1, M[root]    // Get ptr to current head
    st r1, M[object]  // Save link in new object
  } until (compare&swap(&root, r1, object));
}
```
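For comparison, here is a hedged C11 rendering of the same atomic add-to-queue loop, using atomic_compare_exchange_weak in place of the compare&swap pseudo-instruction (the node type is invented for the example):

```c
#include <stdatomic.h>
#include <stddef.h>

struct node {
    int value;
    struct node *next;
};

static _Atomic(struct node *) root = NULL;   /* head of the linked list */

void addToQueue(struct node *object) {
    struct node *old_head;
    do {
        old_head = atomic_load(&root);       /* get ptr to current head */
        object->next = old_head;             /* save link in new object */
        /* retry until no other thread changed root in the meantime */
    } while (!atomic_compare_exchange_weak(&root, &old_head, object));
}
```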
Implementing Locks with test&set
- Another flawed, but simple solution:
```c
int value = 0; // Free
Acquire() {
while (test&set(value)); // while busy
}
Release() {
value = 0;
}
```
- Simple explanation:
- If lock is free, test&set reads 0 and sets value=1, so lock is now busy.
- If lock is busy, test&set reads 1 and sets value=1 (no change)
- When we set value = 0, someone else can get lock.
- Busy-Waiting: thread consumes cycles while waiting
- For multiprocessors: every test&set() is a write, which makes value
ping-pong around in cache (using lots of network BW)
Problem: Busy-Waiting for Lock
- Positives for this solution
- Machine can receive interrupts
- User code can use this lock
- Works on a multiprocessor
- Negatives
- This is very inefficient as thread will consume cycles waiting
- Waiting thread may take cycles away from thread holding lock (no one wins!)
- Priority Inversion: If busy-waiting thread has higher priority than thread holding
lock ⇒ no progress!
- Priority Inversion problem with original Martian rover
- For semaphores and monitors, a waiting thread may wait for an arbitrarily long time!
- Thus even if busy-waiting was OK for locks, definitely not ok for other primitives
- Homework/exam solutions should avoid busy-waiting!
Multiprocessor Spin Locks: test&test&set
- A better solution for multiprocessors:
```c
int mylock = 0; // Free
Acquire() {
do {
while(mylock); // Wait until might be free
} while(test&set(&mylock)); // exit if get lock
}
Release() {
mylock = 0;
}
```
- Simple explanation:
- Wait until lock might be free (only reading – stays in cache)
- Then, try to grab lock with test&set
- Repeat if fail to actually get lock
- Issues with this solution:
- Busy-Waiting: thread still consumes cycles while waiting
» However, it does not impact other processors!
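A minimal C11 sketch of this test&test&set idea (not from the slides): an atomic int plays the role of the lock word, and the inner load-only spin keeps the waiting read in the local cache:

```c
#include <stdatomic.h>

static atomic_int mylock = 0;          /* 0 = free, 1 = held */

void acquire(void) {
    for (;;) {
        /* "test": spin on a plain read, which stays in the local cache */
        while (atomic_load_explicit(&mylock, memory_order_relaxed))
            ;                          /* wait until the lock might be free */
        /* "test&set": try to actually grab it */
        if (atomic_exchange_explicit(&mylock, 1, memory_order_acquire) == 0)
            return;                    /* got the lock */
        /* someone beat us to it; go back to spinning on reads */
    }
}

void release(void) {
    atomic_store_explicit(&mylock, 0, memory_order_release);
}
```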
Better Locks using test&set
- Can we build test&set locks without busy-waiting?
- Can't entirely, but can minimize!
- Idea: only busy-wait to atomically check lock value
```c
int guard = 0;
int value = FREE;
Acquire() {
// Short busy-wait time
while (test&set(guard));
if (value == BUSY) {
put thread on wait queue;
go to sleep() & guard = 0;
} else {
value = BUSY;
guard = 0;
}
}
Release() {
// Short busy-wait time
while (test&set(guard));
if anyone on wait queue {
take thread off wait queue
Place on ready queue;
} else {
value = FREE;
guard = 0;
}
}
```
Recall: Locks using Interrupts vs. test&set
Compare to "disable interrupt" solution
```c
int value = FREE;

Acquire() {
  disable interrupts;
  if (value == BUSY) {
    put thread on wait queue;
    go to sleep();
    // Enable interrupts?
  } else {
    value = BUSY;
  }
  enable interrupts;
}

Release() {
  disable interrupts;
  if anyone on wait queue {
    take thread off wait queue
    Place on ready queue;
  } else {
    value = FREE;
  }
  enable interrupts;
}
```
Basically we replaced:
- disable interrupts → while (test&set(guard));
- enable interrupts → guard = 0;
Recap: Locks using interrupts
```c
int value = 0;

lock.Acquire();
  ... critical section ...
lock.Release();

Acquire() {
  disable interrupts;
  if (value == 1) {
    put thread on wait-queue;
    go to sleep();
    // Enable interrupts?
  } else {
    value = 1;
  }
  enable interrupts;
}

Release() {
  disable interrupts;
  if anyone on wait queue {
    take thread off wait-queue
    Place on ready queue;
  } else {
    value = 0;
  }
  enable interrupts;
}
```
Recap: Locks using test & set
```c
int guard = 0;
int value = 0;
Acquire() {
// Short busy-wait time
while (test&set(guard));
if (value == 1) {
put thread on wait-queue;
go to sleep() & guard = 0;   // release guard as the thread goes to sleep
} else {
value = 1;
guard = 0;
}
}
Release() {
// Short busy-wait time
while (test&set(guard));
if anyone on wait queue {
take thread off wait-queue
Place on ready queue;
} else {
value = 0;
}
guard = 0;
}
```
Threads waiting to enter critical section busy-wait
---
Linux futex: Fast Userspace Mutex
```c
#include <linux/futex.h>
#include <sys/time.h>
int futex(int *uaddr, int futex_op, int val,
const struct timespec *timeout);
```
- `uaddr` points to a 32-bit value in user space
- `futex_op`
- `FUTEX_WAIT` – if val == *uaddr, sleep until a FUTEX_WAKE on this address
- Atomic check that condition still holds
- `FUTEX_WAKE` – wake up at most val waiting threads
- `FUTEX_FD`, `FUTEX_WAKE_OP`, `FUTEX_CMP_REQUEUE`
- `timeout` – ptr to a timespec structure that specifies a timeout for the op
---
Linux futex: Fast Userspace Mutex
- Idea: Userspace lock is syscall-free in the uncontended case
- Lock has three states
- Free (no syscall when acquiring lock)
- Busy, no waiters (no syscall when releasing lock)
- Busy, possibly with some waiters
- futex is not exposed in libc; it is used within the implementation of pthreads
---
Example: Userspace Locks with futex
```c
int value = 0; // free
bool maybe_waiters = false;
Acquire() {
while (test&set(value)) {
  maybe_waiters = true;
  // futex: sleep only if the lock is still held (*uaddr == 1)
  futex(&value, FUTEX_WAIT, 1);
  // We may grab the lock on the next iteration, so re-mark that
  // other sleepers may still be waiting.
  maybe_waiters = true;
}
}
Release() {
value = 0;
if (maybe_waiters) {
maybe_waiters = false;
futex(&value, FUTEX_WAKE, 1);
// futex: wake up a sleeping thread
}
}
```
- This is syscall-free in the uncontended case
- Temporarily falls back to syscalls if multiple waiters, or concurrent acquire/release
- But it can be considerably optimized!
- See “Futexes are Tricky” by Ulrich Drepper
Conclusion
• Important concept: Atomic Operations
– An operation that runs to completion or not at all
– These are the primitives on which to construct various synchronization primitives
• Talked about hardware atomicity primitives:
– Disabling of Interrupts, test&set, swap, compare&swap, load-locked & store-conditional
• Showed several constructions of Locks
– Must be very careful not to waste/tie up machine resources
» Shouldn’t disable interrupts for long
» Shouldn’t spin wait for long
– Key idea: Separate lock variable, use hardware mechanisms to protect modifications of that variable
Parallel Systems Course: Chapter II
Shared-Memory Paradigm
Multithreading
Overview
1. // processors and // instructions sequences
2. Architecture
3. Usage
4. Java Threads
5. POSIX Threads
6. Thread Safety
7. Synchronization Constructs
8. OpenMP and related
9. End Notes
I. Message-Passing Architectures
- Each process has its own local memory
- Communication through messages
- *Process is in control*
II. Shared Address-space Architectures
- Example: multiprocessors
- **PRAM**: Parallel Random Access Machine
- Idealization: No communication costs
- But, unavoidably: the possibility of *race conditions*
Intel Core Duo
- Doubled memory bandwidth
- MESI cache coherence protocol, see later
- Introduces overhead
- One processor can utilize the whole L2 cache
**AMD Dual Core Opteron**
- System Request Interface handles memory coherence
- MOESI protocol
- HyperTransport: for RAM requests
- Can be combined with that of other processors => SMPs
(Figure: block diagram of the AMD dual-core Opteron — two processor cores, P0 and P1, each with 64 KB L1 instruction and data caches and a 1 MB L2 cache, connected through a crossbar interconnect to the System Request Interface, the on-chip memory controller, and the HyperTransport links.)
Quadcores
More useful than a dual core?
- Office applications: NO
- Special applications such as photoshop: YES
- Games: IF MULTI-THREADED
- Scientific applications: IF MULTI-THREADED
Example: **A file server on a LAN**
- It needs to handle several file requests over a short period
- Hence, it is more efficient to create (and destroy) a separate thread for each request (a minimal sketch follows below)
- Multiple threads can possibly be executed simultaneously on different processors (mapped by Operating System)
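A minimal POSIX-threads sketch of this thread-per-request pattern (the request type and the handler body are invented for the example):

```c
#include <pthread.h>
#include <stdlib.h>

typedef struct { int client_fd; } request_t;     /* hypothetical request descriptor */

static void *handle_request(void *arg) {
    request_t *req = arg;
    /* ... read the request from req->client_fd and send the file back ... */
    free(req);
    return NULL;
}

void serve(request_t *req) {                      /* called once per incoming request */
    pthread_t tid;
    pthread_create(&tid, NULL, handle_request, req);
    pthread_detach(tid);                          /* thread cleans itself up when done */
}
```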
Running threads on same core
- Executed one by one
- Context switch
- Thread’s state in core: instruction fetch buffer, return address stack, register file, control logic/state, ...
- Supported by hardware
- Takes time!
Coarse-grain multithreading
Running threads on multiple cores
- Active threads in thread pool
- Scheduled by operating system
- Threads (or processes) can be migrated from 1 core to another
Hardware threads
Software threads: scheduling and context switching is performed by Operating System
- Has a cost (overhead).
Hardware thread:
- Scheduling and context switching done by hardware.
- Separate registers & logic for each thread.
- Context switching is cheap.
- Each hardware thread appears as a logical processor core to the OS!
In INTEL processors: Hyperthreading
In GPUs: 1000s of threads are possible without overhead!
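A minimal sketch of what this looks like from software (Linux and most Unix systems expose the count via `sysconf`; on a hyper-threaded CPU the number reported counts every hardware thread, not just the physical cores):

```c
#include <stdio.h>
#include <unistd.h>

int main(void) {
    /* Logical processors currently online: each hardware thread
       appears to the OS as one logical processor.               */
    long n = sysconf(_SC_NPROCESSORS_ONLN);
    printf("The OS sees %ld logical processors\n", n);
    return 0;
}
```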
Overview
1. // processors and // instructions sequences
2. Architecture
3. Usage
4. Java Threads
5. POSIX Threads
6. Thread Safety
7. Synchronization Constructs
8. OpenMP and related
9. End Notes
Multicores:
The following should be provided by hardware and/or system
1. Connect PROCs to MEMs (the interconnect)
2. Address concurrent read/writes
3. Cache coherence
Figure 2.5 Typical shared-address-space architectures: (a) Uniform-memory-access shared-address-space computer; (b) Uniform-memory-access shared-address-space computer with caches and memories; (c) Non-uniform-memory-access shared-address-space computer with local memory only.
Bus-based Interconnects
With local memory/cache
Crossbar switches
[Diagram: a crossbar switch connecting processing elements 0 … p-1 to memory banks 0 … b-1, with a switch element at every crossing point.]
Symmetric Multiprocessor Architectures (SMPs)
- Cf AMD architecture
- Bus is potential bottleneck
- Number of SMPs is limited
Sun Fire E25K (SMP)
- Up to 72 processors
- Each can handle 2 **hardware threads**
- Total memory: 1.15 TB
- 3 crossbars
- Crossbar cost grows as ~ $n^2$
Intel’s Xeon Phi coprocessor
Intel’s response to GPUs...
- 60 cores
Intel’s Xeon Phi’s core
Thread scheduler
4 hardware threads
512-bit Vector unit (SIMD)
2. PRAM Architectures
Handling of simultaneous memory accesses:
- Read operation
- Exclusive-read, concurrent-read
- Write operation
- Exclusive-write, concurrent-write
4 implementations:
- EREW: access to a memory location is exclusive
- CREW: concurrent reads are allowed; multiple write accesses are serialized (exclusive)
- ERCW: exclusive reads, concurrent writes
- CRCW: most powerful PRAM model
Concurrent Write Access Requires Arbitration
- **Common**: write is allowed if the new values are identical
- **Arbitrary**: an arbitrary processor is allowed to write, the rest fails.
- **Priority**: processor with the highest priority succeeds
- **Sum**: the sum of the values is written. Any other operator can be used.
3. Caching & memory coherence
- **Caching**: copies are brought closer to processor
- By cache lines of 64/128 Bytes
- **Cache coherence mechanism**: to update copies
Several copies of same data reside in memory
False sharing
- Based on cache line (64 or 128 bytes)
- 2 processors do not share data but share a cache line
- each processor has some data in the same cache line
- cache line is kept coherent, *unnecessarily*... (a sketch of this effect follows below)
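A minimal sketch of this false-sharing effect, assuming a 64-byte cache line (the POSIX thread calls are introduced in section 5): the two threads never touch each other's counter, yet with the unpadded layout the single shared cache line keeps bouncing between their caches.

```c
#include <pthread.h>
#include <stdio.h>

#define ITERS 100000000L

/* Both counters in one (assumed 64-byte) cache line: false sharing likely. */
struct { long a, b; } same_line;

/* Remedy: pad each counter so it gets a cache line of its own. */
struct padded_counter { long v; char pad[64 - sizeof(long)]; };

static void *bump_a(void *arg) {
    (void) arg;
    for (long i = 0; i < ITERS; i++) same_line.a++;
    return NULL;
}

static void *bump_b(void *arg) {
    (void) arg;
    for (long i = 0; i < ITERS; i++) same_line.b++;
    return NULL;
}

int main(void) {
    pthread_t t1, t2;
    pthread_create(&t1, NULL, bump_a, NULL);
    pthread_create(&t2, NULL, bump_b, NULL);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    printf("a = %ld, b = %ld\n", same_line.a, same_line.b);
    /* Storing the two counters in two `struct padded_counter`s instead
       typically runs noticeably faster: the lines no longer ping-pong.  */
    return 0;
}
```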
Cache Coherence Mechanisms
To keep copies of data in different memory levels consistent!
- Is not always performed. Best effort.
- Or by explicit synchronization triggered by software (see later).
**Update protocol**
[Diagram: P0 and P1 both load x = 1 from memory; when P0 executes write #3, x, the update protocol pushes the new value x = 3 to P1's cached copy and to memory.]
**Cache Coherence Mechanisms**
- **Update protocol**
- Excess in updates if variable is only read once in P1
- *False sharing*: processes update different parts of same cache line
- Used nowadays: Invalidate protocols
---
**Invalidate protocol**
[Diagram: P0 and P1 both load x = 1; when P0 executes write #3, x, the invalidate protocol marks P1's cached copy (and the stale memory copy) invalid instead of propagating the new value.]
### MESI-protocol
**Possible states of a cache line:**
<table>
<thead>
<tr>
<th>State</th>
<th>Cacheline Valid?</th>
<th>Valid in memory?</th>
<th>Copy in other cache?</th>
<th>Write access</th>
</tr>
</thead>
<tbody>
<tr>
<td>Modified</td>
<td>Yes</td>
<td>No</td>
<td>No</td>
<td>Cache</td>
</tr>
<tr>
<td>Exclusive</td>
<td>Yes</td>
<td>Yes</td>
<td>No</td>
<td>Cache</td>
</tr>
<tr>
<td>Shared</td>
<td>Yes</td>
<td>Yes</td>
<td>Possible</td>
<td>Cache/Memory</td>
</tr>
<tr>
<td>Invalid</td>
<td>No</td>
<td>Unknown</td>
<td>Possible</td>
<td>Memory</td>
</tr>
</tbody>
</table>
- Complex, but effective protocol
- Used by Intel
- AMD adds an `owned` state => MOESI-protocol
Overview
1. // processors and // instructions sequences
2. Architecture
3. Usage
4. Java Threads
5. POSIX Threads
6. Thread Safety
7. Synchronization Constructs
8. OpenMP and related
9. End Notes
A **thread** is basically a *lightweight* process.
A process: unit of resource ownership
- A virtual address space to hold the process image
- Control of some resources (files, I/O devices...)
A thread is an execution path
- Has access to the memory address space and resources of its process.
- Shares it with other threads.
- Has its own function call stack.
```
main()
{
thread.start();
...
...
...
...
...
}
run()
{
...
...
...
...
}
```
Example: Matrix Multiplication
```
for (row = 0; row < n; row++)
    for (col = 0; col < n; col++)
        c[row][col] = create_thread(dot_product(get_row(a, row), get_col(b, col)));
```
- One thread per C-element
- Concurrent read must be possible
- No synchronization necessary
- Too many threads=a lot of overhead
In this case, one may think of the thread as an instance of a function that returns before the function has finished executing.
Why Threads?
- **Software Portability**
- run on serial and parallel machines
- **Latency Hiding**
- While one thread has to wait, others can utilize CPU
- For example: file reading, message reading, reading data from higher-level memory
- **Scheduling and Load Balancing**
- Large number of concurrent tasks
- System-level dynamic mapping to processors
- **Ease of Programming**
- Easier to write than message-passing programs (at first sight)
Latency Hiding
Faster CPU
More threads
4 cores: x4
Latency hiding: x3
Example why synchronization is necessary.
- x is initially set to 1
- One thread executes: \( x = 10; \text{print}(x); \)
- Second thread executes: \( x = 5; \text{print}(x); \)
- Both threads are started at the same time
- What is the output?
**Indeterminism**
- When 2 threads run simultaneously, we cannot determine which one is first or which one is faster...
- **Race condition**
- “a flaw in an electronic system or process whereby the output and/or result of the process is unexpectedly and critically dependent on the sequence or timing of other events.”
- The term originates with the idea of two signals *racing each other* to influence the output first.
- **Synchronization necessary**
```
Thread 1
x = 5;
print(x);
```
```
Thread 2
x = 10;
print(x);
```
Results can be:
Possible printed outputs (first value / second value): 5 10, 10 5, 5 5, or 10 10 — depending on how the four statements interleave.
---
Synchronization of Critical Sections
- When multiple threads attempt to manipulate the same data item, the results can often be incoherent if proper care is not taken to synchronize them.
- Example:
```c
/* each thread tries to update variable best_cost */
if (my_cost < best_cost)
best_cost = my_cost;
```
- Assume that there are two threads, the initial value of best_cost is 100, and the values of my_cost are 50 and 75 at threads t1 and t2.
- Depending on the schedule of the threads, the value of best_cost could be 50 or 75!
- The value 75 does not correspond to any serialization of the threads.
A naïve critical section solution
```java
boolean access_x = true;
while (!access_x)
;
access_x = false;
if (my_cost < best_cost)
best_cost = my_cost;
access_x = true;
```
**Problems:**
- What if `access_x` is accessed at the same time?
- Thread consumes CPU time while waiting
**Operating System support needed!**
*Ps. There is a software solution for this: Peterson Algorithm*
Critical sections trigger cache coherence
- System will not perform cache coherence all the time
- Too costly
- Critical sections indicate shared data
Producers-Consumers Scenario
1. **Thread synchronization**
- **Producer Threads**
- Produce thing
- Put in buffer
- ...
- If buffer=full
- wait
- Put in buffer
- Signal non-emptiness
- ...
- **Consumer Threads**
- Get from buffer
- Consume thing
- ...
- If buffer=empty
- wait
- Get from buffer
- Consume thing
- Signal non-fullness
- ...
**Question**: can synchronization be implemented with only locks?
2. **Also needed**: proper locking of critical sections, see later…
Multi-threading primitives
Should minimally allow the following:
1. Thread creation
2. Locking of critical sections
3. Thread synchronization
With *primitives* we mean the minimal set of functions you need to write any multi-threaded program.
# Pthreads (C, C++, ...) & Java
<table>
<thead>
<tr>
<th></th>
<th>PThreads</th>
<th>Java</th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>How?</strong></td>
<td>library</td>
<td>Built-in language Encapsulation: object manages thread-safety</td>
</tr>
<tr>
<td><strong>Thread creation</strong></td>
<td>pthread_create function</td>
<td>Thread class Runnable interface</td>
</tr>
<tr>
<td><strong>Critical sections</strong></td>
<td>Locks</td>
<td>Synchronized methods</td>
</tr>
<tr>
<td><strong>Thread synchronization</strong></td>
<td>Condition variables</td>
<td>wait & notify</td>
</tr>
</tbody>
</table>
Overview
1. // processors and // instructions sequences
2. Architecture
3. Usage
4. Java Threads
5. POSIX Threads
6. Thread Safety
7. Synchronization Constructs
8. OpenMP and related
9. End Notes
public synchronized void start()
Starts this Thread and returns immediately after invoking the run() method.
Throws IllegalThreadStateException if the thread was already started.
public void run()
The body of this Thread, which is invoked after the thread is started.
public final synchronized void join(long millis)
Waits for this Thread to die. A timeout in milliseconds can be specified, with a timeout of 0 milliseconds indicating that the thread will wait forever.
public static void yield()
Causes the currently executing Thread object to yield the processor so that some other runnable Thread can be scheduled.
public final int getPriority()
Returns the thread’s priority.
public final void setPriority(int newPriority)
Sets the thread’s priority.
Thread creation
class PrimeThread extends Thread {
long minPrime;
PrimeThread(long minPrime) {
this.minPrime = minPrime;
}
public void run() {
// compute primes larger
// than minPrime
}
}
PrimeThread p = new PrimeThread(143);
p.start();
class PrimeRun implements Runnable {
long minPrime;
PrimeRun(long minPrime) {
this.minPrime = minPrime;
}
public void run() {
// compute primes larger
// than minPrime
}
}
PrimeRun p = new PrimeRun(143);
new Thread(p).start();
Synchronized methods & blocks
1. Synchronized method:
synchronized void updateCost(int my_cost) {
    if (my_cost < best_cost)
        best_cost = my_cost;
}
2. Synchronized block:
synchronized (object) {
    if (my_cost < best_cost)
        best_cost = my_cost;
}
A synchronized instance method (1) is identical to
```java
void updateCost(int my_cost) {
    synchronized (this) {
        if (my_cost < best_cost)
            best_cost = my_cost;
    }
}
```
**Static methods**
```java
synchronized static void method(){
...
}
```
synchronized on the associated 'Class' object:
<theClass>.class is used for locking
Java objects act as Monitors
- When one thread is executing a synchronized method for an object, all other threads that invoke synchronized methods for the same object block (suspend execution) until the first thread is done with the object.
- When a synchronized method exits, the new state of the object is visible to all threads.
Thread synchronization happens through objects.
Example: Counting 3s
```java
int count = 0;
for (int i = 0; i < array.length; i++)
if (array[i] == 3)
count++;
```
- Parallelism? Yes.
- Multithreaded solution: divide counting
Parallel Counting 3s (wrong version)
count = 0;
Thread[] threads = new Thread[nbrThreads];
for (int t = 0; t < nbrThreads; t++) {
    final int T = t;
    threads[t] = new Thread() {
        public void run() {
            int length_per_thread = array.length / nbrThreads;
            int start = T * length_per_thread;
            for (int i = start; i < start + length_per_thread; i++)
                if (array[i] == 3)
                    count++;   // unsynchronized update of the shared counter: race condition!
        }
    };
    threads[t].start();
}
// wait until all threads have finished
for (int t = 0; t < nbrThreads; t++)
    try {
        threads[t].join();
    } catch (InterruptedException e) {}
Parallel Counting 3s: experiments
Counting 3s in an array of 1000 elements and 4 threads:
* Seq : counted 100 3s in 234us
* Par 1: counted 100 3s in 3ms 615us
* Par 2: counted 100 3s in 13ms 83us
* Par 3: counted 100 3s in 5ms 23us
* Par 4: counted 100 3s in 3ms 845us
Counting 3s in an array of 40000000 elements and 4 threads:
* Seq : counted 4000894 3s in 147ms
* Par 1: counted 3371515 3s in 109ms
* Par 2: counted 4000894 3s in 762ms
* Par 3: counted 4000894 3s in 93ms 748us
* Par 4: counted 4000894 3s in 77ms 14us
Parallel Counting 3s II
Problem in previous: access to the same data
Solution: synchronized method
```java
synchronized void addOne() { count++; }

count = 0;
final int NBR_THREADS = nbrThreads;
Thread[] threads = new Thread[nbrThreads];
for (int t = 0; t < nbrThreads; t++) {
    final int T = t;
    threads[t] = new Thread() {
        public void run() {
            int length_per_thread = array.length / NBR_THREADS;
            int start = T * length_per_thread;
            for (int i = start; i < start + length_per_thread; i++)
                if (array[i] == 3)
                    addOne();   // synchronized update: correct, but one lock acquisition per 3 found
        }
    };
    threads[t].start();
}
// wait until all threads have finished
for (int t = 0; t < nbrThreads; t++)
    try {
        threads[t].join();
    } catch (InterruptedException e) {}
```
Problem in previous:
- locking overhead
- lock contention
- cache coherence overhead
Solution: Use local subtotals
Problem in previous: false sharing (see earlier slide)
Solution: padding
synchronized void addCount(int n) { count += n; }
count = 0;
final int NBR_THREADS = nbrThreads;
Thread[] threads = new Thread[nbrThreads];
for (int t = 0; t < nbrThreads; t++) {
    final int T = t;
    threads[t] = new Thread() {
        int private_count = 0;   // per-thread subtotal: counting needs no lock
        int p1, p2, p3, p4, p5, p6, p7, p8, p9, p10, p11, p12, p13, p14, p15;   // padding against false sharing
        public void run() {
            int length_per_thread = array.length / NBR_THREADS;
            int start = T * length_per_thread;
            for (int i = start; i < start + length_per_thread; i++)
                if (array[i] == 3)
                    private_count++;
            addCount(private_count);   // one synchronized update per thread
        }
    };
    threads[t].start();
}
// wait until all threads have finished
for (int t = 0; t < nbrThreads; t++)
    try {
        threads[t].join();
    } catch (InterruptedException e) {}
Volatile Variables
The Java language allows threads to keep private working copies of shared variables (= caching). This enables a more efficient execution of the threads. For example, when a thread reads and writes these variables, it can do so on its private working copies instead of accessing the variables in main memory. The private working copies are reconciled with main memory only at specific synchronization points.
Volatile variables: Private working memory is reconciled with main memory on each variable access. = Light-weight synchronization
Which code is thread-safe?
```java
volatile int x;
...
x++;
...
```
```java
volatile int x;
...
x = 5;
...
```
```java
volatile int best_cost;
...
if (my_cost < best_cost)
best_cost = my_cost;
...
```
```java
volatile int lower, upper;
public void setLower(int value) {
if (value > upper)
throw new IllegalArgumentException(...);
lower = value;
}
public void setUpper(int value) {
if (value < lower)
throw new IllegalArgumentException(...);
upper = value;
}
```
**Conditions:**
1. Writes to the variable do not depend on its current value.
2. The variable does not participate in invariants with other variables.
Incorrectly synchronized programs exhibit surprising behaviors
- Initially, \( A = B = 0 \)
- Then:
<table>
<thead>
<tr>
<th>Thread 1</th>
<th>Thread 2</th>
</tr>
</thead>
<tbody>
<tr>
<td>1: r2 = A;</td>
<td>3: r1 = B;</td>
</tr>
<tr>
<td>2: B = 1;</td>
<td>4: A = 2;</td>
</tr>
</tbody>
</table>
- End result \( r2 == 2, r1 == 1 \) is possible!!
- Compilers are allowed to reorder the instructions in either thread, when this does not affect the execution of that thread in isolation (being independent)
- Reordering instructions might improve performance
The Java Memory Model
- Describes how threads interact through memory.
- Specifies the legal behaviors for a multithreaded program.
- The compiler/virtual machine is allowed to make optimizations.
- Tries to provide safety, but also flexibility (allowing optimizations to improve performance).
📍 Trade-off!
Thread Synchronization
Via Object class
public final void wait() throws InterruptedException
- Causes the current thread to wait until another thread invokes the notify() method or the notifyAll() method.
- The current thread must own this object's monitor. The thread releases ownership of this monitor.
public final void wait(long timeout, int nanos) throws InterruptedException
public final void notify()
- Wakes up a single thread that is waiting on this object's monitor.
- The awakened thread will not be able to proceed until the current thread relinquishes the lock on this object.
public final void notifyAll()
Put synchronization in critical section
Producer Threads:
  ...
  Produce thing
  while buffer = full: wait()
  Put in buffer
  notify()
  ...
Consumer Threads:
  ...
  while buffer = empty: wait()
  Get from buffer
  notify()
  Consume thing
  ...
synchronized void put() {
  while buffer = full: wait()
  Put in buffer
  notify()
}
synchronized void get() {
  while buffer = empty: wait()
  Get from buffer
  notify()
}
OK?
Race condition possible!
Lock is released on wait()
Vector versus ArrayList
- Vector is synchronized, ArrayList is not
- Only one thread:
- Reported: Vector is slower; in my tests: no difference
- Recent Java versions automatically choose the best version
- Multiple threads:
- Vector OK
- Use Collections.synchronizedList(new ArrayList(...)) ;
Atomic Objects
http://java.sun.com/docs/books/tutorial/essential/concurrency/atomicvars.html
Liveness problem:
- Waiting threads due to (unnecessary) synchronization
More Advanced ...
- **Explicit lock objects**
- `tryLock()`: provides means to back out of lock
- **Executors**: more advanced threads
- Thread pools: reuse of finished threads
- **Concurrent Collections**: concurrent data structures that can be accessed by multiple threads simultaneously
- BlockingQueues
- ConcurrentMap
Overview
1. // processors and // instructions sequences
2. Architecture
3. Usage
4. Java Threads
5. POSIX Threads
6. Thread Safety
7. Synchronization Constructs
8. OpenMP and related
9. End Notes
The POSIX Thread API
- Commonly referred to as Pthreads, POSIX has emerged as the standard threads API (1995), supported by most vendors.
- The concepts discussed here are largely independent of the API and can be used for programming with other thread APIs (NT threads, Solaris threads, Java threads, etc.) as well.
#include <pthread.h>
int pthread_create (pthread_t *thread_handle, const pthread_attr_t *attribute, void * (*thread_function)(void *), void *arg);
int pthread_join ( pthread_t thread, void **ptr);
- The function pthread_create invokes function thread_function as a thread.
- The function pthread_join waits for the thread to be finished and the value passed to pthread_exit (by the terminating thread) is returned in the location pointer **ptr.
Example
```c
#include <pthread.h>
#include <stdlib.h>
#define MAX_THREADS 512
void *compute_pi (void *);

int hits[MAX_THREADS];
int total_hits = 0;

main() {
    int i, num_threads = 32;
    pthread_t p_threads[MAX_THREADS];
    pthread_attr_t attr;
    pthread_attr_init (&attr);
    for (i = 0; i < num_threads; i++) {
        hits[i] = i;   /* per-thread slot, also used as input/seed for compute_pi */
        pthread_create(&p_threads[i], &attr, compute_pi, (void *) &hits[i]);
    }
    for (i = 0; i < num_threads; i++) {
        pthread_join(p_threads[i], NULL);
        total_hits += hits[i];
    }
}
```
Executed on a 4-processor SGI Origin: speedup of 3.91 with 32 threads.
This corresponds to a parallel efficiency of 0.98!
Code like the earlier `best_cost` update corresponds to a **critical segment** or **critical section**; i.e., a segment that must be executed by only one thread at any time.
Critical segments in Pthreads are implemented using **mutex locks**.
Mutex-locks have two states: locked and unlocked. At any point of time, only one thread can lock a mutex lock. A lock is an atomic operation.
A thread entering a critical segment first tries to get a lock. It goes ahead when the lock is granted. Otherwise it is blocked until the lock is relinquished.
Mutual Exclusion
The Pthreads API provides the following functions for handling mutex-locks:
- `int pthread_mutex_init ( pthread_mutex_t *mutex_lock, const pthread_mutexattr_t *lock_attr);`
- `int pthread_mutex_lock ( pthread_mutex_t *mutex_lock);`
- `int pthread_mutex_unlock (pthread_mutex_t *mutex_lock);`
We can now write our previously incorrect code segment as:
```c
pthread_mutex_t minimum_value_lock;
...
main() {
....
pthread_mutex_init(&minimum_value_lock, NULL);
....
}
void *find_min(void *list_ptr) {
....
pthread_mutex_lock(&minimum_value_lock);
if (my_min < minimum_value)
minimum_value = my_min;
/* and unlock the mutex */
pthread_mutex_unlock(&minimum_value_lock);
}
```
Disadvantages lock
- Deadlock possible, see later
- Performance degradation
- Due to locking overhead
- Due to idling of locked threads (if no other thread is there to consume available processing time)
- Alleviate locking overheads
- Minimize size of critical sections
- Encapsulating large segments of the program within locks can lead to significant performance degradation.
- `create_task()` and `process_task()` are left outside the critical section!
Alleviate locking overheads
Test a lock:
- `int pthread_mutex_trylock (pthread_mutex_t *mutex_lock);`
- Returns 0 if locking was successful, EBUSY when already locked by another thread.
- `pthread_mutex_trylock` is typically much faster than `pthread_mutex_lock` since it does not have to deal with queues associated with locks for multiple threads waiting on the lock.
Example: write result to global data if lock can be acquired, otherwise temporarily store locally
KUMAR: ‘Finding matches in a list’
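A minimal sketch of this "try, else store locally" pattern; the `append_*`/`flush_*` helpers are hypothetical, only `pthread_mutex_trylock` and its 0/EBUSY return convention are real:

```c
#include <pthread.h>

pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical helpers: shared result list vs. a thread-private buffer. */
void append_shared(int match);
void append_local(int match);
void flush_local_to_shared(void);

void record_match(int match) {
    if (pthread_mutex_trylock(&list_lock) == 0) {
        /* Lock obtained without blocking: publish buffered results and this one. */
        flush_local_to_shared();
        append_shared(match);
        pthread_mutex_unlock(&list_lock);
    } else {
        /* Returned EBUSY: another thread holds the lock; keep the match
           locally and try to flush it on a later call.                   */
        append_local(match);
    }
}
```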
A condition variable allows a thread to block itself until specified data reaches a predefined state.
A condition variable is associated with this predicate. When the predicate becomes true, the condition variable is used to signal one or more threads waiting on the condition.
A single condition variable may be associated with more than one predicate.
A condition variable always has a *mutex* associated with it. A thread locks this mutex and tests the predicate defined on the shared variable.
If the predicate is not true, the thread waits on the condition variable associated with the predicate using the function `pthread_cond_wait`.
Synchronization in Pthreads
Pthreads provides the following functions for condition variables:
```c
int pthread_cond_wait(pthread_cond_t *cond,
pthread_mutex_t *mutex);
int pthread_cond_signal(pthread_cond_t *cond);
int pthread_cond_broadcast(pthread_cond_t *cond);
int pthread_cond_init(pthread_cond_t *cond,
const pthread_condattr_t *attr);
int pthread_cond_destroy(pthread_cond_t *cond);
```
Parallel Systems: Multi-threading
Producer-consumer work queues
- The **producer threads** create tasks and inserts them into a work queue.
- The **consumer threads** pick up tasks from the queue and executes them.
- Synchronization!
The producer-consumer scenario imposes the following constraints:
- The producer thread must not overwrite the shared buffer when the previous task has not been picked up by a consumer thread.
- The consumer threads must not pick up tasks until there is something present in the shared data structure.
- Individual consumer threads should pick up tasks one at a time.
```c
pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
pthread_cond_t nonempty = PTHREAD_COND_INITIALIZER;
pthread_cond_t nonfull = PTHREAD_COND_INITIALIZER;
Item buffer[SIZE];
int put = 0;   /* buffer index for next insert */
int get = 0;   /* buffer index for next remove */

void insert(Item x)   /* producer thread */
{
    pthread_mutex_lock(&lock);
    while ((put > get && (put - get) == SIZE - 1) ||   /* while buffer is full,     */
           (put < get && (get - put) == 1))            /* i.e. (put+1)%SIZE == get  */
    {
        pthread_cond_wait(&nonfull, &lock);
    }
    buffer[put] = x;
    put = (put + 1) % SIZE;
    pthread_cond_signal(&nonempty);
    pthread_mutex_unlock(&lock);
}

Item remove()   /* consumer thread */
{
    Item x;
    pthread_mutex_lock(&lock);
    while (put == get)   /* while buffer is empty */
    {
        pthread_cond_wait(&nonempty, &lock);
    }
    x = buffer[get];
    get = (get + 1) % SIZE;
    pthread_cond_signal(&nonfull);
    pthread_mutex_unlock(&lock);
    return x;
}
```
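A minimal usage sketch for the `insert()` and `remove()` functions above; the `make_item`/`consume_item` helpers and the endless loops are only illustrative:

```c
/* Hypothetical item producers/consumers -- only the calling pattern matters. */
Item make_item(void);
void consume_item(Item x);

void *producer(void *arg) {
    (void) arg;
    for (;;) {
        Item x = make_item();
        insert(x);             /* blocks (waits on nonfull) while the buffer is full   */
    }
}

void *consumer(void *arg) {
    (void) arg;
    for (;;) {
        Item x = remove();     /* blocks (waits on nonempty) while the buffer is empty */
        consume_item(x);
    }
}

/* Started elsewhere with, e.g.:
   pthread_create(&p, NULL, producer, NULL);
   pthread_create(&c, NULL, consumer, NULL);  */
```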
The Pthreads API allows a programmer to change the default properties of entities (thread, mutex, condition variable) using attributes objects.
An attributes object is a data-structure that describes entity properties.
Once these properties are set, the attributes object can be passed to the method initializing the entity.
Enhances modularity, readability, and ease of modification.
Attributes Objects for Threads
Use the function `pthread_attr_init` to create an attributes object.
Individual properties associated with the attributes object can be changed using the following functions:
- `pthread_attr_setdetachstate`
- `pthread_attr_setguardsize_np`
- `pthread_attr_setstacksize`
- `pthread_attr_setinheritsched`
- `pthread_attr_setschedpolicy`
- `pthread_attr_setschedparam`
A thread locks the same mutex multiple times
```c
pthread_mutex_lock(&lock1);
...
pthread_mutex_lock(&lock1);
...
pthread_mutex_unlock(&lock1);
...
pthread_mutex_unlock(&lock1);
```
E.g., this happens when code inside one critical section calls other code that contains a critical section protected by the same lock
What will happen?
➢ depends on type of lock
Types of Mutexes
- Pthreads supports three types of mutexes - normal, recursive, and error-check.
- A **normal mutex** deadlocks if a thread that already has a lock tries a second lock on it. *This is the default.*
- A **recursive mutex** allows a single thread to lock a mutex as many times as it wants. It simply increments a count on the number of locks. A lock is relinquished by a thread when the count becomes zero.
- An **error check mutex** reports an error when a thread with a lock tries to lock it again (as opposed to deadlocking in the first case, or granting the lock, as in the second case).
- The type of the mutex can be set in the attributes object before it is passed at time of initialization.
Attributes Objects for Mutexes
- Initialize the attributes object using function: `pthread_mutexattr_init`.
- **The function** `pthread_mutexattr_settype_np` can be used for setting the type of mutex specified by the mutex attributes object.
```c
pthread_mutexattr_settype_np (pthread_mutexattr_t *attr, int type);
```
- **Here, type specifies the type of the mutex and can take one of:**
- PTHREAD_MUTEX_NORMAL_NP
- PTHREAD_MUTEX_RECURSIVE_NP
- PTHREAD_MUTEX_ERRORCHECK_NP
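A minimal sketch of setting the mutex type through an attributes object, written with the portable spellings `pthread_mutexattr_settype` and `PTHREAD_MUTEX_RECURSIVE` (the `_np`/`_NP` names above are older non-portable variants of the same idea):

```c
#include <pthread.h>

pthread_mutex_t lock;

void init_recursive_lock(void) {
    pthread_mutexattr_t attr;
    pthread_mutexattr_init(&attr);
    pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
    pthread_mutex_init(&lock, &attr);
    pthread_mutexattr_destroy(&attr);
}

void inner(void) {
    pthread_mutex_lock(&lock);     /* second lock by the same thread: fine with a */
    /* ... critical work ... */    /* recursive mutex, deadlock with a normal one */
    pthread_mutex_unlock(&lock);
}

void outer(void) {
    pthread_mutex_lock(&lock);     /* first lock                              */
    inner();                       /* calls code that locks the same mutex    */
    pthread_mutex_unlock(&lock);
}
```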
Thread Cancellation
```c
int pthread_cancel(pthread_t thread);
```
- Terminates another thread
- Can be dangerous
- In Java: deprecated suspend() method. Use of it is discouraged.
- But sometimes useful, e.g. as long as the user is staying at a certain view in your application, you calculate extra information, as soon as he leaves the view, you stop the calculation.
- A thread can protect itself against cancellation
- `pthread_exit`: exit thread (yourself) without exiting the process
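A minimal sketch of a thread protecting part of its work against cancellation with the standard `pthread_setcancelstate` call (the "work" itself is left as comments):

```c
#include <pthread.h>

void *worker(void *arg) {
    (void) arg;
    int oldstate;

    /* Refuse cancellation while updating shared state that must stay consistent. */
    pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
    /* ... must-complete critical work ... */
    pthread_setcancelstate(oldstate, NULL);

    /* The long, optional computation stays cancellable: another thread may
       abort it at any time with pthread_cancel() on this thread's id.      */
    for (;;) {
        /* ... extra, optional work ... */
        pthread_testcancel();   /* explicit cancellation point in a pure compute loop */
    }
}
```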
Overview
1. // processors and // instructions sequences
2. Architecture
3. Usage
4. Java Threads
5. POSIX Threads
6. Thread Safety
7. Synchronization Constructs
8. OpenMP and related
9. End Notes
Condition variables & locking
- Condition variables should be protected by a lock
- Otherwise the signal of non-emptiness can arrive just between the consumer's check and its going into waiting, and be lost
- Should the signal also be protected by the lock?
- No
Thread-safe?
```c
pthread_mutex_lock(&lock);
while (apples==0)
pthread_cond_wait(&more_apples, &lock);
while (oranges==0)
pthread_cond_wait(&more_oranges, &lock);
// eat apple & orange
pthread_mutex_unlock(&lock);
```
NOK!!
```c
pthread_mutex_lock(&lock);
while (apples==0 || oranges==0){
pthread_cond_wait(&more_apples, &lock);
pthread_cond_wait(&more_oranges, &lock);
}
// eat apple & orange
pthread_mutex_unlock(&lock);
```
Still NOK!!
Thread-safe!
pthread_mutex_lock(&lock);
boolean allConditionsPassed;
do {
allConditionsPassed = true;
if (apples == 0){
pthread_cond_wait(&more_apples, &lock);
allConditionsPassed = false; }
if (oranges == 0){
pthread_cond_wait(&more_oranges, &lock);
allConditionsPassed = false; }
} while (!allConditionsPassed);
// eat apple & orange
pthread_mutex_unlock(&lock);
pthread_mutex_lock(&lock);
while (apples==0 || oranges==0){
pthread_cond_wait(&more_apples_or_more_oranges, &lock);
}
// eat apple & orange
pthread_mutex_unlock(&lock)
Mistake in PPP on page 173!!
With the boolean you can easily add more conditions. Also OK, without the boolean:
} while (apples == 0 || oranges == 0);
Alternative (the last fragment above): use only 1 condition variable.
The Dining Philosophers
The philosophers do not speak to each other and there is no arbiter organizing the resources
⇒ Deadlock...
Deadlocks
Four conditions
1. Mutual exclusion
2. Hold and wait: threads hold some resources and request other
3. No preemption: resource can only be released by the thread that holds it
4. Circular wait: cycle in waiting of a thread for a resource of another
Resource allocation graph
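To make hold-and-wait and circular wait concrete, here is a minimal two-lock deadlock sketch: each thread holds one mutex and waits for the other's.

```c
#include <pthread.h>

pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

void *thread_a(void *arg) {
    (void) arg;
    pthread_mutex_lock(&m1);    /* holds m1 ...         */
    pthread_mutex_lock(&m2);    /* ... and waits for m2 */
    pthread_mutex_unlock(&m2);
    pthread_mutex_unlock(&m1);
    return NULL;
}

void *thread_b(void *arg) {
    (void) arg;
    pthread_mutex_lock(&m2);    /* holds m2 ...         */
    pthread_mutex_lock(&m1);    /* ... and waits for m1 */
    pthread_mutex_unlock(&m1);
    pthread_mutex_unlock(&m2);
    return NULL;
}

/* If both threads get past their first lock before either takes its second,
   all four deadlock conditions hold and neither thread can ever proceed.
   Simple remedy: always acquire the two mutexes in the same global order.   */
```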
Livelocks
- Similar to a deadlock, except that the states of the processes involved in the livelock constantly change with regard to one another, none progressing.
- *Real-world example*: two people meet in a narrow corridor, each moves aside to let the other pass, but they end up swaying from side to side.
- A risk with algorithms that detect and recover from deadlock.
Overview
1. // processors and // instructions sequences
2. Architecture
3. Usage
4. Java Threads
5. POSIX Threads
6. Thread Safety
7. Synchronization Constructs
8. OpenMP and related
9. End Notes
Composite Synchronization Constructs
- By design, **Pthreads** provide support for a basic set of operations.
- Higher level constructs can be built using basic synchronization constructs.
- We discuss two such constructs - *barriers* and *read-write locks*.
Barriers
- Holds a thread until all threads participating in the barrier have reached it.
- Can be implemented using a counter, a mutex and a condition variable.
- A single integer is used to keep track of the number of threads that have reached the barrier.
- If the count is less than the total number of threads, the threads execute a condition wait.
- The last thread entering (and setting the count to the number of threads) wakes up all the threads using a condition broadcast.
- Release of lock and reactivation of threads must happen atomically
- Otherwise problematic when barrier is reused...
Barriers
typedef struct {
pthread_mutex_t count_lock;
pthread_cond_t ok_to_proceed;
int count;
} mylib_barrier_t;
void mylib_init_barrier(mylib_barrier_t *b) {
b -> count = 0;
pthread_mutex_init(&(b -> count_lock), NULL);
pthread_cond_init(&(b -> ok_to_proceed), NULL);
}
Barriers
```c
void mylib_barrier (mylib_barrier_t *b, int num_threads) {
    pthread_mutex_lock(&b->count_lock);
    b->count++;
    if (b->count == num_threads) {
        b->count = 0;
        pthread_cond_broadcast(&b->ok_to_proceed);
    } else
        while (pthread_cond_wait(&b->ok_to_proceed,
                                 &b->count_lock) != 0);
    pthread_mutex_unlock(&b->count_lock);
}
```
Barriers
The barrier described above is called a *linear barrier*.
The trivial lower bound on execution time of this function is therefore $O(n)$ for $n$ threads.
- Threads are released one by one, since mutex `count_lock` is passed among them one after the other!
Can be speeded up using multiple barrier variables organized in a tree.
Log Barrier
- We use \( n/2 \) condition variable-mutex pairs for implementing a barrier for \( n \) threads.
- At the lowest level, threads are paired up and each pair of threads shares a single condition variable-mutex pair.
- Once both threads arrive, one of the two moves on, the other one waits.
- This process repeats up the tree.
- This is also called a log barrier and its runtime grows as \( O(\log n) \).
“Many threads must access the same shared memory at one time, some reading and some writing, with the natural constraint that no process may access the share for reading or writing while another process is in the act of writing to it.”
Data structure is read frequently but written infrequently
- use read-write locks *instead of traditional locking.*
A read lock is granted when there are other threads that may already have read locks.
If there is a write lock on the data (or if there are queued write locks), the thread performs a condition wait.
**Pending writers get priority over pending readers.**
If there are multiple threads requesting a write lock, they must perform a condition wait.
With this description, we can design functions for read locks `mylib_rwlock_readlock`, write locks `mylib_rwlock_writelock`, and unlocking `mylib_rwlock_unlock`.
Read-Write Locks
The lock data type `mylib_rwlock_t` holds the following:
- a count of the number of readers,
- a count of pending writers
- A boolean specifying whether a writer is present,
- a mutex `read_write_lock` associated with the shared data structure,
- a condition variable `readers_proceed` that is signaled when readers can proceed,
- a condition variable `writer_proceed` that is signaled when one of the writers can proceed
typedef struct {
int readers;
bool writer;
pthread_cond_t readers_proceed;
pthread_cond_t writer_proceed;
int pending_writers;
pthread_mutex_t read_write_lock;
} mylib_rwlock_t;
void mylib_rwlock_init (mylib_rwlock_t *l) {
l->readers = l->pending_writers = 0;
l->writer = false;
pthread_mutex_init(&(l->read_write_lock), NULL);
pthread_cond_init(&(l->readers_proceed), NULL);
pthread_cond_init(&(l->writer_proceed), NULL);
}
Read-Write Locks
```c
void mylib_rwlock_readlock(mylib_rwlock_t *l) {
/* if there is a write lock or pending writers, perform
condition wait.. else increment count of readers and grant
read lock */
pthread_mutex_lock(&(l->read_write_lock));
while ((l->pending_writers > 0) || l->writer)
pthread_cond_wait(&(l->readers_proceed),
&(l->read_write_lock));
l->readers ++;
pthread_mutex_unlock(&(l->read_write_lock));
}
```
void mylib_rwlock_writelock(mylib_rwlock_t *l) {
/* if there are readers or a writer, increment pending writers count and wait. On being woken, decrement pending writers count and set writer */
pthread_mutex_lock(&l->read_write_lock);
while (l->writer || (l->readers > 0)) {
l->pending_writers++;
pthread_cond_wait(&(l->writer_proceed), &l->read_write_lock);
l->pending_writers--;
}
l->writer = true;
pthread_mutex_unlock(&l->read_write_lock);
}
Error in Kumar: should be inside loop
void mylib_rwlock_unlock(mylib_rwlock_t *l) {
/* if there is a write lock then unlock, else if there are read
locks, decrement count of read locks. If the count is 0 and
there is a pending writer, let it through, else if there are
pending readers, let them all go through */
pthread_mutex_lock(&l->read_write_lock);
if (l->writer) // I'm a writer
l->writer = false;
else if (l->readers > 0) // I'm a reader
l->readers --;
pthread_mutex_unlock(&l->read_write_lock);
if ((l->readers == 0) && (l->pending_writers > 0))
pthread_cond_signal(&l->writer_proceed);
else if (l->readers > 0)
pthread_cond_broadcast(&l->readers_proceed);
}
What if pending writers should get access asap?
What if a reader unlocks while other readers are still busy (readers > 0) and there are both pending writers and pending readers?
- The waiting readers should not be released!
- The last branch should be: else if (l->pending_writers == 0)
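Besides hand-rolled versions like the one above, note that Pthreads also offers a ready-made read-write lock type (`pthread_rwlock_t`); a minimal sketch of its use:

```c
#include <pthread.h>

pthread_rwlock_t rw = PTHREAD_RWLOCK_INITIALIZER;
int shared_table[1024];

int read_entry(int i) {
    pthread_rwlock_rdlock(&rw);    /* many readers may hold the lock at once       */
    int v = shared_table[i];
    pthread_rwlock_unlock(&rw);
    return v;
}

void write_entry(int i, int v) {
    pthread_rwlock_wrlock(&rw);    /* exclusive: waits for all readers and writers */
    shared_table[i] = v;
    pthread_rwlock_unlock(&rw);
}
```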
Conditional wait inside a loop
When waiting on condition variables, the wait should be inside a loop, not in a simple if statement because of **spurious wakeups**.
- You are not guaranteed that if a thread wakes up, it is the result of a signal or broadcast call.
Spurious wakeups may occur => the return does not imply anything about the value of the predicate => the predicate should be re-evaluated.
while (!predicate)
pthread_cond_wait(…)
Overview
1. // processors and // instructions sequences
2. Architecture
3. Usage
4. Java Threads
5. POSIX Threads
6. Thread Safety
7. Synchronization Constructs
8. OpenMP and related
9. End Notes
OpenMP Philosophy
- The OpenMP Application Program Interface (API) supports multi-platform shared-memory parallel programming in C/C++ and Fortran.
- Portable, scalable model with a simple and flexible interface for developing parallel applications.
- Augment sequential programs in minor ways to identify code that can be executed in parallel.
- Simpler to use
- More restrictive in terms of parallel interactions than Java/POSIX
- Standardized (Sun, Intel, Fujitsu, IBM, ...)
- http://www.openmp.org
How?
✦ Add pragmas to program
✦ `#pragma omp <specifications>`
✦ The `#pragma` directives offer a way for each compiler to offer machine- and operating system-specific features. If the compiler finds a pragma it does not recognize, it issues a warning, but compilation continues.
✦ An OpenMP-compliant compiler will generate appropriate multithreaded code
✦ Other compilers simply ignore the pragmas and generate sequential code.
```
int count3s()
{
    int i, count_p;
    count = 0;
    #pragma omp parallel shared(array, count, length) \
            private(count_p)
    {
        count_p = 0;
        #pragma omp for private(i)   /* work-sharing loop inside the enclosing parallel region */
        for (i = 0; i < length; i++)
        {
            if (array[i] == 3)
            {
                count_p++;
            }
        }
        #pragma omp critical
        {
            count += count_p;
        }
    }
    return count;
}
```
The iterations can execute in any order and in parallel. Incrementing `count` directly instead of `count_p` inside the loop would be wrong (race condition).
Reduction pragma for computations that combine variables globally
```c
count=0;
#pragma omp parallel for reduction(+:count)
for(i=0; i<length; i++)
count += array[i]==3 ? 1 : 0;
```
Handling data dependencies
```c
#pragma omp critical
{
count += count_p;
}
```
Critical section that will be protected by locks
```c
#pragma omp atomic
score += 3;
```
Memory update is noninterruptible
Sections to express task parallelism
```c
#pragma omp sections
{
#pragma omp section
{
Task_A();
}
#pragma omp section
{
Task_B();
}
#pragma omp section
{
Task_C();
}
}
```
OpenACC for GPU computing
- A directive dialect in the style of OpenMP, especially for GPU computing
- Easier than OpenCL/CUDA
- The future??
- Based on OpenHMPP from CAPS enterprise (Bretagne, France)
Matlab: parallel for
- Parallel computing toolbox provides simple constructs to allow parallel execution
- Parallel for (when iterations are independent)
- ...
- Automatic parallel execution
- Create pool of computers that will work together
- Many functions of libraries run in parallel and even (automatically) on GPU!
Overview
1. // processors and // instructions sequences
2. Architecture
3. Usage
4. Java Threads
5. POSIX Threads
6. Thread Safety
7. Synchronization Constructs
8. OpenMP and related
9. End Notes
Keep in mind when Designing Asynchronous Programs
- Never rely on scheduling assumptions when exchanging data.
- Never rely on liveness of data resulting from assumptions on scheduling.
- Do not rely on scheduling as a means of synchronization.
- Use synchronization mechanisms with mutexes.
- Where possible, define and use group synchronizations and data replication.
Methods for multi-threading
1. POSIX: low-level
• Complete
2. Java Threads: integrated in the language
• Complete, although some things need ‘dirty’ solutions
• For example: allow multiple synchronized methods of an object to be executed simultaneously.
3. OpenMP (and others): high-level
• Incomplete, you can’t program everything you want...
4. OpenCL
• For fine-grain parallelism
• For algorithms with massive inherent parallelism
• Thread synchronization is hidden for the user!
Which one should we prefer?
A bit of history: Semaphores
- One of the first concepts for critical sections & thread synchronization.
- Invented by Dutch computer scientist Edsger Dijkstra.
- Found widespread use in a variety of operating systems as basic primitive for avoiding race conditions.
- Based on a protected variable for controlling access by multiple processes to a common resource.
- By atomic operations you can decrement or increment semaphores.
- **Binary** (flag) or integer (counting)
- *When binary*: similar to mutexes
- *When integer*: The value of the semaphore $S$ is the number of units of the resource that have not been claimed.
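A minimal sketch of a counting semaphore with the POSIX semaphore API (`sem_wait` is Dijkstra's P, `sem_post` is V); here it guards a pool of 4 identical resources — the value 4 is just an example:

```c
#include <semaphore.h>

sem_t pool;                  /* counts how many units of the resource are free */

void setup(void) {
    sem_init(&pool, 0, 4);   /* pshared = 0: shared between threads; 4 units   */
}

void *worker(void *arg) {
    (void) arg;
    sem_wait(&pool);         /* P: claim one unit, block if none is left */
    /* ... use the resource ... */
    sem_post(&pool);         /* V: give the unit back */
    return NULL;
}
```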
Why multithreading
Performance (speedup):
- Exploit parallel hardware
- Latency hiding
- More than 1 thread per core
- Load balancing
- More than 1 thread per core
- More high-level memory available
Convenience
- E.g. one thread per client request
- Background computations
Disadvantages of multi-threading
- More difficult to understand
- More difficult to debug
- Indeterminism!
- Finding unsafe constructions through testing is difficult!
Multi-threading without speedup
- Webserver: a thread for each client
- Multi-threading for convenience
- = distributed computing, not parallel computing
- But: one can lose performance!
- 4 requests, each request takes 10 seconds to finish.
- A single thread: user #1 has to wait 10 seconds, user #2 will wait 20 seconds, user #3 will wait 30 seconds and user #4 will wait 40 seconds.
- Average waiting time = 25 seconds
- Four threads are activated: they must split the available processor time. Each thread will take four times as long. So each request will complete at about 40 seconds.
- Waiting time = 40 seconds (+37.5%)
|
{"Source-Url": "http://parallel.vub.ac.be/education/parsys/notes2014/Parsys_SharedMemory.pdf", "len_cl100k_base": 11547, "olmocr-version": "0.1.53", "pdf-total-pages": 121, "total-fallback-pages": 0, "total-input-tokens": 169938, "total-output-tokens": 16181, "length": "2e13", "weborganizer": {"__label__adult": 0.00031638145446777344, "__label__art_design": 0.0003566741943359375, "__label__crime_law": 0.0002713203430175781, "__label__education_jobs": 0.0006909370422363281, "__label__entertainment": 7.641315460205078e-05, "__label__fashion_beauty": 0.00013709068298339844, "__label__finance_business": 0.00013387203216552734, "__label__food_dining": 0.0002703666687011719, "__label__games": 0.00113677978515625, "__label__hardware": 0.004184722900390625, "__label__health": 0.00027441978454589844, "__label__history": 0.00029206275939941406, "__label__home_hobbies": 0.00012600421905517578, "__label__industrial": 0.0006694793701171875, "__label__literature": 0.00019168853759765625, "__label__politics": 0.0002341270446777344, "__label__religion": 0.0005755424499511719, "__label__science_tech": 0.044586181640625, "__label__social_life": 6.0677528381347656e-05, "__label__software": 0.00951385498046875, "__label__software_dev": 0.9345703125, "__label__sports_fitness": 0.0003993511199951172, "__label__transportation": 0.0005369186401367188, "__label__travel": 0.00018966197967529297}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 46019, 0.02411]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 46019, 0.61068]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 46019, 0.77431]], "google_gemma-3-12b-it_contains_pii": [[0, 76, false], [76, 273, null], [273, 470, null], [470, 603, null], [603, 815, null], [815, 972, null], [972, 1592, null], [1592, 1778, null], [1778, 2077, null], [2077, 2331, null], [2331, 2494, null], [2494, 2933, null], [2933, 3130, null], [3130, 3299, null], [3299, 3577, null], [3577, 3626, null], [3626, 3733, null], [3733, 3860, null], [3860, 3990, null], [3990, 4060, null], [4060, 4205, null], [4205, 4535, null], [4535, 4859, null], [4859, 5075, null], [5075, 5294, null], [5294, 5615, null], [5615, 6042, null], [6042, 6737, null], [6737, 6935, null], [6935, 7400, null], [7400, 7828, null], [7828, 8289, null], [8289, 8362, null], [8362, 8607, null], [8607, 9323, null], [9323, 9934, null], [9934, 10327, null], [10327, 10481, null], [10481, 11072, null], [11072, 11216, null], [11216, 11468, null], [11468, 12088, null], [12088, 12285, null], [12285, 13108, null], [13108, 13683, null], [13683, 14230, null], [14230, 14614, null], [14614, 14805, null], [14805, 15418, null], [15418, 15942, null], [15942, 16744, null], [16744, 16860, null], [16860, 17709, null], [17709, 18278, null], [18278, 18935, null], [18935, 19445, null], [19445, 19754, null], [19754, 20383, null], [20383, 20828, null], [20828, 21127, null], [21127, 21295, null], [21295, 21628, null], [21628, 21825, null], [21825, 22143, null], [22143, 22591, null], [22591, 23187, null], [23187, 23724, null], [23724, 24035, null], [24035, 24455, null], [24455, 24932, null], [24932, 25442, null], [25442, 26087, null], [26087, 26529, null], [26529, 26765, null], [26765, 27134, null], [27134, 28152, null], [28152, 28540, null], [28540, 28970, null], [28970, 29295, null], [29295, 30017, null], [30017, 30504, null], [30504, 31001, null], [31001, 31198, null], [31198, 31441, null], [31441, 31896, null], [31896, 
32641, null], [32641, 32774, null], [32774, 33062, null], [33062, 33438, null], [33438, 33635, null], [33635, 33896, null], [33896, 34508, null], [34508, 34806, null], [34806, 35213, null], [35213, 35554, null], [35554, 35970, null], [35970, 36836, null], [36836, 37277, null], [37277, 37747, null], [37747, 38230, null], [38230, 38763, null], [38763, 39464, null], [39464, 39715, null], [39715, 40342, null], [40342, 40539, null], [40539, 41046, null], [41046, 41484, null], [41484, 41924, null], [41924, 42228, null], [42228, 42437, null], [42437, 42671, null], [42671, 42856, null], [42856, 43182, null], [43182, 43380, null], [43380, 43751, null], [43751, 43751, null], [43751, 44290, null], [44290, 44921, null], [44921, 45201, null], [45201, 45372, null], [45372, 46019, null]], "google_gemma-3-12b-it_is_public_document": [[0, 76, true], [76, 273, null], [273, 470, null], [470, 603, null], [603, 815, null], [815, 972, null], [972, 1592, null], [1592, 1778, null], [1778, 2077, null], [2077, 2331, null], [2331, 2494, null], [2494, 2933, null], [2933, 3130, null], [3130, 3299, null], [3299, 3577, null], [3577, 3626, null], [3626, 3733, null], [3733, 3860, null], [3860, 3990, null], [3990, 4060, null], [4060, 4205, null], [4205, 4535, null], [4535, 4859, null], [4859, 5075, null], [5075, 5294, null], [5294, 5615, null], [5615, 6042, null], [6042, 6737, null], [6737, 6935, null], [6935, 7400, null], [7400, 7828, null], [7828, 8289, null], [8289, 8362, null], [8362, 8607, null], [8607, 9323, null], [9323, 9934, null], [9934, 10327, null], [10327, 10481, null], [10481, 11072, null], [11072, 11216, null], [11216, 11468, null], [11468, 12088, null], [12088, 12285, null], [12285, 13108, null], [13108, 13683, null], [13683, 14230, null], [14230, 14614, null], [14614, 14805, null], [14805, 15418, null], [15418, 15942, null], [15942, 16744, null], [16744, 16860, null], [16860, 17709, null], [17709, 18278, null], [18278, 18935, null], [18935, 19445, null], [19445, 19754, null], [19754, 20383, null], [20383, 20828, null], [20828, 21127, null], [21127, 21295, null], [21295, 21628, null], [21628, 21825, null], [21825, 22143, null], [22143, 22591, null], [22591, 23187, null], [23187, 23724, null], [23724, 24035, null], [24035, 24455, null], [24455, 24932, null], [24932, 25442, null], [25442, 26087, null], [26087, 26529, null], [26529, 26765, null], [26765, 27134, null], [27134, 28152, null], [28152, 28540, null], [28540, 28970, null], [28970, 29295, null], [29295, 30017, null], [30017, 30504, null], [30504, 31001, null], [31001, 31198, null], [31198, 31441, null], [31441, 31896, null], [31896, 32641, null], [32641, 32774, null], [32774, 33062, null], [33062, 33438, null], [33438, 33635, null], [33635, 33896, null], [33896, 34508, null], [34508, 34806, null], [34806, 35213, null], [35213, 35554, null], [35554, 35970, null], [35970, 36836, null], [36836, 37277, null], [37277, 37747, null], [37747, 38230, null], [38230, 38763, null], [38763, 39464, null], [39464, 39715, null], [39715, 40342, null], [40342, 40539, null], [40539, 41046, null], [41046, 41484, null], [41484, 41924, null], [41924, 42228, null], [42228, 42437, null], [42437, 42671, null], [42671, 42856, null], [42856, 43182, null], [43182, 43380, null], [43380, 43751, null], [43751, 43751, null], [43751, 44290, null], [44290, 44921, null], [44921, 45201, null], [45201, 45372, null], [45372, 46019, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 46019, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, 
false], [5000, 46019, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 46019, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 46019, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 46019, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 46019, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 46019, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 46019, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 46019, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 46019, null]], "pdf_page_numbers": [[0, 76, 1], [76, 273, 2], [273, 470, 3], [470, 603, 4], [603, 815, 5], [815, 972, 6], [972, 1592, 7], [1592, 1778, 8], [1778, 2077, 9], [2077, 2331, 10], [2331, 2494, 11], [2494, 2933, 12], [2933, 3130, 13], [3130, 3299, 14], [3299, 3577, 15], [3577, 3626, 16], [3626, 3733, 17], [3733, 3860, 18], [3860, 3990, 19], [3990, 4060, 20], [4060, 4205, 21], [4205, 4535, 22], [4535, 4859, 23], [4859, 5075, 24], [5075, 5294, 25], [5294, 5615, 26], [5615, 6042, 27], [6042, 6737, 28], [6737, 6935, 29], [6935, 7400, 30], [7400, 7828, 31], [7828, 8289, 32], [8289, 8362, 33], [8362, 8607, 34], [8607, 9323, 35], [9323, 9934, 36], [9934, 10327, 37], [10327, 10481, 38], [10481, 11072, 39], [11072, 11216, 40], [11216, 11468, 41], [11468, 12088, 42], [12088, 12285, 43], [12285, 13108, 44], [13108, 13683, 45], [13683, 14230, 46], [14230, 14614, 47], [14614, 14805, 48], [14805, 15418, 49], [15418, 15942, 50], [15942, 16744, 51], [16744, 16860, 52], [16860, 17709, 53], [17709, 18278, 54], [18278, 18935, 55], [18935, 19445, 56], [19445, 19754, 57], [19754, 20383, 58], [20383, 20828, 59], [20828, 21127, 60], [21127, 21295, 61], [21295, 21628, 62], [21628, 21825, 63], [21825, 22143, 64], [22143, 22591, 65], [22591, 23187, 66], [23187, 23724, 67], [23724, 24035, 68], [24035, 24455, 69], [24455, 24932, 70], [24932, 25442, 71], [25442, 26087, 72], [26087, 26529, 73], [26529, 26765, 74], [26765, 27134, 75], [27134, 28152, 76], [28152, 28540, 77], [28540, 28970, 78], [28970, 29295, 79], [29295, 30017, 80], [30017, 30504, 81], [30504, 31001, 82], [31001, 31198, 83], [31198, 31441, 84], [31441, 31896, 85], [31896, 32641, 86], [32641, 32774, 87], [32774, 33062, 88], [33062, 33438, 89], [33438, 33635, 90], [33635, 33896, 91], [33896, 34508, 92], [34508, 34806, 93], [34806, 35213, 94], [35213, 35554, 95], [35554, 35970, 96], [35970, 36836, 97], [36836, 37277, 98], [37277, 37747, 99], [37747, 38230, 100], [38230, 38763, 101], [38763, 39464, 102], [39464, 39715, 103], [39715, 40342, 104], [40342, 40539, 105], [40539, 41046, 106], [41046, 41484, 107], [41484, 41924, 108], [41924, 42228, 109], [42228, 42437, 110], [42437, 42671, 111], [42671, 42856, 112], [42856, 43182, 113], [43182, 43380, 114], [43380, 43751, 115], [43751, 43751, 116], [43751, 44290, 117], [44290, 44921, 118], [44921, 45201, 119], [45201, 45372, 120], [45372, 46019, 121]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 46019, 0.03015]]}
|
olmocr_science_pdfs
|
2024-12-09
|
2024-12-09
|
4888208412361567947efd0a61f13fc4390e7a45
|
A New Approach for System Requirements Elicitation Using Discount Focus Subgroups Method
Mohanad Halaweh
University of Dubai, mohanadhalaweh@gmail.com
Recommended Citation: http://aisel.aisnet.org/pacis2015/216
A NEW APPROACH FOR SYSTEM REQUIREMENTS ELICITATION USING DISCOUNT FOCUS SUBGROUPS METHOD
Mohanad Halaweh, College of Information Technology, University of Dubai, Dubai, UAE, mhalaweh@ud.ac.ae
Abstract
Requirements elicitation is a key and critical activity for software/system development success. Several methods and techniques have been developed and used for requirements elicitation. Prior research referred to many problems and shortfalls with existing group-based methods (e.g., brainstorming, focus groups, and joint application development [JAD]). This paper provides a new approach for requirements elicitation using a novel method called discount focus subgroups (DFSG). The current paper demonstrates that DFSG is an alternative effective technique to improve requirements elicitation activity by addressing pitfalls and problems with existing group-based methods. The method is effective in several situations such as when the development team aims to minimize the costs of system development, large numbers of stakeholders need to be involved in large projects like enterprise systems (ERP), the system is novel and where no similar systems have been developed before.
Keywords: Requirements elicitation, systems analysis, group elicitation methods, focus group, JAD, brainstorming, software engineering, DFSG.
Requirements elicitation is the first stage of requirements engineering. It can be defined as a process of seeking, uncovering, acquiring, and elaborating requirements for information systems (Zowghi & Coulin 2005). It is essential and crucial for software/system development success. There is agreement among practitioners and researchers that poor requirements elicitation will cause serious errors, which might lead to system failure or extra time and costs if not discovered until the implementation stage (Mulla & Girase 2012; Avison & Fitzgerald 2006; Hickey & Davis 2003). The main aim of requirements elicitation is to understand stakeholders’ needs of the system. Requirements elicitation is mainly dependent on the people involved (Zhang 2007). Those involved have different backgrounds and different organizational and individual goals, positions, and personalities. They have different ways of understanding and expressing information and of communicating with others. They might not understand the process of system development. On the other hand, system analysts are likely to be unfamiliar with the application domain and business concepts (Avison & Fitzgerald 2006; Nuseibeh & Easterbrook 2000). This gap creates a communications barrier between the analyst and the domain experts and users. According to many researchers, the main challenge of requirements elicitation stems from poor and complex communication between stakeholders and analysts (Pa & Zin 2011; Aranda, Vizcaíno & Piattini 2010; Coughlan et al. 2003; Coughlan & Marcredie 2002). User-analyst communication is certainly an important part of requirements elicitation. The effectiveness of this communication depends on the communication styles, and these are determined by the elicitation techniques and methods that the system analyst selects.
There are many proposed methods and techniques used for requirements elicitation. Researchers have classified these into different categories, such as traditional, conversational, observational, agile, collaboration, analytical, and group-work techniques and methods (Arshad, Shah & Shahzad 2013; Zowghi & Coulin 2005; Nuseibeh & Easterbrook 2000). This paper focuses mainly on group-based ones. Prior research has identified many problems and shortfalls associated with existing group-based methods (e.g., focus groups, brainstorming, joint application development [JAD]), as illustrated in the next section. The current paper reviews existing techniques for requirements elicitation, with the emphasis on group-based methods and techniques, and proposes an innovative group-based method (i.e., discount focus subgroups [DFSG]) as an alternative to overcome some of the pitfalls and problems with existing methods.
The remainder of this paper is organized as follows. Section 2 presents a review of the literature on existing requirements elicitation methods and techniques. Section 3 provides an overview of the DFSG method. Section 4 presents the rationale for, and the applicability of, the DFSG method for requirements elicitation. Section 5 provides a discussion of the application of the DFSG method and potential future work.
2 LITERATURE REVIEW OF REQUIREMENTS ELICITATION TECHNIQUES
Most of the methods and techniques used in requirements elicitation are derived from the social sciences (Zowghi & Coulin 2005), and this is expected as it is a human-based activity. Nuseibeh and Easterbrook (2000) developed a classification system according to the needs of a project. They divided the methods into six categories: 1) traditional techniques (e.g., questionnaires, interviews, and analysis of existing documentation); 2) group elicitation methods (e.g., brainstorming, focus groups, and JAD workshops); 3) prototyping (e.g., early versions of user interfaces); 4) contextual techniques (e.g., observations); 5) cognitive techniques (e.g., protocol analysis, laddering, card sorting and repertory grids); and 6) model-driven techniques (e.g., goal-based and scenario-based methods). In contrast, Arshad et al. (2013) classified them based on user involvement into traditional methodologies (e.g., interviews, questionnaire, ethnography, focus groups) and agile methods (e.g., extreme programming,
crystal methodology). In traditional methods, user involvement is not continuous throughout the whole process, whereas in agile methods, which are iterative, the users are involved throughout the entire development process in several meetings. Zhang (2007) distinguished between four types of elicitation methods according to the means of communication: conversational (e.g., interviews, workshop focus groups, and brainstorming), observational (e.g., observation and ethnography), analytic (e.g., documentation, requirements reuse, laddering, card sorting, and repertory grids), and synthetic (e.g., scenarios, passive storyboards, prototyping, and contextual inquiry). Each type represents a specific interaction model between analysts and stakeholders. Selecting the appropriate requirements elicitation method/technique depends on several factors, such as the project environment, features of the technique, stakeholders’ characteristics, requirement sources, characteristics of the problem, and the solution domain (Hickey & Davis 2004; Anwar & Razali 2012). In this paper, we do not advocate the use of one particular method from any specific category, as in some cases multiple methods are preferred to elicit requirements at different stages and under different conditions. For example, focus groups, which belong to the conversational/group category, can be followed at a later stage of the system development process by a technique from the analysis category, such as prototyping. There is no ideal technique or method in requirements elicitation that applies to all situations (Hickey & Davis 2004; Davis & Hickey 2002; Glass 2002; Maiden & Rugg 1996). Multiple methods may sometimes complement each other, with the limitations of one approach being compensated for by the strengths of other methods (Anwar & Razali 2012; Zhang 2007; Sutcliffe 1997). In this paper, we focus on existing group-based methods. It is worth mentioning that the intent of this paper is not to provide an alternative to methods in other categories, such as prototyping, repertory grids, and model-driven techniques. Thus, comparisons between the proposed DFSG method and existing methods from other categories are irrelevant. Instead, the proposed method is compared with others from the same category (i.e., group-based methods). It is important to mention that, except for group-based methods, a detailed description of all the other methods and techniques is not the intent of this paper. These are beyond the current paper’s scope and have already been intensively reviewed in the literature (Mulla & Girase 2012; Zowghi & Coulin 2005; Zhang 2007; Tuunanen 2003; Nuseibeh & Easterbrook 2000; Maiden & Rugg 1996).
Among the various methods and techniques, we focus on group elicitation methods, also named conversational methods. These methods provide a means of verbal communication between two or more people to understand the problems and elicit the requirements. Methods in this category include focus groups, brainstorming, and JAD. This category also includes software tools, which automate the aforementioned original methods. They include group support systems workshops (McGoff et al. 1990) and global software development (Aranda et al. 2010). Both were developed to solve group communication problems by bringing efficiency and anonymity to group sessions of people located in different geographical locations. The use of many other automated methods has also been proposed, for example, E-JAD (Carmel et al. 1995), video conferencing of interviews (Lloyd et al. 2002), web-based focus groups (Farinha & Silva 2011), and electronic brainstorming (EBS) (Liikkanen et al. 2011). However, we do not discuss software/groupware tools in this paper because they merely enable and enhance the method that is used. In other words, these are not new methods per se. These software tools were built based on the principles and canons of the methods. We also consider that tools are different from methods/methodologies in the context of systems analysis and design domain.
McGraw and Harbison (1997) referred to several advantages of group-based methods. For example, they noted that sessions with a group help reveal multiple perspectives and lines of reasoning, thereby producing better collective information and more creative solutions than a single source. Turban and Aronson (1998) also highlighted the benefits of group methods, which are shown in Table 1.
Table 1. Potential benefits of group work (Turban & Aronson 1998, p. 351).
The following subsections discuss common face-to-face group elicitation methods and highlight their main pitfalls and problems.
2.1 Brainstorming
Brainstorming was originally developed by Alex Osborn in 1939 as a method for creative problem solving. He subsequently published his book *Applied Imagination* (1953), in which he provided systematic guidelines for applying brainstorming. Brainstorming is a process where participants from different stakeholder groups engage in informal discussion to rapidly generate as many ideas as possible without focusing on any one in particular (Zowghi & Coulin 2005). Leffingwell and Widrig (2000) divided brainstorming sessions into two phases: idea generation and idea reduction. The primary goal during idea generation is to produce as many ideas as possible. The principal aim during idea reduction is to analyze all the ideas generated. The idea reduction phase includes refining, ranking, and grouping. With the stakeholders’ consent, the most usable ideas become requirements for the product (Robertson & Robertson 1999). One of the advantages of using brainstorming is that it promotes freethinking and expression and so-called “out-of-the-box” thinking and allows the discovery of new and innovative solutions to existing problems (Zowghi & Coulin 2005; Leffingwell & Widrig 2003). In brainstorming sessions, people are told that all ideas are acceptable no matter how crazy they may seem and that they must not slow the process down by criticizing or debating the merits of various ideas (Leffingwell & Widrig 2000). In fact, the main principle of brainstorming is deferring any judgment about the quality of the ideas (Osborn 1953), and it is sometimes referred to as “no criticism” of ideas. According to Osborn, another main principle of brainstorming is to focus on quantity rather than quality. This emphasis on the free generation of ideas might be seen as a challenge or a criticism of the method because it is difficult to keep focused and stay within the boundaries of the problem that should be solved (Jonasson 2012). However, it is not usually the intended purpose of brainstorming sessions to resolve major issues or make key decisions (Zowghi & Coulin 2005). A disadvantage of this method is that participants are not permitted to criticize or judge ideas (this comes later) because the sole focus is on generating ideas. Maiden and Robertson (2005) used brainstorming to stimulate creative ideas and to uncover requirements for an air traffic management system, concluding that the overall process was successful but not in all workshop sessions. Liikkanen et al. (2011) recommended against involving a large number of participants in brainstorming sessions. Studies found that when there were more than three participants around a table, there was a greater likelihood of them distracting each other and blocking the production of ideas, either by the person speaking or by blocking others from thinking (Wilson 2006; Stroebe et al. 1992). This increased the risk of ideas being forgotten, the likelihood of free riding, and the pressure for social conformity. Diehl and Stroebe (1991, 1987) earlier confirmed this view in a review of 22 studies, demonstrating that group brainstorming produces fewer ideas than individuals brainstorming alone.
2.2 Joint Application Development (JAD)
JAD was originally developed by IBM in the late 1970s (Jonasson 2012). It is an organized and structured technique for requirements elicitation (Maiden & Rugg 1996) where all key stakeholders, including sponsors, project managers, business users, and IT professionals (system analysts/software engineers), as well as the JAD session facilitator and scribe, are involved in discussing the requirements, analyzing these requirements, and designing user interfaces. The main aim of JAD is to build consensus and agreement among stakeholders about the system requirements (Jonasson 2012). JAD differs from brainstorming because it determines requirements during the design phase, after establishing the main goals of the system (Zowghi & Coulin 2005). Jonasson (2012) pointed out that the optimal number of participants in a JAD session is between five and 10. Liou and Chen (1993) suggested that the following form the core of JAD sessions: a focused workshop facilitated by JAD leaders and scribes, users’ participation and management’s commitment, development of shared requirements and design specifications, application of structured procedures and methods, and an accelerated approach in a specific time frame. Jonasson (2012) pointed out that while the customer [i.e., user] is the most important person when gathering requirements, it is the facilitator who is most important to the success of JAD sessions. He also added that while the facilitator is the key to the success of the JAD session, the scribe is the key to accurate documentation of everything of value. Poor facilitators will create chaos in a session and quickly lose control. He added, “if at the end of three days of a JAD session, there is a blank piece of paper of the minutes of the meeting or a very short document, it probably means there is reliance on memory, which, even in the best cases, is unreliable” (no page number given). Hence, JAD is highly dependent on a facilitator and a scribe. Christel and Kang (1992) recognized that all participants funnel their ideas through a facilitator or a recorder. Thus, the recorder may inadvertently impose an interpretation on the collected data not shared by the group. They pointed out that an ideal method would allow for the transparent capture of the information discussed in meetings and the efficient organization of this information. Liou and Chen (1993) also recognized some problems with JAD, such as the unequal involvement of JAD participants, with only the comments of the most vocal captured, and the limited ability of analysts to judge group consensus in real time.
2.3 Focus Groups
The focus group method emerged in social research in the 1950s (Templeton 1992); a group of individuals is selected to discuss specific and focused issues based on their own perspectives and experience. The recommended number of focus group participants is within the range of 6–12 (Krueger 2000; Morgan & Scannell 1998; Kelley 1999). Focus groups are normally led by a moderator who should have communication and writing skills. Focus groups are similar to JAD sessions, but the group in the session acts in an advisory rather than a decision-making capacity, and the main outcome is not consensus as is the case in JAD (Jonasson 2012). Key factors in the success of group work are the harmony of the participants and the cohesion within the group. Stakeholders must feel comfortable and confident in speaking openly and freely (Zowghi & Coulin 2005). Homogeneity within the group is desirable in order to capitalize on people’s shared experiences (Kitzinger 1995). Morgan (1997, p. 35) suggested that “meeting with others whom they think of as possessing similar characteristics or levels of understanding about a given topic will be more appealing than meeting with those who are perceived to be different.” This method has been used for requirements elicitation with homogeneous and heterogeneous stakeholders. Nevertheless, these types of sessions can be difficult to organize due to the number of different stakeholders that may be involved in a project (Zowghi & Coulin 2005). In addition, they are subject to problems of dominant talkers and analysis costs (Farinha & Silva 2009).
Although there are many benefits of group interaction as discussed earlier, there are also disadvantages and limitations. The process of collaborative group work is often plagued with dysfunction known as process losses. According to Turban and Aronson (1998), there have been
attempts to improve the work of groups over the years. If some of the dysfunctions could be eliminated or lessened, the benefits would be greatly enhanced. They referred to drawbacks of group work, such as inappropriate representation in the group, a tendency to repeat what has already been said, inappropriate influences (such as domination of time, opinions, or topics by one or a few individuals, or fear of speaking), and time-consuming planning. Zowghi and Coulin (2005) also referred to limitations, including dominant participants, biased opinions, high logistic costs, and the difficulty of gathering stakeholders. Obviously, using group techniques for requirements elicitation requires the analyst not only to be proficient in applying the method but also to be proficient in group management, facilitation, and understanding group dynamics. Furthermore, these sessions demand more planning and follow-up, and are more time consuming to translate and transcribe, than methods involving an individual stakeholder (McGraw & Harbison 1997). Moreover, existing methods (e.g., JAD, brainstorming sessions, and focus groups) lack the means to manage the information elicited from large numbers of stakeholders (Dheepa et al. 2013). The methods also fail to scale to big projects with hundreds of stakeholders; many stakeholders are omitted, and their requirements are overlooked (Lim & Finkelstein 2012). Taking into consideration all the aforementioned problems and pitfalls of existing group elicitation methods, the current paper introduces a new method for requirements elicitation that can effectively solve some of their drawbacks.
3 OVERVIEW OF DISCOUNT FOCUS SUBGROUPS (DFSG) METHOD
Halaweh (2013, 2014) developed an innovative type of focus group called DFSG. He proposed using it as a qualitative research method to investigate IS/IT research phenomena. He listed three situations where this approach could be used: 1) when there are limited funds available for conducting research (monetary and human resources); 2) when researchers are investigating emerging topics (i.e., emerging technology) where it may be difficult to recruit research participants, particularly those who could provide insight and relevant information on the emerging research topic that could not be gained from one-on-one interviews or even focus groups with small numbers of participants; and 3) when a researcher wants to overcome challenges concerning the analysis of qualitative data, as a qualitative researcher typically spends considerable time transcribing every single recorded word. The latter process may result in hundreds of pages that are not entirely insightful or useful. Halaweh (2013) listed five methodological steps/principles for the application of DFSG (Table 2).
<table>
<thead>
<tr>
<th>Step</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>Utilize the limited resources available. Find participants from the work environment: in academia, students and instructors at the university; in industry, staff and workers from organizations. Both have various characteristics that are suitable for a large number of research topics. For example, university students are female or male, are of different ages, from different backgrounds, cultures and geographical areas, practice different religions, and some are professional workers in industry, have cell phones, and use the Internet.</td>
</tr>
<tr>
<td>2</td>
<td>Divide and assign roles. Divide the participants (the larger the number, the more numerous the insights and issues that emerge from the discussion) into subgroups and appoint one member of each subgroup as a research assistant/moderator to write notes and ideas (in the form of a list) from the subgroup discussions on paper, which will later be delivered to the researcher.</td>
</tr>
<tr>
<td>3</td>
<td>Avoid formality. Avoid using recorders and cameras to allow everyone to talk freely and spontaneously. Allow joking, debates, and fun. Avoiding formality increases participation. No one will be shy or judged by his/her speech and answers; rather, those who are unfamiliar with the topic or who have limited knowledge can pose questions and enrich the discussion. Having participants from the same environment (as indicated in step one) will facilitate the discussion and remove the formality, as the participants will know each other.</td>
</tr>
<tr>
<td>4</td>
<td>Open the discussion and document cross-discussions and debates among all subgroups that are not recorded by the subgroup leaders and that are derived from the interaction among the subgroups. Take the contribution from each subgroup in a circular round. Start with one idea/issue from each subgroup, then do another round to take another idea/issue, and allow intervention and debate from the other subgroups.</td>
</tr>
<tr>
<td>5</td>
<td>Consolidate and cluster lists of ideas written on paper by all subgroup leaders, as well as ideas written during open discussion and debating.</td>
</tr>
</tbody>
</table>
*Table 2. DFSG methodological steps/principles (Halaweh 2013)*
Halaweh (2013, 2014) pointed out that by applying the above steps, the data collection and analysis are carried out simultaneously, and therefore the researcher will not have to devote time later to transcribing each word and coding the keywords. The listed ideas from all the subgroups are usually ready for clustering and categorizing. Figure 1 shows a typical DFSG meeting in which the above steps are applied.

Halaweh showed that DFSG is considered innovative and different from traditional focus groups for several reasons. First, more than 12 participants (the maximum number suggested in the literature) can be involved, and the method remains effective, as they are divided into subgroups. He used 17 and 16 participants in two groups. This is different from all suggestions in the literature. He pointed out that the more participants involved, the more insight and discussion there will be. Using subgroups in one big group is a new way of including a large number of participants. He pointed out that this approach is needed for certain research topics that are new and emerging, as it is expected that some of the participants will be unfamiliar with the topic under investigation, but their role is to raise questions.
Another important aspect of DFSG is that it eliminates the costs of using voice/video recorders and of employing research assistants, as well as the time needed to transcribe each recorded word. Instead of transcribing irrelevant speeches, the researcher can focus on issues and themes and have participants record their own ideas. In addition, the cost of finding participants is reduced, as the participants are selected from the researcher’s environment or from one that can be easily accessed. Halaweh (2013, 2014) pointed out that DFSG is completely different from “discount usability engineering,” which was developed by Nielsen (1989) in the human–computer interface field as a formative technique (http://www.nngroup.com/articles/discount-usability-20-years/). Halaweh developed his method in the context of qualitative research as a formative and/or summative data collection and analysis technique involving unique and different procedures. For example, using large groups and dividing the participants into subgroups differs completely from Nielsen’s (1989) method, which recommends testing interface designs with a maximum of five participants. Other differences between the two methods include asking the participants to take notes and document their ideas (removing the need for transcriptions) and discussing unique items from each subgroup with the whole group.
Finally, based on research on emerging technology (new technology and its impact), Halaweh (2013, 2014) noted that the application of DFSG helps to promote awareness and learning among participants. He concluded that with emerging issues that are not common or widely known, it is acceptable to have some participants who are unfamiliar with the topic under investigation or who have little knowledge, as the participants learn from the discussion. This approach is not supported by the traditional focus group method, which focuses only on the objective of data collection.
4 RATIONALE FOR APPLICATION OF DFSG METHOD FOR REQUIREMENTS ELICITATION
In this section, we operationalize the concepts and principles of the DFSG method for requirements elicitation and justify using it as an effective alternative technique that improves the requirements elicitation activity by addressing problems with existing group-based methods. Table 3 shows the corresponding terms of DFSG for requirements elicitation.
<table>
<thead>
<tr>
<th>Personnel</th>
<th>DFSG research method terms</th>
<th>Application for requirements elicitation</th>
</tr>
</thead>
<tbody>
<tr>
<td>Appropriateness</td>
<td>Suitable for studying emerging IS/IT phenomena when participants have no/little knowledge about the subject under study</td>
<td>Suitable for developing novel and large systems (e.g., ERP); unclear requirements; involving a large number of stakeholders</td>
</tr>
<tr>
<td>Design/layout</td>
<td>Dividing a big group into subgroups of participants based on, for example, expertise, demographic variables, or randomly</td>
<td>Dividing a big group into subgroups of users based on their business functional areas, interface views, user community, or randomly (when people are from one functional area)</td>
</tr>
<tr>
<td>Output</td>
<td>Data</td>
<td>System requirements</td>
</tr>
<tr>
<td>Key role</td>
<td>Researcher</td>
<td>System analyst</td>
</tr>
</tbody>
</table>
Table 3. Equivalent terms of DFSG for requirements elicitation
In existing methods such as brainstorming, JAD, and focus group meetings, not all stakeholders are represented, as the recommended or optimal number of participants for a meeting is between six and 12 at most. In certain large projects, such as enterprise systems (ERP), large numbers of stakeholders are required. Organizations might need to involve more people in the development process of the system. There might be a need for several people from each department or functional area within the organization to be involved. As existing methods do not involve large numbers of stakeholders, there is no guarantee that one or two people from each department are representative. Some people are not included on the assumption that they are not sufficiently knowledgeable or not suitable for political reasons, a process that might give rise to bias in the outcomes. In addition, some people simply may not be involved due to the limited number of permitted participants (at most 12). Furthermore, the inclusion of only a small number of individuals increases the risk of agenda setting by one or two participants. This can lead to the views and requirements of other stakeholders being overlooked or neglected. Therefore, involving a large number of stakeholders will better represent the views of different departments. Meeting people from cross-functional areas and allowing interactions between them is also important for such systems because system components and business processes are interdependent. In some cases, conducting separate meetings with people from each department is not appropriate when the organization’s business processes are interdependent and when understanding how work is done requires collaboration between individuals from cross-functional areas. Moreover, conducting separate meetings with several groups of people from different divisions increases costs. Involving large numbers of participants is also vital when the system is novel and the requirements are not well defined or are unclear. In such cases, including greater numbers of stakeholders will yield more opinions and insight about the system’s features and capabilities. Existing methods do not show how wide representation and large numbers of users can be achieved. This can be addressed effectively by DFSG, where a large number of stakeholders (exceeding 20) can be involved in one meeting and the group can be divided into subgroups of four to seven individuals from each organizational department. Even if the number of participants exceeds 12, the method can still be effective because the session can be managed and organized by appointing one leader from each subgroup to record the participants’ needs of the system. The appointed leader can be nominated by the subgroup members, or he/she can volunteer to lead the group. The written points represent their agreement on the requirements. These should be documented clearly by the leader on a sheet of paper.
For example, statements can be written in a standard form, such as “The main objectives of the system are to….” The requirement statements can be written as “The system should/should not…” or “The system should enable/should have….” Accurate documentation by the leader is very important in DFSG because no audio or video recorders are used, in order to avoid formality. The absence of audio and video recording will ensure that people speak freely and without restriction and that people’s opinions will not be censored. All members of the subgroup will speak up in the discussion because they know each other well. Moreover, those who may be shy about speaking out are likely to feel more comfortable in a smaller subgroup (four to six members) than in a large group.
In existing group methods, one or a few individuals may dominate a session. This problem does not occur in DFSG because discussions on requirements take place in a circular format, with each subgroup mentioning one main requirement and the others then commenting on it if they see fit, especially if they had already documented it and drawn attention to the matter. This avoids repetition of the same requirements by another subgroup. Given the interdependent nature of some business processes, the debate will assist in building a common understanding among all those involved in the process.
Existing group methods are totally dependent on a facilitator and a scribe. This can lead to potential bias in involving particular people or documenting certain points and requirements intentionally or unintentionally. To overcome this limitation, in DFSG sessions, the users play those roles and document their needs of the system. DFSG can help to ensure transparency and reduce the possibility of misinterpretation of requirements written by facilitators, scribes, or moderators in other methods. The use of DFSG also reduces costs by removing the need to hire facilitators, scribes, and moderators and the need to transcribe recorded meetings and analyze the findings later.
By applying DFSG, the analyst will not have to spend time analyzing the requirements, as they are written in a form ready for clustering, categorization, and conversion into standard models (e.g., ERD or UML). The analyst applies clustering to the collected requirements to identify the functional and nonfunctional requirements. Applying clustering to the collected requirements ensures that the analyst does not enforce predefined classes of requirements; rather, they emerge from the written requirements. The proposed system will satisfy stakeholders because those involved document what they want rather than what the analyst, facilitator, or scribe wants. This avoids errors due to misunderstanding or misinterpretation being translated into the system.
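To make the clustering step more concrete, the following is a minimal illustrative sketch (not part of the DFSG method as published) of how an analyst might provisionally group requirement statements collected from subgroup leaders into functional and nonfunctional categories; the statements, keywords, and category labels are hypothetical.

```python
# Hypothetical requirement statements collected from subgroup leaders.
requirements = [
    "The system should enable staff to submit purchase orders online.",
    "The system should respond to searches within two seconds.",
    "The system should not allow access without authentication.",
    "The system should have a report generator for monthly sales.",
]

# Simple keyword heuristics for a first-pass clustering (an assumption for this sketch,
# not a rule prescribed by DFSG); the analyst would refine the clusters afterwards.
NONFUNCTIONAL_KEYWORDS = ("within", "seconds", "authentication", "secure", "available")

clusters = {"functional": [], "nonfunctional": []}
for statement in requirements:
    lowered = statement.lower()
    if any(keyword in lowered for keyword in NONFUNCTIONAL_KEYWORDS):
        clusters["nonfunctional"].append(statement)
    else:
        clusters["functional"].append(statement)

for category, items in clusters.items():
    print(category)
    for item in items:
        print("  -", item)
```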
Based on the above, DFSG can be used for requirements elicitation when 1) the development team aims to minimize the costs and budget of system development; 2) large numbers of stakeholders need to be involved, not only one or two representatives from each department within an organization, or more than 12 participants (the maximum whole-group size recommended by existing methods) are needed, for instance when the participants are homogeneous and from one division; 3) the system is novel, no similar systems have been developed, and the requirements are unclear or users are not familiar with similar systems; and 4) the development team needs to stay focused on system requirements rather than on anything else. DFSG removes the time normally spent on transcribing and analyzing stakeholders’ needs, a process that takes a long time, and focuses on specific needs rather than unrelated or irrelevant points.
The following table shows the improvements provided by DFSG in comparison with existing methods.
<table>
<thead>
<tr>
<th>Improvements</th>
<th>Focus group</th>
<th>Brainstorming</th>
<th>JAD</th>
<th>DFSG</th>
</tr>
</thead>
<tbody>
<tr>
<td>Wide representation of stakeholders by including large numbers</td>
<td>√</td>
<td>√</td>
<td>√</td>
<td>√√</td>
</tr>
<tr>
<td>Reduced costs and time by removing the need to transcribe and to document the requirements</td>
<td>√</td>
<td>√</td>
<td>√</td>
<td>√√</td>
</tr>
<tr>
<td>Dependency on stakeholders rather than a facilitator or scribe, thereby avoiding bias, misunderstanding, and misinterpretations</td>
<td>√</td>
<td>√</td>
<td>√</td>
<td>√√</td>
</tr>
<tr>
<td>Applicable to novel systems with unclear and unknown requirements (inclusion of a large number of participants means more ideas, thoughts, and insights about system functionality and capabilities)</td>
<td>√</td>
<td>√</td>
<td>√</td>
<td>√√</td>
</tr>
<tr>
<td>Low possibility of domination</td>
<td>√</td>
<td>√</td>
<td>√</td>
<td>√√</td>
</tr>
</tbody>
</table>
Table 4. Improvements provided by DFSG in comparison with existing methods.
1) √√ technique strongly recognizes the issue and provides a means to deal with it
2) √ technique does not address the issue at all or provides very little support for it
5 DISCUSSION AND CONCLUSION
This paper described the use of the DFSG method for requirements elicitation to solve problems encountered with existing face-to-face group-based methods. DFSG improves the requirements elicitation activity by solving several problems. These include 1) limited user participation and representation; 2) costs of conducting meetings, including the costs of moderators, scribes, and facilitators who are required to manage the sessions and document requirements, and the costs of using recorders and transcribing recorded meetings; 3) biases in opinions and documentation by the facilitator or the scribe; 4) individual opinion domination; 5) conflicts among the participants; and 6) time to gather and analyze requirements.
Unlike other methods, DFSG does not require developing software tools (i.e. e-DFSG), as does E-JAD or EBS. The essence of the method is to involve large numbers of stakeholders in the meeting, something that is not supported by existing methods (e.g., focus groups, brainstorming, and JAD) without the use of software tools. In addition, the verbal face-to-face discussions in DFSG assist in building consensus and resolving conflicts. This is achieved at two levels: through discussion among subgroup members first and then through open discussion between all the subgroups. This method can be more effective in situations where it is costly to use or purchase software tools.
Furthermore, the method is more effective than e-methods because face-to-face discussion is required, making it easier to resolve communication problems, such as misinterpretation and misunderstanding, that are likely to appear when software tools are used. DFSG overcomes such problems because issues and viewpoints are clarified in the course of the meeting. However, the DFSG method is not suitable for stakeholders distributed in different geographical locations.
The DFSG method is economical compared with other methods. It is important to mention that the word “discount” in the name of the method does not mean producing requirements of lower quantity or quality; rather, it means reducing requirements elicitation costs in a smart way. In reality, IT vendors and business organizations that develop systems want to minimize costs as much as possible. The method therefore helps project managers to reduce costs when the project budget is set. This reduction is achieved in several ways, as pointed out earlier.
The DFSG method can help to minimize the biases that can arise with the use of facilitators, moderators, or scribes because they are not required. In addition, the method limits potential misunderstandings and misinterpretations that can occur when such intermediaries are unfamiliar with the process of software development and the business domain. In DFSG, they are replaced by the users and the analyst, both of whom have very active roles in the process.
Unlike other methods, DFSG will not produce a large volume of data because the stakeholders’ requirements are already refined, focused, and specified clearly in the session and delivered to the system analyst on sheets. As no video or audio recordings are used, there are no data to transcribe, unlike traditional methods, which usually produce large amounts of data. Transcribing these data can take a long time and potentially result in hundreds of pages. The gathering of a large amount of data should not be the purpose per se; rather, it is the quality of the collected data that matters. One DFSG session might produce 10 pages of clear, direct, structured, and specified requirements (because they are written by the participants themselves and focused on the core issues; see, for example, the outputs generated from a DFSG session in Halaweh’s paper (2014)) that are perhaps equivalent to 50 transcribed pages of a recorded JAD or focus group meeting. These 50 pages will include both the requested requirements and other irrelevant and insignificant information, including introductions, participants’ stories, cases, jokes, conflicts, and interrupted talk. Therefore, transcribing the meeting and then extracting the requirements from the transcripts require time and effort, neither of which is required in DFSG.
The DFSG method can be used in combination with other elicitation methods that focus on different aspects of the stakeholders’ requirements and at different stages of the development process, such as questionnaires and prototypes. As mentioned before, there is no optimal method for all situations, and the use of multiple methods in some situations is necessary.
The current paper provided a conceptual justification for the use of this method and its applicability to requirements elicitation. Thus, complementing this work with practical implementation will increase the validity of the method and assist in generalizing the suitability of the method for requirements elicitation. Another future area of research is to develop evaluation criteria to ensure the quality of the data collected with this method, such as their accuracy, completeness, and validity.
References
THE ULTIMATE PYTHON HANDBOOK
CodeWithHarry
Welcome to the “Ultimate Python Programming Handbook,” your comprehensive guide to mastering Python programming. This handbook is designed for beginners and anyone looking to strengthen their foundational knowledge of Python, a versatile and user-friendly programming language.
PURPOSE AND AUDIENCE
This handbook aims to make programming accessible and enjoyable for everyone. Whether you’re a student new to coding, a professional seeking to enhance your skills, or an enthusiast exploring Python, this handbook will definitely be helpful. Python’s simplicity and readability make it an ideal starting point for anyone interested in programming.
STRUCTURE AND CONTENT
The handbook is divided into clear, concise chapters, each focused on a specific aspect of Python:
- **Fundamental Concepts:** Start with the basics, such as installing Python and writing your first program.
- **Practical Examples:** Illustrative examples and sample code demonstrate the application of concepts.
- **Hands-On Exercises:** End-of-chapter exercises reinforce learning and build confidence.
- **Additional Resources:** References to official Python documentation for deeper exploration.
WHY PYTHON?
Python is known for its simplicity and readability, making it perfect for beginners. It is a high-level, interpreted language with a broad range of libraries and frameworks, supporting applications in web development, data analysis, AI, and more. Python’s versatility and ease of use make it a valuable tool for both novice and experienced programmers.
ACKNOWLEDGEMENTS
I extend my gratitude to the educators, programmers, and contributors who have shared their knowledge and insights, shaping the content of this handbook. Special thanks to all the students watching my content on YouTube and to the Python community for maintaining a supportive and inspiring environment for learners worldwide.
CONCLUSION
Learning programming can be both exciting and challenging. The “Ultimate Python Programming Handbook” aims to make your journey smooth and rewarding. Watch my video along with following this handbook for optimal learning. Let this guide be your stepping stone to success in the world of programming.
CONTENTS
- Preface
- Purpose and Audience
- Structure and Content
- Why Python?
- Acknowledgements
- Conclusion
- Contents
- Python programming Handbook
- What is Programming?
- What is Python?
- Features of Python
- Installation
- Chapter 1 – Modules, Comments & pip
- Modules
- pip
- Types of Modules
- Using python as a calculator
- Comments
- Types of Comments
- Chapter 1 – Practice Set
- Chapter 2 – Variables and Datatypes
- Data Types
- Rules for choosing an identifier
- Operators in Python
- type() function and typecasting
- input() Function
- Chapter 2 – Practice Set
- Chapter 3 – Strings
- String Slicing
- Slicing With Skip Value
- String Functions
- Escape Sequence Characters
- Chapter 3 – Practice Set
- Chapter 4 – Lists And Tuples
- List Indexing
- Types Definitions in Python
- Advanced Type Hints
- Match Case
- Dictionary Merge & Update Operators
- Exception handling in Python
- Raising Exceptions
- try with else clause
- try with finally
- If __name__ == '__main__' in python
- The global keyword
- enumerate function in python
- List comprehensions
- Chapter 12 – Practice set
- Chapter 13 – Advanced Python 2
- Virtual environment
- Installation
- pip freeze command
- Lambda functions
- join method (strings)
- format method (strings)
- Map, Filter & Reduce
- Chapter 13 – Practice Set
- MEGA Project 1: Jarvis
- Features
- Workflow
- Libraries Used
- Mega Project 2: Auto Reply AI Chatbot
- Description
- Features
- Workflow
- Libraries Used
WHAT IS PROGRAMMING?
Just like we use Hindi or English to communicate with each other, we use a programming language like Python to communicate with the computer.
Programming is a way to instruct the computer to perform various tasks.
WHAT IS PYTHON?
Python is a simple and easy-to-understand language that feels like reading simple English. Its pseudo-code-like nature makes it easy to learn and understandable by beginners.
FEATURES OF PYTHON
- Easy to understand = Less development time
- Free and open source
- High level language
- Portable: Works on Linux / Windows / Mac.
- Fun to work with!
INSTALLATION
Python can be easily installed from python.org. Click the download button, choose the installer for your platform, and run the downloaded file to complete the setup.
CHAPTER 1 – MODULES, COMMENTS & PIP
Let’s write our very first python program. Create a file called hello.py and paste the below code in it.
```python
print("hello world") # print is a function (more later)
```
Execute this file (.py file) by typing `python hello.py` in the terminal, and you will see hello world printed on the screen.
### MODULES
A module is a file containing code written by somebody else (usually) which can be imported and used in our programs.
### PIP
Pip is the package manager for python. You can use pip to install a module on your system.
```bash
pip install flask # Installs Flask Module
```
### TYPES OF MODULES
There are two types of modules in Python.
1. Built in Modules (Preinstalled in Python)
2. External Modules (Need to install using pip)
Some examples of built in modules are os, random etc.
Some examples of external modules are tensorflow, flask etc.
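For example, a built-in module can be imported and used directly, with no pip install required. A small illustration using the standard random module:

```python
import random  # built-in module, ships with Python

# Pick a random whole number between 1 and 6, like rolling a die.
print(random.randint(1, 6))
```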
### USING PYTHON AS A CALCULATOR
We can use Python as a calculator by typing `python` in the terminal.
This opens **REPL** or Read Evaluate Print Loop.
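A quick calculator session in the REPL might look like this (the expressions are just examples):

```python
>>> 5 + 3
8
>>> 10 * 4 - 2
38
>>> 2 ** 10
1024
```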
### COMMENTS
Comments are used to write something which the programmer does not want to execute. This can be used to mark author name, date etc.
### TYPES OF COMMENTS
There are two types of comments in python.
1. Single Line Comments: To write a single line comment just add a ‘#’ at the start of the line.
```python
# This is a Single-Line Comment
```
2. Multiline Comments: To write multi-line comments you can use ‘#’ at the start of each line, or you can use a multiline string (triple quotes).
```python
"""This is an amazing example
of a Multiline comment!"""
```
CHAPTER 1 – PRACTICE SET
1. Write a program to print Twinkle twinkle little star poem in python.
2. Use REPL and print the table of 5 using it.
3. Install an external module and use it to perform an operation of your interest.
4. Write a python program to print the contents of a directory using the os module. Search online for the function which does that.
5. Label the program written in problem 4 with comments.
A variable is the name given to a memory location in a program. For example.
```
a = 30 # variables = container to store a value.
b = "harry" # keywords = reserved words in python
c = 71.22 # identifiers = class/function/variable name
```
## DATA TYPES
Primarily these are the following data types in Python:
1. Integers
2. Floating point numbers
3. Strings
4. Booleans
5. None
Python is a fantastic language that automatically identifies the type of data for us.
```
a = 71 # identifies a as class <int>
b = 88.44 # identifies b as class <float>
name = "harry" # identifies name as class <str>
```
### RULES FOR CHOOSING AN IDENTIFIER
- A variable name can contain alphabets, digits, and underscores.
- A variable name can only start with an alphabet or an underscore.
- A variable name can’t start with a digit.
- No white space is allowed inside a variable name.
Examples of a few variable names are: harry, one8, seven, _seven etc.
### OPERATORS IN PYTHON
Following are some common operators in python:
1. Arithmetic operators: +, -, *, / etc.
2. Assignment operators: =, +=, -= etc.
3. Comparison operators: ==, >, >=, <, != etc.
4. Logical operators: and, or, not.
TYPE() FUNCTION AND TYPECASTING.
The `type()` function is used to find the data type of a given variable in Python.
```python
a = 31
print(type(a)) # class <int>
b = "31"
print(type(b)) # class <str>
```
A number can be converted into a string and vice versa (if possible).
There are many functions to convert one data type into another.
```python
str(31) => "31" # integer to string conversion
int("32") => 32 # string to integer conversion
float(32) => 32.0 # integer to float conversion
```
... and so on.
Here "31" is a string literal and 31 a numeric literal.
INPUT () FUNCTION
This function allows the user to take input from the keyboard as a string.
```python
a = input("enter name") # if the user enters harry, a will be "harry"
```
It is important to note that the output of input is always a string (even if a number is entered).
1. Write a python program to add two numbers.
2. Write a python program to find remainder when a number is divided by z.
3. Check the type of variable assigned using input () function.
4. Use comparison operator to find out whether ‘a’ given variable a is greater than ‘b’ or not. Take a = 34 and b = 80
5. Write a python program to find an average of two numbers entered by the user.
6. Write a python program to calculate the square of a number entered by the user.
String is a data type in python.
String is a sequence of characters enclosed in quotes.
We can primarily write a string in these three ways.
```python
a = 'harry' # Single quoted string
b = "harry" # Double quoted string
c = '''harry''' # Triple quoted string
```
**STRING SLICING**
A string in python can be sliced for getting a part of the strings.
Consider the following string:
```
Name="harry" => Length = 5
```
The index in a string starts from 0 and goes up to (length - 1) in Python. In order to slice a string, we use the following syntax:
```python
sl = name [ind_start: ind_end]
```
- **first index included**
- **last index is not included**
- `name[0:3]` returns "har" – characters from index 0 to 2 (index 3 is excluded)
- `name[1:3]` returns "ar" – characters from index 1 to 2
**Negative Indices:** Negative indices can also be used. -1 corresponds to the last character (index length - 1), -2 to the second last (index length - 2), and so on.
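For instance, a small sketch using the same name variable:
```python
name = "harry"
print(name[-4:-1])  # "arr" (same as name[1:4])
print(name[-1])     # "y", the last character
```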
SLICING WITH SKIP VALUE
We can provide a skip value as a part of our slice like this:
```python
word = "amazing"
word[1: 6: 2] # "mzn"
```
Other advanced slicing techniques:
```python
Word = "amazing"
Word = [:7] # word [0:7] - 'amazing'
Word = [0:] # word [0:7] - 'amazing'
```
STRING FUNCTIONS
Some of the commonly used functions to perform operations on or manipulate strings are as follows. Let us assume there is a string ‘str’ as follows:
```python
str = 'harry'
```
Now when operated on this string ‘str’, these functions do the following:
1. `len ()` function – This function returns the length of the strings.
```python
str = "harry"
print(len(str)) # Output: 5
```
2. `string.endswith("rry")` – This function tells whether the string ends with "rry" or not. If the string is "harry", it returns True since "harry" ends with "rry".
```python
str = "harry"
print(str.endswith("rry")) # Output: True
```
3. `string.count("c")` – counts the total number of occurrences of any character.
```python
str = "harry"
count = str.count("r")
print(count) # Output: 2
```
4. `string.capitalize()` – This function capitalizes the first character of a given string.
```python
str = "harry"
capitalized_string = str.capitalize()
print(capitalized_string) # Output: "Harry"
```
5. `string.find(word)` – This function finds a word and returns the index of first occurrence of that word in the string.
```python
str = "harry"
index = str.find("rr")
print(index) # Output: 2
```
6. `string.replace(old, new)` – This function replaces the old word with the new word in the entire string.
```python
str = "harry"
replaced_string = str.replace("r", "l")
print(replaced_string) # Output: "hally"
```
ESCAPE SEQUENCE CHARACTERS
Sequence of characters after backslash "\" → Escape Sequence characters
Escape Sequence characters comprise of more than one character but represent one character when used within the strings.
Example: \n, \t, \', \" etc.
\n → newline, \t → tab, \' → single quote, \" → double quote, \\ → backslash
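A small sketch showing a few escape sequence characters in action:
```python
print("Dear Harry,\n\tThis course is nice.\nIt\'s fun!")
# \n starts a new line, \t inserts a tab, \' prints a single quote
```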
1. Write a python program to display a user entered name followed by Good Afternoon using input () function.
2. Write a program to fill in a letter template given below with name and date.
```python
letter = '''Dear <|Name|>,
You are selected!
<|Date|>'''
```
3. Write a program to detect double space in a string.
4. Replace the double space from problem 3 with single spaces.
5. Write a program to format the following letter using escape sequence characters.
```python
letter = "Dear Harry, this python course is nice. Thanks!"
```
Python lists are containers to store a set of values of any data type.
```python
friends = ["apple", "akash", "rohan", 7, False]
```
**LIST INDEXING**
A list can be indexed just like a string.
```python
l1 = [7,9,"harry"]
l1[0] # 7
l1[1] # 9
l1[70] # error
l1[0:2] # [7,9] #list slicing
```
**LIST METHODS.**
Consider the following list:
```python
l1 = [1,8,7,2,21,15]
```
- `l1.sort()`: updates the list to `[1,2,7,8,15,21]`
- `l1.reverse()`: updates the list to `[15,21,2,7,8,1]`
- `l1.append(8)`: adds 8 at the end of the list
- `l1.insert(3,8)`: This will add 8 at 3 index
- `l1.pop(2)`: Will delete element at index 2 and return its value.
- `l1.remove(21)`: Will remove 21 from the list.
**TUPLES IN PYTHON**
A tuple is an immutable data type in python.
```python
a = () # empty tuple
a = (1,) # tuple with only one element needs a comma
a = (1,7,2) # tuple with more than one element
```
**TUPLE METHODS**
Consider the following tuple.
```python
a = (1, 7, 2)
```
- `a.count(1)`: returns the number of times 1 occurs in a.
- `a.index(1)`: returns the index of the first occurrence of 1 in a.
1. Write a program to store seven fruits in a list entered by the user.
2. Write a program to accept marks of 6 students and display them in a sorted manner.
3. Check that a tuple type cannot be changed in python.
4. Write a program to sum a list with 4 numbers.
5. Write a program to count the number of zeros in the following tuple:
`a = (7, 0, 8, 0, 0, 9)`
Dictionary is a collection of keys-value pairs.
**Syntax:**
```python
a = {
"key": "value",
"harry": "code",
"marks": "100",
"list": [1, 2, 9]
}
print(a["key"]) # Output: "value"
print(a["list"]) # Output: [1, 2, 9]
```
**PROPERTIES OF PYTHON DICTIONARIES**
1. It is unordered.
2. It is mutable.
3. It is indexed.
4. Cannot contain duplicate keys.
**DICTIONARY METHODS**
Consider the following dictionary.
```python
a={"name":"harry",
"from":"india",
"marks":[92,98,96]}
```
- `a.items()`: Returns a list of (key,value)tuples.
- `a.keys()`: Returns a list containing dictionary's keys.
- `a.update({"friends": value})`: Updates the dictionary with the supplied key-value pairs.
- `a.get("name")`: Returns the value of the specified key ("harry" is returned here).
More methods are available on [docs.python.org](https://docs.python.org)
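A short illustrative run of these methods on the dictionary above (the value passed to update() is just an assumed example):
```python
a = {"name": "harry", "from": "india", "marks": [92, 98, 96]}

print(a.keys())    # dict_keys(['name', 'from', 'marks'])
print(a.items())   # dict_items([('name', 'harry'), ('from', 'india'), ('marks', [92, 98, 96])])
a.update({"friends": ["Rohan"]})   # adds the "friends" key
print(a.get("name"))               # harry
```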
**SETS IN PYTHON.**
Set is a collection of non-repetitive elements.
```python
s = set() # no repetition allowed!
s.add(1)
s.add(2) # or set ={1,2}
```
If you are a programming beginner without much knowledge of mathematical operations on sets, you can simply look at sets in python as data types containing unique values.
**PROPERTIES OF SETS**
1. Sets are unordered => Element’s order doesn’t matter
2. Sets are unindexed => Cannot access elements by index
3. There is no way to change items in sets.
4. Sets cannot contain duplicate values.
**OPERATIONS ON SETS**
Consider the following set:
```
set = {1,8,2,3}
```
- `len(set)`: Returns 4, the length of the set
- `set.remove(8)`: Updates the set `set` and removes 8 from `set`.
- `set.pop()`: Removes an arbitrary element from the set and return the element removed.
- `set.clear()`: empties the set `set`.
- `set.union({8,11})`: Returns a new set with all items from both sets. `{1,8,2,3,11}`.
- `set.intersection({8,11})`: Returns a new set containing only the items present in both sets: `{8}`.
For two sets A and B, the regions of a Venn diagram (R1 = items only in A, R2 = items in both, R3 = items only in B) correspond to the following operations:
- R2 = A ∩ B (intersection)
- R1 + R2 + R3 = A ∪ B (union)
- R1 + R3 = A Δ B (symmetric difference)
- R1 = A - B
- R3 = B - A
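To connect these regions to code, here is a small sketch using Python's set operators on two assumed example sets A and B:
```python
A = {1, 2, 3, 4}
B = {3, 4, 5}

print(A & B)   # intersection          -> {3, 4}          (R2)
print(A | B)   # union                 -> {1, 2, 3, 4, 5}  (R1 + R2 + R3)
print(A ^ B)   # symmetric difference  -> {1, 2, 5}        (R1 + R3)
print(A - B)   # A minus B             -> {1, 2}           (R1)
print(B - A)   # B minus A             -> {5}              (R3)
```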
1. Write a program to create a dictionary of Hindi words with values as their English translation. Provide user with an option to look it up!
2. Write a program to input eight numbers from the user and display all the unique numbers (once).
3. Can we have a set with 18 (int) and '18' (str) as a value in it?
4. What will be the length of following set s:
```python
s = set()
s.add(20)
s.add(20.0)
s.add('20') # length of s after these operations?
```
5. What is the type of 's'?
6. Create an empty dictionary. Allow 4 friends to enter their favorite language as value and use key as their names. Assume that the names are unique.
7. If the names of 2 friends are the same, what will happen to the program in problem 6?
8. If the languages of two friends are the same, what will happen to the program in problem 6?
9. Can you change the values inside a list which is contained in set S?
```python
s = {8, 7, 12, "Harry", [1,2]}
```
CHAPTER 6 – CONDITIONAL EXPRESSION
Sometimes we want to play PUBG on our phone if the day is Sunday.
Sometimes we order Ice Cream online if the day is sunny.
Sometimes we go hiking if our parents allow.
All these are decisions which depend on a condition being met.
In python programming too, we must be able to execute instructions on a condition(s) being met.
This is what conditionals are for!
IF ELSE AND ELIF IN PYTHON
If else and elif statements are a multiway decision taken by our program due to certain conditions in our code.
Syntax:
```python
if (condition1):    # if condition1 is True
    print("yes")
elif (condition2):  # if condition2 is True
    print("no")
else:               # otherwise
    print("maybe")
```
CODE EXAMPLE.
```python
a = 22
if (a > 9):
    print("greater")
else:
    print("lesser")
```
Quick Quiz: Write a program to print yes when the age entered by the user is greater than or equal to 18.
RELATIONAL OPERATORS
Relational Operators are used to evaluate conditions inside the if statements. Some examples of relational operators are:
`==`: equals.
`>=`: greater than/equal to.
`<=`: lesser than/equal to.
LOGICAL OPERATORS
In python logical operators operate on conditional statements. For Example:
- `and` – true if both operands are true else false.
- `or` – true if at least one operand is true or else false.
- `not` – inverts true to false & false to true.
ELIF CLAUSE
`elif` in python means [else if]. An if statement can be chained together with a lot of these `elif` statements, followed by an else statement.
```python
if (condition1):
    # code
elif (condition2):  # this ladder will stop once a condition in an if or elif is met
    # code
elif (condition3):
    # code
else:
    # code
```
IMPORTANT NOTES:
1. There can be any number of `elif` statements.
2. Last else is executed only if all the conditions inside `elifs` fail.
1. Write a program to find the greatest of four numbers entered by the user.
2. Write a program to find out whether a student has passed or failed if it requires a total of 40% and at least 33% in each subject to pass. Assume 3 subjects and take marks as an input from the user.
3. A spam comment is defined as a text containing following keywords: “Make a lot of money”, “buy now”, “subscribe this”, “click this”. Write a program to detect these spams.
4. Write a program to find whether a given username contains less than 10 characters or not.
5. Write a program which finds out whether a given name is present in a list or not.
6. Write a program to calculate the grade of a student from his marks from the following scheme:
- 90 – 100 => Ex
- 80 – 90 => A
- 70 – 80 => B
- 60 – 70 => C
- 50 – 60 => D
- <50 => F
7. Write a program to find out whether a given post is talking about “Harry” or not.
CHAPTER 7 – LOOPS IN PYTHON
Sometimes we want to repeat a set of statements in our program. For instance: Print 1 to 1000.
Loops make it easy for a programmer to tell the computer which set of instructions to repeat and how!
TYPES OF LOOPS IN PYTHON
Primarily there are two types of loops in python.
- while loops
- for loops
We will look into these one by one.
WHILE LOOP
**Syntax:**
```python
while (condition):  # the block keeps executing as long as the condition is True
    # Body of the loop
```
In while loops, the condition is checked first. If it evaluates to true, the body of the loop is executed otherwise not!
If the loop is entered, the process of [condition check & execution] is continued until the condition becomes False.
**Quick Quiz:** Write a program to print 1 to 50 using a while loop.
**Example:**
```python
i = 0
while i < 5:  # prints "Harry" 5 times!
    print("Harry")
    i = i + 1
```
**Note:** If the condition never becomes false, the loop keeps getting executed.
**Quick Quiz:** Write a program to print the content of a list using while loops.
FOR LOOP
A for loop is used to iterate through a sequence like list, tuple, or string [iterables]
**Syntax:**
```python
l = [1, 7, 8]
for item in l:
    print(item)  # prints 1, 7 and 8
```
RANGE FUNCTION IN PYTHON
The `range()` function in python is used to generate a sequence of numbers.
We can also specify the start, stop and step-size as follows:
```python
range(start, stop, step_size)
# step_size is usually not used with range()
```
AN EXAMPLE DEMONSTRATING RANGE () FUNCTION.
```python
for i in range(0, 7):  # range(7) can also be used
    print(i)           # prints 0 to 6
```
FOR LOOP WITH ELSE
An optional else can be used with a for loop if the code is to be executed when the loop exhausts.
**Example:**
```python
l = [1, 7, 8]
for item in l:
    print(item)
else:
    print("done")  # this is printed when the loop exhausts!
```
**Output:**
```
1
7
8
done
```
THE BREAK STATEMENT
‘break’ is used to come out of the loop when encountered. It instructs the program to – exit the loop now.
Example:
```python
for i in range(0, 80):
    print(i)        # this will print 0, 1, 2 and 3
    if i == 3:
        break
```
THE CONTINUE STATEMENT
‘continue’ is used to stop the current iteration of the loop and continue with the next one. It instructs the Program to “skip this iteration”.
Example:
```python
for i in range(4):
    print("printing")
    if i == 2:   # if i is 2, the rest of this iteration is skipped
        continue
    print(i)
```
PASS STATEMENT
pass is a null statement in python.
It instructs to “do nothing”.
Example:
```python
l = [1, 7, 8]
for item in l:
    pass  # without pass, the program will throw an error
```
CHAPTER 7 – PRACTICE SET
1. Write a program to print multiplication table of a given number using for loop.
2. Write a program to greet all the person names stored in a list ‘l’ and which starts with S.
`l = ["Harry", "Soham", "Sachin", "Rahul"]`
3. Attempt problem 1 using while loop.
4. Write a program to find whether a given number is prime or not.
5. Write a program to find the sum of first n natural numbers using while loop.
6. Write a program to calculate the factorial of a given number using for loop.
7. Write a program to print the following star pattern.
`*`
`***` for n = 3
8. Write a program to print the following star pattern:
`*`
`**` for n = 3
9. Write a program to print the following star pattern.
`* *`
`* for n = 3`
10. Write a program to print multiplication table of n using for loops in reversed order.
A function is a group of statements performing a specific task.
When a program gets bigger in size and its complexity grows, it gets difficult for a programmer to keep track of which piece of code is doing what!
A function can be reused by the programmer in a given program any number of times.
**EXAMPLE AND SYNTAX OF A FUNCTION**
The syntax of a function looks as follows:
```python
def func1():
    print('hello')
```
This function can be called any number of times, anywhere in the program.
**FUNCTION CALL**
Whenever we want to call a function, we put the name of the function followed by parentheses as follows:
```python
func1() # This is called function call.
```
**FUNCTION DEFINITION**
The part containing the exact set of instructions which are executed during the function call.
**Quick Quiz:** Write a program to greet a user with “Good day” using functions.
**TYPES OF FUNCTIONS IN PYTHON**
There are two types of functions in python:
- Built in functions *(Already present in python)*
- User defined functions *(Defined by the user)*
Examples of built in functions includes `len()`, `print()`, `range()` etc.
The `func1()` function we defined is an example of user defined function.
**FUNCTIONS WITH ARGUMENTS**
A function can accept some values it can work with. We can put these values in the parentheses.
A function can also return a value as shown below:
```python
def greet(name):
    gr = "hello " + name
    return gr

a = greet("harry")   # a will now contain "hello harry"
```
### DEFAULT PARAMETER VALUE
We can have a default value for an argument in a function.
If we specify name = “stranger” in the line containing def, this value is used when no argument is passed.
**Example:**
```python
def greet(name="stranger"):
    # function body

greet()          # name will be "stranger" in the function body (default)
greet("harry")   # name will be "harry" in the function body (passed)
```
### RECURSION
Recursion is a technique in which a function calls itself.
It can be used to directly translate a mathematical formula into a function.
**Example:**
```
factorial(n) = n x factorial(n-1)
```
This function can be defined as follows:
```python
def factorial(n):
    if n == 0 or n == 1:  # base condition which doesn’t call the function any further
        return 1
    else:
        return n * factorial(n-1)  # function calling itself
```
This works as follows:
```
factorial(5)
= 5 x factorial(4)
= 5 x 4 x factorial(3)
= 5 x 4 x 3 x factorial(2)
= 5 x 4 x 3 x 2 x factorial(1)
= 5 x 4 x 3 x 2 x 1
= 120
```
The programmer needs to be extremely careful while working with recursion to ensure that the function doesn’t indefinitely keep calling itself. Recursion is sometimes the most direct way to code an algorithm.
1. Write a program using functions to find greatest of three numbers.
2. Write a python program using function to convert Celsius to Fahrenheit.
3. How do you prevent the python print() function from printing a new line at the end?
4. Write a recursive function to calculate the sum of first n natural numbers.
5. Write a python function to print first n lines of the following pattern:
*
**
***
(for n = 3)
6. Write a python function which converts inches to cms.
7. Write a python function to remove a given word from a list and strip it at the same time.
8. Write a python function to print multiplication table of a given number.
PROJECT 1: SNAKE, WATER, GUN GAME
We all have played snake, water gun game in our childhood. If you haven’t, google the rules of this game and write a python program capable of playing this game with the user.
CHAPTER 9 – FILE I/O
The random-access memory is volatile, and all its contents are lost once a program terminates. In order to persist the data forever, we use files.
A file is data stored in a storage device. A python program can talk to the file by reading content from it and writing content to it.
TYPE OF FILES.
There are 2 types of files:
1. Text files (.txt, .c, etc)
2. Binary files (.jpg, .dat, etc)
Python has a lot of functions for reading, updating, and deleting files.
OPENING A FILE
Python has an open() function for opening files. It takes 2 parameters: filename and mode.
```python
# open("filename", "mode of opening(read mode by default)")
open("this.txt", "r")
```
READING A FILE IN PYTHON
```python
# Open the file in read mode
f = open("this.txt", "r")
# Read its contents
text = f.read()
# Print its contents
print(text)
```
OTHER METHODS TO READ THE FILE.
We can also use `f.readline()` function to read one full line at a time.
```python
f.readline() # Read one line from the file.
```
MODES OF OPENING A FILE
- `r` – open for reading
- `w` - open for writing
- `a` - open for appending
- `+` - open for updating.
- `'rb'` will open for read in binary mode.
- `'rt'` will open for read in text mode.
WRITE FILES IN PYTHON
In order to write to a file, we first open it in write or append mode after which, we use the python’s `f.write()` method to write to the file!
```python
# Open the file in write mode
f = open("this.txt", "w")
# Write a string to the file
f.write("this is nice")
# Close the file
f.close()
```
WITH STATEMENT
The best way to open and close the file automatically is the with statement.
```python
# Open the file in read mode using 'with', which automatically closes the file
with open("this.txt", "r") as f:
    # Read the contents of the file
    text = f.read()
    # Print the contents
    print(text)
```
1. Write a program to read the text from a given file ‘poems.txt’ and find out whether it contains the word ‘twinkle’.
2. The game() function in a program lets a user play a game and returns the score as an integer. You need to read a file ‘Hi-score.txt’ which is either blank or contains the previous Hi-score. You need to write a program to update the Hi-score whenever the game() function breaks the Hi-score.
3. Write a program to generate multiplication tables from 2 to 20 and write them to different files. Place these files in a folder for a 13-year-old.
4. A file contains a word “Donkey” multiple times. You need to write a program which replace this word with ###### by updating the same file.
5. Repeat program 4 for a list of such words to be censored.
6. Write a program to mine a log file and find out whether it contains ‘python’.
7. Write a program to find out the line number where python is present from ques 6.
8. Write a program to make a copy of a text file “this.txt”
9. Write a program to find out whether a file is identical & matches the content of another file.
10. Write a program to wipe out the content of a file using python.
11. Write a python program to rename a file to “renamed_by_python.txt.”
Solving a problem by creating object is one of the most popular approaches in programming. This is called object-oriented programming.
This concept focuses on using reusable code (DRY Principle).
CLASS
A class is a blueprint for creating object.
Syntax:
```python
class Employee:
    # Class name is written in PascalCase
    # Methods & Variables
```
OBJECT
An object is an instantiation of a class. When a class is defined, only a template (blueprint) is defined; memory is allocated only after an object is instantiated.
Objects of a given class can invoke the methods available to it without revealing the implementation details to the user. – Abstractions & Encapsulation!
MODELLING A PROBLEM IN OOPS
We identify the following in our problem.
- Noun → Class → Employee
- Adjective → Attributes → name, age, salary
- Verbs → Methods → getSalary(), increment()
CLASS ATTRIBUTES
An attribute that belongs to the class rather than a particular object.
Example:
```python
class Employee:
    company = "Google"   # class attribute (belongs to the class)

harry = Employee()            # object instantiation
harry.company                 # "Google"
Employee.company = "YouTube"  # changing the class attribute
```
INSTANCE ATTRIBUTES
An attribute that belongs to the Instance (object). Assuming the class from the previous example:
```python
harry.name = "harry"
harry.salary = "30k"   # adding instance attributes
```
Note: Instance attributes take precedence over class attributes during assignment and retrieval.
When looking up harry.attribute, Python checks the following:
1) Is attribute present in object?
2) Is attribute present in class?
SELF PARAMETER
self refers to the instance of the class. It is automatically passed with a function call from an object.
```python
harry.getSalary()   # here, self is harry
# equivalent to Employee.getSalary(harry)
```
The function getSalary() is defined as:
```python
class Employee:
    company = "Google"

    def getSalary(self):
        print("Salary is not there")
```
STATIC METHOD
Sometimes we need a function that does not use the self parameter. We can define a static method like this:
```python
@staticmethod   # decorator to mark greet as a static method
def greet():
    print("Hello user")
```
__INIT__() CONSTRUCTOR
__init__() is a special method which is first run as soon as the object is created. __init__() method is also known as constructor.
It takes ‘self’ argument and can also take further arguments.
For Example:
```python
class Employee:
    def __init__(self, name):
        self.name = name

    def getSalary(self):
        ...

harry = Employee("Harry")
```
1. Create a class “Programmer” for storing information of few programmers working at Microsoft.
2. Write a class “Calculator” capable of finding square, cube and square root of a number.
3. Create a class with a class attribute a; create an object from it and set ‘a’ directly using ‘object.a = 0’. Does this change the class attribute?
4. Add a static method in problem 2, to greet the user with hello.
5. Write a Class ‘Train’ which has methods to book a ticket, get status (no of seats) and get fare information of train running under Indian Railways.
6. Can you change the self-parameter inside a class to something else (say “harry”). Try changing self to “slf” or “harry” and see the effects.
Inheritance is a way of creating a new class from an existing class.
**Syntax:**
```python
class Employee:              # Base class
    # Code

class Programmer(Employee):  # Derived or child class
    # Code
```
We can use the methods and attributes of ‘Employee’ in a ‘Programmer’ object.
Also, we can overwrite or add new attributes and methods in ‘Programmer’ class.
**TYPES OF INHERITANCE**
- Single inheritance
- Multiple inheritance
- Multilevel inheritance
**SINGLE INHERITANCE**
Single inheritance occurs when child class inherits only a single parent class.
MULTIPLE INHERITANCE
Multiple inheritance occurs when the child class inherits from more than one parent class.
MULTILEVEL INHERITANCE
When a child class becomes a parent for another child class.
SUPER() METHOD
super() method is used to access the methods of a super class in the derived class.
```python
super().__init__()
# __init__() Calls constructor of the base class
```
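A minimal sketch of super() in a derived class (the attribute names here are assumed for illustration):
```python
class Employee:
    def __init__(self, name):
        self.name = name

class Programmer(Employee):
    def __init__(self, name, language):
        super().__init__(name)   # calls Employee's constructor
        self.language = language

p = Programmer("Harry", "Python")
print(p.name, p.language)        # Harry Python
```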
CLASS METHOD
A class method is a method which is bound to the class and not the object of the class.
@classmethod decorator is used to create a class method.
**Syntax:**
```
@classmethod
def my_method(cls, p1, p2):
```
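A small sketch of a class method that changes a class attribute (the method name changeCompany is assumed for illustration):
```python
class Employee:
    company = "Google"

    @classmethod
    def changeCompany(cls, newCompany):
        cls.company = newCompany   # modifies the class attribute, not an instance attribute

Employee.changeCompany("YouTube")
print(Employee.company)            # YouTube
```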
@PROPERTY DECORATORS
Consider the following class:
```python
class Employee:
    @property
    def name(self):
        return self.ename
```
If e = Employee() is an object of the class Employee, we can use print(e.name) to print the ename attribute; this internally calls the name() function.
GETTERS AND SETTERS
The method with the '@property' decorator is called the getter method.
We can define a setter for the same property using the @name.setter decorator like below:
```python
@name.setter
def name(self, value):
    self.ename = value
```
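Used together, the getter and setter make ename behave like a normal attribute (a sketch assuming the Employee class above):
```python
e = Employee()
e.name = "Harry"   # calls the setter, which stores the value in e.ename
print(e.name)      # calls the getter and prints "Harry"
```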
OPERATOR OVERLOADING IN PYTHON
Operators in Python can be overloaded using dunder methods.
These methods are called when a given operator is used on the objects.
Operators in Python can be overloaded using the following methods:
```python
p1+p2 # p1.__add__(p2)
p1-p2 # p1.__sub__(p2)
p1*p2 # p1.__mul__(p2)
p1/p2 # p1.__truediv__(p2)
p1//p2 # p1.__floordiv__(p2)
```
Other dunder/magic methods in Python:
```python
__str__()   # used to define what gets displayed upon calling str(obj) or print(obj)
__len__()   # used to define what gets returned upon calling len(obj)
```
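A small sketch tying these dunder methods together (the Point class here is an assumed example):
```python
class Point:
    def __init__(self, x, y):
        self.x = x
        self.y = y

    def __add__(self, other):   # called for p1 + p2
        return Point(self.x + other.x, self.y + other.y)

    def __str__(self):          # called for str(p) and print(p)
        return f"Point({self.x}, {self.y})"

p1 = Point(1, 2)
p2 = Point(3, 4)
print(p1 + p2)   # Point(4, 6)
```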
CHAPTER 11 - PRACTICE SET
1. Create a class (2-D vector) and use it to create another class representing a 3-D vector.
2. Create a class ‘Pets’ from a class ‘Animals’ and further create a class ‘Dog’ from ‘Pets’. Add a method ‘bark’ to class ‘Dog’.
3. Create a class ‘Employee’ and add salary and increment properties to it.
Write a method ‘salaryAfterIncrement’ method with a @property decorator with a setter which changes the value of increment based on the salary.
4. Write a class ‘Complex’ to represent complex numbers, along with overloaded operators ‘+’ and ‘*’ which adds and multiplies them.
5. Write a class vector representing a vector of n dimensions. Overload the + and * operator which calculates the sum and the dot(.) product of them.
6. Write __str__() method to print the vector as follows:
7i + 8j +10k
Assume vector of dimension 3 for this problem.
7. Override the __len__() method on vector of problem 5 to display the dimension of the vector.
We are going to write a program that generates a random number and asks the user to guess it.
If the player’s guess is higher than the actual number, the program displays “Lower number please”. Similarly, if the user’s guess is too low, the program prints “Higher number please”. When the user guesses the correct number, the program displays the number of guesses the player used to arrive at the number.
**Hint:** Use the random module.
NEWLY ADDED FEATURES IN PYTHON
Following are some of the newly added features in Python programming language:
WALRUS OPERATOR
The walrus operator (:=), introduced in Python 3.8, allows you to assign values to variables as part of an expression. This operator, named for its resemblance to the eyes and tusks of a walrus, is officially called the "assignment expression."
```
# Using walrus operator
if (n := len([1, 2, 3, 4, 5])) > 3:
    print(f"List is too long ({n} elements, expected <= 3)")
# Output: List is too long (5 elements, expected <= 3)
```
In this example, n is assigned the value of len([1, 2, 3, 4, 5]) and then used in the comparison within the if statement.
TYPE DEFINITIONS IN PYTHON
Type hints are added using the colon (:) syntax for variables and the -> syntax for function return types.
```
# Variable type hint
age: int = 25
# Function type hints
def greeting(name: str) -> str:
    return f"Hello, {name}!"
# Usage
print(greeting("Alice")) # Output: Hello, Alice!
```
ADVANCED TYPE HINTS
Python's typing module provides more advanced type hints, such as List, Tuple, Dict, and Union.
You can import List, Tuple and Dict types from the typing module like this:
```
from typing import List, Tuple, Dict, Union
```
The syntax of types looks something like this:
```python
from typing import List, Tuple, Dict, Union
# List of integers
numbers: List[int] = [1, 2, 3, 4, 5]
# Tuple of a string and an integer
person: Tuple[str, int] = ("Alice", 30)
# Dictionary with string keys and integer values
scores: Dict[str, int] = {
"Alice": 90,
"Bob": 85
}
# Union type for variables that can hold multiple types
identifier: Union[int, str] = "ID123"
identifier = 12345 # Also valid
```
These annotations help in making the code self-documenting and allow developers to understand the data structures used at a glance.
**MATCH CASE**
Python 3.10 introduced the match statement, which is similar to the switch statement found in other programming languages.
The basic syntax of the match statement involves matching a variable against several cases using the case keyword.
```python
def http_status(status):
    match status:
        case 200:
            return "OK"
        case 404:
            return "Not Found"
        case 500:
            return "Internal Server Error"
        case _:
            return "Unknown status"
# Usage
print(http_status(200)) # Output: OK
print(http_status(404)) # Output: Not Found
print(http_status(500)) # Output: Internal Server Error
print(http_status(403)) # Output: Unknown status
```
**DICTIONARY MERGE & UPDATE OPERATORS**
New operators `|` and `|=` allow for merging and updating dictionaries.
```python
dict1 = {'a': 1, 'b': 2}
dict2 = {'b': 3, 'c': 4}
merged = dict1 | dict2
print(merged) # Output: {'a': 1, 'b': 3, 'c': 4}
```
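The in-place variant `|=` updates an existing dictionary instead of creating a new one (both operators require Python 3.9 or later):
```python
dict1 = {'a': 1, 'b': 2}
dict2 = {'b': 3, 'c': 4}
dict1 |= dict2
print(dict1)  # Output: {'a': 1, 'b': 3, 'c': 4}
```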
You can now use multiple context managers in a single `with` statement more cleanly using parenthesised context managers:
```
with (open('file1.txt') as f1,
      open('file2.txt') as f2):
    # Process files
```
### EXCEPTION HANDLING IN PYTHON
There are many built-in exceptions which are raised in python when something goes wrong.
Exception in python can be handled using a `try` statement. The code that handles the exception is written in the `except` clause.
```python
try:
    # Code which might throw an exception
except Exception as e:
    print(e)
```
When the exception is handled, the code flow continues without program interruption.
We can also specify the exception to catch like below:
```python
try:
    # Code
except ZeroDivisionError:
    # Code
except TypeError:
    # Code
except:
    # Code (all other exceptions are handled here)
```
### RAISING EXCEPTIONS
We can raise custom exceptions using the ‘raise’ keyword in python.
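A minimal sketch of raising an exception (the condition and message are assumed examples):
```python
salary = -5
if salary < 0:
    raise ValueError("Salary cannot be negative")   # stops the program with this error unless handled
```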
### TRY WITH ELSE CLAUSE
Sometimes we want to run a piece of code when try was successful.
```python
try:
    # Some code
except:
    # Some code
else:
    # Some code (executed only if the try was successful)
```
TRY WITH FINALLY
Python offers a ‘finally’ clause which ensures execution of a piece of code irrespective of the exception.
```python
try:
    # Some code
except:
    # Some code
finally:
    # Some code (executed regardless of error!)
```
IF __NAME__ == ‘__MAIN__’ IN PYTHON
‘__name__’ evaluates to the name of the module in python from where the program is run.
If the module is being run directly from the command line, the ‘__name__’ is set to string “__main__”. Thus, this behaviour is used to check whether the module is run directly or imported to another file.
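A minimal sketch of this idiom:
```python
def main():
    print("Running directly")

if __name__ == '__main__':
    main()   # runs only when this file is executed directly, not when it is imported
```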
THE GLOBAL KEYWORD
‘global’ keyword is used to modify the variable outside of the current scope.
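A minimal sketch of the global keyword:
```python
x = 10

def change():
    global x   # refer to the global x instead of creating a local variable
    x = 20

change()
print(x)   # 20
```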
ENUMERATE FUNCTION IN PYTHON
The ‘enumerate’ function adds a counter to an iterable and returns it.
```python
for index, item in enumerate(list1):
    print(index, item)   # prints the items of list1 along with their index
```
LIST COMPREHENSIONS
List Comprehension is an elegant way to create lists based on existing lists.
```python
list1 = [1, 7, 12, 11, 22]
list2 = [item for item in list1 if item > 8]   # [12, 11, 22]
```
1. Write a program to open three files 1.txt, 2.txt and 3.txt. If any of these files is not present, a message saying so must be printed, without exiting the program.
2. Write a program to print third, fifth and seventh element from a list using enumerate function.
3. Write a list comprehension to print a list which contains the multiplication table of a user entered number.
4. Write a program to display a/b where a and b are integers. If b=0, display infinite by handling the ‘ZeroDivisionError’.
5. Store the multiplication tables generated in problem 3 in a file named Tables.txt.
VIRTUAL ENVIRONMENT
An environment which is the same as the system interpreter but is isolated from the other Python environments on the system.
INSTALLATION
To use virtual environments, we write:
```
pip install virtualenv # Install the package
```
We create a new environment using:
```
virtualenv myprojectenv # Creates a new venv
```
The next step after creating the virtual environment is to activate it.
We can now use this virtual environment as a separate Python installation.
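For reference, activation is typically done with a command like the following (the exact path depends on your platform and the environment name used above):
```
myprojectenv\Scripts\activate        # Windows
source myprojectenv/bin/activate     # Linux / macOS
```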
PIP FREEZE COMMAND
‘pip freeze’ returns all the packages installed in a given python environment along with their versions.
```
pip freeze > requirements.txt
```
The above command creates a file named ‘requirements.txt’ in the same directory containing the output of ‘pip freeze’.
We can distribute this file to other users, and they can recreate the same environment using:
```
pip install -r requirements.txt
```
LAMBDA FUNCTIONS
A function created from an expression using the ‘lambda’ keyword.
**Syntax:**
```
lambda arguments: expression
# can be used as a normal function
```
**Example:**
```
square = lambda x:x*x
square(6) # returns 36
sum = lambda a, b, c: a + b + c
sum(1, 2, 3)   # returns 6
```
**JOIN METHOD (STRINGS)**
Creates a string from iterable objects.
```python
l = ["apple", "mango", "banana"]
result = ", and, ".join(l)
print(result)
```
The above line will return “apple, and, mango, and, banana”.
**FORMAT METHOD (STRINGS)**
Formats the values inside the string into a desired output.
```python
template.format(p1, p2...)
```
*Example:*
```
"{} is a good {}".format("harry", "boy") #1
"{} is a good {}".format("harry", "boy") #2.
```
# output for 1:
# harry is a good boy
# output for 2:
# boy is a good harry
**MAP, FILTER & REDUCE**
Map applies a function to all the items in an input_list.
*Syntax.*
```python
map(function, input_list)
```
Filter creates a list of items for which the function returns true.
```python
list(filter(function, input_list))
```
Reduce applies a rolling computation to sequential pair of elements.
```python
from functools import reduce
val = reduce(function, list1)
# the function can be a lambda function
```
For example, if the function computes the sum of two numbers and the list is [1, 2, 3, 4], reduce returns ((1 + 2) + 3) + 4 = 10.
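A small concrete sketch combining the three (the lambdas are assumed examples):
```python
from functools import reduce

numbers = [1, 2, 3, 4]

squares = list(map(lambda x: x * x, numbers))          # [1, 4, 9, 16]
evens = list(filter(lambda x: x % 2 == 0, numbers))    # [2, 4]
total = reduce(lambda a, b: a + b, numbers)            # 10
print(squares, evens, total)
```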
CHAPTER 13 - PRACTICE SET
1. Create two virtual environments, install few packages in the first one. How do you create a similar environment in the second one?
2. Write a program to input name, marks and phone number of a student and format it using the format function like below:
“The name of the student is Harry, his marks are 72 and phone number is 99999888”
3. A list contains the multiplication table of 7. Write a program to convert it to vertical string of same numbers.
7
14
.
.
.
4. Write a program to filter a list of numbers which are divisible by 5.
5. Write a program to find the maximum of the numbers in a list using the reduce function.
6. Run pip freeze for the system interpreter. Take the contents and create a similar virtualenv.
7. Explore the ‘Flask’ module and create a web server using Flask & Python.
MEGA PROJECT 1: JARVIS - VOICE-ACTIVATED VIRTUAL ASSISTANT
Jarvis is a voice-activated virtual assistant designed to perform tasks such as web browsing, playing music, fetching news, and responding to user queries using OpenAI's GPT-3.5-turbo model.
FEATURES
- Voice Recognition
- Utilizes the speech_recognition library to listen for and recognize voice commands.
- Activates upon detecting the wake word "Jarvis."
- Text-to-Speech
- Converts text to speech using pyttsx3 for local conversion.
- Uses gTTS (Google Text-to-Speech) and pygame for playback.
- Web Browsing.
- Opens websites like Google, Facebook, YouTube, and LinkedIn based on voice commands.
- Music Playback
- Interfaces with a musicLibrary module to play songs via web links.
- News Fetching
- Fetches and reads the latest news headlines using NewsAPI.
- OpenAI Integration
- Handles complex queries and generates responses using OpenAI's GPT-3.5-turbo.
- Acts as a general virtual assistant similar to Alexa or Google Assistant.
WORKFLOW
1. Initialization
2. Greets the user with "Initializing Jarvis...."
3. Wake Word Detection
4. Listens for the wake word "Jarvis."
5. Acknowledges activation by saying "Ya."
6. Processes commands to determine actions such as opening a website, playing music, fetching news, or generating a response via OpenAI.
7. Speech Output
8. Provides responses using the speak function with either pyttsx3 or gTTS.
**LIBRARIES USED**
- speech_recognition
- webbrowser
- pyttsx3
- musicLibrary
- requests
- openai
- gTTS
- pygame
- os
MEGA PROJECT 2: AUTO-REPLY AI CHATBOT
DESCRIPTION
This project automates the process of interacting with a chat application, specifically designed to analyze chat history and generate humorous responses using OpenAI’s GPT-3.5-turbo model. The virtual assistant, named Naruto, is a character that roasts people in a funny way, based on the chat history.
FEATURES
1. Automated Chat Interaction
2. Uses pyautogui to perform mouse and keyboard operations, interacting with the chat application without manual intervention.
3. Chat History Analysis
4. Copies chat history from the chat application and analyzes it to determine if the last message was sent by a specific user (e.g., “Rohan Das”).
5. Humorous Response Generation
6. Integrates with OpenAI’s GPT-3.5-turbo model to generate funny, roast-style responses based on the analyzed chat history.
7. Clipboard Operations
8. Utilizes pyperclip to copy and paste text, facilitating the retrieval and insertion of chat messages.
WORKFLOW
- Initialization and Setup
- Click on the Chrome icon to open the chat application.
- Wait for a brief period to ensure the application is open and ready for interaction.
- Chat History Retrieval
- Periodically select and copy chat history by dragging the mouse over the chat area and using the copy shortcut.
- Retrieve the copied text from the clipboard.
- Message Analysis
- Analyze the copied chat history to check if the last message is from a specific user (e.g., "Rohan Das").
- If the last message is from the target user, send the chat history to OpenAI's GPT-3.5-turbo to generate a humorous response.
- Copy the generated response to the clipboard.
- Send Response
- Click on the chat input area and paste the generated response.
- Press 'Enter' to send the response.
LIBRARIES USED
1. pyautogui: For automating mouse and keyboard interactions.
2. time: For adding delays between operations.
3. pyperclip: For clipboard operations.
4. openai: For interacting with OpenAI's GPT-3.5-turbo model.
|
{"Source-Url": "https://cwh-full-next-space.fra1.cdn.digitaloceanspaces.com/YouTube/The%20Ultimate%20Python%20Handbook.pdf", "len_cl100k_base": 13088, "olmocr-version": "0.1.53", "pdf-total-pages": 61, "total-fallback-pages": 0, "total-input-tokens": 125325, "total-output-tokens": 16327, "length": "2e13", "weborganizer": {"__label__adult": 0.000400543212890625, "__label__art_design": 0.0004451274871826172, "__label__crime_law": 0.0001417398452758789, "__label__education_jobs": 0.00411224365234375, "__label__entertainment": 0.00011175870895385742, "__label__fashion_beauty": 0.00012123584747314452, "__label__finance_business": 0.00015723705291748047, "__label__food_dining": 0.0005035400390625, "__label__games": 0.0008101463317871094, "__label__hardware": 0.0005512237548828125, "__label__health": 0.00029277801513671875, "__label__history": 0.00018334388732910156, "__label__home_hobbies": 0.00015842914581298828, "__label__industrial": 0.00023496150970458984, "__label__literature": 0.00032806396484375, "__label__politics": 0.00013375282287597656, "__label__religion": 0.0004036426544189453, "__label__science_tech": 0.002655029296875, "__label__social_life": 0.00015866756439208984, "__label__software": 0.0098876953125, "__label__software_dev": 0.9775390625, "__label__sports_fitness": 0.0002465248107910156, "__label__transportation": 0.0002567768096923828, "__label__travel": 0.0002224445343017578}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 56865, 0.02263]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 56865, 0.83875]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 56865, 0.76117]], "google_gemma-3-12b-it_contains_pii": [[0, 44, false], [44, 2238, null], [2238, 7515, null], [7515, 7515, null], [7515, 7515, null], [7515, 10390, null], [10390, 11188, null], [11188, 12446, null], [12446, 12837, null], [12837, 13254, null], [13254, 14453, null], [14453, 15308, null], [15308, 15776, null], [15776, 16680, null], [16680, 18091, null], [18091, 18641, null], [18641, 19209, null], [19209, 20166, null], [20166, 20318, null], [20318, 20681, null], [20681, 21722, null], [21722, 22726, null], [22726, 23682, null], [23682, 24614, null], [24614, 25572, null], [25572, 26498, null], [26498, 27588, null], [27588, 28600, null], [28600, 29234, null], [29234, 30124, null], [30124, 31513, null], [31513, 32482, null], [32482, 32918, null], [32918, 33559, null], [33559, 33770, null], [33770, 34629, null], [34629, 35640, null], [35640, 36884, null], [36884, 37844, null], [37844, 39018, null], [39018, 39387, null], [39387, 40086, null], [40086, 40648, null], [40648, 41033, null], [41033, 42242, null], [42242, 42322, null], [42322, 43303, null], [43303, 43743, null], [43743, 45019, null], [45019, 46460, null], [46460, 47650, null], [47650, 48760, null], [48760, 49360, null], [49360, 50530, null], [50530, 51526, null], [51526, 51596, null], [51596, 52448, null], [52448, 54079, null], [54079, 54338, null], [54338, 56149, null], [56149, 56865, null]], "google_gemma-3-12b-it_is_public_document": [[0, 44, true], [44, 2238, null], [2238, 7515, null], [7515, 7515, null], [7515, 7515, null], [7515, 10390, null], [10390, 11188, null], [11188, 12446, null], [12446, 12837, null], [12837, 13254, null], [13254, 14453, null], [14453, 15308, null], [15308, 15776, null], [15776, 16680, null], [16680, 18091, null], [18091, 18641, null], [18641, 19209, null], [19209, 20166, null], [20166, 20318, null], 
[20318, 20681, null], [20681, 21722, null], [21722, 22726, null], [22726, 23682, null], [23682, 24614, null], [24614, 25572, null], [25572, 26498, null], [26498, 27588, null], [27588, 28600, null], [28600, 29234, null], [29234, 30124, null], [30124, 31513, null], [31513, 32482, null], [32482, 32918, null], [32918, 33559, null], [33559, 33770, null], [33770, 34629, null], [34629, 35640, null], [35640, 36884, null], [36884, 37844, null], [37844, 39018, null], [39018, 39387, null], [39387, 40086, null], [40086, 40648, null], [40648, 41033, null], [41033, 42242, null], [42242, 42322, null], [42322, 43303, null], [43303, 43743, null], [43743, 45019, null], [45019, 46460, null], [46460, 47650, null], [47650, 48760, null], [48760, 49360, null], [49360, 50530, null], [50530, 51526, null], [51526, 51596, null], [51596, 52448, null], [52448, 54079, null], [54079, 54338, null], [54338, 56149, null], [56149, 56865, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, false], [5000, 56865, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 56865, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 56865, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 56865, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 56865, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 56865, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 56865, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 56865, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 56865, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, true], [5000, 56865, null]], "pdf_page_numbers": [[0, 44, 1], [44, 2238, 2], [2238, 7515, 3], [7515, 7515, 4], [7515, 7515, 5], [7515, 10390, 6], [10390, 11188, 7], [11188, 12446, 8], [12446, 12837, 9], [12837, 13254, 10], [13254, 14453, 11], [14453, 15308, 12], [15308, 15776, 13], [15776, 16680, 14], [16680, 18091, 15], [18091, 18641, 16], [18641, 19209, 17], [19209, 20166, 18], [20166, 20318, 19], [20318, 20681, 20], [20681, 21722, 21], [21722, 22726, 22], [22726, 23682, 23], [23682, 24614, 24], [24614, 25572, 25], [25572, 26498, 26], [26498, 27588, 27], [27588, 28600, 28], [28600, 29234, 29], [29234, 30124, 30], [30124, 31513, 31], [31513, 32482, 32], [32482, 32918, 33], [32918, 33559, 34], [33559, 33770, 35], [33770, 34629, 36], [34629, 35640, 37], [35640, 36884, 38], [36884, 37844, 39], [37844, 39018, 40], [39018, 39387, 41], [39387, 40086, 42], [40086, 40648, 43], [40648, 41033, 44], [41033, 42242, 45], [42242, 42322, 46], [42322, 43303, 47], [43303, 43743, 48], [43743, 45019, 49], [45019, 46460, 50], [46460, 47650, 51], [47650, 48760, 52], [48760, 49360, 53], [49360, 50530, 54], [50530, 51526, 55], [51526, 51596, 56], [51596, 52448, 57], [52448, 54079, 58], [54079, 54338, 59], [54338, 56149, 60], [56149, 56865, 61]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 56865, 0.0]]}
|
olmocr_science_pdfs
|
2024-12-10
|
2024-12-10
|
0f977cf7dc1b5743ae85c3ba2a7d3881d22fbf88
|
Abstract
Formal methods tools have been shown to be effective at finding defects in and verifying the correctness of safety-critical systems, many of which require some form of certification. However, there are still many issues that must be addressed before formal verification tools can be used as part of the certification of safety-critical systems. For example, most developers of avionics systems are unfamiliar with which formal methods tools are most appropriate for different problem domains. Different levels of expertise are necessary to use these tools effectively and correctly. In most certification processes, a tool used to meet process objectives must be qualified. The qualification of formal verification tools will likely pose unique challenges.
1 Executive Summary
Darren Cofer
Gerwin Klein
Konrad Slind
Virginie Wiels
Motivation and objectives
Dagstuhl Seminar 13051, Software Certification: Methods and Tools, convened experts from a variety of software-intensive domains (automotive, aircraft, medical, nuclear, and rail) to discuss software certification challenges, best practices, and the latest advances in certification technologies. One of the key challenges identified in that seminar was tool qualification. Tool qualification is the process by which certification credit may be claimed for the use of a software tool. The purpose of tool qualification is to provide sufficient confidence in the tool functionality so that its output may be trusted. Tool qualification is, therefore, a significant aspect of any certification effort. Seminar participants identified a number of needs in the area of formal methods tool qualification. Dagstuhl Seminar 15182 Qualification of Formal Methods Tools, was organized to address these needs.
Software tools are used in development processes to automate life cycle activities that are complex and error-prone if performed by humans. The use of such tools should, in principle, be encouraged from a certification perspective to provide confidence in the correctness of the software product. Therefore, we should avoid unnecessary barriers to tool qualification which may inadvertently reduce the use of tools that would otherwise enhance software quality and confidence.
Most software tools are not used in isolation, but are used as part of a complex tool chain requiring significant integration effort. In general, these tools have been produced by different organizations. We need to develop better and more reliable methods for integrating tools from different vendors (including university tools, open source tools, and commercial tools).
A given software tool may be used in different application domains having very different requirements for both certification and tool qualification. Furthermore, the methods and standards for tool development varies across domains. Consistent qualification requirements across different domains would simplify the process.
Despite the additional guidance provided for the avionics domain in recently published standards (DO-178C, DO-330, and DO-333), there are still many questions to be addressed. For one thing, most practicing engineers are unaware of how to apply different categories of formal verification tools. Even within a particular category, there are a wide variety of tools, often based on fundamentally different approaches, each with its own strengths and weaknesses.
If formal verification is used to satisfy DO-178C objectives, DO-333 requires the applicant to provide evidence that the underlying method is sound, i.e., that it will never assert something is true when it is actually false, allowing application software errors to be missed that should have been detected. Providing an argument for the soundness of a formal verification method is highly dependent on the underlying algorithm on which the method is based. A method may be perfectly sound when used one way on a particular type of problem and inherently unsound when used in a different way or on a different type of problem. While these issues may be well understood in the research community, they are not typically collected in one place where a practitioner can easily find them. It is also not realistic to expect avionics developers to be able to construct an argument for the soundness of a formal method without help from experts in the field.
At the same time, it is also important to not make the cost of qualification of formal methods tools so great as to discourage their use. While it is tempting to hold formal verification tools to a higher standard than other software tools, making their qualification unnecessarily expensive could do more harm than good.
The objectives of this Dagstuhl Seminar were to
- investigate the sorts of assurances that are necessary and appropriate to justify the application of formal methods tools throughout all phases of design in real safety-critical settings,
- discuss practical examples of how to qualify different types of formal verification tools, and
- explore promising new approaches for the qualification of formal methods tools for the avionics domain, as well as in other domains.
Accomplishments
Qualification is not a widely understood concept outside of those industries requiring certification for high-assurance, and different terminology is used in different domains. The seminar was first a way of sharing knowledge from certification experts so that formal methods researchers could better understand the challenges and barriers to the use of formal methods tools.
The seminar also included presentations from researchers who have developed initial approaches to address qualification requirements for different classes of formal methods tools. We were especially interested in sharing case studies that are beginning to address tool qualification challenges. These case studies include tools based on different formal methods (model checking, theorem proving, abstract interpretation).
As a practical matter, we focussed much of our discussion on the aerospace domain since there are published standards addressing both formal methods and tool qualification for avionics software. The seminar also included researchers from other domains (nuclear, railway) so we could better understand the challenges and tool qualification approaches that are being discussed in those domains.
We managed to bridge a lot of the language between the certification domains, mostly railway, avionics, and nuclear, and bits of automotive, and related the qualification requirements to each other. Some of the otherwise maybe less stringent schemes (e.g. automotive) can end up having stronger qualification requirements, because formal methods are not specifically addressed in them. There is some hope that DO-333 might influence those domains, or be picked up by them in the future, to increase the use of FM tools which would increase the quality of systems.
For the academic tool provider side, we worked out and got the message across that tool qualification can be a lot easier and simpler than what we might strive for academically, and discussed specific tools in some detail, clarifying what would be necessary for a concrete qualification. Finally, we also investigated tool architectures that make tools easier to qualify (verification vs code generation).
2 Table of Contents

Executive Summary
Darren Cofer, Gerwin Klein, Konrad Slind, and Virginie Wiels

Overview of Talks
- Please check my 500K LOC of Isabelle (June Andronick)
- Compiling avionics software with the CompCert formally verified compiler (Sandrine Blazy)
- Qualification of Formal Methods Tools and Tool Qualification with Formal Methods (Matteo Bordin)
- Are You Qualified for This Position? An Introduction to Tool Qualification (Darren Cofer)
- Sharing experience on SAT-based formal verification toolchain qualification in the railway domain (Rémi Delmas)
- Qualification of PVS for Systematic Design Verification of a Nuclear Shutdown System (Mark Lawford)
- How much is CompCert’s proof worth, qualification-wise? (Xavier Leroy)
- Certificates for the Qualification of the Model Checker Kind 2 (Alain Mebsout)
- Towards Certification of Network Calculus (Stephan Merz)
- Tool Qualification Strategy for Abstract Interpretation-based Static Analysis Tools (Markus Pister)
- Tool Qualification in the Railway Domain (Werner Schuetz)
- FM Tool Trust Propositions (Konrad Slind)
- DO-330 Tool Qualification: An experience report (Lucas Wagner)

Discussion Groups
- Why qualify a formal methods tool?
- How to qualify a formal methods tool?
- Compiler qualification strategies
- Comparison of qualification in different domains

Participants
3 Overview of Talks
3.1 Please check my 500K LOC of Isabelle
June Andronick (UNSW – Sydney, AU)
The seL4 microkernel has been formally proved correct [2], from binary code, up to high level requirements, using the Isabelle theorem prover [5]. In this talk we first gave an overview of seL4 development and proof guarantees and assumptions. We then explored what would be needed for a (hypothetical) certification of seL4 according to DO-178 (the software certification standard for airborne systems on commercial aircraft [6]), including a potential qualification of Isabelle according to DO-330 (tool qualification guidelines [7]).
The seL4 microkernel is a small operating system kernel, of roughly 10,000 lines of C code, designed to be a high-performance, secure, safe, and reliable foundation for a wide variety of application domains. It provides isolation and controlled communication to applications running on top of it, allowing trusted applications to run alongside untrusted, legacy code such as a whole Linux instance.
seL4 is the world’s most verified kernel [2], with a full functional correctness proof, showing that the binary code is a correct implementation of the high-level functional specification, plus security proofs, showing that seL4 enforces integrity and confidentiality. All the proofs have been conducted in the Isabelle/HOL theorem prover, apart from the binary-to-C correctness proof, which uses some SMT solvers and HOL4 models and proofs. The combined Isabelle proofs amount to about 500,000 lines of Isabelle models and proof scripts.
For this Dagstuhl seminar on tool qualification, we put ourselves in the situation of wanting to certify seL4 for use in an avionics context, and therefore needing to qualify the tools used in its formal verification, here mainly Isabelle, according to DO-330. Following the discussions and presentations from the seminar, we investigated the following question:
What would be needed to qualify Isabelle, for the objective of using the proof of functional correctness of seL4 to justify that the code is complete and correct with respect to its high-level specification?
From our understanding of the qualification process, we propose to answer the following questions.
1. Justify that the method (Interactive Theorem Proving) is suitable:
Since the property we are showing is functional correctness, it requires a high level of expressiveness to precisely model the code and specification; such a high level of expressiveness implies a loss of decidability, and therefore requires user input to perform the proof. Interactive theorem proving fits precisely with those requirements. To justify this to a certifier, we could refer to peer-reviewed papers or point to examples of projects using interactive theorem provers to prove functional correctness.
2. Justify that the method (Isabelle-style deduction) is sound:
Isabelle’s logic is based on a very small kernel that needs to be trusted: a dozen axioms, which have been manually validated. All extensions are derived from first principles and checked by this kernel. The only ways of adding axioms are through (conservative) definitions and through explicit axioms and tracked oracles (e.g. sorried lemmas). To justify this to a certifier, we could again refer to peer-reviewed papers, the *HOL-report* [1], or the formally verified HOL Light [3] and CakeML implementations [4].
3. Justify that the tool (Isabelle) correctly implements the method:
This would require us to show that only the standard distribution theory HOL is used, that no axiom commands are used after the theory HOL, that no “sorry” and “cheat_tac” commands are used, and other technical corner cases that should be documented. When these conditions are met, only true theorems in HOL can be derived. Evidence for this question would ideally be a small verified proof checker for Isabelle (using e.g. CakeML and providing efficient proof terms).
4. Justify the correct use of the tool (Isabelle):
This would consist of checking that the above conditions (no axioms, no sorries, etc.) are satisfied in the specific example of the proof under consideration; a sketch of such a check appears after this list. This is where the title of this talk comes from.
5. Justify that the tool (Isabelle) is helping to meet the objective:
This would require showing that the model of C used is a correct representation of C, that the model of the specification is a correct representation of the expected behavior, and that the formalisation of the property (here refinement) is a correct representation of the objective (here that the code is complete and correct with respect to its high-level specification). The seL4 verification includes high-level security proofs, which aim at justifying that the specification satisfies the expected behaviors. Evidence for the C model and refinement statement could be provided by review, inspection and testing. As a community, it would also be helpful to provide documentation and training material on how to read formal specifications, to allow certifiers and non-experts to convince themselves that the statements and properties make sense. Then they only need to trust the experts and peer-reviewed papers that the proof script will indeed provide evidence that the statement is true, that the property is satisfied.
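To make item 4 concrete, the following is a minimal, hypothetical sketch of a textual hygiene check over an Isabelle development. The command names it searches for, the directory name, and the idea of checking by text matching are all assumptions for illustration; a real check would be performed by Isabelle itself (or by a small verified proof checker), since text matching cannot, for example, distinguish commands from comments.

```python
# Hypothetical hygiene scan over an Isabelle development: flag commands that
# would weaken the trust argument (unfinished proofs, new axioms, oracles).
# Purely illustrative; a real check would use Isabelle itself.
import re
from pathlib import Path

SUSPECT = re.compile(r"\b(sorry|oops|axiomatization|cheat_tac)\b")

def scan_development(root: str):
    findings = []
    for thy in Path(root).rglob("*.thy"):
        for lineno, line in enumerate(thy.read_text(errors="replace").splitlines(), 1):
            if SUSPECT.search(line):
                findings.append((str(thy), lineno, line.strip()))
    return findings

if __name__ == "__main__":
    # "sel4-proofs" is a placeholder directory name.
    for path, lineno, text in scan_development("sel4-proofs"):
        print(f"{path}:{lineno}: {text}")
```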
Acknowledgments
NICTA is funded by the Australian Government as represented by the Department of Broadband, Communications and the Digital Economy and the Australian Research Council through the ICT Centre of Excellence program.
References
7. RTCA. DO-330, Software Tool Qualification Considerations.
3.2 Compiling avionics software with the CompCert formally verified compiler
Sandrine Blazy (IRISA – Rennes, FR)
Compilers are complicated pieces of software that sometimes contain bugs causing wrong executable code to be silently generated from correct source programs. In turn, this possibility of compiler-introduced bugs diminishes the assurance that can be obtained by applying formal methods to source code.
This talk gives an overview of the CompCert project: an ongoing experiment in developing and formally proving correct a realistic, moderately optimizing compiler from a large subset of C to PowerPC, ARM and x86 assembly languages. The correctness proof, mechanized using the Coq proof assistant, establishes that the generated assembly code behaves exactly as prescribed by the semantics of the C source, eliminating all possibilities of compiler-introduced bugs and generating unprecedented confidence in this compiler.
3.3 Qualification of Formal Methods Tools and Tool Qualification with Formal Methods
Matteo Bordin (AdaCore – Paris, FR)
This work reports on our experience of the relation between Formal Methods and Tool Qualification. We explored two main application domains: the qualification of formal methods tools and the use of formal methods for tool qualification. In the first case, we present our work in qualifying an abstract interpretation tool (CodePeer) and a formal verification tool (SPARK) in a DO-178 context. In the second case, we focus instead on a lightweight use of formal methods to help the qualification of an automated code generator from Simulink models. This second experience is particularly interesting as it describes how we used Ada 2012 contracts (pre/post-conditions) to formally describe in first-order logic the behavior of a code generator. This specification is not used to statically verify the code generator, but rather as a run-time oracle that checks that the tool executes according to its specification. Unlike in other similar experiences, and quite to our surprise, we realized that the specification in the form of pre/post-conditions significantly differed from the implementation algorithm.
3.4 Are You Qualified for This Position? An Introduction to Tool Qualification
Darren Cofer (Rockwell-Collins – Minneapolis, US)
Formal methods tools have been shown to be effective at finding defects in and verifying the correctness of safety-critical systems such as avionics systems. The recent release of
DO-178C and the accompanying Formal Methods supplement DO-333 will make it easier for developers of software for commercial aircraft to obtain certification credit for the use of formal methods.
However, there are still many issues that must be addressed before formal verification tools can be injected into the design process for safety-critical systems. For example, most developers of avionics systems are unfamiliar with which formal methods tools are most appropriate for different problem domains. Different levels of expertise are necessary to use these tools effectively and correctly. Evidence must be provided of a formal method's soundness, a concept that is not well understood by most practicing engineers. Finally, DO-178C requires that a tool used to meet its objectives must be qualified in accordance with the tool qualification document DO-330. The qualification of formal verification tools will likely pose unique challenges.
Qualification is not a widely understood concept outside of those industries requiring certification of high-assurance systems, and different terminology is used in different domains. This talk provided an overview of certification and qualification requirements for the civil aviation domain so that formal methods researchers can better understand the challenges and barriers to the use of formal methods tools. Topics covered included a summary of certification processes and objectives for avionics software, requirements for qualification of tools used in software development and verification, and how formal methods tools fit into the certification environment.
3.5 Sharing experience on SAT-based formal verification toolchain qualification in the railway domain
Rémi Delmas (ONERA - Toulouse, FR)
The goal of the talk is to fuel the reflection and discussion about formal verification tool qualification in the aerospace domain according to the new DO-333 guidelines, by sharing previous experience on tool qualification in the railway domain under CENELEC SIL-* requirements. The talk describes a formal verification toolchain based on SAT solvers and k-induction used in the railway domain for the verification of safety properties of interlocking and communication-based train control systems. The tool in question has been used to earn certification credits, by replacing tests with formal property verification, in real-world railway control systems. In particular, the talk describes how the toolchain’s architecture, development and V&V process was designed to meet CENELEC SIL-4 tool qualification requirements, using implementation diversification, semantic equivalence checking, and proof logging with proof checking. The talk also highlights the various non-technical issues that surround formal verification tool qualification, which nevertheless must be taken into account to ensure the success of formal verification in industrial applications.
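For readers unfamiliar with the proof engine behind such a toolchain, the following is a minimal sketch of k-induction on a toy transition system. It uses the z3 SMT solver purely as a convenient stand-in for the SAT back-end described in the talk; the example system, the property and the solver choice are illustrative assumptions, not the railway toolchain itself.

```python
# Minimal k-induction sketch over a toy counter system. z3 is used here as a
# convenient stand-in for the SAT back-end; this is illustrative only.
from z3 import Int, Solver, And, Not, sat, unsat

def state(i):
    # One integer state variable per unrolling step; a real system has a vector.
    return Int(f"x_{i}")

def init(s):
    return s == 0

def trans(s, s_next):
    # Counter that wraps from 9 back to 0.
    return s_next == (s + 1) % 10

def prop(s):
    # Safety property: the counter stays within [0, 10).
    return And(s >= 0, s < 10)

def k_induction(k):
    # Base case: no violation within the first k steps from an initial state.
    xs = [state(i) for i in range(k + 1)]
    base = Solver()
    base.add(init(xs[0]))
    base.add(*[trans(xs[i], xs[i + 1]) for i in range(k)])
    base.add(Not(And(*[prop(x) for x in xs])))
    if base.check() == sat:
        return "property violated within k steps"

    # Inductive step: k+1 consecutive good states imply the property next step.
    ys = [state(100 + i) for i in range(k + 2)]
    step = Solver()
    step.add(*[trans(ys[i], ys[i + 1]) for i in range(k + 1)])
    step.add(*[prop(ys[i]) for i in range(k + 1)])
    step.add(Not(prop(ys[k + 1])))
    if step.check() == unsat:
        return f"property proved by {k}-induction"
    return "inconclusive; increase k"

print(k_induction(1))
```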
3.6 Qualification of PVS for Systematic Design Verification of a Nuclear Shutdown System
Mark Lawford (McMaster University – Hamilton, CA)
The Systematic Design Verification (SDV) process used on the redesign of the Darlington Nuclear Generating Station originated in the difficulties encountered in receiving regulatory approval for Canada’s first computer based reactor shutdown system (SDS) [4]. The SDV process for the redesign project made use of tabular expressions for the Software Requirements Specification (SRS) and the Software Design Description (SDD). Completeness and consistency of the tabular expressions and the conformance of the SDD to the SRS were established using the automated theorem prover PVS [3]. The process used to qualify PVS for use in this context is described below and related to the latest version of IEC 61508.
The qualification required the use of manual proof to mitigate potential undetected errors that might be caused by a failure of PVS, i.e., all of the proofs performed in the PVS theorem prover also had to be done by hand. The standard IEC 61508 (2nd ed.) in part 4 provides a classification of tools according to whether they are software on-line support tools, which can directly influence system safety at run time, or software off-line support tools, which support a phase of the software development lifecycle and cannot directly influence the safety-related system during its run time. Software off-line support tools are further broken down into three subclasses:
T1: generates no outputs which can directly or indirectly contribute to the executable code (including data) of the safety related system; (e.g. a text editor, a requirements or design support tool with no automatic code generation capabilities, configuration control tools)
T2: supports the test or verification of the design or executable code, where errors in the tool can fail to reveal defects but cannot directly create errors in the executable software; (e.g. a test harness generator, test coverage measurement tool, static analysis tool)
T3: generates outputs which can directly or indirectly contribute to the executable code of the safety related system (e.g., an optimising compiler where the relationship between the source code program and the generated object code is not obvious, a compiler that incorporates an executable run-time package into the executable code). According to this classification, PVS as used on the Darlington Redesign Project would be a T2 tool since it is being used to verify a design and a tool failure could fail to reveal an error but not introduce an error into the executable.
In IEC 61508-3 (2nd ed) it states that:
7.4.4.5 An assessment shall be carried out for offline support tools in classes T2 and T3 to determine the level of reliance placed on the tools, and the potential failure mechanisms of the tools that may affect the executable software. Where such failure mechanisms are identified, appropriate mitigation measures shall be taken.
Since a failure mechanism is that PVS has a bug that causes a proof to succeed when it should have failed, we needed a mitigation strategy. The strategy chosen was to redo all proofs manually. Although this mitigation strategy might appear to defeat much of the benefit of using a formal methods tool, PVS could still be used to quickly check design iterations and the manual checks only needed to be performed on the final work product to mitigate PVS's failure modes. Still, the final manual proofs were tedious and required significant effort.
A proposal is made for a revised Tabular Expression Toolbox that makes use of PVS and an SMT solver to eliminate the need for manual review in order to gain tool qualification. A prototype implementation of the Tabular Expression Toolbox is described in [1].
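To illustrate the kind of check such a toolbox automates, here is a minimal sketch of the two classic obligations on a tabular expression, completeness (the condition cells cover every input) and disjointness (no two cells overlap), discharged with an SMT solver. The use of z3's Python API and the one-variable example table are assumptions for illustration; the toolbox described above uses PVS together with an SMT solver.

```python
# Completeness and disjointness checks for a toy one-dimensional tabular
# expression, discharged with the z3 SMT solver (illustrative only).
from z3 import Real, Or, And, Not, Solver, unsat

x = Real("x")
# Condition column of a simple table over a single real input.
conditions = [x < 0, x == 0, x > 0]

def is_valid(formula):
    # A formula is valid iff its negation is unsatisfiable.
    s = Solver()
    s.add(Not(formula))
    return s.check() == unsat

# Completeness: the disjunction of all conditions must be valid.
complete = is_valid(Or(*conditions))

# Disjointness: every pair of distinct conditions must be mutually exclusive.
disjoint = all(
    is_valid(Not(And(conditions[i], conditions[j])))
    for i in range(len(conditions))
    for j in range(i + 1, len(conditions))
)

print("complete:", complete, "disjoint:", disjoint)
```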
3.7 How much is CompCert’s proof worth, qualification-wise?
Xavier Leroy (INRIA – Le Chesnay, FR)
Intuitively as well as experimentally (cf. the Csmith compiler testing project), the formal verification of the CompCert C compiler generates much confidence that it is free of miscompilation issues. How can we derive certification credit from this formal verification, in the context of a DO-330 / DO-333 tool qualification? This question is being investigated within the Verasco project (ANR-11-INSE-03; http://verasco.imag.fr/).
Consider first the formally-verified part of the CompCert C compiler. This part goes from abstract syntax for the CompCert subset of C to abstract syntax for the assembly language of the target processor. This part contains all the optimizations and almost all code generation algorithms. For this part, we see a plausible mapping between parts of the Coq development and DO-330 concepts:
- The “specifications” part of the Coq development constitutes most of the (high-level) tool requirements. This part comprises the abstract syntax and operational semantics of the CompCert C and CompCert assembly languages, as well as the high-level statement of compiler correctness, namely preservation of semantics during compilation, with preservation of properties as a corollary.
- The “code” part of the Coq development maps to the low-level tool requirements. This part comprises all compilation algorithms (written in pure functional, executable style in Coq’s specification language) as well as the abstract syntaxes of the intermediate languages used. It is comparable to the pseudocode or Simulink/Scade models that are used as low-level requirements in other certifications.
- The “proof” part of the Coq development automates the verification activities between the (high-level) tool requirements and the low-level tool requirements. This part contains the proofs of semantic preservation for every compilation pass, the proofs of semantic
soundness for every static analysis, as well as the operational semantics for the intermediate languages.
A first difficulty is that the “specifications”, “code” and “proof” parts are not clearly separated in CompCert’s Coq development, owing to good mathematical style (theorems and their proofs come just after definitions) and also to the use of dependently-typed data structures. It would be useful to develop a “slicing” tool for Coq that extracts the various parts of the development by tracing dependencies.
The source code for the compiler, in DO-330 parlance, corresponds to the OCaml code that is generated from the “code” part of the Coq development by Coq’s extraction facility. The executable compiler, then, is obtained by OCaml compilation. Here, we are in familiar territory: automatic code generation followed by compilation. However, suitable confidence arguments must be provided for Coq’s extraction and for OCaml’s compilation. Several approaches were discussed during the meeting, ranging from dissimilar implementations to Coq-based validation of individual runs of the executable compiler.
At the other end of the DO-330 sequence of refinements, we are left with the tool operational requirements, which have to be written in informal prose, with references to the ISO C 1999 language standard, the ISA reference manuals for the target architecture, and coding standards such as MISRA C. The verification activities here are essentially manual, and include for example relating the CompCert C formal semantics with the informal specifications in ISO C 1999 and MISRA. Such a relation can be built from appropriate tests, since CompCert provides a reference interpreter that provides an executable, testable form of its C formal semantics.
All in all, the formal proof of CompCert does not eliminate the need for manual verifications, but it reduces their scope tremendously: from manual verification of a full optimizing compiler to manual verification of formal semantics for C and assembly languages. For example, changes to the “code” part of the compiler (e.g. adding new optimizations, modifying the intermediate languages, etc) need no new manual verification activities, as long as the “specification” part of the compiler is unchanged.
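As a point of reference for the discussion above, the top-level semantic-preservation statement in the “specifications” part can be paraphrased roughly as follows. This is a simplified rendering for illustration, not the exact Coq theorem, which is stated in terms of behaviour refinement.

```latex
% Simplified paraphrase of CompCert's semantic-preservation statement:
% if compilation succeeds and the source program S has no undefined
% behaviour, every observable behaviour of the generated code C is an
% allowed behaviour of S.
\[
  \mathit{compile}(S) = \mathsf{OK}(C) \;\wedge\; \mathit{safe}(S)
  \;\Longrightarrow\;
  \forall b.\; b \in \mathrm{Behaviours}(C) \;\Rightarrow\; b \in \mathrm{Behaviours}(S)
\]
% Preservation of properties follows as a corollary: a specification that
% holds of all behaviours of S then holds of all behaviours of C.
```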
To finish, we need to consider the parts of the CompCert C compiler that are not formally verified yet: uphill of the verified part, the transformations from C source text to CompCert C abstract syntax (preprocessing, tokenization, parsing, type-checking, pre-simplifications, production of an abstract syntax tree); downhill, the transformation from assembly abstract syntax to ELF executables (assembling and linking). CompCert provides an independent checker that validates a posteriori the assembling and linking phases. Likewise, some of the uphill passes were formally verified recently (parsing and type-checking). Nonetheless, many of the uphill passes lack formal specifications and therefore must be verified by conventional, test-based means.
In conclusion, the qualification of an optimizing compiler to the highest quality levels has never been attempted before, and might very well be too expensive to be worth the effort. A formal compiler verification such as CompCert’s has high potential to reduce these costs. However, much work remains to take full advantage of this potential.
3.8 Certificates for the Qualification of the Model Checker Kind 2
Alain Mebsout (University of Iowa – Iowa City, US)
License © Creative Commons BY 3.0 Unported license
Joint work of Mebsout, Alain; Tinelli, Cesare
This talk presents a technique for generating proof certificates in the model checker Kind 2 as an alternate path to qualification with respect to DO-178C. This is put in perspective with the qualification that was conducted for the SMT solver Alt-Ergo at Airbus for use in the development of the A350. Alt-Ergo was qualified with respect to DO-178B as a backend solver for Caveat to verify C code of the pre-flight inspection. On the other hand, Kind 2 generates proof certificates, which makes it possible to shift the trust from the model checker to the proof checker (LFSC). Certificates for the actual model checking algorithm are generated as SMT2 files and verified by an external SMT solver. The translation from Lustre to the internal first-order logic representation is verified in a lightweight way by proving observational equivalence between independent frontends (for the moment JKind and Kind 2). This proof is actually carried out by Kind 2 itself and generates in turn SMT2 certificates.
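The trust-shifting workflow can be pictured as follows: the certificates are ordinary SMT-LIB files, so accepting a model checking run reduces to re-checking each certificate with an independent solver. The solver name, its command line, and the convention that every query must come back unsat are illustrative assumptions, not Kind 2's actual interface.

```python
# Sketch of re-checking SMT-LIB certificates with an external solver; the
# solver name, flags and expected "unsat" answers are illustrative assumptions.
import subprocess
from pathlib import Path

def check_certificates(cert_dir: str, solver: str = "z3") -> bool:
    ok = True
    for cert in sorted(Path(cert_dir).glob("*.smt2")):
        out = subprocess.run([solver, str(cert)], capture_output=True, text=True)
        answers = out.stdout.split()
        if not answers or any(a != "unsat" for a in answers):
            print(f"certificate NOT validated: {cert}")
            ok = False
    return ok

if __name__ == "__main__":
    # "kind2-certificates" is a placeholder directory name.
    print("all certificates validated:", check_certificates("kind2-certificates"))
```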
3.9 Towards Certification of Network Calculus
Stephan Merz (INRIA Nancy – Villers-lès-Nancy, FR)
License © Creative Commons BY 3.0 Unported license
Joint work of Boyer, Marc; Fejoz, Loïc; Mabille, Etienne; Merz, Stephan
URL http://dx.doi.org/10.1007/978-3-642-39634-2_37
Network Calculus (NC) is an established theory for determining bounds on message delays and for dimensioning buffers in the design of networks for embedded systems. It is supported by academic and industrial tool sets and has been widely used, including for the design and certification of the Airbus A380 AFDX backbone. However, tool sets used for developing certified systems need to be qualified, which requires substantial effort and makes them rigid, even when deficiencies are subsequently detected. Result checking may be a worthwhile complement, since the use of a qualified (and highly trustworthy) checker could replace qualifying the analysis tool itself. In this work, we experimented with an encoding of the fundamental theory of NC in the interactive proof assistant Isabelle/HOL and used it to check the results of a prototypical NC analyzer.
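For readers outside the networking community, the results such an analyzer computes are instances of the two basic bounds of the theory, recalled below for a flow with arrival curve α crossing a server offering service curve β (standard textbook statements, included only to indicate what a result checker has to re-verify).

```latex
% Standard Network Calculus bounds for a flow with arrival curve \alpha
% served by a node offering service curve \beta:
\[
  \text{backlog bound:}\quad \sup_{t \ge 0}\bigl(\alpha(t) - \beta(t)\bigr),
  \qquad
  \text{delay bound:}\quad \sup_{t \ge 0}\,\inf\{\, d \ge 0 : \alpha(t) \le \beta(t+d) \,\}.
\]
```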
3.10 Tool Qualification Strategy for Abstract Interpretation-based Static Analysis Tools
Markus Pister (AbsInt – Saarbrücken, DE)
License © Creative Commons BY 3.0 Unported license
Joint work of Kästner Daniel, Pister Markus, Gebhard Gernot, Ferdinand Christian
In automotive, railway, avionics and healthcare industries more and more functionality is implemented by embedded software. A failure of safety-critical software may cause high costs or even endanger human beings. Also for applications which are not highly safety-critical, a software failure may necessitate expensive updates.
Safety-critical software has to be certified according to the pertinent safety standard to be approved for release. Contemporary safety standards, including DO-178C, IEC 61508, ISO 26262, and EN 50128, require identifying potential functional and non-functional hazards and demonstrating that the software does not violate the relevant safety goals. If tools are used to satisfy the corresponding verification objectives, an appropriate tool qualification is mandatory to show functional correctness of the tool behavior with respect to the operational context.
To ensure functional program properties, automatic or model-based testing and formal techniques like model checking are becoming more widely used. For non-functional properties identifying a safe end-of-test criterion is a hard problem since failures usually occur in corner cases and full test coverage cannot be achieved.
For some non-functional program properties this problem is solved by abstract interpretation-based static analysis techniques, which provide full control and data coverage and yield provably correct results. Like model checking and theorem proving, abstract interpretation belongs to the formal software verification methods. AbsInt provides abstract interpretation-based static analyzers to determine safety guarantees on the worst-case execution time (aiT) and stack consumption (StackAnalyzer) as well as to prove the absence of runtime errors (Astree) in safety-critical software.
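To give a flavour of why such analyses cover all executions, here is a deliberately tiny interval analysis sketch. It is a toy example under stated assumptions, unrelated to the internals of aiT, StackAnalyzer or Astree: the abstract domain tracks one interval per variable and joins the results of both branches of a conditional, so the reported bound holds for every concrete execution.

```python
# Toy interval domain: sound over-approximation of all concrete executions.
from dataclasses import dataclass

@dataclass(frozen=True)
class Interval:
    lo: float
    hi: float

    def __add__(self, other):
        # Abstract addition: add the bounds componentwise.
        return Interval(self.lo + other.lo, self.hi + other.hi)

    def join(self, other):
        # Least upper bound: covers both branches of a conditional.
        return Interval(min(self.lo, other.lo), max(self.hi, other.hi))

# Abstractly execute:  y = x + 1  on one branch,  y = x + 3  on the other,
# for an input x only known to lie in [0, 10].
x = Interval(0, 10)
y = (x + Interval(1, 1)).join(x + Interval(3, 3))
print(y)  # Interval(lo=1, hi=13): a bound valid for every concrete run
```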
This talk focuses on our tool qualification strategy of the above mentioned verification tools, which are increasingly adopted by industry in their validation activities for safety-critical software. First, we will give an overview of the tools and their role within the analyzed system’s certification process. We then outline the required activities for a successful tool qualification of our static analyzers alongside their correspondingly produced data.
3.11 Tool Qualification in the Railway Domain
Werner Schuetz (Thales – Wien, AT)
License © Creative Commons BY 3.0 Unported license
In this presentation we give an overview of the relevant standards applicable to the rail domain. EN50128 is concerned with software, while EN50129 addresses system issues.
This presentation focuses on tool qualification. The 2011 edition of EN50128 is the first to include requirements on “Support Tools and Languages”. To this end it defines three tool classes. T3 tools directly or indirectly produce code or data that is used in the safety-related
system. T2 tools are verification tools that may fail to detect an error but cannot introduce an error themselves. T1 tools do not contribute directly or indirectly to the executable code or data.
This presentation discusses the requirements on support tools and how they apply to the three tool classes. A partial comparison with the relevant aerospace standards (DO-178C, DO-330) is also given.
In an appendix we briefly analyze which “Formal Methods” are contained in the 2011 edition of EN50128.
3.12 FM Tool Trust Propositions
Konrad Slind (Rockwell Collins – Minneapolis, US)
An interactive theorem proving (ITP) system is a complex piece of software that bundles a great deal of functionality together. Beyond their core theorem proving task, which can employ highly complex algorithms, these systems provide extensibility, rich interfaces for users, interaction with host operating systems, etc. And yet, ITP systems are claimed to provide very high assurance. It is our purpose to take a close look at this state of affairs and explain the justifications for this claim.
We introduce the notion of the **trust proposition** to organize the discussion: it helps the consumer of a theorem prover’s output understand what the full assurance story is, by breaking the overall trust proposition down to subcomponents. In particular, we identify the work product of an ITP as a collection of theories, which formalize the artifact under scrutiny, plus properties and proofs. This work product can be trusted, provided the following conditions are met:
1. **Trusted Basis** The support theories are trusted;
2. **Trusted Extension** The newly introduced types, constants, definitions, and axioms are trusted;
3. **Valid Model** The support theories plus newly introduced types, constants, definitions, and axioms accurately model the artifact under scrutiny;
4. **Sound Logic** The proof system is sound;
5. **Correct Implementation** The proof system and extension mechanisms are correctly implemented;
6. **Correct Libraries** The libraries used in the implementation are correctly implemented;
7. **Correct Compilation** The compiler correctly compiles the libraries and the implementation of the proof system;
8. **Correct Execution** The machine correctly runs the executable; and
9. **Trusted IO** The input and output of the ITP can be trusted.
3.13 DO-330 Tool Qualification: An experience report
Lucas Wagner (Rockwell Collins – Cedar Rapids, US)
This presentation gives an overview of the qualification of a test case generation tool that utilized model checking to generate tests. The tool is used to satisfy verification objectives, so it was qualified in accordance with DO-330 Tool Qualification Level 5 (TQL-5).
The presentation covers the rationale used for classifying the tool as a TQL-5 tool, the applicable DO-330 objectives for a TQL-5 tool, and examples of how the major objectives were satisfied, including examples of test cases used in the qualification package developed for the test generation tool.
The purpose of this presentation was to give a concrete example and demonstrate that qualification of a tool is not overly complicated, but rather a straightforward, manageable process.
4 Discussion Groups
In addition to individual presentations, the seminar included four discussion groups organized around specific questions that arose during these presentations.
4.1 Why qualify a formal methods tool?
DO-178 (certification standard for software in civil aviation) states that qualification of a tool is needed when certification processes are eliminated, reduced, or automated by the use of a software tool without its output being verified.
For formal methods tools, two questions arise:
- Why use formal methods tools?
- Is qualification necessary?
One difficulty with DO-178 is that structural coverage testing is connected to many different certification objectives. Only some of these objectives can be addressed using formal methods tools. A careful look at the objectives is necessary to determine the economic benefit of using formal methods tools. In some cases, the business case may be derived from a new capability enabled by the use of a formal methods tool. For example:
- The ability to optimize code by using the CompCert compiler (see presentation by Xavier Leroy)
- The ability to increase processor utilization by performing worst-case execution time (WCET) analysis with aiT
- The ability to host software at multiple criticality levels on same processor using a verified microkernel such as seL4
Formal methods qualification may, therefore, be a means to justify using the new capability.
Sometimes it is also possible to realize value without qualifying the tool. The use of a formal methods tool to detect and remove errors earlier in the development process is an example. Therefore, the benefit to be derived from a formal methods tool and how it is used in the development process should be carefully evaluated before assuming that qualification is needed.
4.2 How to qualify a formal methods tool?
In this group, we discussed qualification considerations for formal methods tools in the civil aviation context.
DO-333, the formal methods supplement for DO-178C, makes a distinction between a formal method and the tool which implements the method. Additional objectives for formal methods are defined in DO-333 (appearing in tables A-3 through A-5). These objectives apply to the underlying method, and are in addition to any tool qualification activities that may be required. For each formal method used, the following activities should be done:
- Verification that the method has a precise, unambiguous, mathematically defined syntax and semantics
- Justification of the soundness of the analysis method
- Description and justification of any assumptions that are made in the analysis performed
Concerning tool qualification, there is nothing specific for formal methods tools required by the tool qualification document, DO-330. For verification tools (called TQL-5 tools), the main activities have to do with definition and verification of Tool Operational Requirements. These describe operation of the tool from a user perspective and demonstrate that the tool can satisfy the certification objectives for which it is being used. Some verification must be done showing that the tool does what the requirements say it should do (for example by the use of adequate test cases).
4.3 Compiler qualification strategies
Some formal methods are more difficult to classify in terms of how they fit into a certification process and what kind of qualification is needed. A good example is the CompCert tool [1]. CompCert is a formally verified C compiler and thus could be seen as a development tool. However, DO-178 is designed not to require that the compiler be trusted. Instead, it assumes that executable object code will be verified by means of tests (for compliance and robustness with respect to the requirements and to demonstrate structural coverage). The question is thus: what certification objective is automated by CompCert?
A possible answer is property preservation between source code and object code. In that case, CompCert could be considered as a verification tool automating this objective, and thus it would be qualified as a TQL-5 tool (according to DO-330). It would, however, be necessary to separate the code production part from the proof part inside the CompCert tool, which is not easy given the nature of the technique used (Coq).
Of course, CompCert could also be qualified as a development tool (TQL-1). In that case, since its assurance story is based on a formal proof, DO-333 (the formal methods supplement to DO-178C) could be applied for the qualification objectives concerning the tool development process. This combination of using formal methods to qualify a formal methods development tool has not been previously considered. In that case, the issue is to justify qualification of CompCert as a development tool from an economic point of view. Since a TQL-1 qualification is costly, it is necessary to determine what can be put in the balance to motivate the use of CompCert in place of a traditional compiler.
4.4 Comparison of qualification in different domains
In this discussion group we discussed the similarities and differences among qualification standards in different domains. The standards considered were:
- DO-178C Software Considerations in Airborne Systems and Equipment Certification and DO-330 Software Tool Qualification Considerations
- ISO 26262 Road vehicles – Functional safety – Part 8: Supporting processes
The comparison concerned the following questions:
- When is tool qualification required?
- What levels of qualification are defined and what is the purpose of each?
- What activities are required to achieve qualification?
Participants
- June Andronick
UNSW – Sydney, AU
- Rob Arthan
Lemma 1 Ltd. – Twyford, GB
- Jasmin Christian Blanchette
INRIA Lorraine – Nancy, FR
- Sandrine Blazy
IRISA – Rennes, FR
- Matteo Bordin
AdaCore – Paris, FR
- Darren Cofer
Rockwell Collins – Minneapolis, US
- David Cok
GrammaTech Inc. – Ithaca, US
- Rémi Delmas
ONERA – Toulouse, FR
- Michael Dierkes
Rockwell Collins France – Toulouse, FR
- Eric Engstrom
SIFT – Minneapolis, US
- Gerwin Klein
NICTA – Sydney, AU
- Ramana Kumar
University of Cambridge, GB
- Mark Lawford
McMaster Univ. – Hamilton, CA
- Xavier Leroy
INRIA – Le Chesnay, FR
- Stefan Leue
Universität Konstanz, DE
- Alain Mebsout
Univ. of Iowa – Iowa City, US
- Stephan Merz
INRIA Nancy – Villers-lès-Nancy, FR
- Cesar A. Munoz
NASA Langley ASDC – Hampton, US
- Magnus Myreen
University of Cambridge, GB
- Scott Owens
University of Kent, GB
- Marc Pantel
University of Toulouse, FR
- Markus Pister
AbsInt – Saarbrücken, DE
- Werner Schütz
Thales – Wien, AT
- Konrad Slind
Rockwell Collins – Minneapolis, US
- Nick Tudor
D-RisQ Limited – Malvern, GB
- Lucas Wagner
Rockwell Collins – Cedar Rapids, US
- Michael W. Whalen
University of Minnesota – Minneapolis, US
- Virginie Wiels
ONERA – Toulouse, FR
The Stakeholders of a User-Centred Design Process in Mobile Service Development
Ari Alamäki¹ and Amir Dirin²
HAAGA-HELIA University of Applied Sciences
Business Information Technology
Ratapihantie 13, 00520 Helsinki, Finland
{ari.alamaki, amir.dirin}@haaga-helia.fi
ABSTRACT
The use of agile methods in mobile service development has gained much attention in software design research. In addition, involving the potential stakeholders of the mobile service in the design and development process has become vital in achieving the best service experience. Many application development methodologies, such as user-centred design (UCD), ensure that stakeholders have direct involvement in the design and development. This paper describes experiences of designing a mobile concept where various stakeholders, such as tourism companies, target application users, business experts and the application developers, were involved in the design and development process. The main focus of this paper is the roles and contributions of stakeholders in mobile services for outdoor activities such as kayaking, hiking and biking. These outdoor activities are associated with different functional and non-functional requirements that are essential considerations during the design and development process. Therefore, we utilized the UCD principle as an appropriate method of involving all stakeholders in the design process, and we show that hearing the stakeholders’ voice is vital in the design of outdoor-based mobile service development.
1 INTRODUCTION
This paper describes the design process of a mobile web service for outdoor tourism activities where users regularly need navigation, guidance and nature- and route-specific information. The outdoor tourists, such as hikers, bikers or kayakers, are often operating in an unfamiliar environment. Therefore, geo-location is the important feature in the development of useful mobile services.
The ultimate goal of the research project was to design a geo-location service to support and improve customer satisfaction with small tourism companies by offering new types of digital guidance and navigation services. In developing this type of application, where different stakeholders are involved, it is essential to grasp the stakeholders’ needs and requirements. In addition, the user’s direct involvement in the application design and development can help in anticipating possible errors and failures. Therefore, we selected the user-centred design (UCD) [1] as the development approach. The mobile service design and development was accomplished as university research and a development project where partner companies delivered their insights through a steering group and acted as pilot companies to test the application.
The advancement in web technologies, such as cloud computing [2-6], Software as a Service (SaaS) [7-8], Service-oriented Architecture (SOA) and Web Services [9-12], as well as software development frameworks such as Vaadin and PhoneGap [13-15], has provided a unique opportunity to develop prototypes faster and build robust mobile applications for various business segments. Therefore, more research is required into how to manage design and development projects in which various user groups and design teams can co-create new mobile service concepts. The methods of value co-creation are emphasized nowadays in marketing, sales and design literature, as end users and customers are taking a more active role in service design projects [16-17]. Hence, they are not only consumers of products but also active partners in designing new services.
This research project applies mobile web technologies to design and develop a customized mobile guide service for several small and medium-sized (SME) tourism companies to use simultaneously. The product designed in this study is based on one mobile web service, where the target service is deployed to the mobile device’s browser. Therefore, the designed mobile guide is not a native mobile application but is delivered as a SaaS model to the tourism companies, who then deliver the URL link to their own customers, i.e. the end users. This study applied the Vaadin 6.0 Java framework [13-14], Apache Tomcat and a LAMP stack (Linux, Apache, MySQL and PHP) on a virtual server. The research and development goal was to design, iteratively and together with all stakeholders, mobile web services that meet the needs of outdoor tourism businesses. To achieve the research goals within the UCD process, we defined the following subtasks.
1) Elicitation phase: Conducting user studies and gathering requirements in order to design the potential mobile web service concept. This phase is realized by collecting requirements through iterative concept and prototype designing, and gathering stakeholders’ feedback. The main focus of the concept design is the end users’ experiences of the provided services.
2) Evaluation and assessment phase: Piloting and testing the mobile guide prototypes with several stakeholders in the real usage environment to gain instant feedback on the concept and the proposed services.
2 MOBILE SERVICES and TOURISM
The tourism business was among the first to build electronic commerce and internet-based shopping solutions. According to a TripAdvisor survey [18], 38% of travellers have used their mobile devices to plan a trip, and 60% of respondents indicated that they have downloaded travel apps on their mobile devices in advance. The survey shows that the use of tourism-based mobile applications focuses particularly on researching restaurants (62%), checking flight statuses (51%) and researching attractions (46%). Google’s study [19] provides similar findings and emphasizes the importance of mobile services in the tourism business.
In outdoor activities, users often require navigation, guidance and other nature- and route-specific information. Tourists who are walking, hiking, biking or kayaking are frequently operating in an unfamiliar environment. Therefore, the aim is to design a tourism-based mobile system that supports most outdoor activities and is available from tourism companies [20, 21]. In addition to supporting the outdoor pursuits, the tourism companies could increase customer satisfaction by offering new types of digital guides and navigation aids as added-value services for their customers.
Tourism companies nowadays offer their customers hard-copy maps that contain the route details. However, it is easy to transfer the existing route and guide information, as highlighted on the printed map, to mobile devices. In addition to the static paper-based information, mobile services offer dynamic information for end users, such as speed, location, distances and social information related to specific locations. The existing tourism applications mainly provide information and support for city or urban navigation services [22, 23], in which the use of the mobile device does not significantly differ from typical mobile device usage, e.g. from checking SMS or answering a call.
3 USER-CENTRED DESIGN
Several studies [24-28] emphasize the involvement of stakeholders in the software development process as early as the first phases of the innovation and design stages. Developing an innovative mobile application and service is not a straightforward process. It requires several iterative experiments involving end users before any successful commercialization of the product. Blank [24] states that going backwards is considered a failure in the traditional linear product development model, whereas in the iterative development model going backwards is a natural and valuable part of learning and discovery. Ries [25] has also emphasized a cyclical development process where going backwards is an important element in continuous learning and a natural part of software development. Additionally, Blank [24] emphasizes that, unlike in the linear model, finding the right
customers and markets is unpredictable, and developers make several mistakes before they get it right. Blank also emphasizes the importance of the initial phases in his customer development model, namely determining customers’ real problems and needs. Similarly, Ries [25] highlights the involvement of end users in the software innovation and development process, and claims that you can begin the fruitful learning and discovery process with them by iteratively testing and measuring your minimum viable products and prototypes.
The paying customers do not necessarily explicitly know what they need or want, but they can offer valuable comments and suggestions for your visualized drafts and prototypes [26, 27]. Those comments and feedback work as guidelines in searching the possibilities for successful product and service innovations. Moreover, usability and user experience considerations are increasingly important in contemporary mobile application developments [28, 29, 30]. Mobile applications’ usability often mandates multi-level usability assessments. This complex and yet important process is accomplished by applying appropriate software development methods such as UCD or lean product development. Both of these methods consider users as key stakeholders at the various design and development stages.
Gould [29, 30] states that to build a usable system we need to involve users continuously in the development process and refine the design concept based on their feedback. The term UCD was first used by Don Norman during the 1980s after the publication of *User-Centered System Design: New Perspectives on Human-Computer Interaction* [31]. User-centred design and the development of interactive systems and devices have since increased in importance in product development, as UCD both cuts costs [32] and improves usability. Additionally, it should also place a special focus on the business benefits, which are easier to identify when using rapid innovation methods where end users are involved in the same process as business owners.
The UCD process is divided into different phases of creating a usable mobile application [26-28]: 1) Concept phase: The users’ needs and the opportunities are explored by applying different user study methods, such as interviews and questionnaires; 2) Requirements phase: Prepare a list of the requirements revealed during the previous phase. Applying various data analysis methods, such as interview transcripts, or task and environment analysis and affinity diagrams, assists in preparing such a list; 3) Prototype phase: The list of requirements is converted to a low-fidelity prototype and shared with the users. Based on users’ feedback, the design is retuned; 4) Usability assessments phase: Users then assess the high-fidelity prototype through a usability test.
Unlike UCD, in which users are consulted at various stages of the mobile application concept development, the lean development principle is based on the values that the product provides to consumers. Lean principles originated from the lean manufacturing developed by Toyota. However, lean software development originated from a book written by Tom and Mary Poppendieck [33]; essentially, it is a software development model inspired by lean manufacturing and agile development principles. The lean model focuses on customer feedback and the reduction of waste. Based on lean software development principles, waste is defined as any part of the development process that does not create value for consumers.
Therefore, the first step in following the lean principle is to understand and identify activities that create value in a product. In the digital service business, this practically means that users will not be motivated to use a new digital service if they do not see or recognize added value or personal interest in the new digital solution [34, 35]. The second step in the lean principle emphasizes that quality ought not to be a separate phase, and instead requires consideration at all phases of the software development. Creating knowledge is the third step in the lean principle, and stresses sharing information among project workers and customers. Deferring commitment to the lean principle promotes the need for decision making at the last minute. The lean principle moreover recommends delivering smaller increments of the software product over shorter time intervals and promotes project workers as independent decision makers in their designated tasks, allowing them to achieve their goals more efficiently. Finally, the lean principle recommends optimizing the product
based on consumers’ requests and hopes, just as UCD does [36].
Hence, in creating innovative and new applications in the software business, we are actually solving problems based on unknown proposals, as users cannot describe exactly what they need or want. Therefore, this requires iterative, lean and user-centred methods.
4 RESEARCH METHODOLOGY
The research approach is a case study [37] that applies an action research strategy [38], as the design team members were also actors in both the research and design work phases. Data was collected through semi-structured interviews and structured questionnaires from stakeholders during the design and development iterations. Table 1 shows the targets and phases of the design iterations in designing the outdoor mobile guide service, the role and number of stakeholders involved, and details of where the user encounters took place.
Table 1. Design iterations, stakeholders and context.

| Design iterations of outdoor mobile guide service | Stakeholder | N= | Where the interview, interaction or evaluation took place |
|---|---|---|---|
| 1. Business case and concept definition | Tourism company, ICT company and university-based tourism experts | 6 | Office |
| 2. Validation of the technological solutions | Software developers | 2 | Street navigation, testing by kayaking on the Baltic Sea |
| 3. Validation of the business viewpoint | Steering group members and two developers | 6 | Testing by kayaking on the Baltic Sea |
| 4. Validation of the end users’ viewpoint | Tourism university students | 23 | Testing by snowshoeing and hiking in the park |
| 5. Validation of the expert users’ viewpoint | Tourism experts | 5 | Testing by kayaking on the Baltic Sea |
| 6. Validation of business scaling in the selected business segment | Customers and staff of tourism companies | 50 | Interaction at tourism centres and testing by kayaking on the sea and river |
The study qualitatively analysed and applied the UCD principle to design a mobile web application for the outdoor tourism business. Outdoor tourism activities set special requirements for mobile application design processes. Therefore, in this paper we aim to reveal empirically the nature of the mobile application design process where several stakeholders are involved in the design and assessment actions throughout. The design process included tasks such as identifying requirements and creating mock-ups and prototypes with several iterative phases that utilize UCD principles.
5 DESIGNING and ASSESSING the MOBILE CONCEPT
5.1 Business Case and Concept Definition
The development team conducted a short study (n=6) to investigate the possibilities and benefits of a mobile application for small tourism companies. The chief executive officer (CEO) of a tourism company, the CEO of a mobile software company, three tourism university lecturers and the director of a tourism education department participated in the survey and shared their insights on the possibilities for, and needs of, an outdoor mobile guidance service. The university lecturers also work closely with several tourism companies, and therefore have extensive knowledge of the possibilities and challenges of mobile services for tourism companies and their customers. The study mainly focused on revealing the possibilities for mobile services in this sector, and the tourism companies’ expectations of, and business needs from, mobile-based services. By analysing the data, the development team identified that location-based mobile services should include route information, a geo-location guide, maps suitable for outdoor activities, points of interest, a link to the tourism company’s e-commerce site, general and safety information, and feedback functions. This was valuable information for our designers, and enabled them to propose an initial concept to the first partner companies. The designers themselves had prior experience of outdoor activities such as kayaking, camping and hiking, which helped greatly in coming up with additional features.
Figure 1. Example of the first conceptual plan and navigation structure.
Figure 2. Example of drafts of the mobile guide’s graphical user interface design.
The proposed features were shared with the companies, and their feedback was collected and analysed. Once the companies had confirmed the proposed features, we investigated their technological feasibility with programmers and software developers. Thus, the initial concept for a mobile web application to guide and inform kayakers and other outdoor tourists was designed and drafted (Figures 1 and 2).
5.2 Validation of the Technological Solutions
The first paper mock-ups and the proposed concept plans were assessed by the partner tourism company, which represented the voice of the customer at the beginning of the development period. After the partner companies confirmed the concept, the development team began implementing the application prototype. The prototype was built on Vaadin [16], an open-source rich internet application framework that combines a server-side programming model with client-side development tools based on the Google Web Toolkit and HTML5. Most of the application logic runs on the server, while Ajax is used on the browser side to provide interactivity and a better user experience; on the client side Vaadin mainly relies on the Google Web Toolkit, which also renders the resulting web pages. The framework is based on event-driven programming and provides widgets and add-ons that help developers design and build richer web applications more quickly.
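As an illustration of this server-side model, the following is a minimal sketch of what a Vaadin-style UI class can look like, assuming a Vaadin 7-era API; the class and component names are generic examples and are not taken from the project’s actual code.

```java
import com.vaadin.server.VaadinRequest;
import com.vaadin.ui.Button;
import com.vaadin.ui.Label;
import com.vaadin.ui.UI;
import com.vaadin.ui.VerticalLayout;

// Minimal server-side UI sketch: the component tree and event logic live on
// the server, and Vaadin renders them in the browser via GWT/HTML5 and Ajax.
public class GuideUI extends UI {

    @Override
    protected void init(VaadinRequest request) {
        VerticalLayout layout = new VerticalLayout();

        Label routeInfo = new Label("Select a route to start navigation");
        Button startButton = new Button("Start route");

        // Click events are delivered to the server over Ajax and handled here.
        startButton.addClickListener(event ->
                routeInfo.setValue("Navigation started"));

        layout.addComponents(routeInfo, startButton);
        setContent(layout);
    }
}
```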
Figure 3. Screenshots of the mobile guide application built on the Vaadin Java framework.
The Vaadin framework provides ready-made user interface elements, and therefore the visual appearance of the user interface was based on the framework’s visual themes. Figure 3 shows screenshots of the mobile guide application prototype. The mobile application is compatible with iOS and Android devices, and the content is created, updated and managed through an external content editor, which was also built for this research project.
The software developers (n=2) themselves first assessed the functions of the prototype at street level, as geo-location features require outdoor testing. They focused on assessing the software code related to the digital maps, navigation, digital compass and global positioning system (GPS). After the functionality tests on the street, the first field test involved the designers using the mobile guide prototype in a target environment while kayaking on the Baltic Sea. This field test revealed new issues, which caused the development team to reconsider the design. First, the mobile device had to be kept in a waterproof plastic bag while kayaking, and this caused significant challenges for the user interface designers. Although this was recognized beforehand to some extent, the bright sunshine and glinting water caused considerable visibility challenges in the real usage environment. The user interface designers therefore had to redesign, in particular, the visibility and size of the buttons and other navigation elements of the graphical user interface. In addition, the strength of the users’ internet connection varied at sea: strong enough in places, but significantly weakened in others. Furthermore, the kayakers were often sitting near the sea’s surface, and the bobbing of the water and breaking of waves also disturbed the internet connection. The GPS signal, however, was excellent at sea because, unlike in cities, there are no physical obstacles. The unstable internet connection on the sea and in the archipelago surprised the development team and caused further changes to the initial development plan. For example, the development team re-prioritized the requirements, dropped the features that required continual online access, and changed the logic of the navigation software component; before these changes, the application prototype had crashed every time during testing.
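The kind of change involved can be sketched as follows: a hypothetical route data provider that prefers a fresh network fetch but falls back to the last cached copy when the connection drops, rather than failing. The class, interface and method names here are illustrative assumptions, not the project’s actual code.

```java
import java.io.IOException;
import java.util.Optional;

// Hypothetical sketch: prefer fresh route data, but fall back to the last
// cached copy when the connection drops instead of failing outright.
public class RouteDataProvider {

    /** Assumed remote API wrapper; the real service is not described in the paper. */
    public interface RouteService {
        String fetchRouteGeoJson(String routeId) throws IOException;
    }

    private final RouteService remote;
    private String cachedRouteGeoJson; // last successfully fetched payload

    public RouteDataProvider(RouteService remote) {
        this.remote = remote;
    }

    public Optional<String> currentRoute(String routeId) {
        try {
            cachedRouteGeoJson = remote.fetchRouteGeoJson(routeId);
        } catch (IOException offline) {
            // No connection at sea: keep navigating with whatever is cached.
        }
        return Optional.ofNullable(cachedRouteGeoJson);
    }
}
```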
In the first plan, the development team aimed to include online route tracking and automatic saving in the service, but this feature had to be removed from the first release, despite it being flagged as useful by the tourism companies. It would have enabled the companies to record the kayaking routes of their customers and later allowed them to identify the most popular routes. It was decided to keep this feature in the product roadmap, with a view to reconsideration in the future.
5.3 Validation of the Business Viewpoint
The development proceeded to the second prototype, the so-called alpha version, which implemented roughly 70% of the requirements and was ready for testing by a small group of potential customer companies. The steering group members of this project represented the potential customer tourism companies. Two group members were company representatives: one was the CEO of an outdoor tourism company and the other was the marketing director of a company specializing in location intelligence and digital map services. They therefore each had valuable viewpoints on the business perspective of the application. Overall, the testing group consisted of four steering group members and two developers (n=6).
The steering group members conducted the field testing in the archipelago of East Helsinki in Finland. They each had their own smartphone in a plastic bag, with the mobile web application running in the background on the phone. The test group kayaked the Kalliosaaren kierto route with the help of the application guide. The test trip took five hours, during which we conducted a semi-structured group interview to collect feedback on the users’ experience and the development needs.
The overall analysis of the user feedback revealed that the company representatives, i.e. business experts, were satisfied with the application. They expressed positive attitudes towards the concept and their experience of it, as it helped them to navigate the archipelago and displayed the selected route information. The field test showed that the application displayed the current kayaking speed and accurately calculated the distance to the next turning point and the final destination. If a kayaker turned away from the route, the navigation arrow turned red; when the direction was corrected, the arrow returned to green, digitally guiding the kayaker back to the correct route.
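One plausible way to read this behaviour is as a comparison between the kayaker’s current heading and the bearing to the next turning point. The sketch below is a hypothetical reconstruction of such logic; the tolerance value and names are assumptions, not the project’s actual implementation.

```java
// Hypothetical sketch of the red/green arrow behaviour described above:
// compare the kayaker's heading with the bearing to the next turning point
// and show green only while the deviation stays within a tolerance.
public final class HeadingIndicator {

    public enum ArrowColour { GREEN, RED }

    private static final double TOLERANCE_DEGREES = 30.0; // assumed threshold

    public static ArrowColour colourFor(double headingDegrees,
                                        double bearingToNextPointDegrees) {
        double deviation =
                Math.abs(angleDifference(headingDegrees, bearingToNextPointDegrees));
        return deviation <= TOLERANCE_DEGREES ? ArrowColour.GREEN : ArrowColour.RED;
    }

    /** Normalize an angle difference into the range [-180, 180). */
    static double angleDifference(double a, double b) {
        double d = (a - b) % 360.0;
        if (d >= 180.0) d -= 360.0;
        if (d < -180.0) d += 360.0;
        return d;
    }
}
```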
The users also emphasized that having the application at hand made them feel safe and secure. Moreover, the field test demonstrated the potential business benefits for tourism companies. They stated that the mobile guide “would work as a digital tourism guide that helps companies to scale business”, which also indicates that better services could result in increased customer satisfaction. In general, the application received positive feedback, especially the user interface design, which was said to support use in challenging environments without any major technical or usability problems. The testers naturally suggested some new features, such as a warning mechanism, or at least better information about ferry routes, and a “home button” that would guide the kayaker directly back to the home harbour.
This testing round also raised some additional development requirements. For example, each route included several turning points, which can be defined on the editor site when creating a new route in the system. If a kayaker went 50 metres past a turning point, the navigation compass began to point backwards, towards the turning point, unless the kayaker clicked the next-point button. Instructive information was needed for this, but smartphones’ small screens do not have space for such extra information, and we found that only pop-up windows would be user friendly. The development team had already recognized this challenge, but no simple solution was forthcoming: the software cannot know whether you are lost and heading in the wrong direction, or whether you have passed the turning point but are actually heading in the right direction. The solution requires more intelligence, and this need was added to the further development roadmap.
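A sketch of the kind of heuristic this implies is shown below: advance to the next turning point automatically only when the user is clearly past the current one and still roughly aligned with the leg towards the following point. The distance and angle thresholds are illustrative assumptions, not values used in the project.

```java
// Hypothetical sketch of the turning-point problem described above: if the
// kayaker is past the turning point but still roughly aligned with the leg
// to the following point, advance automatically; otherwise keep pointing back.
public final class WaypointAdvance {

    private static final double PASSED_DISTANCE_METRES = 50.0;      // assumed
    private static final double ON_COURSE_TOLERANCE_DEGREES = 45.0; // assumed

    public static boolean shouldAdvance(double metresPastTurningPoint,
                                        double headingDegrees,
                                        double bearingToFollowingPointDegrees) {
        if (metresPastTurningPoint < PASSED_DISTANCE_METRES) {
            return false; // not yet clearly past the turning point
        }
        double deviation = Math.abs(
                angleDifference(headingDegrees, bearingToFollowingPointDegrees));
        return deviation <= ON_COURSE_TOLERANCE_DEGREES;
    }

    /** Normalize an angle difference into the range [-180, 180). */
    private static double angleDifference(double a, double b) {
        double d = (a - b) % 360.0;
        if (d >= 180.0) d -= 360.0;
        if (d < -180.0) d += 360.0;
        return d;
    }
}
```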
5.4 Validation of the End Users’ Viewpoint
The tourism company Natura Viva arranged the field tests several months later, in which tourism university students (n=17) used the application while snowshoeing different routes on sea ice. The testers represented authentic end users, as the tourism students were active in outdoor activities and many had specific experience of the activities for which the application was designed. Feedback was collected after testing using a structured questionnaire that included closed and open-ended questions. Thirteen of the seventeen testers returned the feedback forms; seven used an iPhone and six an Android device. Eleven of the thirteen used mobile applications daily or at least a few times a week. Similar tests were carried out two months later with a group of tourism students (n=6) who used the mobile guide application to navigate a route along the paths of a city park. Figure 4 presents a picture of the users’ field test.
Figure 4. Tourism university students preparing to test the concept by snowshoeing a route guided by the prototype application.
The overall feedback we received was positive and optimistic. For example, typical replies to the question “Good things in this navigation application?” included: “very clear with coloured arrows pointing to the right direction”; “it’s simple and easy to follow”; “easy to see where you are, the direction and generally easy to use once it’s started”; and “it’s really fun and easy and doesn’t require any special skills to use it”. The testers, however, raised development issues, such as: “could add some sounds/feedback, and block the screensaver”; “the arrow was kind of restless at times, it was difficult to follow...”; “the current location should be in the centre...”; and “sometimes the arrow was slow and showed green for every direction”. These comments indicated that navigation was the key feature on which users focused, meaning it ought to work perfectly. The testers requested additional features, such as: “a person telling you where to go, so you don’t have to look at your phone constantly”; “voice feedback”; “amount of steps taken and a voice telling you where to go”; and “info about the things to see around you”. Some testers thought the application already had everything they would need, stating: “Nothing. If I need a navigation app, navigation features are all I need”; and “I can’t think of any. Everything I needed was there”. The testers did not call for radical changes or any compulsory new features. As the previous examples demonstrate, the users believed that the basic features worked well and that new features would improve and simplify the user experience, but only once the basic functions worked properly. The basic features are the foundation for more advanced, secondary features, which are used only if the basic features of the application bring value; in essence, the basic features are the doorway to the use of mobile applications.
5.5 Validation of the Expert Users’ Viewpoint
The design team arranged a two-day testing trip in which tourism experts (n=5) used the mobile application while kayaking three routes in the Helsinki archipelago of the Baltic Sea (Figure 5). The experts were a German navigation consultant and trainer with vast experience of outdoor tourism, wellness activities and associated navigation devices; two tourism experts (a lecturer and a project manager) from the tourism education department of a university; the CEO of an outdoor tourism company that offers services, e.g. kayaking, for tourists in the Helsinki area; and the project manager of this research and development project. Three of them had prior experience of the application from previous testing iterations. All participants used the mobile guide application while kayaking pre-defined routes during the trip.
Data related to the usage experiences and development proposals was collected from the semi-structured group interview conducted during the test trip while participants were using the mobile guide. The framework of interview themes concerned the benefits for users, development needs, technical problems and the business potential in European markets. Overall, the user experience was positive, although a bug in the iOS 6.0 operating system blocked the automatic location feature in the Safari browser. The automatic location information worked for a spell and then stopped, and the application had to be restarted.

The test users highlighted significant benefits related to safety and entertainment. The mobile guide gave users a feeling of safety, as it displayed their current location, speed and distance information related to the route. No similar application was known to exist in the European markets that allowed tourism companies to edit and manage content and that had a user interface designed for use during outdoor activities, particularly kayaking and hiking.
5.6 Validation of Business Scaling in the Selected Business Segment
The design team arranged four company pilots (n=4) after the application concept and functional prototype had been readied for performance and usability assessments. Three company pilots were arranged with tourism companies that largely offer their customers kayaking, canoeing and hiking services on the sea or in river areas. The fourth pilot involved a hotel in Lapland, Finland, that targeted hikers.
All of the companies received their own application’s internet address or uniform resource locator (URL) with their own content, including points of interest, route content and instructions. The development team also printed a poster that invited customers to test the mobile guide application. The end users were met in person at the tourism premises, except for the hotel guests, who were asked to leave written feedback at the hotel reception, as face-to-face meetings were not feasible due to the hotel’s distance from Helsinki.
The design team conducted end user interactions with approximately 50 customers and staff members (n=50) at three different kayaking or paddling centres. Most end user interactions took place on land while users were preparing for their kayaking trip. In the first phase of end user interaction, the interview mainly focused on interest in the newly proposed mobile guide service; in the second phase, if the potential end users were interested in reviewing or testing the application, it focused on the user experience and technical functionality.
In addition to documenting this feedback, data was also collected through the application’s unstructured feedback function, which allowed users to send written feedback directly to the designers. Examples of the user experience and technical feedback received were: “Does not work in HTC Sensation Z710e phone, application does not recognize the phone, e.g. Google Map works fine in the same phone and it has been tested over one day and the application started several times”; and “The application basically works, but minor failures disturb usage”. The users also gave practical ideas for how to improve the application, such as: “The navigation arrow could be larger...”; “...you could add a new feature that helps users: A touch and a user indicator (blue point) on the screen would change visible point instead of target point... ...the blue point would appear “wave” element...”; and “The approaching colours (green/red triangle) would change in the app. based on a 90-degree sector. Could you also do this for 45-degree sectors?”
In this testing iteration, the design team also shared the mobile guide prototype with the staff of tourism companies and discussed with them their expectations and requirements. The staff of tourism companies were eager to know how their customers accepted the mobile guide application, and what kinds of benefits it could offer their business. All of them believed that mobile services would form an essential part of the tourism business in the future, with the “automated guide” and “digital tourism guide” seen as the mobile applications with most potential in outdoor activities, given that their customers expect guide and route information. Investing in this would probably increase customer satisfaction and encourage fearful kayakers or hikers to purchase an outdoor trip or rent a kayak. However, the managers’ open question remains: How will they earn a return on their investment if the mobile service is offered free of charge?
5.7 Limitations of the Pilot Testing
On starting prototype testing at the tourism company premises in the sixth iteration, we expected to encounter eager users, because almost all of them had been interested in the mobile guide application in the previous phases. However, the results of assessing the prototype indicated that only a few of the end users proactively downloaded the application or scanned the quick response (QR) code while participating in activities such as kayaking or hiking. Most of the end users who tested the application and provided feedback were asked to use the application; few downloaded it voluntarily. The steps involved in testing consisted of loading the application in the browser or scanning the QR code, learning the key features, using the application during the trip and reporting on the experience.
Kayaking is an activity that requires concentration to maintain balance, which means that, if you are not careful, there is a risk of damaging or wetting your own mobile phone while getting familiar with the functions of the mobile application. That is a reasonable explanation of why many kayakers did not try to use the new application while focusing on their kayaking. As a result, several users liked to hear and talk about the application but were hesitant to try it in practice. The most active and enthusiastic users were those with some experience of mobile navigation devices; we therefore call this group of users the early adopters and expert users. Early adopters are often interested in new technologies in their field of interest.
Despite the problems discussed above, during the prototype tests in the field environment the design team did collect enough feedback from the active potential users who voluntarily tested the application. In addition, conducting interviews with potential users provided us with substantial relevant feedback and increased our understanding of users’ expectations and priorities. However, it is important to mention that the hotel prototype testing was not as successful as the other company pilot tests, because the design team could not conduct face-to-face interviews with the target users. As a result, we gathered little relevant feedback from the hotel-based end users. It is difficult to estimate how many of them actually tested the application, but few returned the feedback form. Nevertheless, the post-interview with the hotel manager was beneficial and provided new insights regarding the application. The timing of the prototype testing was not optimal, as most users hike in the autumn. In addition, most customers at the hotel were retirees and thus not the most eager testers of new mobile applications. Furthermore, the design team could not remotely support and motivate potential end users to test the mobile service in practice. The design team learned that hotel visitors are not spontaneous or proactive testers, and instead need to be tempted or convinced to test by motivating or helping them in some way. Similar findings emerged from the other piloting premises; the users needed to be motivated to provide formal feedback.
6 DISCUSSION
6.1 The Key Roles of Stakeholders in Mobile Service Design
The UCD process should adopt a broad perspective and not focus only on the end users if a successful new service is to be produced. Instead, software and ICT services must satisfy other stakeholders, such as business managers, the software team, project management and, in many cases, the project financiers and providers [39, 40, 41]. In UCD it is essential to begin the iterative development process by identifying the most significant stakeholders and their impact on the overall application. This novel application design and development is a result of merging tourism business offerings with existing technological possibilities. Co-operation and interaction between software developers, project management, tourism experts, tourism companies and their potential customers provided useful resources for defining the specification requirements and realizing the expected user interface design. Conducting various prototyping phases, continuous testing events with target users of the application and several interview sessions with tourism companies’ management provided us with a great deal of valuable data. The analysis of the gathered data helped significantly in defining and prioritizing features and identifying the stakeholders’ demands. Figure 6 shows the three key stakeholder groups involved in the application concept design process.

This paper emphasizes the importance of involving the most significant stakeholders in the design process, not only at the beginning of the project but throughout every phase of the design and development work. The diversified interaction and collaboration with end users and potential customer companies helped us to focus on the most important and requested features and to continually prioritize the product backlog, the development plan and the product roadmap.
6.2 The Main Contributions of Each Stakeholder
This paper emphasizes that working in a cross-disciplinary team ensures a broader viewpoint during the design process. The users’ involvement may affect the specification requirements from the elicitation phase onwards, right up to the prototype evaluation phase. Therefore, as Figure 7 shows, use of the mobile guide application can be categorized into three main phases: the end users use it before their trip mainly for route planning; during their trip mainly for navigation and information about the route and points of interest; and after their trip to share their experiences. The system administrator takes care of the configuration, and the tourism companies’ personnel manage the route and the information about points of interest.
The companies expect business benefits, such as more revenue, increased customer satisfaction and differentiation from competitors, whereas their customers or end users expect direct benefits for their outdoor activities, such as automatic route guides and more relevant information.
6.3 User Involvement in Application Design
The management of the pilot companies naturally considered the mobile services in a broader context than the end users did. There were therefore several “layers” of interest within the development of the mobile service. The UCD principle ensures that all stakeholders’ interests are taken into account, and several users have various roles in the digitalized service process. The prototype testing results indicated that the mobile service generated unanticipated, new added value for small tourism firms and their customers.
This project found that end users place significant value on the core features that offer them most benefits and satisfaction. However, these features have to be implemented in a proper way, one that results in an excellent user experience. The lack of proper functionalities or a poor design process results in a service failure from a usability point of view. Hence, the developers should recognize those features and their nuances as quickly as possible and prioritize their development effort by focusing on them. This can be achieved using rapid development methods and a mobile web development framework, and its add-ons, to shorten the lead-time of software product development cycles.
Mobile applications need to attract users and engage them emotionally to achieve sustained usage [42, 43]. This is even more important in outdoor navigation applications, where the user not only has to attach emotionally to the application but must also trust it and feel secure with it, a point raised during the field testing phase of this application. This is particularly important because the target users of such applications are often kayakers, joggers, cyclists or hikers who are alone or in a small group in rugged or rural areas. Moreover, the mobile service should be easy to load and fast to navigate, and its functional logic should follow the user’s mental models, i.e. their prior assumptions.
The field tests revealed that the end users and companies’ staff members were satisfied with the overall concept and the application, and viewed it as a useful service that would provide added value for outdoor activities. Interestingly, many active kayakers used waterproof mobile devices and had downloaded some form of tracking application to their devices. The end users liked the information offered by the application, such as points of interest, guided route information, speeds and the distance to the destination. For instance, some users liked to see how fast they were able to kayak on a particular route or how far away the destination was, or to be able to locate the nearest point of interest.
6.4 Managing the User-centred Design Process
The involvement of several stakeholders in the design process requires careful project planning and management, as each stakeholder reviews the concept, requirements and user experience from a different perspective. As Ries [25] states, it is easier to build known software products for established markets than to innovate unknown product concepts for emerging early markets. Therefore, to optimize the use of resources and shorten the time-to-market, it is recommended to plan carefully which features could be tested using scenarios, screenshots, sketches and other mock-ups without the need for coding the application itself.
Table 2 shows the six design iterations, the different stakeholders they involve, and their validation targets and content. Testing usability and researching business benefits involve different development goals, although they can be tested using the same prototypes. This study shows that all six of these iterations bring value to the mobile service design process and have a unique role in the process. It is also recommended to involve all stakeholders in the design process as early as possible, as they will in any case affect its success when the mobile service is launched.
<table>
<thead>
<tr>
<th>Design iterations</th>
<th>Stakeholders and target of design iteration</th>
</tr>
</thead>
<tbody>
<tr>
<td>1. Business case and concept definition</td>
<td>Business owners: Produce initial concept plan and visual drafts to create a common understanding of the goals</td>
</tr>
<tr>
<td>2. Validation of the technological solutions</td>
<td>Software developers: Validate technological solutions by testing functional prototypes in the real usage context</td>
</tr>
<tr>
<td>3. Validation of the business viewpoint</td>
<td>Company representatives: Validate the business viewpoint by assessing the concept using prototypes</td>
</tr>
<tr>
<td>4. Validation of the end users’ viewpoint</td>
<td>Potential end users: Validate the end users’ viewpoint by assessing the concept using prototypes</td>
</tr>
<tr>
<td>5. Validation of the expert users’ viewpoint</td>
<td>Domain experts: Validate the marketing, sales and service viewpoints by assessing the concept using prototypes</td>
</tr>
<tr>
<td>6. Validation of the business scaling, delivery and service needs</td>
<td>Company pilots: Validate the scalability, delivery and service needs by piloting mobile services with real potential customer companies</td>
</tr>
</tbody>
</table>
This study points out that usability issues are only part of successful product design. For example, the managers of tourism companies were most appreciative of business benefits, while the end users focused on the guidance and navigation benefits of the application. To obtain relevant feedback in field testing situations, interviews and performance assessments in real usage environments should focus on emotion- and motivation-related issues, as they have a direct impact on the financial success and scaling of a new mobile service.
7 CONCLUSIONS and FUTURE WORK
This case study, accomplished using an action research strategy, contributes to software development methods by describing an experiment in which users were involved in the mobile service design through six different design and assessment iterations. Each of the iterations had its own role and specific stakeholder group, and each helped the design team to gain rich and versatile user experience data. As a UCD method it worked logically, and each phase synchronized with the others.
The study revealed that iterative UCD significantly increases the number of stakeholder touch points, and therefore helps to integrate business and user needs with new technological possibilities. We have also shown that the involvement of various stakeholders in new mobile service development ensures better integration of business expectations, mobile web technologies and user experiences. Hence, the UCD approach should adopt a broad perspective, not only the viewpoint of the end users, as a successful new mobile service must satisfy business managers, end users, software developers, project management and, in many cases, the project financiers.
The latest cloud computing, mobile technology advancements and UCD methods have created a unique opportunity to boost digital services, especially in the small business market segment. Hence, this requires fresh research on how such new digital services can be designed and developed more quickly by involving various stakeholder groups in an iterative design process.
ACKNOWLEDGEMENT
This work was supported by DIGILE’s Digital Services research program, funded by TEKES. We also wish to thank the partner companies for their helpful contributions and support in this project.
REFERENCES
|
{"Source-Url": "http://sdiwc.net/digital-library/request.php?article=daff130a2c8fd7cd8109ba6931458574", "len_cl100k_base": 8818, "olmocr-version": "0.1.53", "pdf-total-pages": 15, "total-fallback-pages": 0, "total-input-tokens": 63338, "total-output-tokens": 11558, "length": "2e13", "weborganizer": {"__label__adult": 0.0006585121154785156, "__label__art_design": 0.0016431808471679688, "__label__crime_law": 0.00046133995056152344, "__label__education_jobs": 0.00745391845703125, "__label__entertainment": 0.00014603137969970703, "__label__fashion_beauty": 0.0003409385681152344, "__label__finance_business": 0.001903533935546875, "__label__food_dining": 0.0008697509765625, "__label__games": 0.0017366409301757812, "__label__hardware": 0.0013227462768554688, "__label__health": 0.0010585784912109375, "__label__history": 0.0011539459228515625, "__label__home_hobbies": 0.00012230873107910156, "__label__industrial": 0.0005693435668945312, "__label__literature": 0.0005097389221191406, "__label__politics": 0.0004992485046386719, "__label__religion": 0.0006117820739746094, "__label__science_tech": 0.0239410400390625, "__label__social_life": 0.0001270771026611328, "__label__software": 0.01271820068359375, "__label__software_dev": 0.93798828125, "__label__sports_fitness": 0.0005421638488769531, "__label__transportation": 0.0013217926025390625, "__label__travel": 0.0022792816162109375}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 54547, 0.03546]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 54547, 0.06526]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 54547, 0.9378]], "google_gemma-3-12b-it_contains_pii": [[0, 3718, false], [3718, 8038, null], [8038, 12645, null], [12645, 16523, null], [16523, 18871, null], [18871, 23454, null], [23454, 27440, null], [27440, 31238, null], [31238, 35846, null], [35846, 39515, null], [39515, 42140, null], [42140, 46285, null], [46285, 50090, null], [50090, 53932, null], [53932, 54547, null]], "google_gemma-3-12b-it_is_public_document": [[0, 3718, true], [3718, 8038, null], [8038, 12645, null], [12645, 16523, null], [16523, 18871, null], [18871, 23454, null], [23454, 27440, null], [27440, 31238, null], [31238, 35846, null], [35846, 39515, null], [39515, 42140, null], [42140, 46285, null], [46285, 50090, null], [50090, 53932, null], [53932, 54547, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 54547, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 54547, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 54547, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 54547, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 54547, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 54547, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 54547, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 54547, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 54547, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 54547, null]], "pdf_page_numbers": [[0, 3718, 1], [3718, 8038, 2], [8038, 12645, 3], [12645, 16523, 4], [16523, 18871, 5], [18871, 23454, 6], [23454, 27440, 7], [27440, 31238, 8], [31238, 35846, 9], [35846, 39515, 10], 
[39515, 42140, 11], [42140, 46285, 12], [46285, 50090, 13], [50090, 53932, 14], [53932, 54547, 15]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 54547, 0.09697]]}
|
olmocr_science_pdfs
|
2024-12-11
|
2024-12-11
|
f1e4b017dc374af2b657f528fc66882df1bf8aa5
|
Ville-Pekka Peltonen
Software Asset Management
Current state and use cases
Helsinki Metropolia University of Applied Sciences
Master of Business Administration
Master's Degree Programme in Business Informatics
Thesis
24.4.2015
Abstract
The purpose of this study was to analyse the current state of asset management at a major telecom operator in Finland, in order to create a competitive edge for the company from software asset management. The aim was to use the results of the current state analysis to improve the target company’s software asset management.
The study was carried out as qualitative research with interview workshops, in order to get a clear picture of what the organisation already has in place. The participants for these workshops were carefully chosen so that the entire company was represented.
The results from these workshops showed that a lot had already been done regarding software asset management in the company, but the work was isolated and needed to be organised. There was a lot of room for improvement in software asset management. The most critical issue was the lack of proper tools for handling the license masses in the company. Therefore, the improvement task in the present study was to create use cases that could be used as a communication tool illustrating the desired outcome of software asset management, and also to explain to other parties what should be done, and why, to improve the company’s performance in this area.
The author proposes that the company should start a programme to build the capabilities required for effective software asset management.
Keywords
Software asset management, Current State Analysis, Use Case
1 Introduction
This paper was written as part of my MBA studies at Metropolia University of Applied Sciences. It describes an improvement project carried out in the Software Asset Management (SAM) area in order to improve SAM and achieve a better understanding of the risks involved in this area. SAM is introduced in the second chapter of this paper.
The project is now finalized. Its scope changed from the original due to a re-organization during the project. The constant organizational changes in the case company make project work quite challenging, since project resources can vanish from a project literally overnight.
During the project there were two different project managers, who mainly concentrated on steering and handling project governance issues. The core group in the project work consisted of me and my colleague Merja Sorsa, neither of whom had deep knowledge of SAM: Merja had started working in the license management team just two months earlier, and my normal work was not in the license management area. We were fortunate to have senior expertise in the license management team to point us in the right direction at the beginning, which saved us plenty of time.
The project consisted of two phases, which were quite separate activities. The first phase was a current state analysis, conducted as interviews of the stakeholder groups involved in the different phases of SAM. We chose workshops as the structure for these interviews in order to have rich discussions on the topics of each workshop. Dividing the workshops by topic helped us keep the focus of each workshop, because people often tend to extend the discussion outside the agreed topic when explaining the problems they face. Such discussions could then easily be directed to the appropriate workshop, keeping the discussion sharp and to the point.
The second phase was an improvement activity concentrating on management tools, mainly the reconciliation and contract management tools, enabling SAM to work in a more agile way. Furthermore, one of the main goals of the whole project was to enable SAM to respond to the coming pressure from outside the organization in the form of software audits. With proper SAM we could mitigate the risks and minimize the costs arising from those software audits. In addition, it was an obvious goal to find tools capable of optimizing licenses. In audits, software vendors investigate, in co-operation with the organization, whether it has the right number of licenses compared with the number of installations. A paid invoice is often needed as evidence for verifying usage rights. The motivation for vendors is purely financial: they want to be sure that customers have paid for what they are using. For customers the motivation is also financial, as you do not want to pay for something you do not even use.
The original intention of the project was to implement a tool for SAM purposes, but in the early setup phase the project steering group understood that we had to narrow the scope so that we could deliver a current state analysis of the organization’s capabilities. The reasoning for this was that we had to find out how things really are instead of making assumptions; work based on assumptions usually does more harm than good.
We also decided to investigate how the organization’s current tools could support SAM activities over the whole software lifecycle. This would prevent investing in yet another IT tool before we were sure that the need could not be covered by an existing tool; not investing in a new tool means the money can be used for something else in the organization. This intention was later reduced to defining use cases for the SAM tool, as we encountered problems with the current tools. The problems related to the information model, which had not been set up properly when the tool was taken into use; at that time the tool was intended only for limited use with limited information.
At the end of the project we ended up with a very good current state analysis and tool-independent use cases for Software Asset Management. The use cases will be a real asset in the next phase of the license management improvement work, giving any future project a tangible description of the particular process and tool needs for controlled and efficient Software Asset Management.
2 Software Asset Management
Software Asset Management (SAM) is part of IT asset management (ITAM). Gartner defines ITAM as a framework and set of processes for tracking and monitoring the financial, physical, licensing and contractual aspects of IT assets throughout their life cycle (Snyder 2012, p11).
IT assets cover all items used by the organization for IT that have some kind of value. Assets can be tangible or intangible. Tangible assets include things like servers, storage and networks; they are easy to identify because they are physical. Intangible assets are usually harder to comprehend, because they cannot be counted as easily as physical things. Licenses are intangible assets: a license grants the right to use certain software.
There are different kinds of licensing models, and depending on the model one license can cover, for instance, a single physical server installation or unlimited installations. In an example case where a server has four central processing units (CPUs), one installation needs as many licenses as the server has CPUs; in the Oracle Database case, a four-CPU server would need four Oracle Database licenses for a single installation on that server. At the other end of the spectrum, Microsoft, for example, offers an Enterprise Agreement that gives the company the possibility to make unlimited installations of the agreed Microsoft products during the agreement period.
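A minimal sketch of the per-CPU calculation in the Oracle example above, assuming simply one processor licence per physical CPU per installation; real vendor metrics such as core factors and virtualization rules are deliberately omitted.

```java
// Minimal sketch of the per-CPU licence calculation described above:
// one processor licence per physical CPU per installation. Real vendor
// metrics (core factors, virtualization rules) are deliberately omitted.
public final class ProcessorLicensing {

    public static int licencesNeeded(int physicalCpus, int installations) {
        return physicalCpus * installations;
    }

    public static void main(String[] args) {
        // The example from the text: a four-CPU server with one installation.
        System.out.println(licencesNeeded(4, 1)); // prints 4
    }
}
```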
SAM concentrates, as the name suggests, on managing the software used by the organization. Software is always owned by its publisher; a license grants the user organization the right to use that particular software. The primary reason for introducing SAM to an organization is cost savings, which give the organization a competitive edge over competitors that lack such a capability (Snyder 2012, p1).
According to a Gartner survey of organizations that have started SAM programs, 40% of companies achieve more than 10% savings in the first year, and more than 50% see above 10% savings between years two and five (Snyder 2012, p1).
The majority of SAM programs report successful cost-saving results, totalling 92.3% of respondents. This high success rate implies that there is a real benefit for a company in putting SAM at a focal point in the organization, especially when software costs for IT companies can be in the tens or even hundreds of millions of euros. This suggests that cost savings are most certainly something that must have CIO- and CFO-level attention so that they can be pursued smoothly throughout the organization (Snyder 2012, p2).
<table>
<thead>
<tr>
<th>Degree of success</th>
<th>Total</th>
<th>Level 1 Maturity</th>
<th>Level 2 Maturity</th>
<th>Level 3 Maturity</th>
<th>Level 4 Maturity</th>
<th>Level 5 Maturity</th>
</tr>
</thead>
<tbody>
<tr>
<td>Total (number of respondents)</td>
<td>143</td>
<td>10</td>
<td>17</td>
<td>63</td>
<td>28</td>
<td>25</td>
</tr>
<tr>
<td>1. Not at all successful</td>
<td>2</td>
<td>-</td>
<td>-</td>
<td>2</td>
<td>-</td>
<td>-</td>
</tr>
<tr>
<td>2.</td>
<td>1</td>
<td>-</td>
<td>-</td>
<td>1</td>
<td>-</td>
<td>-</td>
</tr>
<tr>
<td>3.</td>
<td>8</td>
<td>3</td>
<td>2</td>
<td>3</td>
<td>-</td>
<td>-</td>
</tr>
<tr>
<td>4.</td>
<td>29</td>
<td>1</td>
<td>5</td>
<td>13</td>
<td>7</td>
<td>3</td>
</tr>
<tr>
<td>5.</td>
<td>43</td>
<td>4</td>
<td>3</td>
<td>21</td>
<td>8</td>
<td>7</td>
</tr>
<tr>
<td>6.</td>
<td>42</td>
<td>2</td>
<td>6</td>
<td>16</td>
<td>8</td>
<td>10</td>
</tr>
<tr>
<td>7. Extremely successful</td>
<td>18</td>
<td>-</td>
<td>1</td>
<td>7</td>
<td>5</td>
<td>5</td>
</tr>
</tbody>
</table>
Table 1: Cost Savings from ITAM
Table 1 above shows results from a Gartner survey conducted during 2011 among companies that have started ITAM programs. It illustrates how organizations perceive their own success in ITAM programs. There is a clear correlation between the level of maturity and cost savings: the more mature the company, the more likely it is to achieve savings. The maturity model is explained in detail below (Snyder 2012, p5-6).
Gartner describes the maturity levels as follows:
Level 1: Ad hoc. ITAM exists, but roles and processes are event- and transaction-driven. At this maturity level, licenses are purchased by individuals based on current need, without, for example, checking whether someone might already have a free license available on their desk.
Level 2: Repeatable. ITAM roles and processes are defined at the departmental level, but are not uniform across the organization. At this stage there might be an agreement in the company that an assistant orders the licenses, but the recording of assets is usually still at a low level.
Level 3: Defined. ITAM roles are defined, and processes are applied consistently across the organization. ITAM data is reliable and is starting to be used to guide tactical IT decisions. At this level there usually starts to be some kind of picture of how many licenses the organization has purchased.
Level 4: Quantitatively managed. ITAM responsibility and process integration extends into financial management and other adjacent areas. ITAM reporting is used by senior executives to manage cross-IT operational costs via reporting (for example, IT budget calculations, budget variance reconciliation and total cost of ownership).
Level 5: Optimizing. There is a proactive capacity to "sense" the business and its strategy, to look forward and plan future actions, and to support IT strategies (for example, cost modelling, IT service cost alignment and long-term IT planning). ITAM data is used at the most senior levels of the organization to influence and guide IT strategies (sourced vs. outsourced, for example, or cloud vs. on-premises). This is facilitated by proactive cross-organizational engagement (with, for example, strategic sourcing, and enterprise architecture and project and portfolio management).
Gartner has two maturity models that overlap in the area of ITAM. These maturity models are directed at differing audiences (IT operations and more centralized ITAM programs), but are generally consistent in the levels they use to measure how an ITAM program matures.
The main focus in SAM is to reach a compliant situation, where the entitlement to use software matches the software actually installed and used by the organization. When it comes to reconciling the license status, a company can be in one of three states: compliant, over-licensed or under-licensed (Barber 2010, p5).
Compliance means that the organization is licensed exactly right, which is extremely rare. At any time it is likely that there are purchase orders being processed and installations waiting for implementation. This leads to a small margin of error, with the exception of legacy products that are no longer purchased or deployed. Therefore a small tolerance can be applied within which the situation can still be considered compliant (Barber 2010, p5).
Over-licensing is a situation where the organization uses fewer licenses than it actually owns. In this situation the licenses are usually placed in a pool where they can be used later if needed. In an over-licensing situation the organization must also consider whether retiring licences can bring savings in the form of avoided support fees; of course, such a decision must be made in a way that also goes through possible future needs for the particular license, so that the organization can avoid future purchases if some usage is foreseen (Barber 2010, p5).
Under-licensing is the most dangerous situation for the organization, as it is a breach of the license terms and conditions. Furthermore, it can jeopardize the organization’s relationship with the licence vendor, resulting in a hike in license prices. This result in an internal audit should always trigger investigation and remediation, so that the risk of losing both money and the organization’s image can be removed (Barber 2010, p5).
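Conceptually, this reconciliation amounts to comparing entitlement counts against installation counts per product, with a small tolerance for purchases and installations still in flight. The sketch below is illustrative only; the tolerance value and data shapes are assumptions, not the case company’s tooling.

```java
import java.util.Map;

// Illustrative sketch of licence reconciliation: compare entitlements
// (licences owned) against inventory (installations found) per product.
public final class LicenceReconciliation {

    public enum Status { COMPLIANT, OVER_LICENSED, UNDER_LICENSED }

    /** Small assumed tolerance for purchase orders and installs still in flight. */
    private static final int TOLERANCE = 2;

    public static Status statusFor(int licencesOwned, int installations) {
        int gap = licencesOwned - installations;
        if (Math.abs(gap) <= TOLERANCE) return Status.COMPLIANT;
        return gap > 0 ? Status.OVER_LICENSED : Status.UNDER_LICENSED;
    }

    public static void report(Map<String, Integer> entitlements,
                              Map<String, Integer> inventory) {
        // Products that appear only in the inventory (no entitlement at all)
        // would also need checking in a real tool; omitted here for brevity.
        for (Map.Entry<String, Integer> entry : entitlements.entrySet()) {
            int owned = entry.getValue();
            int installed = inventory.getOrDefault(entry.getKey(), 0);
            System.out.printf("%s: owned=%d installed=%d -> %s%n",
                    entry.getKey(), owned, installed, statusFor(owned, installed));
        }
    }
}
```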
The information used in SAM consists of entitlement data and inventory data, both of which have good uses in SAM. The following sections go through what is meant by entitlement and inventory.
2.1 Entitlement
Entitlement data is the data the company has about its software licences, such as contracts, license certificates, invoices, CDs, boxes and electronic license keys. Entitlement data shows and proves that the organization has the right to use the software, and it also describes what constraints there might be on usage, such as geographical restrictions or the possibility to use the licenses only in a certain legal entity.
It is quite common that this information is scattered across the organization and that, at the beginning, standards do not support SAM processes well. It is a task for SAM to influence other processes, such as purchase-to-pay (P2P), so that P2P starts to provide the information needed in SAM processes later in the life cycle of a software asset in the organization.
2.2 Inventory
Inventory data is the data that shows the company what assets it has and how those assets are configured. The data is gathered from the existing IT environment either manually or with an inventory tool. In corporate environments the amount of data rules out manual methods: changes to the environment happen more rapidly than they can be recorded manually without literally having a hundred people keeping the records up to date.
There are many inventory tools on the market today, but the selection of tools that both suit your organization’s needs and can be used as evidence in a possible licence review is quite limited.
Next we take a look at the case company: its size and the impact that SAM can have on it.
3 Case Company
The company for which this project was done is a large multinational telecom operator functioning in 17 countries. It has 25,000 employees and 190 million subscriptions. The company’s net sales were 10,000 million euros in 2013.
The corporation consists of many independent companies that have different ways of handling their operations. From the Group’s point of view, the aim is to achieve economies of scale for all wherever possible. In the software licence area this benefit comes from negotiation volumes in frame agreements between software vendors and the case company: one gets better discounts by combining the needs of 17 companies than when each of those companies tries to negotiate its own deals individually.
Another big emerging reality is that software vendors are losing revenue due to the downturn in the global economy. Their solution to decreasing revenues is to audit customers in order to find software installed without proper licensing and then sell those licences to the companies that have broken the license rules. Big software companies have said that they expect 30% of their future revenue to come from this auditing stream. As IT budgets in companies represent quite big spending, and software is the fastest-growing segment of the IT budget, the effect of unwanted license costs can be really significant, even millions of euros.
A rough estimate of the value of the licenses in scope is 450 million euros, with a yearly maintenance cost of around 20% of that value. This puts the saving potential at about 90 million euros in value, and in yearly operating cost it would mean a saving potential of 4.5 million euros in year one, assuming a 10% saving. So, no doubt, this is worth investigating.
4 Pre-study about present state
When the project was planned by the project team, we quickly realized that SAM tasks were fragmented across several units; there were many places in the organization where SAM tasks were done. So we decided that we had to map how the tasks were done in different parts of the company. After consideration, we decided to hold workshops in all the areas we felt had an impact on SAM.
We ended up having five separate workshops and prepared questions for the participants beforehand so that they could better understand what angle we wanted to take on SAM. Since responsibility for SAM is not managed centrally, we did not have any common way of working with SAM at the corporate level. We divided the workshops according to a model of the license life cycle in our own organization; this model can be taken as a general model of the license lifecycle in an organization, and it is illustrated in Picture 1 below.
License Life-cycle
- Contract negotiation
- Requisition
- Purchase
- Deployment
- Upgrading / Maintenance
- Retire / RIP

Picture 1. The license life-cycle shows the steps in SAM.
The first step in the life cycle is contract negotiation; this is the step where the organization makes an agreement with the vendor. The agreement covers, for example, the volumes to be purchased, prices and other license terms valid between the organization and the vendor.
In the requisition phase the organization notices a need for additional licenses. If a check shows that no existing licenses are available, the next step is the purchasing phase; if licenses do exist, the next step is deployment, which is explained later in this chapter.
In the purchase step, one does all the tasks needed to ensure that the organization’s procurement process is followed and that all necessary information is recorded in the required systems. At a minimum this means the financial systems and the SAM systems.
In the deployment step the software itself is deployed and all required information is recorded in the needed databases so that production runs smoothly, like a well-oiled machine.
In the upgrading/maintenance step there is a checkpoint: at least every year, the whole installation base of any particular software is assessed and mirrored against the purchases. The intention is also to renew the support contracts that are needed and to terminate those no longer needed by the organization.
The final step in the lifecycle of a license is its retirement. In this step it is made sure that all installations of the software in question are removed. Maintenance contracts with the vendors must also be terminated, and an equally important task is to remove these assets from the financial systems.
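To summarize the steps above, the following is a minimal, illustrative sketch (not the case company’s system) that models the life-cycle stages of Picture 1 and the branch at requisition, where an existing free licence is reused before a new one is purchased.

```java
import java.util.Arrays;

// Illustrative sketch of the licence life-cycle stages described above
// (Picture 1), including the requisition branch: reuse an existing free
// licence when one is available, otherwise purchase before deploying.
public final class LicenceLifecycle {

    public enum Stage {
        CONTRACT_NEGOTIATION, REQUISITION, PURCHASE,
        DEPLOYMENT, UPGRADING_MAINTENANCE, RETIREMENT
    }

    /** Decide which stage follows requisition for a given product. */
    public static Stage afterRequisition(int freeLicencesInPool) {
        return freeLicencesInPool > 0 ? Stage.DEPLOYMENT : Stage.PURCHASE;
    }

    public static void main(String[] args) {
        // Nominal order of the stages; real cases may loop through
        // upgrading/maintenance until the licence is finally retired.
        System.out.println(Arrays.toString(Stage.values()));
        System.out.println(afterRequisition(0)); // PURCHASE
        System.out.println(afterRequisition(3)); // DEPLOYMENT
    }
}
```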
From these lifecycle steps we identified a need for five separate workshops, so that the topics could be covered manageably with a reasonable number of people while keeping the focus strictly on the issues each workshop was meant to address. The workshops are shown in Picture 2. There was a kick-off workshop to which we invited all the relevant people in order to share our view of things with all parties in an effective manner. The remaining four workshops are presented along with their results in the next chapter.
- Kick off workshop
- Procurement workshop
- Purchase and Contract management workshop
- Software installation and inventory workshop
- Configuration management workshop

Picture 2 Workshop structure
For the workshops we identified the key persons who had the best possible knowledge of the current situation from their own point of view, so the participants were people who actually knew best what the reality in the company is. We decided to record all the meetings so that we could go back to the recordings if we disagreed about what the participants had told us. This proved to be a good practice: even within the research core team we sometimes interpreted something that was said in different ways, and the recordings helped us establish the right understanding among all team members. We also took separate observation notes during the workshops, trying to capture the non-verbal messages the participants might have. After each workshop the core group gathered, drew conclusions about the workshop and adjusted the next one to be better than the previous. Next we go through all the workshops and their results.
4.1 Procurement workshop
The first workshop dealt with IT procurement; there were two participants from IT procurement and one person from the financial IT systems department. In the workshop, IT procurement presented the role and responsibilities procurement has in Software Asset Management. Procurement is the function that negotiates all corporate-level frame agreements with vendors. Their strategy is to negotiate global contracts in order to reduce prices and to get better terms and conditions by using economies of scale as bargaining power.
We saw immediately that the negotiation phase is in rather good shape and creates a competitive advantage for the whole group. On the financial side we found room for improvements; these were confirmed in the next workshop, and we dive into those issues in the next section.
4.2 Purchase and Contract management workshop
In the second workshop we had five participants: one from the financial IT department, three from production responsible for license support, and one in a financial controller role.
There were improvement items already raised in the previous workshop that were confirmed in this one. One issue was the lack of a software catalogue in the company: people were ordering a wide variety of overlapping software in parallel, unable to check for already existing licenses, which makes efficient SAM very challenging. Another issue was the free-text information in Purchase Orders (PO). There were not enough mandatory predefined fields to keep the data accurate, so handling the information produced by the purchase process carries a high risk of errors. Using this information for any inventory purpose would require manual work; in an enterprise setting this is very challenging, since the number of POs may run into the thousands.
Three conclusions came out of the workshop. First, there is a clear need for catalogue-based purchasing of the most used licenses, in order to reach a data quality that can be utilized in SAM. Second, centralizing the purchases into one unit would bring the company a lot of benefit. Third, the cost reporting for licenses could be much more accurate than it is.
4.3 Software installation and inventory workshop
The third workshop had the largest group of participants, because the most important tasks for software license management are done in this phase. There were five license managers, two persons from the configuration management database function, and three application operations persons responsible for installing the software into production.
There were four main conclusions from this workshop. First, the licenses outside Group IT Infra were not centralized and therefore not under control, which means a higher risk of non-compliance. Second, sufficient inventory information is getting better thanks to system development that is already in progress. Third, the mapping from software installation identification in discovery data to the actual license asset information in the configuration management database must be defined for each software product. Fourth, license management will in the future prevent illegal installations via software discovery.
4.4 Configuration management workshop
In the final workshop there were three participants, all of them in a management role for the configuration management database. In the configuration management area there are actually several different databases up and running, due to historical reasons and working methods that differ considerably depending on who is, so to say, in the driver's seat. The conclusion from this workshop was that contract, software and license assets are not treated as configuration items in the configuration management database.
4.5 Conclusions from workshops
As a conclusion from the workshops we found out the following:
1. There is a lot of data related to software license management available in different IT systems, but the data is not integrated between the systems. Partly the same data is updated both manually and automatically in different systems.
2. A top-level information model is missing, e.g. a description of what can be found from where.
3. There are functionalities (modules) in the systems which have not been taken into use.
4. The data is not structured: a lot of free text is used and several data items are optional for the end user to fill in. This loses the possibility of building a comprehensive information view of the data in the systems.
5. The overall, deliberately provocative, observation is that we have not used the IT systems in the way they are meant to be used, and a lack of discipline in the working methods has caused the present situation. The business requirements for the focus area have never required detailed, accurate and integrated information.
To our disappointment we realised that we did not have as fast a track to setting up a system as we had thought earlier. These results prevented us from gaining the quick win of using an existing system as a building block for the future; too much was missing. After carefully going through the situation in the project steering group, we decided to proceed with the development project described next.
5 Development model
After we had built up the picture of the present state in the company, we decided to aim at developing a common way of working across the company, including processes and tools to support it. Due to the strict time limit of the project we took a practical route and started by defining all the use cases for the tool we would use as the SAM tool in the company. This was done in order to communicate the needs the tool had to fulfil in order to tackle SAM in the most efficient way. Once the use cases were done, we could map whether any of our existing tools could solve the issues we had, and estimate the costs such an exercise would create, in order to calculate the business case later on.
We had two candidate models for illustrating the task at hand: the Rummler-Brache diagram, also called a swim lane diagram, and the RACI matrix. Next we will look at both models and the reasoning why we ended up with the swim lane diagram. Both models are used frequently in the case company, and because they are the most familiar to employees, we limited the choice to these two.
5.1 RACI matrix
The RACI matrix is quite a common tool in project management for describing and tracking responsibilities for tasks that need to be executed in a project, a process or the company in general. The name comes from Responsible, Accountable, Consulted and Informed. Responsible is the one who does the work itself. Accountable is the one who answers for the outcome, in other words the person whose head will roll if things end up failing. Consulted is someone who has more knowledge of the task at hand and can give advice if problems occur. The Informed party is someone who depends, for example, on the result or the progress of the task at hand.
In a RACI matrix all tasks are listed in a spreadsheet-style format, and each of them must have at least a Responsible and an Accountable party. The benefit of this model is that it clearly shows who is responsible for doing what. This quite efficiently prevents the hazard of assuming that a task is someone else's problem, and it makes sure that every task has someone who is responsible and someone who is accountable for it. Below is an example of a RACI matrix, just to illustrate what the matrix looks like.
<table>
<thead>
<tr>
<th>Task</th>
<th>Responsible</th>
<th>Accountable</th>
<th>Consulted</th>
<th>Informed</th>
</tr>
</thead>
<tbody>
<tr>
<td>Plan project</td>
<td>Project Manager</td>
<td>Sponsor</td>
<td>Subject matter experts</td>
<td>Stakeholders</td>
</tr>
<tr>
<td>Plan requirements</td>
<td>Business analyst</td>
<td>Project manager</td>
<td>Subject matter experts</td>
<td>Sponsor</td>
</tr>
<tr>
<td>Analyse requirements</td>
<td>Business analyst</td>
<td>Business analyst</td>
<td>Subject matter experts</td>
<td>Project manager</td>
</tr>
<tr>
<td>Testing</td>
<td>Quality Assurance</td>
<td>Project manager</td>
<td>Business analyst, Developers</td>
<td>Sponsor, Team</td>
</tr>
</tbody>
</table>
Table 2 Example of a RACI matrix
The finished RACI matrix itself is not the most important thing; making it is more significant, because building the matrix requires a lot of discussion, and once the matrix is ready, all parties that have tasks in the current project have a clear vision of what is expected from them, enabling everyone to aim towards a common goal. (Best Project Management Software Reviews 2014, accessed 13.11.2014)
A weakness of this model is that it is not very illustrative, which might delay development. Therefore we also picked another model that we wanted to compare against and possibly use for the descriptions. That model is introduced next.
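As an illustration of the structure behind Table 2, the sketch below models a RACI row as a plain Java data structure and checks the rule stated above, that every task must have at least a Responsible and an Accountable party. The class and method names are hypothetical; they are not taken from any tool used in the case company.

```java
import java.util.EnumMap;
import java.util.List;
import java.util.Map;

enum RaciRole { RESPONSIBLE, ACCOUNTABLE, CONSULTED, INFORMED }

// One row of a RACI matrix: a task plus the parties assigned to each role.
record RaciTask(String name, Map<RaciRole, List<String>> assignments) {

    // The rule from the text: every task needs at least one Responsible
    // and at least one Accountable party.
    boolean isWellFormed() {
        return !assignments.getOrDefault(RaciRole.RESPONSIBLE, List.of()).isEmpty()
            && !assignments.getOrDefault(RaciRole.ACCOUNTABLE, List.of()).isEmpty();
    }
}

public class RaciExample {
    public static void main(String[] args) {
        Map<RaciRole, List<String>> planProject = new EnumMap<>(RaciRole.class);
        planProject.put(RaciRole.RESPONSIBLE, List.of("Project Manager"));
        planProject.put(RaciRole.ACCOUNTABLE, List.of("Sponsor"));
        planProject.put(RaciRole.CONSULTED, List.of("Subject matter experts"));
        planProject.put(RaciRole.INFORMED, List.of("Stakeholders"));

        RaciTask task = new RaciTask("Plan project", planProject);
        System.out.println(task.name() + " is well formed: " + task.isWellFormed());
    }
}
```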
5.2 Rummler-Brache Diagram
The second model used to describe the use cases is the classical swim lane diagram, also known as the Rummler-Brache diagram. Proposed in their book Improving Performance (1990), it allows one to follow a process flow between teams, departments and even between different processes.
Like other process diagramming techniques, in the Rummler-Brache method you map processes linearly as a series of tasks across the page. Lines and arrows between tasks represent the flow of information, goods or work in progress, and they also represent changes in responsibility.
The difference, and the genius, of Rummler-Brache is the use of "swim lanes": horizontal rows. Think of a swim meet where each competitor has their own lane to swim in: in this diagramming method, each swim lane may belong to an individual, a team, a department, or any other organizational unit you choose.
These diagrams are a great tool for describing not only the process flow but also the responsibilities behind it, highlighting the risky hand-overs to other teams and departments involved in a particular process flow.
As a description method it is also useful because an already described process can be reused as a sub-process of a new task: simply pointing to the existing process reduces complexity and the chance of re-inventing the wheel, that is, of trying to resolve something that has already been resolved. This matters because a problem usually has a multitude of possible solutions, whereas in a process-driven situation the key to success is that the output is always in the same format, so that the next step in the process flow always gets the same kind of input for the same kind of problem. That lets each step concentrate on solving the underlying cause instead of guessing what the input really means this time. Below, in picture 3, you can see an example of a swim lane diagram. (Mind Tools, accessed 13.11.2014)
6 Development Project
We ended up using the swim lane model, since we needed a tool for future discussions to bring people on board with what we wanted to achieve as the end result for users. The proof of whether something works is in the results the model can deliver to the people who need to be convinced that this work is necessary and worthwhile for the company.
We identified 11 different use cases and described them in such a way that they are applicable regardless of which tool we would later select. This also ensures that we do not try to fix the problem by just introducing yet another tool.
Use cases are as listed below:
1. License enquiry
2. License purchase
3. Extend license support
4. License Support check
5. Use rights addition
6. License compliance check
7. Incompliance
8. Software decommission
9. Server decommission
10. License decommission
11. License optimization
We identified a total of five roles that function as swim lanes in our diagrams. We chose role as the dividing factor because we wanted the model to be independent of organizational units, so that it would survive the organizational renewals that are quite frequent everywhere. We also saw the model more as guidelines for how things should be handled, as opposed to building a huge centralized "office" or other function. It is more beneficial for the organization that the individuals working around SAM use the same kinds of processes and procedures, in order to grasp the global view of software assets in the future.
The roles identified were License User, License Manager, License Owner, Finance and CESAM. License User is the role of the person who looks after a system that uses licensed software. Usually these persons are engineers or other technical personnel in the organization.
License Manager is the role of the person who looks after a particular software product's or vendor's licenses in the company. The License Manager is usually responsible for purchasing, allocating and retiring licenses. He or she also assists IT Procurement in the contract negotiations between the organization and the vendor.
The License Owner is the role of the person who owns the budget for the licenses. Usually this role goes hand in hand with ownership of the IT systems that utilize the software, but that is not a mandatory requirement.
Finance is the role that receives the invoices, checks that they are correct and books them to the appropriate cost center in order to keep track of the financials in the organization. This is also important for profit calculations, as licenses are part of the products' cost structure.
CESAM, or Central Software Asset Management, is the corporate group function that owns and develops the organization's SAM model. Its role is also to handle the software audit requests that vendors may direct at the organization.
6.1 Use case License enquiry
The first use case is License enquiry, shown in picture 3 below. This is the simplest use case in SAM. Two roles are involved: License User and License Manager.
It describes the situation where a License User identifies a need to use software that requires a license. The trigger can be either a new system or the expansion of an existing one. In both cases the License User creates a request to the License Manager, who checks from the SAM tool whether there are available use rights that can be allocated to this particular need. If there are, the needed licenses are allocated to the new or expanded system.
Otherwise the request triggers a sub-process called Order new license / License Purchase, and once the purchase of the licenses is complete, the allocation is done. After these steps the License User can install and start to use the software that was needed.
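The decision logic of this use case is small enough to sketch in code. The sketch below is only illustrative: LicensePool and orderNewLicenses are hypothetical stand-ins for whatever SAM tool and purchase sub-process the company would eventually use.

```java
// Illustrative sketch of the License enquiry decision: allocate from the
// existing pool if possible, otherwise trigger the purchase sub-process first.
class LicensePool {
    private int useRights;   // entitlements recorded for this software
    private int allocated;   // entitlements already allocated to systems

    LicensePool(int useRights, int allocated) {
        this.useRights = useRights;
        this.allocated = allocated;
    }

    int available()              { return useRights - allocated; }
    void addUseRights(int count) { useRights += count; }

    void allocate(int count) {
        if (count > available()) {
            throw new IllegalStateException("not enough use rights");
        }
        allocated += count;
    }
}

public class LicenseEnquiry {

    // Placeholder for the "Order new license / License Purchase" sub-process.
    static int orderNewLicenses(int count) { return count; }

    static void handleRequest(LicensePool pool, int needed) {
        if (pool.available() < needed) {
            pool.addUseRights(orderNewLicenses(needed - pool.available()));
        }
        pool.allocate(needed);
    }

    public static void main(String[] args) {
        LicensePool pool = new LicensePool(10, 8);  // 2 use rights free
        handleRequest(pool, 5);                     // forces a purchase of 3
        System.out.println("Remaining use rights: " + pool.available());
    }
}
```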
The next use case we go through is the sub-process introduced in the License enquiry flow: Order new license / License Purchase.
6.2 Use Case License Purchase
This use case is triggered by the need to purchase new licenses for the organization. Purchases are made against pre-negotiated contracts with vendors. The License Manager places the purchase order in the financial system with all the needed information, so that there is good knowledge of what was purchased.
After the order is placed, the License Owner checks whether he or she has budget that can be used for this license or these licenses. If the budget is there, the License Owner simply approves the purchase order. If the budget lacks funds for the purchase, it is his or her responsibility to extend the budget. If the budget cannot be extended, the License Owner has to reject the purchase order. Rejection means that the needed licenses cannot be delivered, so the License Manager has to give a very clear message to the License User that the software must not be installed under any circumstances, as that would cause the organization to breach its agreement with the vendor. Extending a budget should be a rare event, since the organization should have forecasting in place that predicts this kind of need.
When the License Owner approves the purchase order, the financial system sends the order to the vendor. The vendor acknowledges the order and delivers the goods; today, usually nothing tangible is sent to the customer besides the invoice. The License Manager confirms the goods as received in the financial system, which activates the investment in the financial system and in the bookkeeping. The License Manager then records the obtained license or licenses in the SAM tool, in the license pool that represents this particular software. After all this, the licenses can be allocated to any particular use.
6.3 Extend License Support
This use case is triggered once a year, when it is time to decide whether the license support will be extended. The decision has a financial impact on the company: the support fees are usually around 20% of the purchase price of a license, so they affect the total cost of ownership of the licenses. One does not want to pay support for licenses that are not in use and will not be used in the future.
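To illustrate why this figure matters, a rough total-cost-of-ownership estimate can be written down directly from the 20% assumption; the exact percentage naturally varies by vendor and contract.

\[
TCO(n) \approx P + 0.2\,P\,n = P\,(1 + 0.2\,n), \qquad \text{so } TCO(5) \approx 2P,
\]

i.e. with support fees at roughly 20% of the purchase price \(P\), five years of support approximately doubles what a license has cost, which is why support on unused licenses is worth terminating.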
The use case starts with the need to renew the support for the licenses in question. The License Manager creates a purchase order in the financial system for the right licenses and for the proper amount of money. After that the License Owner makes sure that he or she has money reserved for this support in the budget. If that allocation does not exist, it is the License Owner's responsibility to get it added to the budget.
Since in most cases the money already exists in the budget, the License Owner simply approves the purchase order and the License Manager records the goods received for this support extension. The License Manager also updates the new end date for the renewed license support, so that the use case is triggered again when the contract term is about to end.
If the License Owner cannot, for some reason, secure the money, the purchase order is rejected and the licenses end up without support. What this means depends on the contract terms: in some cases the company will not get technical support if something is wrong with the software; in other cases the right to use the software might be removed. For this reason, whenever support is dropped, the impact of the decision must be gone through very carefully.

Picture 6 Extend License support use case
6.4 License support check
The next use case is very simple, but the License Manager needs it in order to check where a particular license stands regarding its support status. In the normal situation all licenses should have support enabled, but in some cases support is removed in order to optimize costs, for example when a system will be removed in the near future, its usage is very low, and the support cost would be too high relative to the business risk of something going wrong in the system in question.
The use case is triggered by the License Manager. He or she checks whether the licenses in question have a support agreement and whether it has been paid according to the agreement. This can be verified from the financial system, because it is kept up to date by the Extend license support use case. As a result, the License Manager knows whether the licenses do or do not have support.
6.5 Use rights addition
Licenses give the right to use software, so when licenses are purchased for the company the corresponding use rights need to be recorded. In most cases this use case is triggered as a sub-use case of the License purchase use case, but we identified that use rights may also be added without a purchase, for example in mergers or acquisitions, so we ended up describing it as its own use case.
It starts with the need to create or extend a license pool in the company with new licenses. As described earlier, licenses have different mechanisms for producing the right to use; this is the expertise the License Manager brings to the use case. The License Manager creates the rules describing what kind of right to use each license produces and then adds the proper number of licenses to the Software Asset Management system. After adding the licenses, the License Manager checks that everything is correct in the system. If everything is in order, the new licenses and their usage entitlements are recorded and everything is as it should be.
If something is wrong in the system, the License Manager informs Centralised Software Asset Management (CESAM) that there is a problem in the system or in the way the licenses have been added to it. CESAM has a deeper understanding of the SAM system, so it acts as the second line of support with the resources to tackle the harder cases that may occur, and it is CESAM's responsibility to find the solution to the problem at hand and implement the fix.
6.6 License compliance check
The License compliance check use case is triggered by the need to review that everything is in order for a particular license or vendor. The trigger can be internal, such as a periodic internal check-up that everything is as it should be, or external, such as a license review requested by the vendor in question.
Because the inventory data of installations is automated in the CMDB, and the other license management use cases keep the data up to date, making this comparison is quite easy. The license compliance check is run and, based on the result, the system concludes that the company is either compliant or not. If the company is compliant, the results are presented as such. If there is a situation of non-compliance, it must be fixed; this is presented in the next use case. In the case of non-compliance, CESAM and the License Owner are informed about the situation, so that they are aware of it and can resolve it if the License Manager is not able to. Non-compliance must always be resolved one way or another, so that the company does not remain in a non-compliant state; how this can be done is shown in the next use case, Incompliance.
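At its core the check is a per-product comparison between discovered installations and recorded use rights. The sketch below shows that comparison in isolation; the product names, counts and method names are made up for illustration and do not come from the case company's systems.

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative compliance comparison: CMDB installation counts versus the
// use rights recorded in the SAM tool. An empty result means compliant.
public class ComplianceCheck {

    static Map<String, Integer> shortfalls(Map<String, Integer> installations,
                                           Map<String, Integer> useRights) {
        Map<String, Integer> result = new HashMap<>();
        installations.forEach((product, installed) -> {
            int entitled = useRights.getOrDefault(product, 0);
            if (installed > entitled) {
                result.put(product, installed - entitled);  // licenses missing
            }
        });
        return result;
    }

    public static void main(String[] args) {
        Map<String, Integer> installed = Map.of("DatabaseX", 12, "MiddlewareY", 4);
        Map<String, Integer> entitled  = Map.of("DatabaseX", 10, "MiddlewareY", 5);

        Map<String, Integer> gaps = shortfalls(installed, entitled);
        System.out.println(gaps.isEmpty() ? "Compliant" : "Incompliant: " + gaps);
    }
}
```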
6.7 Incompliance
In the case of non-compliance the situation is simple: the company has more installations than the rights to use produced by the licenses it owns. There are two ways to correct this, either purchase more licenses or get rid of the installations that are not needed. When one or both of these corrective actions are done, the result should always be compliance. The software decommission use case is introduced later.
The License Manager is responsible for making the necessary purchases, if needed, according to the purchase use case. CESAM and the License Owner are kept in the loop when non-compliance cases are resolved, because they need to be informed about these cases; for example, if the vendor wants a review with the company, there should be no unpleasant surprises that might harm the company's image.
6.8 Software decommission
Software decommission is triggered when the need for a software product or system ends. For example, a new financial system means that eventually the old existing system will be shut down; all licenses it used should then be freed, and all installations must also be removed so that usage has really ended. This makes sure there can be no inconvenient situation where the vendor demands licenses for software that is not in use although it is still installed on servers.
The License User knows best when the need for license usage ends, so the user makes a change request to the Change Manager, who coordinates the removal of the software from the server. The operational unit uninstalls the software, and the Change Manager removes the installation from the CMDB, freeing up the license for other use. The Change Manager also sends a notification about the change to the License Manager, so that the License Manager can follow what is happening inside that license pool.
6.9 Server decommission
The server decommission use case is very similar to the software decommission use case. The only significant reason for describing it separately is that server decommissioning is usually handled, and seen, as a separate process in the case company. Also, in server decommissioning there is usually more than one software installation to remove, so within the server decommission use case the software decommission case is effectively run multiple times.
6.10 License decommission
The license decommission use case is triggered, for example, when the company decides to get rid of some software or system. In that case the License Manager investigates whether there are any installations of the software in question in the case company. If there are none, the License Owner can decide on removing the licenses. If installations exist, they must be removed before the licenses can be decommissioned; and if there is still a real need for the software, the license termination cannot be done, because it would put the company into a non-compliant state, which is not acceptable.
In some cases it makes sense to recycle the licenses inside the company for some other use, in which case the licenses are kept. If the licenses can be removed, there is still a decision to make: terminate them or put them on the shelf. When shelving licenses, the support for them is usually terminated; when re-activating shelved licenses, the vendor usually requests an activation fee and requires a new support contract to be ordered. Therefore it sometimes makes more sense to terminate the whole license in order to avoid any future liabilities related to the old license.

6.11 License optimization
License optimization is the most advanced use case from the SAM maturity point of view. It is a proactive use case where the License Manager utilizes the more advanced features that license management tools offer today. Such a tool can recommend alternative licensing models for the installations the company has, in order to minimize the costs the installations create. The License Manager can keep optimizing license costs even when vendors change their licensing models, as they do from time to time in order to maximize their own profits.
Picture 14 License optimization use case
7 Conclusions
This paper has described a project run in the case organization. The project aimed to produce a current state analysis of Software Asset Management and, based on the learnings from that analysis, to define use cases for a SAM tool. The use cases were written so that they can be used regardless of which tool is chosen or used later by the organization.
The project was successful, delivering both the current state analysis and the use cases for the SAM tool. Because the use cases do not depend on the tool itself, they can also be utilized if, in the future, we look at tools other than those we already have in the case company.
The model shown above was produced as a result of the workshops that showed us the current state of software asset management. Originally the model had only seven use cases, but after reviewing it with the whole CESAM group we extended it to the present 11 cases, so that it covers the whole life cycle of license management.
The model was also presented to everyone who participated in the workshops, in order to get their view on it. Some minor adjustments were made as a result of those reviews. The model was approved as the blueprint for future development by the Director of CESAM.
The most important finding from this project was the clear need to improve processes, roles and responsibilities in the case company. Even though plenty of good things and capable people were found, there is still very good value in pursuing savings and efficiency by improving SAM activities.
8 Recommendations
My personal recommendation is that there should be a larger SAM improvement initiative in the company. The saving potential suggests that this is a sensible thing to do, as long as the investment costs are kept under control. Improvement in this field could also be an asset in negotiations of future software agreements, because with improved internal processes and tools the case company is more trustworthy in the eyes of vendors. Further development would also mean faster responses to license reviews, saving time, money and effort for all parties involved.
References
Snyder, William R. (2010), *Identifying the Three Key Reasons for Tracking IT Assets, and Why IT Asset Managers Should Care*, Gartner G00208090
Snyder, William R., (2012), *Gartner Survey Shows How to Save Money with ITAM*, Gartner G00225114
Barber, Victoria, (2010), *Best Practice for Software Asset Management: Identify and Locate Entitlement Data*, Gartner G00174730
Barber, V., et al., (2010), *Best Practice for Software Asset Management: Software Inventory; Locating and Identifying Installed Software*, Gartner G002877
Best Project Management Software Reviews 2014, Understanding Responsibility Assignment Matrix (RACI Matrix), [WWW document]
http://project-management.com/understanding-responsibility-assignment-matrix-raci-matrix/
Mindtools Ltd, Swim Lane Diagrams, [WWW document]
Formal Analysis of Concurrent Java Systems
Peter H. Welch
Computing Laboratory
University of Kent at Canterbury
CT2 7NF, UK
Jeremy M.R. Martin
Oxford Supercomputing Centre
Wolfson Building
Parks Road
Oxford OX1 3QD, UK
Abstract. Java threads are synchronised through primitives based upon monitor concepts developed in the early 1970s. The semantics of Java’s primitives have only been presented in natural language – this paper remedies this with a simple and formal CSP model. In view of the difficulties encountered in reasoning about any non-trivial interactions between Java threads, being able to perform that reasoning in a formal context (where careless errors can be highlighted by mechanical checks) should be a considerable confidence boost. Further, automated model-checking tools can be used to root out dangerous states (such as deadlock and livelock), find overlooked race hazards and prove equivalence between algorithms (e.g. between optimised and unoptimised versions). A case study using the CSP model to prove the correctness of the JCSP and CTJ channel implementations (which are built using standard Java monitor synchronisation) is presented. In addition, the JCSP mechanism for ALTing (i.e. waiting for and, then, choosing between multiple events) is verified. Given the history of erroneous implementations of this key primitive, this is a considerable relief.
Keywords: Java, threads, monitor, CSP, JCSP, CTJ, formal verification.
1 Introduction
Java has a built-in concurrency model based upon threads and monitors. It is simple to understand but very hard to apply. Its methods scale badly with complexity. Almost all Java multi-threaded codes making direct use of these primitives that we have seen (including our own) have contained race hazards – with some of our own remaining undetected for over two years (although in daily use, with their source codes on the web and their algorithms presented without demur to several Java-literate audiences). Our failures only showed themselves when faster JITs (Just-In-Time compilers) enabled certain threads to trip the wrong way over unspotted race hazards, corrupting some internal state that (in due course) led to deadlock. Debugging the mess was not easy – fortunately, the application was not safety-critical!
We regard this as evidence that there is something hard about Java multithreading. We are not alone in this opinion – numerous warnings circulate on the web (e.g. [4] from Sun’s own web pages).
Java monitors, therefore, are not language elements with which we want to “think” – at least, not without some serious help. The first step in getting that help is to build a formal model that describes what is happening. The particular semantics given here is a CSP (Communicating Sequential Processes)[5] one. The importance of CSP is that it is an algebra for concurrent systems – a formal piece of mathematics with which we can specify requirements precisely (including properties like deadlock-freedom) and prove that our implementations satisfy them. Further, some powerful and mature CSP tools can be applied – for example, FDR (Failures-Divergences-Refinement) from Formal Systems Ltd.[2] and Jeremy Martin’s deadlock/sat checker[9, 10].
There is some - but, worryingly, not widespread - concern in the Java community about the absence of such a formal model (e.g. see [12]). Without it, we will always remain uncomfortable about the security of any multithreaded product. As Tony Hoare said in his 1980 Turing Award speech[6], there are two kinds of computer systems that sell:
- those that are obviously right ...
- and those that are not obviously wrong ...
and he noted that it’s much easier, of course, to produce the latter. Guess which kind we are peddling! We wonder how many surprises will pop up when we start applying CSP tools to Java codes?
This paper extends the original presentation of this model[17]. The case studies include verification of the JCSP channel implementation (Sections 3-5), the CTJ channel (Section 6) and the JCSP ALTing mechanism (Section 7).
Reaching the last of these three goals was the real motivation behind the development of this formal model for Java monitor operations. Although the code – two interacting monitors hit by many threads – fits on to less than two pages (see Section 7), its safety analysis repeatedly fooled professional Java experts. The original[15] JCSP implementation of ALTing, which had the same length as the one presented here, was declared safe – albeit with a certain amount of finger crossing! Two years later, when we were finally feeling comfortable with it, we had quite a shock when it suddenly deadlocked.
This monitor implementation of ALTing is not particularly lengthy or complex. Modern and near future systems will demand multithreaded code synchronisation that will be at least as difficult. Many of these systems will be safety-critical, where in-service failure costs lives. It is for such reasons that this formal model is offered.
2 The CSP Model
The key Java primitives for thread synchronisation are:
- synchronized methods and blocks;
- the methods wait, notify and notifyAll of the Object superclass.
Their informal (natural language) semantics will be briefly summarised as we build their CSP model. Otherwise, we assume familiarity with CSP and Java basics.
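As a concrete reminder of how these primitives are typically used (this example is ours, not part of the model), the following class implements a one-slot buffer with the standard guarded-wait idiom: a synchronized method, a while-loop guard around wait, and notifyAll when the state changes.

```java
// A one-slot buffer using Java's built-in monitor primitives.
// put() blocks while the slot is full; get() blocks while it is empty.
public class OneSlotBuffer<T> {

    private T slot;  // null means the buffer is empty

    public synchronized void put(T item) throws InterruptedException {
        while (slot != null) {   // guard re-checked after every wake-up
            wait();              // releases this object's monitor lock while waiting
        }
        slot = item;
        notifyAll();             // wake any thread blocked in get()
    }

    public synchronized T get() throws InterruptedException {
        while (slot == null) {
            wait();
        }
        T item = slot;
        slot = null;
        notifyAll();             // wake any thread blocked in put()
        return item;
    }
}
```

Even this small example already exercises everything the CSP model has to capture: entry to and exit from synchronized methods, joining the wait-set, and notification.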
2.1 Objects and Threads
We shall model a system consisting of a set of Java objects and threads. Let Objects be an enumeration of all Java objects. For any particular Java application being CSP-modelled, this can be restricted to just those objects on which any threads in the application are ever synchronized. Usually, this will be finite and small – for example:
\[ \text{Objects} = \{0, 1, 2\} \]
Let Threads be an enumeration of all Java threads. For any particular Java application being CSP-modelled, this can be restricted to just those threads that are created and started. Sometimes, this may be unbounded or large – for example:
\[ \text{Threads} = \{0, 1\} \]
2.2 Synchronisation Events
We define a collection of channels to model Java’s synchronisation events:
\[ \text{channel claim, release, waita, waitb, notify, notifyall : Objects.Threads} \]
This introduces six families of channel, with each family indexed by an object and a thread. For example \( \text{claim.o.t} \), where \( o \) is in \( \text{Objects} \) and \( t \) is in \( \text{Threads} \).
2.3 The User Process Interface to Java Monitors
We define the Java programmer’s interface to monitors. For the moment, we’ll ignore recursive locks by a particular thread on a particular object (i.e. the re-acquisition of a monitor lock by a thread that already has it). This can easily be handled by using processes to represent the relevant lock counts. Also, we will set aside the possible InterruptedException that may get raised by the \( \text{wait} \) method. Our model can be simply extended to account for this but these extensions will be reported in a later paper.
Entry and exit to a synchronized block or method, \( o.\text{wait()} \), \( o.\text{notify()} \) and \( o.\text{notifyAll()} \) are modelled, respectively, by the following five processes:
\[
\begin{align*}
\text{STARTSYNC}(o, me) &= \text{claim.o.me} \rightarrow \text{SKIP} \\
\text{ENDSYNC}(o, me) &= \text{release.o.me} \rightarrow \text{SKIP} \\
\text{WAIT}(o, me) &= \text{waita.o.me} \rightarrow \text{release.o.me} \rightarrow \\
& \quad \text{waitb.o.me} \rightarrow \text{claim.o.me} \rightarrow \\
& \quad \text{SKIP} \\
\text{NOTIFY}(o, me) &= \text{notify.o.me} \rightarrow \text{SKIP} \\
\text{NOTIFYALL}(o, me) &= \text{notifyall.o.me} \rightarrow \text{SKIP}
\end{align*}
\]
where \( me \) is the thread performing the action.
The interesting one is the \( \text{WAIT}(o, me) \) process. The first event \( \text{(waita.o.me)} \) puts its invoking thread \( (me) \) in the \( \text{wait-set} \) of the monitor object \( (o) \) – see Section 2.4.2. The second event \( \text{(release.o.me)} \) releases the lock it was holding on the monitor object \( (o) \) – see Section 2.4.1. The third event \( \text{(waitb.o.me)} \) represents its commitment to leave the \( \text{wait-set} \) of \( (o) \). The final event \( \text{(claim.o.me)} \) is its re-acquisition of the monitor lock.
Note that this \( \text{WAIT}(o, me) \) process has been modified from the original version of this model [16]. At first, we had the \( \text{release} \) event preceding the \( \text{waita} \). Subsequent FDR analysis threw up some unexpected deadlocks and we returned to the Java definition document, which revealed a misunderstanding in our interpretation of the natural language explanation. This has now been corrected as described above: a thread needs to join the wait-set \( \text{before} \) releasing the monitor – otherwise it might miss being notified. Again, this shows the importance and usefulness of having a simple formal definition of these semantics.
2.4 Monitor Processes
Every Java object can be used as a monitor. In our model, there will be a monitor process, \( \text{MONITOR}(o) \), for each \( o \) in \( \text{Objects} \). This process is itself the parallel composition of two processes:
\[ \text{MONITOR}(o) = \text{MLOCK}(o) \parallel \text{MWAIT}(o, \{\}) \]
where \( MLOCK(o) \) controls the locking of object \( o \)'s monitor (i.e. deals with synchronized) and \( MWAIT \) controls the (initially empty) wait-set of threads currently stalled on this monitor (i.e. deals with \( \text{wait}, \text{notify}, \text{notifyAll} \)). The alphabet of \( MONITOR(o) \) is the union of its component processes, which are defined next.
### 2.4.1 Locking the Monitor
Each \( MLOCK(o) \) process is basically a binary semaphore. Once it has been claimed by a thread (i.e. entry to a synchronized method or block), only a release from that same thread (i.e. exit from the entered synchronized method or block) will let it go. If this were all it had to do, it could be simply modelled by:
\[
\begin{align*}
MLOCK(o) &= \text{claim.o.t} \rightarrow \text{release.o.t} \rightarrow MLOCK(o) \\
\alpha MLOCK(o) &= \{ \text{claim.o.t, release.o.t} \mid t \in \text{Threads} \}
\end{align*}
\]
However, one of the constraints in Java is that an \( o.\text{wait}() \), \( o.\text{notify()} \) or \( o.\text{notifyAll()} \) is only allowed if the invoking thread has the monitor lock on \( o \). In Section 2.3, these invocations are modelled by (user) processes commencing, respectively, with the events \( \text{waita.o.t} \), \( \text{notify.o.t} \) and \( \text{notifyall.o.t} \) (where \( t \) is the invoking thread).
This constraint is enforced by including these events in the alphabet of \( MLOCK(o) \), but refusing them in its (initial) unlocked state. In the locked state, these events are accepted but have no impact on the state:
\[
\begin{align*}
MLOCK(o) &= \text{claim.o.t} \rightarrow MLOCKED(o,t) \\
MLOCKED(o,t) &= \text{release.o.t} \rightarrow MLOCK(o) \\
&\quad \square \text{notify.o.t} \rightarrow MLOCKED(o,t) \\
&\quad \square \text{notifyall.o.t} \rightarrow MLOCKED(o,t) \\
&\quad \square \text{waita.o.t} \rightarrow MLOCKED(o,t) \\
\alpha MLOCK(o) &= \left\{ \begin{array}{l}
\text{claim.o.t, release.o.t,} \\
\text{notify.o.t, notifyall.o.t,} \\
\text{waita.o.t} \mid t \in \text{Threads} \\
\end{array} \right\}
\end{align*}
\]
### 2.4.2 Managing the Wait-Set
The \( MWAIT(o, ws) \) process controls the wait-set \( (ws) \) belonging to the monitor object \( (o) \). This set contains all threads that have invoked \( o.\text{wait}() \) but have not yet been notified.
New threads are added to the set via the \( \text{waita} \) channel. A notify event results in one thread being non-deterministically selected from the set and reactivated – if the set is empty, the event is still accepted but nothing changes. A notifyall event results in all the waiting threads being reactivated in some non-deterministic order
\[
\begin{align*}
MWAIT(o, ws) = \; & \big( \text{waita.o.t} \rightarrow MWAIT(o, ws \cup \{t\}) \big) \\
\square \; & \big( \text{notify.o.t} \rightarrow \text{if } |ws| > 0 \\
& \qquad \text{then } \big( \sqcap_{s \in ws} \, \text{waitb.o.s} \rightarrow MWAIT(o, ws \setminus \{s\}) \big) \\
& \qquad \text{else } MWAIT(o, \{\}) \big) \\
\square \; & \big( \text{notifyall.o.t} \rightarrow RELEASE(o, ws) \big)
\end{align*}
\]
and where:
\[
\begin{align*}
RELEASE(o, ws) = \; & \text{if } |ws| > 0 \\
& \quad \text{then } \big( \sqcap_{t \in ws} \, \text{waitb.o.t} \rightarrow RELEASE(o, ws \setminus \{t\}) \big) \\
& \quad \text{else } MWAIT(o, \{\})
\end{align*}
\]
\[
\alpha MWAIT(o) = \{ \text{waita.o.t}, \text{waitb.o.t}, \text{notify.o.t}, \text{notifyall.o.t} \mid t \in \text{Threads} \}
\]
### 2.4.3 Visualisation
One of the difficulties of working with threads and monitors is that it is hard to visualise what is happening. One of the strengths of CSP models is that they correspond to notions of hardware – *layered networks of components connected by wires* – that are easy to visualise and whose operations correspond to intuitive concepts of communication and synchronisation.
Figure 1: The Monitor Process (for Object o)
Figure 1 represents the CSP process enforcing the monitor rules for a Java object o. Each arrow represents an array of channels – one for each thread, t, that needs to synchronize, wait or notify on this monitor. The split channels are *broadcasters* – both sub-processes must input for the communication to take place.
An observation is that this is a rather specialised and complex object to be given as the *sole* primitive for synchronisation control of multithreaded systems. At least, that is in comparison with the CSP channel primitive, whose visualisation is as a bare wire!
### 3 A Case Study: the JCSP Channel
In Section 1, we said that we do not like to “think” at the level of Java monitors. Although the semantics of individual operations are simple enough and now formally defined, correct usage requires an understanding of how the monitor methods interact. This means that such methods cannot be designed or understood individually – their logics are very tightly coupled and all must be considered at the same time. This is compounded when different monitors invoke each others’ methods! This approach to design does not scale well.
Formal methods certainly help but even they may become overwhelmed without some discipline that eliminates the strong coupling between threads. That discipline can be provided by CSP itself.
Java’s flexibility means that we can ignore the built-in monitor model and build a high-level API to something that does scale and with which we can “think” - for instance, the occam/CSP model. JCSP[18] (CSP-for-Java) is a Java class library spun out from work started in the occam-for-all project in the Portable Software Tools for Parallel Architectures managed programme of the UK Engineering and Physical Sciences Research Council. It provides an occam3 concurrency framework for Java applications. CSP designs (with some occam-like caveats) can be directly crafted into Java code with no stress.
Currently, JCSP is built upon standard Java monitors and suffers (but does not increase) their overheads. Ultra-low overheads for process management, which lead to ultra-low latencies for event handling (e.g. for external message passing) are possible, although this would require building something close to (and derived from) our occam kernels into specialised JVMs. JCSP already supports shared-memory (SMP) concurrency, for which CSP primitives provide excellent control. JCSP does not currently support distributed memory architectures, although being derived from the old transputer model, of course it could.
If we can prove that the JCSP implementation of its primitives (e.g. channels) really gives us the corresponding CSP semantics, this will not only be a huge relief (since that was the original intention after all!), but it will also mean that formal analysis of JCSP designs can be done directly in terms of those primitives (and not on Java monitors). That will be a considerable simplification. Since CSP semantics are well-behaved under parallel composition, formal design and analysis of large multithreaded systems becomes practical. It will raise both our confidence in these systems and their real quality.
3.1 Visualisation of the Verification
3.1.1 Main Theorem
The theorem to be proved is that the system shown in Figure 2 is equivalent to the one shown in Figure 3.

Figure 2 shows two processes, A and B, communicating over a CSP channel c. Each process guarantees that no parallel writes (for A) or reads (from B) are allowed. The internal channel, c, is hidden from the outside world.
Figure 3 shows two processes, Aj and Bj, communicating via three intermediary processes. Aj is the same as A except that the CSP output, c ! mess, is changed to the JCSP method invocation, c.write (mess), where c is now a JCSP One2OneChannel object (see Section 3.2). The implementation of this method uses c as a monitor – hence the MONITOR(c) process. It also uses two state variables, channel_hold and channel_empty, represented by the variable processes Hold(c) and Empty(c). These latter
processes service simple get and set channels for getting and setting values - no requests are ever refused. Empty(c) holds a boolean value (indicating whether one side is ready to communicate) and Hold(c) holds the message being sent (whose actual value is irrelevant to the operation of this channel).
Bj is related to B in the same way as Aj relates to A. The CSP input, c ? mess, is changed to the JCSP method invocation, mess = c.read(), where c is the same One2OneChannel object used by Aj. The implementation of this method (see Section 3.2) interacts with the same three intermediary processes as Aj, but uses its own set of channels.
To prove this theorem is quite daunting and we would like some mechanical help. The FDR model checker cannot yet be employed because the behaviour of the A/Aj and B/Bj processes when they are not communicating has not been specified. Model checkers need a completely specified system into which to get their teeth.
3.1.2 Parallel Introduction
Although this may seem a strange thing to want to do, gratuitous introduction of parallel processes can be done anywhere.
Suppose X contains one or more instances of a process P that are executed in sequence – see Figure 4. Then, X may be replaced by the system shown in Figure 5. X' is the same as X, except that each occurrence of P is replaced by synchronisation on the CSP events ping and pong (in that order). These events are in the alphabet of Xb', but are hidden from the outside environment. Xb' is a buddy process for X' and is completely defined apart from P.
All it does is wait for a ping, perform P on behalf of its buddy and, then, let its buddy know that it’s finished by synchronising on pong. It repeats doing this forever.
We don’t need a model checker to prove the equivalence of figures 4 and 5 – it’s almost a one-liner from the basic algebraic laws of CSP.
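One way to write the claim down (a sketch only; the event names follow Figure 5 and the substitution is described informally):

\[
Xb' \;=\; ping \rightarrow \bigl( P \,;\, pong \rightarrow Xb' \bigr)
\]
\[
X \;=\; \bigl( X' \parallel Xb' \bigr) \setminus \{ping,\, pong\}
\]

where \(X'\) is \(X\) with every occurrence of \(P\) rewritten to \(ping \rightarrow pong \rightarrow SKIP\) (sequenced with whatever followed that occurrence of \(P\)), and the parallel composition synchronises on \(\{ping, pong\}\).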
Figure 6 shows a slightly more useful version of Figure 4, where the process to be delegated accesses and modifies some of the state of X. Further, the particular states being modified vary between instances.
In this case, the ping and pong events used previously become channels that carry and return state information to and from the buddy process – see Figure 7. Again, the proof of the equivalence between figures 6 and 7 (with the ping/pong channels hidden) follows directly from basic CSP algebra.
3.1.3 Applying Parallel Introduction
We can now deduce that the original system in Figure 2 is equivalent to the one in Figure 8.

$A'$ and its buddy $Ab'$ are derived from $A$ as prescribed for Parallel Introduction – the same for $B'$ and $Bb'$ (derived from $B$). Notice that the buddy processes, $Ab'$ and $Bb'$, are this time completely specified. They each have a simple loop in which they do their respective ping, their common channel communication, and then their respective pongs.

We may also deduce, using Parallel Introduction, that the Java-ised system of Figure 3 is equivalent to that shown in Figure 9. Note that $A_j$ has become the parallel composition of the same $A'$ process as in Figure 8, but with a different buddy process $A_j'$. Processes $Ab'$ and $A_j'$ are identical except that the former does a CSP channel write and the latter does a JCSP one. Similarly, $B_j$ from Figure 3 has become $B'$ (the same as in Figure 8) in parallel with $B_j'$ – and $B_j'$ is related to $Bb'$ as $A_j'$ is to $Ab'$.
3.1.4 Applying the Model Checker
Looking at figures 8 and 9, we see that all processes, apart from $A'$ and $B'$, are completely specified. If we can prove that the completely specified middles – i.e. figures 10 and 11 – are equivalent, we are done. This is because simple CSP laws of parallel composition (associativity) would allow us to deduce that figures 8 and 9 are equivalent and, hence, so must be our original systems in figures 2 and 3.
To prove figures 10 and 11 equivalent, we stand on the shoulders of giants and apply the FDR model checker. Both systems are fully specified and have the same channel interface (two pairs of ping/pong channels) to their environments – everything else is hidden. Type these systems into FDR and ask if they are equivalent. Within seconds, Q.E.D!
3.1.5 Connecting this Visualisation with the Rest of this Paper
The above rationalisation was constructed after the detailed proof reported in Sections 3.2, 3.3, 3.4 and 4 was developed.
Section 3.2 develops a CSP model of the JCSP channel that already includes Parallel Introduction. The WRITE(o,t) and READ(o,t) processes correspond to the buddy processes Aj' and Bj' from Figure 9. The write.o.t and ack.o.t channels are ping and pong for Aj'. The ready.o.t and read.o.t are ping and pong channels for Bj'.
In Section 3.3, the LEFT and RIGHT correspond to Ab' and Bb' from Figure 10. Their respective ping and pong channels are, of course, the same as those for Aj' and Bj'.
Finally, Section 4 performs the Parallel Introduction lemma in reverse (i.e. Parallel Removal) to go from Figure 8 to Figure 2.
3.2 The CSP Model of the JCSP Channel
The JCSP channel, One2OneChannel, is currently implemented as a (100% pure) Java monitor. The following CSP model is derived directly from its source code and the CSP model of Java monitors just presented. This derivation was done by hand, but a tool could be built to assist this process considerably. Here is the outline of the class:
```java
public class One2OneChannel {
  // data in transit
  private Object channel_hold;
  // synchronisation flag
  private boolean channel_empty = true;
  ...
  public synchronized Object read () ...
  public synchronized void write (Object mess) ...
}
```
A JCSP channel object has two attributes (channel_empty and channel_hold), which we shall model as processes always ready both to have their values reset or to report them to any willing thread. For simplicity, we assume that each channel carries the same three-valued type (Data) that we shall use for the Java ‘boolean’.
```
datatype Variables = channel_empty | channel_hold
datatype Data = TRUE | FALSE | OTHER
```
The operation of the channel, however, is independent of the type of data that it carries – at no point is the value of the data it stores used to decide its future behaviour. So this analysis is equally valid for channels of any type\(^1\).
Variables are initialised as TRUE. Their values may then be read or written by any thread using channels getvar and setvar.
```
channel getvar, setvar : Objects.Variables.Threads.Data

VARIABLE(o, v) = VAR2(o, v, TRUE)

VAR2(o, v, d) =
  (□ t ∈ Threads • getvar.o.v.t!d → VAR2(o, v, d))
  □ (□ t ∈ Threads • setvar.o.v.t?x → VAR2(o, v, x))

αVARIABLE(o, v) =
  {getvar.o.v.t.d, setvar.o.v.t.d | t ∈ Threads, d ∈ Data}

VARIABLES(o) = VARIABLE(o, channel_empty) || VARIABLE(o, channel_hold)
```
One purpose of JCSP is to seal off the thread/monitor synchronisation calls from the programmer. Instead a read/write interface is provided by two simple methods. We shall model this interface with the following events:
- write.o.t.d – thread \( t \) invokes java method write(d) of object \( o \), message \( d \) is supplied for transmission;
- ack.o.t – call to write(d) terminates;
- ready.o.t’ – thread \( t' \) invokes method read() of object \( o \);
- read.o.t'.d – call to read() terminates, returning \( d \).
\(^1\)A formal theory of data independence in CSP has been developed by Ranko Lazic and Bill Roscoe[13].
The JCSP channel should behave like a synchronised channel. Each successful communication requires that at some point both threads are simultaneously involved.
```
channel read, write : Objects.Threads.Data
channel ready, ack : Objects.Threads
```
The Java code for the JCSP read method is as follows[14]:
```java
public synchronized Object read ()
throws InterruptedException {
if (channel_empty) {
channel_empty = false; // first to the rendezvous
wait (); // wait for writer process
notify (); // schedule the writer to finish
} else {
channel_empty = true; // second to the rendezvous
notify (); // schedule the waiting writer
}
return channel_hold;
}
```
We model this JCSP read method as a process which repeatedly waits to be activated by a ready event and then executes the monitor synchronisation code to receive a message:
```
READ(o,t) =
  ready.o.t →
  claim.o.t →
  getvar.o.channel_empty.t?c →
  (if (c = TRUE) then
     setvar.o.channel_empty.t!FALSE →
     WAIT(o,t);
     NOTIFY(o,t)
   else
     setvar.o.channel_empty.t!TRUE →
     NOTIFY(o,t));
  getvar.o.channel_hold.t?mess →
  release.o.t →
  read.o.t!mess →
  READ(o,t)
```
```
αREAD(o,t) =
  {claim.o.t, getvar.o.v.t.d, notify.o.t,
   notifyall.o.t, setvar.o.v.t.d, read.o.t.d,
   release.o.t, waita.o.t, waitb.o.t,
   ready.o.t | v ∈ Variables, d ∈ Data}
```
By including all the relevant Java synchronisation events, such as notifyall.o.t in the alphabet of the CSP READ process we model our intention to prohibit the user of JCSP from calling the corresponding Java methods directly from within his or her code. JCSP is intended as a complete, user-friendly alternative to using monitors for programming multi-threaded applications.
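As a concrete illustration of that intention, here is a small hypothetical harness (not part of JCSP, and not used anywhere in the proof) in which two plain Java threads communicate solely through the read and write methods of the One2OneChannel outlined above; no wait, notify or synchronized blocks appear in the user code.

```java
// Hypothetical harness: a writer and a reader thread sharing one channel.
public class OneToOneDemo {

    public static void main (String[] args) throws InterruptedException {
        final One2OneChannel c = new One2OneChannel ();   // class outlined above

        Thread writer = new Thread (() -> {
            try {
                for (int i = 0; i < 5; i++) {
                    c.write (Integer.valueOf (i));        // blocks until the reader takes it
                }
            } catch (InterruptedException e) {
                Thread.currentThread ().interrupt ();
            }
        });

        Thread reader = new Thread (() -> {
            try {
                for (int i = 0; i < 5; i++) {
                    System.out.println ("received " + c.read ());
                }
            } catch (InterruptedException e) {
                Thread.currentThread ().interrupt ();
            }
        });

        writer.start ();
        reader.start ();
        writer.join ();
        reader.join ();
    }
}
```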
The Java code for the JCSP write method is as follows:
```java
public synchronized void write (Object mess)
throws InterruptedException {
channel_hold = mess;
if (channel_empty) {
channel_empty = false; // first to the rendezvous
wait (); // wait for reader process
} else {
channel_empty = true; // second to the rendezvous
notify (); // schedule the waiting reader
wait(); // let the reader regain the lock
}
}
```
This write method is similarly modelled as a repeating process, activated by the write event:
```
WRITE(o, t) =
  write.o.t?mess →
  claim.o.t →
  setvar.o.channel_hold.t!mess →
  getvar.o.channel_empty.t?c →
  (if (c = TRUE) then
     setvar.o.channel_empty.t!FALSE →
     WAIT(o, t)
   else
     setvar.o.channel_empty.t!TRUE →
     NOTIFY(o, t);
     WAIT(o, t));
  release.o.t →
  ack.o.t →
  WRITE(o, t)
```
Note also that in the above definition (and in the rest of this paper) we shall assume that the alphabet of a parallel composition is the union of the alphabets of the component processes (in line with Hoare’s book[5]).
3.3 Equivalence to a Simpler Channel
Now we shall define a simplified model of how the JCSP channel should work, and then use FDR to show that this is equivalent to the JCSP implementation.
The simple channel consists of two parallel processes, LEFT and RIGHT, to handle input and output respectively. The processes are joined by a hidden channel transmit – see Figure 12.
Figure 12: Special Version of Figure 10
We define:
```
channel transmit : Objects.Data

LEFT(o,t) =
  write.o.t?mess → transmit.o!mess → ack.o.t → LEFT(o,t)

αLEFT(o,t) =
  {write.o.t.m, transmit.o.m, ack.o.t | m ∈ Data}

RIGHT(o,t') =
  ready.o.t' → transmit.o?mess → read.o.t'!mess → RIGHT(o,t')

αRIGHT(o,t') =
  {ready.o.t', transmit.o.m, read.o.t'.m | m ∈ Data}

CHANNEL(o,t,t') =
  (LEFT(o,t') || RIGHT(o,t)) \ {transmit.o.m | m ∈ Data}

αCHANNEL(o,t,t') =
  {write.o.t'.m, ack.o.t', ready.o.t, read.o.t.m | m ∈ Data}
```
In order to compare this specification with the JCSP implementation we need to conceal all the additional events in the alphabet of JCSPCHANNEL.
\[
\text{Private} = \alpha\text{JCSPCHANNEL}(0,0,1) - \alpha\text{CHANNEL}(0,0,1)
\]
The CSP language of Hoare is a notation for describing patterns of communication by algebraic expressions. It is widely used for the design of parallel and distributed hardware and software, and for the formal proof of vital properties of such systems. Underpinning CSP is a formal semantic model based on \textit{traces}, \textit{failures} and \textit{divergences}. Two CSP systems are equivalent if the possible sequences of events (traces) they may perform are identical, and also if the circumstances under which they might deadlock or livelock are the same.
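Stated symbolically (a standard formulation of that model, not one specific to this paper), two processes \(P\) and \(Q\) are equivalent when

\[
P =_{FD} Q \;\iff\; \mathit{traces}(P) = \mathit{traces}(Q) \;\wedge\; \mathit{failures}(P) = \mathit{failures}(Q) \;\wedge\; \mathit{divergences}(P) = \mathit{divergences}(Q).
\]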
We assert that $JCSP\text{CHANNEL}(o, t_1, t_2) \setminus \text{Private}$ is equivalent to $\text{CHANNEL}(o, t_1, t_2)$ in the failures/divergences model:
$$\text{assert } \text{CHANNEL}(0, 0, 1) = JCSP\text{CHANNEL}(0, 0, 1) \setminus \text{Private}$$
The FDR[2] tool can check for equivalence between CSP systems. The sizes of the problems it may tackle are limited to around one billion states with current workstation technologies. The above assertion is verified within seconds using this tool.
3.4 Interference by Other Threads
The above works fine with the current system when only two threads are in existence. When we increase the number of threads in the system beyond two, perhaps by defining $\text{Threads} = \{0, 1, 2\}$, we find that the above pair of assertions no longer holds. FDR reveals that this is because the other threads may tamper with the state of the channel implementation, using the getvar and setvar channels.
So how do we stop other threads from interfering with the channel object? In CSP we can add a parallel process to the channel implementation which blocks access to any of the relevant events!
$$\text{PROTECTION}(o, t, t') = \text{STOP}$$
$$\alpha\text{PROTECTION}(o, t, t') = \{\, \text{claim}.o.t'',\ \text{setvar}.o.v.t''.d,\ \text{getvar}.o.v.t''.d,\ \text{wait}.o.t'' \mid t'' \in \text{Threads} - \{t, t'\},\ v \in \text{Variables},\ d \in \text{Data} \,\}$$
$$\text{SAFEJCSPCHANNEL}(o, t, t') = \text{JCSPCHANNEL}(o, t, t') \parallel \text{PROTECTION}(o, t, t')$$
$$\text{assert CHANNEL}(0, 0, 1) = \text{SAFEJCSPCHANNEL}(0, 0, 1) \setminus \text{Private}$$
This pair of assertions is indeed found to hold when the number of threads is increased beyond 2.
This is not supported by the actual Java implementation of One2OneChannel and so must be regarded as a usage rule for JCSP. The same usage rule is, of course, enforced for occam channels by its semantics (and by its compilers). Given that JCSP channels are just Java objects (i.e. held by reference to stack addresses), that usage rule must be enforced either manually or by good design tools.
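A hypothetical run-time guard (not part of JCSP) hints at what such enforcement could look like: a wrapper that remembers the first reader and writer threads and rejects any others.

```java
import java.util.concurrent.atomic.AtomicReference;

// Hypothetical wrapper enforcing the one-reader/one-writer usage rule at run time.
public class GuardedOne2OneChannel {

    private final One2OneChannel channel = new One2OneChannel ();  // class of Section 3.2
    private final AtomicReference<Thread> reader = new AtomicReference<> ();
    private final AtomicReference<Thread> writer = new AtomicReference<> ();

    public Object read () throws InterruptedException {
        reader.compareAndSet (null, Thread.currentThread ());      // claim the reader role
        if (reader.get () != Thread.currentThread ()) {
            throw new IllegalStateException ("parallel read on a One2OneChannel");
        }
        return channel.read ();
    }

    public void write (Object mess) throws InterruptedException {
        writer.compareAndSet (null, Thread.currentThread ());      // claim the writer role
        if (writer.get () != Thread.currentThread ()) {
            throw new IllegalStateException ("parallel write on a One2OneChannel");
        }
        channel.write (mess);
    }
}
```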
However, JCSP also provides Any2OneChannels (as well as One2AnyChannel and Any2AnyChannel) that do give the necessary protection to control parallel reads and writes. This is considered in Section 5. First, we must complete the proof of correctness for One2OneChannel.
4 Correctness of the JCSP Channel
So far we have succeeded in reducing the JCSP channel to a vastly simplified form, which does away with monitors, and involves just two very simple processes: $\text{LEFT}$ and $\text{RIGHT}$. This is useful but it would be nice to go one stage further and be rid of these two processes, leaving just a single unprotected CSP channel. It turns out that this is only possible if we
assume certain ‘usage’ rules about how networks are constructed using JCSP, similar to those enforced by occam compilers in that alternative implementation of CSP.
For the moment let us consider simple ‘ALT-free’ CSP programs that use only one-to-one channels and have no alternation. Define an SCSP network as a special kind of parallel CSP network \( P_1 \parallel P_2 \parallel \ldots \parallel P_n \), where each \( P_i \) is an SCSPROC:
\[
SCSPROC ::= \text{SKIP}
  \;\mid\; a!x \to SCSPROC
  \;\mid\; a?x \to SCSPROC(x)
  \;\mid\; SCSPROC \sqcap SCSPROC
  \;\mid\; SCSPROC \,\Box\, SCSPROC
\]
A usage rule is enforced which is that each channel \( a \) is used by exactly one process \( P_i \) for input and exactly one process \( P_j \) for output, i.e. the network is triple-disjoint. (There will be other external events though to represent things like reading in data and printing out results).
So we have described a simple set of CSP processes that we would like to model using JCSP. However there is an obstacle to this – we don’t really have any CSP channels available for use - we only have JCSP channels, which behave like extra parallel processes:
\[
JCSPCHANNEL(a) = (LEFT(a) \parallel RIGHT(a)) \setminus \{a\}
\]
where \( LEFT(a) = write.a?x \to a!x \to ack.a \to LEFT(a) \)
and \( RIGHT(a) = ready.a \to a?x \to read.a!x \to RIGHT(a) \)
This representation of a JCSP channel has been proven correct above using FDR.
We are now going to show that these JCSP channels can be used just like ordinary CSP channels. We shall consider an SCSP network \( V = P_1 \parallel \ldots \parallel P_n \) and transform it in some way that replaces all the CSP channels with JCSP channel processes, and show that the external behaviour of the program is preserved.
Let us define the transformation as follows: suppose that network \( V \) originally contains a CSP channel \( a \), which is written to by process \( P_i \) and read from by process \( P_j \). To replace CSP channel \( a \) in \( V \) we introduce an additional parallel process \( JCSPCHANNEL(a) \), and we transform process \( P_i \) to \( P'_i \) by replacing all occurrences of \( a!x \to \text{Process} \) by \( write.a!x \to ack.a \to \text{Process} \). And we transform process \( P_j \) to \( P'_j \) by replacing all occurrences of \( a?x \to \text{Process}(x) \) by \( ready.a \to read.a?x \to \text{Process}(x) \).
Now, because of the nice algebraic laws of CSP, if we can show that the external behaviour of subnetwork \( P_i \parallel P_j \) is unchanged by this transformation then it follows that there is no effect on the external behaviour of the network as a whole.
What we actually need to prove is that:
\[
(P_i \parallel P_j) \setminus \{a\} = (P'_i \parallel JCSPCHANNEL(a) \parallel P'_j) \setminus \{write.a,\ ack.a,\ ready.a,\ read.a\} \tag{1}
\]
Let’s start with the RHS:
\begin{align*}
& (P'_i \parallel JCSPCHANNEL(a) \parallel P'_j) \setminus \{write.a, ack.a, ready.a, read.a\} \\
={}& (P'_i \parallel ((LEFT(a) \parallel RIGHT(a)) \setminus \{a\}) \parallel P'_j) \setminus \{write.a, ack.a, ready.a, read.a\} \\
={}& (P'_i \parallel LEFT(a) \parallel RIGHT(a) \parallel P'_j) \setminus \{a, write.a, ack.a, ready.a, read.a\} \\
={}& \bigl( ((P'_i \parallel LEFT(a)) \setminus \{write.a, ack.a\}) \parallel ((RIGHT(a) \parallel P'_j) \setminus \{ready.a, read.a\}) \bigr) \setminus \{a\}
\end{align*}
We can establish result (1) if we can prove the following:
\[(P_i' \parallel LEFT(a)) \setminus \{write.a,\ ack.a\} = P_i \tag{2}\]
and
\[(RIGHT(a) \parallel P_j') \setminus \{ready.a,\ read.a\} = P_j \tag{3}\]
Let us consider the validity of equation 2. This would not hold true in general for any CSP process \(P_i\). But due to our restriction on the syntax of SCSP processes, we can see that it is true as follows.
1. In moving from \(P_i\) to \(P_i'\) we replace \(a \rightarrow PROCESS\) with \(write.a \rightarrow ack.a \rightarrow PROCESS\).
2. The effect of putting \(LEFT(a)\) in parallel with \(P_i'\) is then to replace \(write.a \rightarrow ack.a \rightarrow PROCESS\) in \(P_i'\) with \(write.a \rightarrow a \rightarrow ack.a \rightarrow PROCESS\).
3. The effect of hiding \(\{write.a, ack.a\}\) is to set \(write.a \rightarrow a \rightarrow ack.a \rightarrow PROCESS\) back to \(a \rightarrow PROCESS\), which is what we started with. This is the only part of the proof which makes use of the restricted syntax. For if we were to allow external choice to be applied to JCSP channels, then hiding the \(write\) channel would introduce unwanted non-determinism.
Equation 3 is proved similarly and we conclude that the transformation to replace CSP channel \(a\) with \(JCSP\ CHANNEL(a)\) has not affected the external behaviour of the network.
Having applied a transformation to replace one CSP channel \(a\) with a JCSP channel we can repeat the step on other channels until only JCSP channels remain. (This is because the transformation from \(P_i\) and \(P_j\) to \(P_i'\) and \(P_j'\) preserves the restricted syntax of the SCSP processes). Finally we will have shown that
\[
(P_1 \parallel \ldots \parallel P_n) \setminus \{a_1, \ldots, a_m\} = (P'_1 \parallel \ldots \parallel P'_n \parallel JCSPCHANNEL(a_1) \parallel \ldots \parallel JCSPCHANNEL(a_m)) \setminus \{write.a_1, ack.a_1, ready.a_1, read.a_1, \ldots, write.a_m, ack.a_m, ready.a_m, read.a_m\}
\]
This means that the external behaviour of the network (i.e. its behaviour when all internal channels are concealed) is exactly preserved.
So we are perfectly safe to reason about JCSP programs in their natural form, modelling calls to \(read\) and \(write\) as atomic communication events. There is no need for the additional baggage of \(LEFT/RIGHT\) process pairs for each channel.
5 Any-to-One (Shared) Channels
A serious omission from the JCSP model considered so far is (occam) Alternation. We consider here a simplified version that affords a much more efficient implementation. It corresponds to the occam3 notion of a shared channel. In JCSP, this is the Any2OneChannel, which allows any number of concurrent writers but only a single reader.
Any2OneChannel is similar to One2OneChannel, but contains an extra attribute – write_monitor. This is a dumb object whose only use is to provide a monitor lock that must be acquired by a writer before writing. The read method is unchanged from One2OneChannel. The modified write method is as follows:
```java
public void write (Object mess) throws InterruptedException {
  synchronized (write_monitor) {        // compete with other writers
    synchronized (this) {               // compete with a single reader
      channel_hold = mess;
      if (channel_empty) {
        channel_empty = false;          // first to the rendezvous
        wait ();                        // wait for reader process
      } else {
        channel_empty = true;           // second to the rendezvous
        notify ();                      // schedule the waiting reader
        wait ();                        // let the reader regain the lock
      }
    }
  }
}
```
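For illustration, here is a hypothetical harness (assuming an Any2OneChannel class assembled from the fragments above, with the read method of Section 3.2) in which two writers share the channel and a single reader drains it:

```java
// Hypothetical harness: two writers, one reader, sharing an Any2OneChannel.
public class AnyToOneDemo {

    public static void main (String[] args) throws InterruptedException {
        final Any2OneChannel c = new Any2OneChannel ();   // assumed assembled as above

        Runnable writerBody = () -> {
            try {
                for (int i = 0; i < 3; i++) {
                    c.write (Thread.currentThread ().getName () + ":" + i);
                }
            } catch (InterruptedException e) {
                Thread.currentThread ().interrupt ();
            }
        };

        Thread w1 = new Thread (writerBody, "writer-1");
        Thread w2 = new Thread (writerBody, "writer-2");
        w1.start ();
        w2.start ();

        for (int i = 0; i < 6; i++) {                     // one read per message written
            System.out.println ("received " + c.read ());
        }
        w1.join ();
        w2.join ();
    }
}
```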
For the CSP version, an extra object is needed for the write monitor.
```
WRITE'(o, writemonitor, t) =
  write.o.t?mess →
  claim.writemonitor.t →
  claim.o.t →
  setvar.o.channel_hold.t!mess →
  getvar.o.channel_empty.t?c →
  (if (c = TRUE) then
     setvar.o.channel_empty.t!FALSE →
     WAIT(o, t)
   else
     setvar.o.channel_empty.t!TRUE →
     NOTIFY(o, t);
     WAIT(o, t));
  release.o.t →
  release.writemonitor.t →
  ack.o.t →
  WRITE'(o, writemonitor, t)

αWRITE'(o, o', t) =
  {claim.o''.t, getvar.o.v.t.d, notify.o''.t,
   notifyall.o''.t, setvar.o.v.t.d, write.o.t.d,
   release.o''.t, waita.o''.t, waitb.o''.t, ack.o.t
   | v ∈ Variables, d ∈ Data, o'' ∈ {o, o'}}
```
The read method is essentially the same as before, except that we need to include certain events in its alphabet to prevent the reading thread from interfering with the write monitor. (This was only discovered after FDR caught a livelock on an early run.)
\[
\text{READ}'(o, o', t) = \text{READ}(o, t)
\]
\[
\alpha\text{READ}'(o, o', t) = \alpha\text{READ}(o, t) \cup \{\text{claim}.o'.t\}
\]
\[
\text{JCSPSHAREDCHANNEL}(o, o', t_1, t_2, t_3) = \\
\text{READ}'(o, o', t_1) || \\
\text{WRITE}'(o, o', t_2) || \text{WRITE}'(o, o', t_3) || \\
\text{MONITOR}(o) || \text{MONITOR}(o') || \text{VARIABLES}(o)
\]
Now we arrive at the step of reducing the CSP representation of an Any2OneChannel to a simpler equivalent form. Since the FDR tool can perform only static analysis of finite systems we shall restrict ourselves to the case of a two-to-one channel. (However it should be possible to extend this analysis to the general case by a form of mathematical induction devised by Creese and Roscoe for CSP programs with arbitrary network topologies[1].)
The specification for a two-to-one JCSP channel consists of three simple processes – one for each user thread (see Figure 13):

We define a channel transmit (carrying Data for each channel object) and three simple processes, one per user thread: two writer-side processes that accept a write, pass the message over transmit and then signal ack, and one reader-side process that accepts ready, takes a message from transmit and delivers it on read – all in the style of LEFT and RIGHT from Section 3.3, with transmit hidden. As before, a set Private collects the internal events of JCSPSHAREDCHANNEL that do not appear in this specification, and the two systems are asserted to be equivalent once Private is concealed.

Again, this is easily verified using FDR.
The analysis of Section 4 can now be extended to cater for simple networks with shared CSP channels.
6 Analysis of the CTJ Channel
Having proved correctness for the JCSP channel it is natural to move on to consideration of its rival: CTJ (Communicating Threads for Java)[7].
The CTJ channel algorithm is quite similar to JCSP, but it handles the monitor synchronisation in a subtly different, and somewhat simpler way. CTJ only has any-to-any channels. The following shows the core one-to-one code that it uses, written in the same style as the JCSP version presented in Section 3.2:
```java
public class One2OneChannel {
private Object channel_hold; // data in transit (as in JCSP)
private boolean channel_empty = true; // sync flag (used differently)
// the above flag indicates whether there has been a write.
// previously, it indicated whether there had been a read or a write.
public synchronized Object read () throws InterruptedException {
if (channel_empty) {
wait (); // wait for a writer
}
// there has been a write and channel_empty is now definitely false.
channel_empty = true;
notify (); // there is a writer waiting
return channel_hold; // this will still be valid
}
public synchronized void write (Object value) throws InterruptedException {
channel_empty = false;
channel_hold = value;
notify (); // redundant if first to the channel
wait (); // wait for a reader
}
}
```
This is implemented in our CSP model by redefining the READ and WRITE processes as follows:
```
READ(o, t) =
  ready.o.t → claim.o.t →
  getvar.o.channel_empty.t?c → (if (c = TRUE) then WAIT(o, t) else SKIP);
  setvar.o.channel_empty.t!TRUE → NOTIFY(o, t);
  getvar.o.channel_hold.t?mess →
  release.o.t → read.o.t!mess → READ(o, t)

WRITE(o, t) =
  write.o.t?mess → claim.o.t →
  setvar.o.channel_empty.t!FALSE →
  setvar.o.channel_hold.t!mess →
  NOTIFY(o, t); WAIT(o, t);
  release.o.t → ack.o.t → WRITE(o, t)
```
The channel is then shown to be equivalent to the simpler form, and hence also the JCSP channel, in the same manner as before:
```
assert CHANNEL(0, 0, 1) = JCSPCHANNEL(0, 0, 1) \ Private
```
7 Verifying the JCSP ALT Construct
General ALTing provides much more control (albeit at $O(n)$-cost) than the shared channels considered in Section 5. For example, shared channels do not enable a process to listen exclusively to channel $x$ one moment and channels $x$ and $y$ another - so ALTing is crucial.
Further, it was the (published[15], compact and well-used) JCSP implementation of ALTing that contained the unfortunate race hazard that eventually yielded the deadlock mentioned in Section 1. This requires the analysis of many interacting monitors – the Alternative object itself (which has two methods) and the channels (which are extended to four methods).
We have recently performed a formal verification of the current JCSP Alternative class (given below). This was done in a fairly restricted manner: considering only two channels firing at a single alternation construct. However, its success has brought great relief and given us confidence in the correctness of the $n$-way ALT with pre-conditions.
The analysis was considerably more complex than that of the single and shared channels, and so we shall only include an outline of the methods employed here. The complete FDR document is available for reference at [11].
```java
class Alternative {
private static final int enabling = 0;
private static final int waiting = 1;
private static final int ready = 2;
private static final int inactive = 3;
private int state = inactive;
private final Channel[] c;
public Alternative (final Channel[] c) {
this.c = c;
}
public int select () throws InterruptedException {
int selected = -999999; // this value should *never* be returned!
int i;
state = enabling; // ALT START
for (i = 0; i < c.length; i++) {
if (c[i].enable (this)) { // ENABLE CHANNEL
state = ready;
selected = i;
break;
}
}
synchronized (this) {
if (state == enabling) { // ALT WAIT
state = waiting;
wait ();
state = ready;
}
}
// assert : state == ready
for (i--; i >= 0; i--) {
if (c[i].disable ()) { // DISABLE CHANNEL
selected = i;
}
}
state = inactive;
return selected; // ALT END
}
synchronized void schedule () {
switch (state) {
case enabling:
state = ready;
break;
case waiting:
state = ready;
notify ();
break;
// case ready: case inactive:
// break
}
}
}
```
Here is the modified Channel class, which allows for alternation:
```java
public class Channel {
private int channel_hold; // buffer (not detectable to users)
private boolean channel_empty = true; // synchronisation flag
private Alternative alt; // state of reader
public synchronized int read () throws InterruptedException {
if (channel_empty) {
channel_empty = false; // first to the rendezvous
wait (); // wait for the writer thread
notify (); // schedule the writer to finish
} else {
channel_empty = true; // second to the rendezvous
notify (); // schedule the waiting writer thread
}
return channel_hold;
}
public synchronized void write (int n) throws InterruptedException {
channel_hold = n;
if (channel_empty) {
channel_empty = false; // first to the rendezvous
if (alt != null) { // the reader is ALTing on this Channel
alt.schedule ();
}
wait (); // wait for the reader thread
} else {
channel_empty = true; // second to the rendezvous
notify (); // schedule the waiting reader thread
wait (); // let the reader regain this monitor
}
}
synchronized boolean enable (Alternative alt) {
if (channel_empty) {
this.alt = alt;
return false;
} else {
return true;
}
}
synchronized boolean disable () {
alt = null;
return !channel_empty;
}
}
```
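Before turning to the verification itself, here is a hypothetical fragment showing how a reader is expected to drive these two classes – first select, then read – which is exactly the usage rule formalised later in this section.

```java
// Hypothetical harness: one ALTing reader servicing two writers.
public class AltDemo {

    public static void main (String[] args) throws InterruptedException {
        final Channel[] c = new Channel[] { new Channel (), new Channel () };
        final Alternative alt = new Alternative (c);

        for (int k = 0; k < c.length; k++) {
            final int me = k;
            new Thread (() -> {
                try {
                    for (int i = 0; i < 3; i++) {
                        c[me].write (me * 10 + i);        // writers never touch alt directly
                    }
                } catch (InterruptedException e) {
                    Thread.currentThread ().interrupt ();
                }
            }).start ();
        }

        for (int i = 0; i < 6; i++) {
            int selected = alt.select ();                 // wait until some channel is ready
            int value = c[selected].read ();              // then read from that channel only
            System.out.println ("channel " + selected + " -> " + value);
        }
    }
}
```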
For the purpose of verification we modelled a system of two channels interacting with a single alternation construct. Even this resulted in a fairly complicated CSP network as shown in Figure 14.

Figure 14: A 2-way ALTing Channel Version of Figure 11
The code for the various CSP processes is too long to be included here, but it is a straightforward translation of the equivalent Java (in the same way as for the One2OneChannel). The function of those processes not defined above is as follows.
- **WRITE2** – Channel.write method, modified to keep track of alt variable and to invoke Alternative.schedule.
- **ENABLE** – models both Channel.enable and Channel.disable.
- **ALTING** – process to model the alt variable.
- **SCHEDULE** – Alternative.schedule method (there are two copies of this: one to service each channel).
- **STATE** – process to model the state variables for Alternative: state, selected and i.
- **SELECT** – Alternative.select method.
For our specification, which we wish to prove equivalent to the alternation construction, we consider two simple CHANNEL processes placed in parallel with an ALT process which snoops their write channels in order to provide the reader with selection information:
```
ALT(o'', t'', o, t, o', t', ready0, ready1, waiting) =
  write.o.t?mess → ALT(o'', t'', o, t, o', t', TRUE, ready1, waiting)
  □ write.o'.t'?mess → ALT(o'', t'', o, t, o', t', ready0, TRUE, waiting)
  □ (if (waiting = TRUE) then
       (if (ready0 = TRUE and ready1 = TRUE) then
          ((return.o''.t''!0 → ALT(o'', t'', o, t, o', t', FALSE, ready1, FALSE))
           ⊓ (return.o''.t''!1 → ALT(o'', t'', o, t, o', t', ready0, FALSE, FALSE)))
        else if (ready1 = TRUE) then
          (return.o''.t''!1 → ALT(o'', t'', o, t, o', t', ready0, FALSE, FALSE))
        else if (ready0 = TRUE) then
          (return.o''.t''!0 → ALT(o'', t'', o, t, o', t', FALSE, ready1, FALSE))
        else STOP)
     else
       (query.o''.t'' → ALT(o'', t'', o, t, o', t', ready0, ready1, TRUE)))
```
In each state ALT maintains three variables: ready0, ready1 and waiting. The two former variables record which channels are carrying data, and the latter records whether an unanswered query has been issued by the reader. Note that this is not a prioritized alternation construct. If both channels are carrying data then a non-deterministic choice is made between them.
Now in order to compare the two processes we have to add a final parallel component to each one to reflect a JCSP usage rule, which is that the reader must first call the select method and then call whichever read method is appropriate. We are not interested in comparing how the specification and implementation would behave under an illegal usage pattern.
```
USAGE(o'', t'', o, o') =
  query.o''.t'' → return.o''.t''?c →
  if (c = 0) then
    (ready.o.t'' → read.o.t''?mess → USAGE(o'', t'', o, o'))
  else
    (ready.o'.t'' → read.o'.t''?mess → USAGE(o'', t'', o, o'))
```
FDR, after working through around 48,000 states, confirms that the implementation is equivalent to the specification – which is a good step towards total confidence in the JCSP ALT code. However there is still further work that could be done to consider alternation structures with pre-conditions, and those which contain more than two channels.

Figure 16: Snapshot of FDR Finding the Deadlock in the Original (Bad) JCSP ALT
The original faulty JCSP implementation was also analysed using FDR and, sure enough, the deadlock trace that had taken two years to discover and then eliminate was revealed in a matter of seconds – Figure 16.
8 Conclusions and Future Work
The CSP model of Java's monitor primitives means that any multithreaded Java code – not just code using the JCSP or CTJ[7] libraries that give direct access to CSP primitives – becomes amenable to formal and (partly) automated analysis. This should be of interest to the Java community.
Earlier work in the area of model-checking concurrent Java programs has been performed by Naumovich et al[12]. Their approach involves specifying the behaviour of Java monitors as a collection of constraints over event orderings. These constraints are defined using finite transition systems. There is a certain similarity to the language of sat which is used to specify behavioural characteristics of CSP programs using set theory and logic[10]. However, theirs is a traces-only model and so provides no easy way to check for deadlocks. By using CSP as
our underlying model, we are able to test for a wide ranging set of properties: e.g. deadlock-freedom, livelock-freedom, program equivalence and program refinement. And we can use the powerful and mature FDR tool.
In general, Java designers, implementors, testers and maintainers are running scared of its multithreading primitives. However, applications force their use very quickly (e.g. to maintain decent response times from continuous systems with external control). The Mars Pathfinder mission of 1997 suffered a race hazard from just three interacting threads that led to real-time failure on the Martian surface [8]. In future, if not already, finance-critical and safety-critical applications will be multithreaded. How sure will the authors be of their security?
A CSP model of multithreading puts this on a solid engineering foundation. Nobody can ignore that. Anyone doing so would, at the very least, be exposed to a risk of litigation after a messy accident gets traced back to a system failure arising from some race condition:
“So, Bill, you sold your system without really knowing whether it was deadlock-free ... and you never even tried these standard procedures that might have found out?!? Disclaimer notices notwithstanding, would you say that that shows a lack of due care or a lack of diligence towards your customer?”
References
A VNF modeling approach for verification purposes
Guido Marchetto, Riccardo Sisto, Matteo Virgilio, Jalolliddin Yusupov
Department of Control and Computer Engineering, Politecnico di Torino, Italy
ABSTRACT
Network Function Virtualization (NFV) architectures are emerging to increase networks flexibility. However, this renewed scenario poses new challenges, because virtualized networks need to be carefully verified before being actually deployed in production environments in order to preserve network coherency (e.g., absence of forwarding loops, preservation of security on network traffic, etc.). Nowadays, model checking tools, SAT solvers, and Theorem Provers are available for formal verification of such properties in virtualized networks. Unfortunately, most of those verification tools accept input descriptions written in specification languages that are difficult to use for people not experienced in formal methods. Also, in order to enable the use of formal verification tools in real scenarios, vendors of Virtual Network Functions (VNFs) should provide abstract mathematical models of their functions, coded in the specific input languages of the verification tools. This process is error-prone, time-consuming, and often outside the VNF developers’ expertise. This paper presents a framework that we designed for automatically extracting verification models starting from a Java-based representation of a given VNF. It comprises a Java library of classes to define VNFs in a more developer-friendly way, and a tool to translate VNF definitions into formal verification models of different verification tools.
1. INTRODUCTION
The telecommunications world is exhibiting a very rapid change in its various aspects such as service flexibility, architectural design, and the way services are created, sourced, deployed, and supported. New studies in this sector are coming out every day to change the entire structure of the system by introducing dynamic adjustment of the network resources, custom configuration on a per-user basis, network programmability, etc. The expectation for significant cost savings is frequently mentioned as one of the primary benefits of these studies.
The virtualization technology has emerged as a way to decouple software applications from the underlying hardware and enable software to run in a virtualized environment, with a consequent increase of the service flexibility. In particular, the notion of Network Functions Virtualization (NFV) [1] is evolving to remedy the static nature of traditional networks by promoting innovation in network management and deployment of network services. In an NFV environment, a network is comprised of software-based applications called Virtual Network Functions (VNF) that take on the responsibility of handling specific network functions that run on one or more virtual machines (VMs) or in containers, on top of the physical networking infrastructure.
The idea that now almost anyone can introduce complex VNF software, in today’s modern networks, increases the impact of possible network configuration errors. As a result, a substantial amount of effort is required to ensure networks’ correctness, safety, and security. Therefore, verification of networks is key to eliminate errors and build robust infrastructures. With this respect, mechanized formal techniques have proved to be powerful engines for a formal verification of the network behavior in many different contexts [2-4].
In the networking panorama, most existing verification tools - model checking, SAT solvers, and theorem provers - rely on a formal model provided according to a given description language. The main challenge providers of NFV software have to face in order to enable formal verification of virtualized networks is the model construction: there is a large semantic gap between the artifacts produced by software developers and those accepted by current verification tools. For example, powerful approaches such as [2] and [5] have already evolved from research and are now being rolled into production, but this gap might be a significant hurdle for their wide adoption in real network environments. In essence, these tools are based on a complex modeling technique, tend to lock the user into a single kind of checking technology, require to accurately model network functionality, which relies on expert input, and usually oblige developers to learn a whole new language (e.g., SEFL in [5]).
This motivates the work presented in this paper, i.e., a framework for a user-friendly VNF modeling that developers can use to provide a formal description of their functions to be used in a verification process. The major highlight of our framework is its simplicity and we develop it targeting three specific objectives:
- To simplify the definition of a network function forwarding model in a well-known language.
- To leave some general concepts and flexibility to developers in such a way that they could define the desired behavior for all their network functions.
- To provide an automatic translation from the function model definition into an abstract formal model for verification tools.
In order to meet the above-mentioned principles, we select Java as a well-known and widespread language that developers find simple and easy to grasp. The specific library we propose in this Java-based framework represents the typical set of high-level operations commonly used for describing the network function’s behavior. Starting from a skeleton class definition of a generic network function, a VNF developer can easily extend the provided artifacts to inherit basic properties, data types, and methods and customize function behavior. Our framework also includes a parser that analyzes the Java source code and produces an abstract formal model of the network function that can be automatically translated into the input language proper of a given verification tool. This second step is clearly tool dependent and we plan to enrich our framework in order to support the vast majority of the existing tools. Currently, the parser operation is oriented to formal verification tools based on the analysis of logic formulas and in particular the VeriGraph [6] tool is adopted as a use case. VeriGraph requires complex network scenarios to be modeled as sets of First Order Logic (FOL) formulas and uses an SMT solver, Z3 [7], to verify the satisfiability of these formulas. We hence developed a translator for converting the output of the parser to the proper set of FOL formulas. To check the correctness of the models obtained, the developer can run tests to verify network properties in terms of reachability between different nodes in several simple graphs that include the developed VNF model. We first introduce our modeling technique in Section 3. The use case and the obtained results are presented in Section 4. In addition, Section 2 discusses related work, while Section 5 concludes the paper.
2. RELATED WORK
There has been a significant amount of activity in the past years on attempting to provide a proper support for the translation of software system descriptions to the input models for verification tools. Among the others, we can mention Bandera [8] and JavaPathFinder [9]. The two approaches are based on model checking, and the models they extract are models of Java software. The main difference with respect to the proposed framework is that they consider general-purpose Java programs and their main target is the identification of programming errors and bugs. Here instead we deal with the forwarding behavior of VNFs. We are not interested in all the details of the VNF code execution. Furthermore, we want our analysis to be extremely fast.
Moreover, many approaches and methods for static network analysis have been proposed [2, 5, 10]. Network Optimized Datalog [2] relies on Datalog both for network models and policy constraints. BUZZ [10] uses hand-generated models of network functions in a domain specific language. As we discussed earlier, modeling network functionality for using these tools is difficult and requires a detailed understanding of the verification tool’s semantics. Therefore, our automated approach to generate models eliminates the necessity of having detailed domain knowledge and helps network engineers to quickly determine the behavior of a network function.
On the other hand, SymNet [5] constructs models using an imperative modeling language, called Symbolic Execution Friendly Language (SEFL). While the way this language has been designed has similarities with the Java-based library we propose, this approach lacks the idea of ease of modeling, by introducing a new language. Despite the fact that they provide parsers to automatically generate SEFL models from real network functions, this generation only covers routers and switches. Our approach, instead, is based on the well-known user-friendly Java language and can be used to verify any virtual network function.
A proposal more similar to our target is NFactor [11], which provides a solution to automatically analyze the source code of a given network function to generate an abstract forwarding model, motivated by network verification applications. While relying on advanced tools [12] and techniques [13, 14] from the program analysis community, they do not require a specific structure of the source code of the function to be analyzed. This feature is considered as an advantage from a generality point of view. Unfortunately, creating a model that captures all code paths of a network function is challenging, because the state processing may be hidden deep in the code. This may cause the analysis to miss certain state changes. For example, implementations might use pointer arithmetic to access state variables, which is difficult to trace, and NFactor does not seem to deal with these language features appropriately. Another limitation of the approaches based on the extraction of models from source code is that the code of most network functions is typically proprietary. Instead of relying on vendors to release their code, the aim of our framework is to give developers the opportunity to implement their network functions resembling as much as possible the behavior of the real ones.
3. MODELING TECHNIQUE
Currently, there is no standard or modeling language to accurately represent the diversity and complexity of network functions. Most of the research efforts in proposing VNF models are focused on network verification and gained popularity in the verification community. In this section, we list the open problems that we have encountered while looking at the proposed VNF models in the verification context.
3.1. Overview and problem statement
Modeling of VNFs is useful in a number of ways ranging from finding scalability issues in applications to finding network configuration bugs, in particular by means of formal verification tools. However, formal modeling of network functionalities is difficult and requires a detailed understanding of the specific verification tool’s internals, semantics, and modeling language. With this respect, an automated approach to generate models eliminates the necessity of having detailed knowledge in the formal verification domain and helps engineers to quickly determine the behavior of a VNF-based network, starting from a more user-friendly description of the involved VNFs. In particular, the possibility to describe VNFs by means of a Java-like modeling language would significantly lower barriers to entry for these powerful verification approaches.
An imperative language such as Java focuses on describing how a program operates. A VNF developer can write code that describes in exact detail the steps that the VNF must make when a packet is received from one of its interfaces. In contrast, declarative languages adopted in logic-based formal verification tools do not specify a step or sequence of steps to execute, but rather predicates that must hold.
The conceptual gap between these two representations is the important challenge solved by our approach. The proposed framework provides a Java library and a parser. The library can be used for modeling network functions by means of an imperative language. The parser then automatically generates abstract models from these descriptions. Basically, the parser takes as input the definition written using our library and produces an abstract formal model describing the behavior of the network function. This gives the possibility of automatically translating a definition written in a well-known language into a more high-level, domain-specific constraint language, which would be difficult to deal with manually.
The rest of this section presents the Java library and the parser we developed, which sum up our modeling technique.
3.2. The Java library
The framework provides a library that allows users to easily write models of virtual network functions. By means of this library, the user can simply describe the functionality of the network function by instantiating objects of the library classes and by calling certain methods that correspond to typical operations performed inside network functions, using the basic syntax of Java and the methods offered. We define library classes based on the following characteristics, which represent the generic behavior of a network function:
- A network function may behave as an end host or a forwarding host.
- An end host represents a terminal node. It can receive packets, but it can also send packets in response to a received one (e.g. a response to the request) or new packets to initiate a communication (e.g. a request). Examples of end host nodes are a mail client, a web client, a mail server, etc. A forwarding host represents an intermediate node that processes traffic according to its internal logic to accomplish a specific mission, e.g. performing NAT translation, filtering packets, etc.
- A forwarding host can drop or determine which exit interface to use to send the packet to its next hop.
- A host, either an end or a forwarding one, can have network interfaces for receiving or sending packets and can be stateful, i.e. have a state that can depend on the history of the actions performed before (related to sent and received packets).
- The packets exchanged by the hosts are abstracted in order to capture only those characteristics that are relevant for verification. For example, source address, destination address, source port, destination port, protocol used, target URL, mail source, mail destination, body, etc.
Class definition: The definition of a virtual network function must be written in a single file, as a class that extends one of the following two abstract classes: ForwardingHost or EndHost (each of which in turn extends the Host class). These are the two main Java classes of the library, which are supplemented with other complementary classes depending on the type of the network function being described, either end host or forwarding host. For example, the Packet class describes a packet, and objects of this type, together with objects of the RoutingResult class (which represents the routing decision of a VNF), may be included in EndHost and ForwardingHost objects. The HostTable class, instead, represents an internal memory of the intermediate network function and hence is related to the ForwardingHost class only. Additionally, the internalNodes object of the Host class is used to differentiate the internal nodes of the corresponding network function from the external ones and the hostTableList object stores the list of available tables.
Class methods: The behavior of the network function must be described using a number of public methods provided by the library classes. The content of these methods is under the control of the user, who specifies the network function behavior by means of the methods available in the complementary classes of the library.
- defineState(): this method defines the components from which the state of the network function and its configuration settings are extracted. It is present whether an EndHost or a ForwardingHost is defined. The method provides instructions to create a table that stores packet fields and a flag indicating whether the network function is able to store the packets it receives (or sends).
- defineSendingPacket(): this method defines the characteristics of the packets sent by the current node (not the responses to received packets, but the request packets sent by the host). It may be present only if an EndHost is defined. The method must return a Packet object, which can be built inside the method by instantiating the Packet class. The Packet class offers a match() method to compare the fields of the packet against other fields or constants, and a set of “mutator” methods to control changes to the packet fields. A minimal sketch of an end-host definition is given after this list.
- onReceivedPacket(): this method defines the behavior of the network function in response to a received packet. The parameter of the onReceivedPacket() method is the packet that the network function receives and the return value is a RoutingResult object. As discussed above, RoutingResult is a complementary class that represents the routing decision of the VNF, after processing of the incoming packet. Its constructor receives three parameters:
- A packet object that the network function produces.
- The action to perform on this packet (forward or drop).
- The forwarding direction (i.e. the interface the packet is forwarded to in case of forward action; this can be the upstream interface or one of the other interfaces of the network function).
The actions that can be inserted inside this method are divided into the following categories: instructions to check the contents of a packet field, instructions to check the state of the network function, instructions to store a value into a table defined by the user, instructions to define the action to be performed on a packet (through a RoutingResult), and setting a value into a field of a packet.
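As an illustration of the end-host side, a minimal sketch of an EndHost definition is given below. The class and method names (EndHost, Packet, defineSendingPacket()) come from the library description above, while the specific mutator names used to set the packet fields (setProtocol(), setUrl()) and the field values are hypothetical placeholders, since the text does not name them.

```java
// Hypothetical sketch of an end host that issues HTTP requests.
// Packet mutator names (setProtocol, setUrl) are illustrative placeholders.
public class WebClient extends EndHost {

    void defineState() {
        // This simple client keeps no per-flow state in this sketch
    }

    Packet defineSendingPacket() {
        // Build the request packet sent by this host to initiate a communication
        Packet request = new Packet();
        request.setProtocol(Protocol.HTTP_REQUEST); // assumed mutator
        request.setUrl("index.html");               // assumed mutator
        return request;
    }
}
```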
An example of the Java description of an Antispam network function is shown in Figure 1. This is an intermediary VNF designed to handle the mail traffic between end hosts on the basis of a blacklist table. The name of the table, the number of columns assigned to the table and the type of the table entries are passed as arguments to the constructor of the HostTable class in the defineState() method (row 2, Figure 1). In this scenario, a table named “Blacklist”, containing a single column of type FieldType.MAIL_FROM to store the blacklisted senders of e-mail messages, is created. The forwarding behavior of the Antispam VNF is described using the onReceivedPacket() method. In particular, lines 6 and 7 describe the forwarding action if the protocol of the received packet is equal to a POP3 request. If this is not the case and the protocol of the packet is equal to a POP3 response, then the mail source field of the packet must be checked against the blacklist entries, as shown in lines 9 and 10. If the mail source field of the POP3 response packet does not match any table entry, then a new RoutingResult object must be returned specifying the forwarding action (line 11). Finally, line 13 corresponds to a drop action that must be enforced by the network function in case no other condition is met. It is important to mention that there is no restriction on the order of the packet processing actions performed inside the onReceivedPacket() method, as this is handled accordingly by the parser.
```java
public class Antispam extends ForwardingHost {

    void defineState() {
        // Create the "Blacklist" table with a single column of type MAIL_FROM
        this.hostTableList.addHostTable("Blacklist", 1, FieldType.MAIL_FROM);
    }

    RoutingResult onReceivedPacket(Packet packet) {
        // Forward the packet when it satisfies the conditions described in the
        // text (POP3 request, or POP3 response whose sender is not blacklisted);
        // the full checks are abbreviated here by match()
        if (packet.match()) {
            return new RoutingResult(packet, Action.FORWARD, ForwardDirection.UPSTREAM);
        }
        // Drop the packet when no forwarding condition is met
        return new RoutingResult(packet, Action.DROP, ForwardDirection.UPSTREAM);
    }
}
```
Figure 1. Java description of the behavior of the Antispam network function in response to a received packet
### 3.3. Parser
One of the most important aspects of our approach is the parser that analyses the user class describing the virtual network function. The parser operating principles clearly depend on the adopted verification tool. As said above, this paper considers logic-based tools. The rest of this section is therefore focused on this approach, but similar solutions might be applied to define parsing processes suitable for other formal verification techniques. This is part of our ongoing work, and we plan to achieve wider tool coverage in the near future.
The main functionalities of the parser in this framework are the following:
- the identification of the instructions in the Java code that lead to a packet being sent through an interface;
- the identification of the conditions (IF statements) that are traversed to reach the above mentioned send instructions.
In other words, we need to identify all the conditions that trigger a packet sending operation, which is actually what defines the behavior of a network function. We parse the source code to convert these conditions into a specific data structure, considering both the fields of the packets that traverse the function and the function status, if any.
In order to deliver the aforementioned functionalities, we take advantage of the Eclipse AST API [15] for the extraction process from source code to a data structure. It is a tool that can generate an Abstract Syntax Tree (AST) [16] representation of existing Java source code. An AST is a tree representation of the abstract syntactic structure of the code. Using this library, our parser represents every Java file as a tree of AST nodes and performs a semantic analysis using the information carried by each node, where the nodes are specialized for the syntactic constructs of the Java programming language. For example, there are nodes for method declarations, variable declarations, assignments, and others, while the edges describe the relationships between AST nodes. The parser recursively visits the AST of the code and stores in local variables all the relevant characteristics, such as method declarations, variables, conditions, return predicates, and statements. In this way, the parser takes a “snapshot” of the current definition of the network function and proceeds to generate the final model in terms of high-level logical expressions. The final model is motivated by the OpenFlow [17] forwarding abstraction of the form <match, action>. This abstraction has been borrowed from existing modeling techniques [11, 18], and most verification tools of forwarding behavior [5, 6, 19] rely on models adhering to it.
To store the information obtained from the nodes, we defined an NFdefinition class. It is a sort of “container” where the characteristics of the VNF (e.g., available tables, type of the network function, protocols used for the packets, etc.), obtained from the methods defineState() and defineSendingPacket(), are stored in local variables. According to the structure of the onReceivedPacket() method, we store if-then statement blocks in a list of Implication objects. An implication is a basic logical form that simply states “if statement A is true, then statement B is also true”, with the two parts separated by an implication (⇒) sign.
The Implication class contains the following set of condition objects, and is created only in the presence of a forwarding action (a minimal sketch of such a class is given after this list):
- ifConditions is a list of conditions that are related to a received packet, a packet received or sent in the past or an internal state of the node.
- thenConditions is a list of conditions that are related to the packet being sent and to storing instructions.
- result is a condition that contains an action to be performed.
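A minimal sketch of how such a container class could look is given below; the three field names follow the description above, while the Condition placeholder type and the accessor names are assumptions, not the actual framework code.

```java
import java.util.ArrayList;
import java.util.List;

// Placeholder standing in for whatever object the parser uses to represent a
// single predicate (a packet-field check, a table lookup, an action).
class Condition {
    final String expression;
    Condition(String expression) { this.expression = expression; }
}

// Sketch of the Implication container described above (not the actual framework code).
public class Implication {
    // Conditions on the received packet, on past packets or on the internal state
    private final List<Condition> ifConditions = new ArrayList<>();
    // Conditions on the packet being sent and on store instructions
    private final List<Condition> thenConditions = new ArrayList<>();
    // The action (forward or drop) associated with this implication
    private Condition result;

    void addIfCondition(Condition c)   { ifConditions.add(c); }
    void addThenCondition(Condition c) { thenConditions.add(c); }
    void setResult(Condition c)        { result = c; }

    List<Condition> getIfConditions()   { return ifConditions; }
    List<Condition> getThenConditions() { return thenConditions; }
    Condition getResult()               { return result; }
}
```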
In the next step, the list of Implication objects are partitioned into a list of implications before and after the implication (⇒) sign. The following rules serve as a guideline to this partitioning phase:
- Before the implication sign, there must be a send condition and all the conditions that regard only the sending packet may be present. The send condition is considered at time $t_0$.
- For every packet previously sent or received, a send or receive condition must be present after the implication sign. These conditions are considered at time $t_1 < t_0$.
- For every store condition, another implication must be created. It has the store instruction before the implication sign and all the other conditions after the sign.
This is the way in which our framework extracts the required information from the user-defined classes and uses it to create an internal abstract representation of the network function. The further step, i.e., the final creation of the input file for the specific verification tool, consists of a simple translation from the abstract model to the specific input language. Our framework currently includes a translator for the VeriGraph tool, which is discussed in the following.
4. USE CASE AND RESULTS
4.1. Translation pattern: VeriGraph (Z3)
Among the existing logic-based verification tools, we selected VeriGraph [6] as a use case to show how the model generated by the parser can be exploited, after proper translation, by a real verification tool. VeriGraph is a formal verification tool that can automatically verify networks by checking certain properties before the real service deployment. VeriGraph exploits VNF models expressed as formulas in first order logic. These formulas are difficult to write. Hence, it can greatly benefit from the automatic generation of models. In this context, the term network is used to indicate a sequence of several network functions (NAT, web cache, firewall, IDS and so on) that starts from a source node and ends into a different destination node. In response to a verification request, a model of the network and the involved network functions, consisting of First Order Logic (FOL) formulas [20], is checked against the provided policies, for example reachability properties between two nodes in the network.
In order to achieve high performance, the verification engine exploits an off-the-shelf SAT solver (Z3), which determines whether the considered policies are satisfied or not, thanks to the translation of these problems into SAT problems.
VeriGraph requires VNF models written in a FOL-based formalism. Hence, we included in our framework a translator that takes elements stored in the NFdefinition object and converts them into FOL formulas, namely, into boolean constraints in the form of if-then rule-based conditional statements. For example, the onReceivedPacket() method is modeled according to this template:
\[
SEND(p) \rightarrow CONDITIONS
\]
This is a special language form that can be interpreted through a satisfaction relation. In particular, this kind of rule represents the operation of sending a packet by means of this recurring pattern:
\[
send(VNF, destination, packet, t_0) \rightarrow recv(source, VNF, packet, t_1) \land t_1 < t_0
\]
Send and recv are two specific functions defined in the VeriGraph framework that receive as input two nodes representing the source and the destination of a packet, a packet, and a time. The above formula means that the VNF can send a packet to a given destination only if it has previously received that packet. This is the starting point of the final rule, which is enriched during the analysis depending on the conditions needed to forward the packet. Table 1 reports these statements for the Antispam VNF as an example; in essence, it is the translation of the Java code depicted in Figure 1. If the Antispam (row 1) can send a packet p to the node n0 at time t0, then the protocol type of the packet must be either a POP3 request or a POP3 response. In (2) packet p must have the address of the Antispam in its src field. In (3), if the Antispam can send a packet p to the node n0 at time t0 and the packet protocol type is a POP3 response, then these two conditions imply that there exist another node n1 and another time t1 such that the Antispam received the same packet p from n1 and the emailFrom field of p is not in the blacklist. Formula (4) can be described in a similar way.
Table 1. Translator output format for the Antispam VNF
<table>
<thead>
<tr>
<th>Rule</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td><code>send(Antispam, n_0, p, t_0) → p.proto == POP3_REQ || p.proto == POP3_RESP</code></td>
</tr>
<tr>
<td>2</td>
<td><code>send(Antispam, n_0, p, t_0) → nodeHasAddr(Antispam, p.src)</code></td>
</tr>
<tr>
<td>3</td>
<td><code>send(Antispam, n_0, p, t_0) && p.proto == POP3_RESP → (∃ n_1, t_1 : recv(n_1, Antispam, p, t_1) && t_1 < t_0) && !isInBlackList(p.emailFrom)</code></td>
</tr>
<tr>
<td>4</td>
<td><code>send(Antispam, n_0, p, t_0) && p.proto == POP3_REQ → (∃ n_1, t_1 : recv(n_1, Antispam, p, t_1) && t_1 < t_0)</code></td>
</tr>
<tr>
<td>5</td>
<td><code>isInBlackList(p.emailFrom) == false</code></td>
</tr>
<tr>
<td>6</td>
<td><code>isInBlackList(p.emailFrom) == (p.emailFrom == bl_1 || … || p.emailFrom == bl_k), for the entries bl_1 … bl_k of the blacklist</code></td>
</tr>
</tbody>
</table>
As is done for each table object created in the Java definition of the network function, the translator generates the interpretation of the `isInBlackList()` method, as shown in formulas (5, 6). By default, the method is assigned the value false, which is equivalent to an empty table without any blacklist entries. Then, for every entry of the blacklist, a clause comparing the `emailFrom` field of the packet with that entry is added to `isInBlackList()` as a new disjunct, one per loop iteration.
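The construction of this disjunction can be pictured as a simple loop over the table entries. The following sketch is illustrative only; the method and variable names are hypothetical and do not reproduce the actual translator code.

```java
import java.util.Arrays;
import java.util.List;

// Illustrative sketch (not the actual translator code): the interpretation of
// isInBlackList() starts as "false" and grows by one equality per table entry.
public class BlacklistRuleSketch {

    static String buildBlacklistRule(List<String> entries) {
        StringBuilder body = new StringBuilder("false"); // empty table => always false
        for (String entry : entries) {
            body.append(" || (p.emailFrom == ").append(entry).append(")");
        }
        return "isInBlackList(p.emailFrom) == (" + body + ")";
    }

    public static void main(String[] args) {
        System.out.println(buildBlacklistRule(Arrays.asList("bl_1", "bl_2")));
        // isInBlackList(p.emailFrom) == (false || (p.emailFrom == bl_1) || (p.emailFrom == bl_2))
    }
}
```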
4.2. VNF catalog
The framework comes with a set of generic VNF models, written by means of our Java library during the development phase and used to evaluate the performance and the effectiveness of our method. All evaluations were executed on a workstation with 8 GB of RAM and an Intel i5-4210M CPU, as described in the previous section. The available VNFs are listed in Table 2, together with the time needed to parse each one and generate the corresponding VeriGraph input model. These times are satisfactory, also considering that the parsing process is not a real-time task and is executed only once, during the data plane verification phase. In this subsection we describe some of the network functions included in the catalog.
Table 2. Time spent to parse VNF models
<table>
<thead>
<tr>
<th>VNF model</th>
<th>Time to parse (ms)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Web Client</td>
<td>849</td>
</tr>
<tr>
<td>Mail Client</td>
<td>857</td>
</tr>
<tr>
<td>Mail Server</td>
<td>862</td>
</tr>
<tr>
<td>End Host</td>
<td>864</td>
</tr>
<tr>
<td>IDS</td>
<td>869</td>
</tr>
<tr>
<td>NAT</td>
<td>880</td>
</tr>
<tr>
<td>Web Server</td>
<td>920</td>
</tr>
<tr>
<td>Web Cache</td>
<td>952</td>
</tr>
<tr>
<td>Firewall</td>
<td>957</td>
</tr>
<tr>
<td>Antispam</td>
<td>963</td>
</tr>
</tbody>
</table>
The IDS (intrusion detection system) VNF acts similarly to the firewall, but performs application-layer packet filtering. It is a reactive IDS that not only detects suspicious or malicious traffic and alerts the administrator, but also takes pre-defined proactive actions to respond to the threat. The IDS includes a table of type `FieldType.BODY` with a single column. This type of table allows storing data belonging to layers 5, 6 and 7 of the OSI network model. The IDS in this example stores the data corresponding to the `BODY` field of the packet.
If the protocol of the received packet is equal to HTTP REQUEST or HTTP RESPONSE, the IDS performs a table lookup based on the `BODY` field of the packet. The presence of an entry corresponding to that data in the table results in a drop action on the packet. The overall code structure of the IDS VNF is shared with the Antispam VNF.
The Web Server VNF checks whether the protocol of the packet is HTTP REQUEST and, if so, creates a new packet by copying all the fields of the received one. The source/destination IP addresses and ports of the cloned packet are swapped and the `PROTOCOL` field is set to HTTP RESPONSE. The Mail Server VNF definition follows the same code structure.
The Web Cache is a stateful VNF containing a table of two columns. The onReceivedPacket() method of the class comprises four forwarding actions. The first two correspond to an if branch where the interface of the arrived packet is internal. If the `PROTOCOL` of the packet is equal to HTTP REQUEST and the table contains an entry matching the requested URL, a forwarding action is taken on the internal interface with a new packet containing the requested web page. If the requested web page is not available in the table of the VNF, the original packet is forwarded through the external interface. The other two forwarding actions follow the else branch and forward the packet without altering it; in addition, if the `PROTOCOL` of the packet is equal to HTTP RESPONSE, the web content of the packet is stored in the table of the VNF.
The Firewall is an ACL (access control list) enabled stateless network function. It contains a table object aclTable of two columns, characterized by the types FieldType.IP_SRC and FieldType.IP_DEST. The table acts as a “blacklist”: if the received packet matches a tuple in the table, a drop action is performed (a minimal sketch of this behavior is given below).
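A minimal sketch of how such a firewall could be expressed with the library is shown below. The class names, FieldType identifiers and the two-column aclTable follow the description above, whereas the two-type addHostTable() signature and the matchesAcl() lookup helper are assumptions, since the text does not name the actual table-lookup API.

```java
// Hypothetical sketch of the ACL firewall described above.
public class Firewall extends ForwardingHost {

    void defineState() {
        // Two-column ACL table holding (source IP, destination IP) pairs;
        // the two-type constructor signature is an assumption
        this.hostTableList.addHostTable("aclTable", 2,
                FieldType.IP_SRC, FieldType.IP_DEST);
    }

    RoutingResult onReceivedPacket(Packet packet) {
        // Drop the packet if its (source, destination) pair is blacklisted
        if (matchesAcl(packet)) {
            return new RoutingResult(packet, Action.DROP, ForwardDirection.UPSTREAM);
        }
        // Otherwise forward it unchanged towards its destination
        return new RoutingResult(packet, Action.FORWARD, ForwardDirection.UPSTREAM);
    }

    // Assumed helper standing in for the actual aclTable lookup offered by the library
    private boolean matchesAcl(Packet packet) {
        return false; // placeholder
    }
}
```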
The NAT (Network Address Translator) enabled network function divides the network into two areas (an internal one and an external one) and applies different rules to incoming packets depending on whether they are received on the internal or the external interface. Packets received from the internal nodes are always forwarded, after replacing the source address field of the packet with the NAT IP address, whereas incoming packets from the external interface are forwarded to their destination only if a connection already exists.
4.3. Experimental results
In order to also check the correctness of the generated final models, we constructed a set of network topologies containing the available network functions and ran a number of custom tests on the selected verification tool. VeriGraph can perform different kinds of verification tests: reachability, which consists in checking whether at least one packet can arrive at the destination from the source node, and isolation, which consists in verifying that no packet flowing from source to destination passes through a certain network function. In this section, we consider reachability properties. The overall network behavior (e.g., routing tables, middlebox configurations, host metadata, etc.) and the network topology information are represented as a set of additional FOL formulas, completed with other formulas that express the properties to be verified (e.g., a reachability property between two nodes in the network), in such a way that the satisfiability of the formulas implies the truth of the specified properties.
Figure 2 illustrates the set of topologies adopted for our tests. For example, topology (1) involves two firewalls and three end hosts. Firewalls are configured according to the following rules:
- Firewall 1 denies traffic between host A and host C.
- Firewall 2 denies traffic between host B and host C.

The test includes two reachability properties to be checked. In particular, we consider two packets, one flowing from node A to node C, and another flowing from node A to node B. Taking into account the above firewall policies, we expect the reachability property is not satisfied in case of A-to-C, indicating that no such packet can exist, while we expect it is satisfied in the case of A-to-B. The other test cases are set up as follows (the number in brackets refers to the corresponding topology in Figure 2):
1. Configurations: firewall denies traffic between host C and host B; the cache is set to serve any request (any URL of the resource requested is present in the cache); host A sends an HTTP REQUEST towards B; host A and C are set as internal nodes of NAT and cache. Property: reachability between host A and host B. Result: not reachable. Reason: cache never forwards packets towards host B.
2. Configurations: antispam blocks a traffic flow originating from (mail) client, thus there is a table entry with the address of the (mail) client in the blacklist of the network function. Property: reachability between mail server and (mail) client. Action: send a packet from mail client to mail server. Result: not reachable. Reason: antispam drops packets sent towards mail server.
4. Configurations: requests cannot be served by the cache (the URL of the resource requested is not present in the cache). Property: reachability between host A and host B. Result: reachable. Reason: cache forwards packets, since requested packets are not present in cache table.
5. Configurations: firewall denies traffic between host and web server; cache is set to serve any request. Property: reachability between host and web server. Result: not reachable. Reason: firewall blocks the traffic originating from host and addressed to web server.
Table 3 reports the results we obtained by implementing these categories of tests, considering the two different approaches for defining the VNF models for VeriGraph. In particular, the table includes a column referring to hand-coded VNF models and a second one referring to the VNF models autogenerated by means of our framework. The string “SAT” means that the property stated in the test class is satisfied, while “UNSAT” refers to the case where the property is not satisfied. Comparing the test results between the hand-coded models and the automatically generated ones (starting from the Java description and then generated using the parser), we can notice that the obtained results are identical. This confirms the correctness of our modeling approach and also shows the efficiency of the developed framework. Column N reports the number of the corresponding topology illustrated in Figure 2.
<table>
<thead>
<tr>
<th>N</th>
<th>Tests</th>
<th>Verification results using hand-coded VNF model</th>
<th>Verification results using autogenerated VNF model</th>
<th>Time to verify autogenerated VNF model (ms)</th>
</tr>
</thead>
<tbody>
<tr>
<td>(1)</td>
<td>DoubleFwTest 1</td>
<td>UNSAT</td>
<td>UNSAT</td>
<td>214</td>
</tr>
<tr>
<td>(2)</td>
<td>DoubleFwTest 2</td>
<td>SAT</td>
<td>SAT</td>
<td>318</td>
</tr>
<tr>
<td>(3)</td>
<td>CacheNaFwTest</td>
<td>UNSAT</td>
<td>UNSAT</td>
<td>275</td>
</tr>
<tr>
<td>(4)</td>
<td>AntispamTest</td>
<td>UNSAT</td>
<td>UNSAT</td>
<td>192</td>
</tr>
<tr>
<td>(5)</td>
<td>IDSTest</td>
<td>SAT</td>
<td>SAT</td>
<td>260</td>
</tr>
<tr>
<td>(6)</td>
<td>CacheFwTest</td>
<td>UNSAT</td>
<td>UNSAT</td>
<td>200</td>
</tr>
</tbody>
</table>
5. CONCLUSION
This paper presents a “user-friendly” approach to VNF modeling for the formal verification of VNF-based networks. We focus on breaking the barrier between two ways of representing a VNF: the imperative function definition (proper of VNF developers) and the higher-level declarative representation (used by formal verification experts to instruct logic-based verification tools). The proposed approach consists of automatically translating the former into the latter. Considering its ease of use, even for non-expert users, and its reliability in the creation of the classes that describe the behavior of a VNF, the outcome of this project can be used in wider future works aiming at transforming the current structure of the network into a more flexible, simpler to manage and cheaper one. Considering the current demands of the market and the possible future developments, this framework represents a further step towards the real implementation of these new concepts inside networks. In fact, the framework and the available verification tools may provide a basic structure to define Virtual Network Functions and test the overall network functionality before deployment.
REFERENCES
**BIOGRAPHIES OF AUTHORS**
**Guido Marchetto** is an assistant professor at the Department of Control and Computer Engineering of Politecnico di Torino. He got his Ph.D. in Computer Engineering in April 2008 from Politecnico di Torino. His research topics cover distributed systems and formal verification of systems and protocols. His interests also include network protocols and network architectures.
**Riccardo Sisto** received the Ph.D. degree in Computer Engineering in 1992 from Politecnico di Torino, Italy. Since 2004, he has been Full Professor of Computer Engineering at Politecnico di Torino. His main research interests are in the area of formal methods, applied to distributed software and communication protocol engineering, distributed systems, and computer security. He has authored and co-authored more than 100 scientific papers. He is a Senior Member of the ACM.
**Matteo Virgilio** received the M.S. degree in Computer Engineering from Politecnico di Torino, Italy, in 2012. He got his Ph.D. in Control and Computer Engineering at Politecnico di Torino. His research interests include innovative network protocols and architectures, Content Centric Networking and formal verification techniques applied in the context of SDN/NFV.
**Jalolliddin Yusupov** received the M.S. degree in Computer Engineering from Politecnico di Torino, Italy, in 2016. Currently, he is a Ph.D. student in Control and Computer Engineering at Politecnico di Torino. His primary research interests include formal verification of security policies in automated network orchestration. His other research interests include modeling, cyber physical systems, and cloud computing systems.
1. Introduction
MATLAB® (The MathWorks, Natick, MA, USA) is a software package for numerical computing that can be used in various scientific disciplines such as mathematics, physics, electronics, engineering and biology. More than 40 toolboxes are available in the current release (R2013b released in September 2013), which include numerous built-in functions enhanced by access to a high-level programming language.
Since images can be represented by 2D or 3D matrices and the MATLAB processing engine relies on matrix representation of all entities, MATLAB is particularly suitable for implementation and testing of image processing workflows. The Image Processing Toolbox™ (IPT) includes all the necessary tools for general-purpose image processing incorporating more than 300 functions which have been optimised to offer good accuracy and high speed of processing. Moreover, the built-in Parallel Computing Toolbox™ (PCT) has recently been expanded and now supports graphics processing unit (GPU) acceleration for some functions of the IPT. However, for many image processing applications we still need to write our own code, either in MATLAB or, in the case of GPU-accelerated applications requiring specific control over GPU resources, in CUDA (Nvidia Corporation, Santa Clara, CA, USA).
In this chapter, the first part is dedicated to some essential tools of the IPT that can be used in image analysis and assessment as well as in extraction of useful information for further processing and assessment. These include retrieving information about digital images, image adjustment and processing as well as feature extraction and video handling. The second part is dedicated to GPU acceleration of image processing techniques either by using the built-in PCT functions or through writing our own functions. Each section is accompanied by MATLAB example code. The functions and code provided in this chapter are adopted from the MATLAB documentation [1], [2] unless otherwise stated.
2. Image processing on CPU
2.1. Basic image concepts
2.1.1. Pixel representation
A digital image is a visual representation of a scene that can be obtained using a digital optical device. It is composed of a number of picture elements, pixels, and it can be either two-dimensional (2D) or three-dimensional (3D). Different bit-depth images can be found but the most common ones in scientific image processing are the 1-bit binary images (pixel values 0 or 1), the 8-bit grey-scale images (pixel range 0-255) and the 16-bit colour images (pixel range 0-65535) [3]. Figure 1 shows the grey-scale variation, from black to white, for an 8-bit image.
Figure 1. Grey-scale variation from black to white for an 8-bit image.
2.1.2. MATLAB pixel convention
MATLAB uses one-based indexing, where the first pixel along any dimension has index 1, whereas many other platforms are zero-based and consider the first index to be 0. By convention, counting of pixel indices starts from the top-left corner of an image with the first and second indices increasing down and towards the right, respectively. Figure 2 visualises the way that MATLAB indexes a 512×512 pixel image. This information is particularly important when the user intends to apply a transformation to a specific pixel or a neighbourhood of pixels.
Figure 2. MATLAB pixel indexing convention for a 512×512 pixel image.
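For example, assuming a grey-scale image stored in the variable `im`, individual pixels can be addressed directly with one-based (row, column) indices.

```matlab
% Access the top-left pixel and a pixel at row r, column c (one-based indexing)
top_left = im(1, 1);
r = 100; c = 200;
pixel_value = im(r, c);
```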
2.1.3. Image formats
Various image formats are supported by MATLAB including the most commonly used ones, such as JPEG, TIFF, BMP, GIF and PNG. Images can be read, processed and then saved in a format other than their initial one. Various parameters such as the resolution, the bit-depth, the compression level or the colour-space can be adjusted according to the user’s preferences.
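As a small illustration of such parameters (file names are only examples), the JPEG compression level can be set through the 'Quality' parameter of `imwrite`, and `imfinfo` retrieves the stored image information.

```matlab
% Read a TIFF image and save it as JPEG with a chosen compression quality
im = imread('image.tif');
imwrite(im, 'image.jpg', 'jpg', 'Quality', 90);
% Retrieve basic information (size, bit depth, format) about the saved file
info = imfinfo('image.jpg');
```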
2.1.4. Digital image processing
Digital image processing refers to the modification of a digital image using a computer in order to emphasise relevant information. This can be achieved by both revealing latent details and suppressing unwanted noise. The process is usually designed according to the desired final outcome which can include simple image enhancement; object detection, segmentation or tracking; parameter estimation; or condition classification. Moreover, when dealing with images intended for inspection by people, the structure and function of the human visual system may be a critical factor in designing any such technique as this determines what can be perceived as an easily distinguishable feature.
2.2. Image pre-processing
Image pre-processing is a procedure that gives initial information about the digital condition of a candidate image. In order to receive such information, we need to load the image on the software platform and examine its type and pixel values.
2.2.1. Image input and output
Just as any other data set in MATLAB, images are represented by a variable. If we consider an image file with name ‘image’ and format ‘.tif’, using the function `imread`, the image can be loaded as a 2D or 3D matrix, depending on the image type. Image visualisation is achieved using the function `imshow`; to produce a new figure, a call to `imshow` has to be preceded by a call to `figure`. If, instead, `imshow` is used on its own, the new image replaces the last one in the last open figure window. Saving an image can be achieved using the function `imwrite` where the desired image (i.e. variable), the format and the final name have to be specified. Although the name of the saved image can be chosen freely, when building a large pipeline, it is suggested that the name of each resulting image be representative of the processing step in order to maintain process coherence. The following example represents how this sequence can be achieved.
```matlab
% Image import as variable
im=imread('image.tif');
% Image visualisation
figure; imshow(im);
% Application of image processing
im_proc=...
% Export processed image as file
imwrite(im_proc, 'im_processed.tif', 'tif');
```
The function `imshow` can also be accompanied by a two-element vector `[low high]` with which the user specifies the display range of the grey-scale image. If this vector is left empty `[]`, the minimum and maximum grey-scale values of the image are displayed as black and white pixels, respectively, with the intermediate values as various shades of grey.
```matlab
% Image visualisation with automatically and manually selected
% intensity ranges, respectively
figure; imshow(im, []);
figure; imshow(im, [low high]);
```
### 2.2.2. Image type conversions
Image type conversion is a useful tool as the user can convert the input image into any desired type. A special and often useful conversion is the transformation of an unsigned integer into a double-precision representation. Any image processing algorithm may thus result in more accurate outcomes since this conversion increases the dynamic range of intensities. The range of the resulting image is 0.0 to 1.0 with MATLAB maintaining up to 15 decimal digits. The following commands are examples of image conversions.
```matlab
% Conversion to double-precision and 8-bit unsigned integer, respectively
im_double=im2double(im);
im_ui8=im2uint8(im);
```
### 2.2.3. Pixel information
A histogram is a useful intensity representation as it reveals the pixels’ intensity distribution. It can be obtained using the function `imhist`. This information can, for example, be used for selecting an appropriate threshold value. Apart from this, a profile of intensities can also reveal information about local intensity variations which can be used to model small details. The function `improfile` can either be applied on pre-selected pixels or as a command prompt function for the user in order to manually select the desired area. Example of code for such processes follows [1], [3].
```matlab
% Histogram presentation: output is the number of pixels (pixel_count)
% distributed at each grey-scale intensity (grey_levels)
[pixel_count grey_levels]=imhist(im);
% Visualisation of histogram with manual limit in grey levels
figure; bar(pixel_count, 'r');
set(gca, 'XLim', [0 grey_levels(end)]);
% Normalisation of histogram
pixel_count_norm=pixel_count / numel(im);
figure; bar(pixel_count_norm, 'b');
set(gca, 'XLim', [0 grey_levels(end)]);
% Profiling of specific pixels
x=[1 205 150 35];
y=[105 230 25 15];
figure; improfile(im, x, y);
```
2.2.4. Contrast adjustment
One of the main pre-processing techniques is contrast adjustment since this process can enhance desired features whilst suppressing other, unwanted ones. MATLAB has various tools for varying the image contrast. The function `imcontrast` supplies a manual adjustment tool through which the user can experiment and find the optimal contrast. The resulting parameters can then be adopted, saved and applied to a stack of images taken under the same conditions. The function `imadjust` can be used to specify an intensity range when the user knows the optimal values or has found them using the `imcontrast` tool. The same function also provides input for the gamma factor of the power-law non-linear contrast adjustment. Besides, a custom-made logarithmic transformation can be applied [3].
```matlab
% Contrast adjustment
im_adj=imadjust(im, [low_in high_in], [low_out high_out], gamma);
% Example of contrast adjustment
im_adj=imadjust(im, [0.2 0.8], [0.1 0.9], 1.0);
```
Figure 3 presents an original grey-scale image and its contrast adjusted counterpart using the parameters specified in the previous example.
Figure 3. Original image (left, 1296x1936 pixels) and contrast adjusted outcome (right).
```matlab
% Custom logarithmic transformations
im_log=a*log(b+im);
im_log=a+b*log(c-im);
```
Parameters \(a\), \(b\) and \(c\) can be defined and adjusted by the user meaning any such custom-made logarithmic transformation can be introduced according to specific needs.
Other techniques that can affect contrast are histogram-based ones. A histogram represents an image’s grey-level intensity distribution or probability density function. Such knowledge can assist in further processing by helping the user choose the right tools [4]. Histogram stretching can be performed through the `imadjust` function, while histogram equalisation can be performed through the function `histeq`. Adaptive histogram equalisation can also be applied using the function `adapthisteq`, which considers small image neighbourhoods instead of the whole image as an input.
```matlab
% Histogram equalisation and adaptive equalisation, respectively
im_eq=histeq(im);
im_adeq=adapthisteq(im, 'NumTiles', NumTilesValue);
```
The parameter `NumTilesValue` takes the form of a vector that specifies the number of tiles in each direction. Other parameters can also be specified in the `adapthisteq` function, such as the dynamic range of the output data or the histogram shape. Figure 4 shows examples of histogram equalisation and adaptive histogram equalisation, respectively.
Figure 4. Histogram equalisation (left) and adaptive histogram equalisation (right) of the original image in Figure 3. Notice the enhanced details in the right-hand image.
2.2.5. Arithmetic operations
Arithmetic operations refer to addition, subtraction, multiplication and division of two images or an image and a constant. Images subject to arithmetic operations need to have the same dimensions and grey-scale representation. The resulting image will have the same dimensions as the input. When a constant value is added or subtracted (instead of a second image), this constant is added or subtracted to each pixel’s intensity, increasing or reducing the image luminosity. Most often, such operations are used for detail enhancement or suppression of unnecessary information.
```matlab
% Addition, subtraction, multiplication and division of two images,
% respectively
im_add=imadd(im1, im2);
im_sub=imsubtract(im1, im2);
im_mult=immultiply(im1, im2);
im_div=imdivide(im1, im2);
```
In the code above, the second input parameter (`im2`) can be replaced by a scalar constant.
2.2.6. Miscellaneous transformations
Other useful image transformations include cropping, resizing and rotation. Cropping can be used if the user is interested only in one particular part of the input image. The user can define a specific region of interest and apply any transformation only to this part. Resizing can be applied in order either to expand or reduce the image size. Image size reduction can be especially useful in speeding up a process in case of larger images or large data sets. Rotation can be particularly useful when an image includes features of a particular directionality. The user can specify the applied interpolation method out of nearest neighbour, bilinear and bicubic. Inversion of grey-scale intensities can be useful when the interesting objects have intensities lower than the background. The following functions perform these processes.
```
% Image cropping, resizing, rotation and inversion, respectively
im_crop=imcrop(im, [x y size_x size_y]);
im_res=imresize(im, scale);
im_rot=imrotate(im, angle, method);
im_com=imcomplement(im);
```
2.3. Image processing
2.3.1. Thresholding
Thresholding is one of the most important concepts in image processing as it finds application in almost all projects. Thresholding can be manual or automatic, global or local. In manual mode, the user defines a threshold value, usually depending on the conception of the image (several trials may be needed). In automatic mode, a more detailed understanding of the image is required in order to select the correct method. The IPT provides the function `graythresh` which is based on Otsu’s method and the bimodal character of an image [5]. This global threshold will create a black-and-white image where pixels with intensities above this threshold will become white (value 1) and pixels with intensities below this threshold will become black (value 0).
This method can be easily extended to multi-thresholding by using the IPT function `multithresh`. Using this function, the user specifies a suitable number of threshold levels \(k\) for the image. If this parameter is not supplied, it has the same functionality as the original `graythresh` function. The IPT can visualise the result of the `multithresh` function by using the `imquantize` function. The latter labels the various areas of the image according to the number of thresholds previously specified. The labelled image can then be transformed into an RGB image, preserving the type (e.g. `uint8`) of the original input. The following code can be used in these processes.
```
% Single threshold application and binarisation, respectively
thresh = graythresh(im);
im_bin = im2bw(im, thresh);
% Multiple threshold application (k threshold levels) and visualisation of
% the thresholded areas as colours of the image, respectively
thresh_multi = multithresh(im, k);
im_labelled = imquantize(im, thresh_multi);
im_rgb = label2rgb(im_labelled);
```
Figure 5 provides an example of single- and multi-threshold application on the original image of Figure 3.
2.3.2. Edge detection
Edge detection is an essential part of image processing as it usually emphasises objects’ outline or internal structure. An edge is a representation of discontinuity in an image and may characterise a surface that separates two objects or an object from the image background [4]. Boundaries can be characterized by single pixels or connected sequences of pixels. Such a feature can assist in further object recognition and, thus, edge detection is applied in many image processing sequences. The outcome of edge detection is a binary image with edges presented by white pixels.
2.3.3. First-order edge detection operators
The IPT includes the standard first-order edge detectors such as the Roberts, Sobel and Prewitt. Roberts edge detector relies on $2 \times 2$ masks whereas the other two rely on $3 \times 3$ masks. An optional threshold can be specified in order to define the minimum gradient magnitude. Useful code for such detectors follows.
```matlab
% First-order edge detection
im_bin = edge(im, 'roberts', threshold);
im_bin = edge(im, 'sobel', threshold);
im_bin = edge(im, 'prewitt', threshold);
```
Other first-order edge-detectors can also be designed. Examples are the Kirsch and the Robinson masks which are not included in the IPT but are easy to design. They are examples of directional edge detectors which scan the image from different directions in order to detect edges with various orientations. A single kernel is used which, through rotations from $0^\circ$ to
$315^\circ$ in steps of $45^\circ$, creates eight different masks. The image is convolved with each mask and the pixels in the final image are assigned the highest edge detection magnitude obtained from any of the masks [4]. The following code presents these two edge detectors, respectively [6], [4].
```
% Kirsch edge detector
K(:,:,1)=[-5 3 3; -5 0 3; -5 3 3];
K(:,:,2)=[-5 -5 3; -5 0 3; 3 3 3];
K(:,:,3)=[-5 -5 -5; 3 0 3; 3 3 3];
K(:,:,4)=[3 -5 -5; 3 0 -5; 3 3 3];
K(:,:,5)=[3 3 -5; 3 0 -5; 3 3 -5];
K(:,:,6)=[3 3 3; 3 0 -5; 3 -5 -5];
K(:,:,7)=[3 3 3; 3 0 3; -5 -5 -5];
K(:,:,8)=[3 3 3; -5 0 3; -5 -5 3];
% Robinson edge detector
R(:,:,1)=[-1 0 1; -2 0 2; -1 0 1];
R(:,:,2)=[0 1 2; -1 0 1; -2 -1 0];
R(:,:,3)=[1 2 1; 0 0 0; -1 -2 -1];
R(:,:,4)=[2 1 0; 1 0 -1; 0 -1 -2];
R(:,:,5)=[1 0 -1; 2 0 -2; 1 0 -1];
R(:,:,6)=[0 -1 -2; 1 0 -1; 2 1 0];
R(:,:,7)=[-1 -2 -1; 0 0 0; 1 2 1];
R(:,:,8)=[-2 -1 0; -1 0 1; 0 1 2];
```
The point detector, another example of an edge detector, detects bright points based on the intensity difference between a central pixel and its neighbours. A point detector can be specified by the following code [7].
```
% Point edge detector
P=[-1 -1 -1; -1 8 -1; -1 -1 -1];
```
### 2.3.4. Second-order edge detection operators
In addition to first-order edge detectors, second-order edge detectors can find wide application. Such detectors are for example the Canny, zero-cross and Laplacian-of-Gaussian (LoG; also called Marr-Hildreth). The Canny method uses the derivative of a Gaussian filter for finding the gradient of the original image after which it relies on local maxima of the resulting image. [3] The zero-cross method searches for zero crossings after an arbitrary filter has been applied. Finally, the LoG method searches for zero crossings after the LoG transformation has been applied. Useful code for such detectors follows.
```
% Second-order edge detection
im_bin=edge(im, 'log', threshold, sigma);
im_bin=edge(im, 'canny', threshold, sigma);
im_bin=edge(im, 'zerocross', threshold, filter);
```
In this case, `threshold` refers to the strength of an edge; `sigma` refers to the standard deviation of the Gaussian filter while `filter` refers to any filter that the user applies prior to the edge
detection. In LoG and Canny methods, threshold and sigma can be left unspecified but in the case of the zero-cross method the user has to define a filter. Figure 6 presents the resulting images after application of ‘Roberts’ and ‘Canny’ edge detectors, respectively.
Figure 6. Resulting images after application of the Roberts (left) and Canny (right) edge detectors.
### 2.3.5. Image filtering
Spatial filtering is one of the most important processes in image processing as it can extract and process specific frequencies from an image while other frequencies can be removed or transformed. Usually, filtering is used for image enhancement or noise removal. IPT includes the standard tools needed for image filtering. The function `fspecial` can be used for filter design. Mean, average, Gaussian, Laplacian, Laplacian-of-Gaussian, motion, Prewitt-edge and Sobel-edge filters can be introduced. The designed filter is applied to the image by using the function `imfilter`. Typical examples of code follow.
```matlab
% Filter design
filt_av=fspecial('average', hsize);
filt_gaus=fspecial('gaussian', hsize, sigma);
```
The parameter `hsize` is a vector that represents the number of rows and columns of the neighbourhood that is used when applying the filter. The parameter `sigma` is the standard deviation of the applied Gaussian filter.
```matlab
% Filter application
im_filt_av=imfilter(im, filt_av);
im_filt_gaus=imfilter(im, filt_gaus);
```
Edge detectors can also be applied by filtering the image with the edge operator. An example follows with the application of the previously mentioned point edge detector.
```matlab
% Filter with point edge detector
im_filt_p=imfilter(im, P);
```
Apart from user designed filters, IPT includes filters that can be directly applied to the image. Such examples are the median filter (medfilt2), the Wiener filter (wiener2) or the 2D order-statistics filter (ordfilt2).
```matlab
% Filter application
im_filt_med = medfilt2(im, neighbourhood);
im_filt_ord = ordfilt2(im, order, domain);
im_filt_win = wiener2(im);
```
The `neighbourhood` in the `medfilt2` function specifies the dimensions of the area in which the median value of the pixel will be found. The `ordfilt2` function is a generalised version of the median filter. A neighbourhood is defined by the non-zero pixels of `domain`, and each pixel in the image is replaced by the `order`-th smallest of its neighbours within this domain [1]. An example could be the following command, where each pixel is replaced by the 6th smallest value found in its 3x3 neighbourhood.
```matlab
% Order-statistics filter example
im_filt_ord = ordfilt2(im, 6, ones(3));
```
Figure 7 shows examples of Gaussian and order statistics filtered images.
**Figure 7.** Images filtered using a Gaussian filter (left – hsize [9 9] and sigma 1) and a 6th order statistics filter (right).
### 2.3.6. Morphological image processing
Morphological image processing refers to the extraction of descriptors which describe objects of interest and, thus, their morphology determines the tools that are used [8]. Structuring elements are used to probe the image [3]. The function `bwmorph` performs all morphological operations with the addition of suitable parameters. Since the processing time of this function may increase significantly with image complexity, it is supported by the PCT for increased speed of processing. Morphological processing includes dilation, erosion, opening, closing, top-hat and bottom-hat transformation, hit-or-miss transformation as well as other processes that perform pixel-specific changes.
```matlab
% Morphological processing
im_bin=bwmorph(im, operation, n);
```
The parameter operation accounts for the type of morphological operator while n is the number of times that this process should be repeated. If n is not defined, the process is applied only once. Processes such as dilation and erosion can also be applied using individual functions when a custom-made structuring element is to be used. Examples of individual processes follow.
```matlab
% Definition of flat and non-flat structuring elements, respectively
se=strel('disk', 5);
se=strel('ball', 10, 5);
% Dilation, erosion and top-hat transformation
im_mor=imdilate(im, se);
im_mor=imerode(im, se);
im_mor=imtophat(im, se);
```
Figure 8 presents examples of dilation and top-hat transformation with a ‘disk’ structuring element of radius 10.
Figure 8. Dilation (left) and top-hat transformation (right) with a ‘disk’ structuring element of radius 10.
The distance transform is usually applied to binary images and represents the distance between a white pixel and its closest zero pixel. Pixels in the new image obtain higher values with larger distance from a zero pixel. This transformation can act as a segmentation function and it is often used in the segmentation of overlapping disks [1], [8].
```matlab
% Distance transform
im_dist=bwdist(im_bin);
```
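As an illustration of this use, the distance transform can be combined with the watershed transform to separate touching round objects in a binary image `im_bin`; this particular combination is a common pattern rather than something prescribed above.

```matlab
% Separate touching objects using the distance transform and watershed
D = bwdist(~im_bin);   % distance from every object pixel to the background
D = -D;                % invert so that object centres become catchment basins
D(~im_bin) = -Inf;     % force the background to be its own region
L = watershed(D);      % label matrix with one label per separated object
```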
2.3.7. Colour image processing
Colour images are subject to processing in many scientific fields, as different colours can represent different features. The most commonly used colour representation is RGB (Red-Green-Blue). Transformation of RGB images into grey-scale intensity images, or extraction of a specific colour channel, can be done using the following code:
```matlab
% RGB image to grey-scale intensity image
im_grey = rgb2gray(im_rgb);
% Extraction of each colour channel
im_r = im_rgb(:,:,1);
im_g = im_rgb(:,:,2);
im_b = im_rgb(:,:,3);
```
2.4. Feature extraction
Feature extraction is the process through which recognised objects are assessed according to some geometrical criteria. The first step of this process is to ‘label’ the objects of a binary image `im_bin` using the function `bwlabel`. The resulting image is referred to as the labelled image (`im_lab`). The function scans the image from top to bottom and from left to right, attributing a number to each pixel indicating to which object it belongs. Additionally, the IPT has the function `regionprops` which measures features of labelled objects such as area, equivalent diameter, eccentricity, perimeter, and major and minor axis lengths. This function operates on labelled images which contain `n` labelled objects. A full list of the features that can be calculated using `regionprops` can be found in the MATLAB IPT documentation [1].
Apart from the standard features that are included in the IPT, custom-defined features can be measured either by using already calculated features or by introducing completely new measurements. An example could be the measurement of an object’s standard geometric characteristics as well as the thinness ratio and compactness (or irregularity) using a `for` loop for assessment of all `n` objects. Since the user may have to handle numerous measurements for many objects, it is usually useful to pre-allocate memory in order to reduce the processing time. The following code can be used to label image objects, measure the features and store them as a table of variables [1], [3].
```matlab
% Label the objects of the binary image
[im_lab, n] = bwlabel(im_bin);
% Measurement of standard geometrical features
stats = regionprops(im_lab, 'Area', 'Perimeter', 'Eccentricity', ...
    'MinorAxisLength', 'MajorAxisLength', 'EquivDiameter');
% Memory pre-allocation for the new features
[stats(1:n).ThinnessRatio] = deal(0);
[stats(1:n).Compactness] = deal(0);
% Measurement of the new features Thinness Ratio and Compactness
for k = 1:n
    stats(k).ThinnessRatio = ...
        4*pi*stats(k).Area/(stats(k).Perimeter^2);
    stats(k).Compactness = 1/stats(k).ThinnessRatio;
end
```
Measured features are stored in structure arrays. Usually, further processing of features requires transforming structure arrays into matrices. MATLAB cannot perform such transformations without the application of an intermediate step: the structure arrays have first to be transformed into cell arrays which in turn can be converted into matrices.
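A minimal sketch of this two-step conversion is given below, assuming that all measured features are scalar values; the variable name `cell_features` matches the remark that follows.

```matlab
% Structure array -> cell array -> numeric matrix (one row per object)
cell_features = squeeze(struct2cell(stats));   % fields x objects cell array
matrix_features = cell2mat(cell_features');    % objects x features matrix
```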
Notice that the transpose of the cell array `cell_features` has been used in order to allocate features to matrix columns rather than rows. For performance reasons it is usually best to orient the data in such a way that they are processed column by column rather than row by row; in this case we are expecting to go through the data feature by feature rather than image by image.
2.5. Processing series of images
In many cases, handling of multiple images can be a laborious task unless an automated process can be established. Assuming we have a batch of 100 images that we would like to process, using a `for` loop and defining the path to the image directory, we can load, process and save the images one at a time. After saving the first image, the next one in the directory is automatically loaded, processed and saved. The procedure continues until the last image has been saved. The following code performs this operation.
```matlab
% Find the images in the current directory with the expected name
% The symbol * indicates that the name 'image' can be followed by any additional characters
filelist = dir('image*.tif');
% Find the number of files to be processed
numImages = length(filelist);
% Loop to read, process and save each image
for k = 1:numImages
myfilename = filelist(k).name;
im = imread(myfilename);
im_proc = ... (processing)
imwrite(im_proc, ['image', num2str(k), '_new.tif'], 'tif');
end
```
2.6. Video handling
2.6.1. Video to frames
An interesting application for image processing is handling video data. In this case, the video file has to be divided into single frames. The function `VideoReader` can be used in order to input the file as a variable. For $n$ frames, each frame is then saved as a separate image in any format.
The following code reads a video (called `movie`) to a MATLAB structure and saves the frames one by one into ‘.tif’ format. [9]
```matlab
% Initialisation and frames characteristics
video = VideoReader('movie.avi');
n = video.NumberOfFrames;
height = video.Height;
width = video.Width;
% Preallocate movie structure
movie(1:n) = struct('cdata', zeros(height, width, 3, 'uint8'));
% Reading video and saving frames
for k = 1:n
movie(k).cdata = read(video, k);
imwrite(movie(k).cdata, strcat('frame_', int2str(k), '.tif'));
end
```
2.6.2. Frames to video
Since every frame is stored as a single image it can be processed accordingly, either one by one or in batch mode. A possible next step in this process can be to combine the processed images into a single video again. If a new movie (called movie_new) is to be created from frames (called frame#), then the following code supplies the backbone for such a process [9].
% Specify video name and frame rate
video = VideoWriter('movie_new.avi');
video.FrameRate = 1;
% Open video recorder to add frames
open(video);
% Find the images in the current directory with the expected name
% The symbol * indicates that the name 'frame' can be followed by any additional characters
filelist = dir('frame*.tif');
% List the files and find the number of frames to be added
fileNames = {filelist.name}'
numImages = length(filelist);
% Loop over all images to read, and write to movie file
for k=1:numImages
myfilename = strcat('frame_', num2str(k), '.tif');
frame = imread(myfilename);
writeVideo(video, frame);
end
% Close video file recorder and play the new video
close(video);
implay('movie_new.avi');
3. Image processing on GPU in MATLAB
Large amounts of image data are produced in many technical and experimental situations, in particular where images are repeatedly acquired over time or when dealing with images of higher dimensionality than two. Time-lapse imaging and video recording can be mentioned as examples of the former, whereas the latter can be represented by any of the many tomographic imaging modalities present. 4D computed tomography (CT), where 3D CT images are
acquired at regular intervals to monitor internal patient motion, is an example of an application pertaining to both categories. It is often desirable or even critical to speed up the analysis and processing of such large image data sets, especially for applications running in or near real-time. Due to the inherently parallel nature of many image processing algorithms, they are well suited for implementation on a graphics processing unit (GPU), and consequently we can expect a substantial speedup from such an implementation over code running on a CPU. However, despite the fact that GPUs are nowadays ubiquitous in desktop computers, only 34 out of the several hundred functions of the IPT are GPU-enabled by the PCT in the current MATLAB release (2013b). In this sub-chapter we will explore the possibilities available for someone either wanting to harness the computing power of the GPU directly from MATLAB or to incorporate external GPU code into MATLAB programs. The focus will be on image processing applications, but the techniques presented can with little or no effort be adapted to other applications.
In the first part of this part we look at how to use the built-in, GPU-enabled image processing functions of the PCT. Following this, we explain how pixel-wise manipulations can be carried out using the GPU-enabled version of `arrayfun` and how we can write our own image processing functions making use of over one hundred elementary MATLAB functions that have been implemented to run on GPUs. In the second part of this section, we show how the PCT can be used to call kernel functions written in the CUDA programming language directly from MATLAB. This allows us to make use of existing kernel functions in our MATLAB applications. Further, for those with knowledge of CUDA, it makes more of the GPU’s potential available from MATLAB and also provides an easy way of testing kernel functions under development. The third and final part is dedicated to more advanced users who might want to make use of one of the CUDA libraries provided by NVIDIA, who prefer to write their code in a language different from CUDA, who lack access to the Parallel Computing Toolbox, or who have access to an existing GPU code library that they would like to call from MATLAB. We look at how CUDA code can be compiled directly into MEX functions using the PCT, followed by a description of how GPU code written in either CUDA or OpenCL can be accessed from MATLAB by compiling it into a library and creating a MEX wrapper function around it. Finally, we show how the code for a MEX wrapper function can be built directly in our external compiler and, for example, included in an existing Visual Studio (Microsoft Corporation, Redmond, WA, USA) solution so that this is done automatically when building the solution.
3.1. `gpuArray` and built-in GPU-enabled functions
For the examples in this part to work, we need a computer equipped with an NVIDIA GPU of CUDA compute capability 1.3 or greater which is properly set up and recognised by the PCT [10]. The functions `gpuDeviceCount` and `gpuDevice` can be used to identify and select a GPU as described in the PCT documentation [11].
To be able to process an image on the GPU, the corresponding data first have to be copied from main CPU memory over the PCI bus to GPU memory. In MATLAB, data on the GPU are accessed through objects of type `gpuArray`. The command
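```matlab
imGpu=gpuArray(im);
```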
creates a new `gpuArray` object called `imGpu` and assigns to it a copy of the image data in `im`. `imGpu` will be of the same type as `im` (e.g., double, single, int32, etc.), which might affect the performance of the GPU computation as discussed below. Let us for now assume that `im` is a 3072×3072 array of single precision floating point numbers (single). Correspondingly, when we have executed all our GPU calculations, we call the function `gather` to retrieve the result. For example,
```matlab
result=gather(resultGpu);
```
copies the data in the `gpuArray` object `resultGpu` back to `result` in CPU memory. In general, copying data over the PCI bus is relatively slow, meaning that when working with large data sets we should try to avoid unnecessary copies between CPU and GPU memory. When possible, it might therefore be faster to create filters, masks or intermediates directly on the GPU. To do this, `gpuArray` has several static constructors corresponding to standard MATLAB functions, currently `eye`, `false`, `inf`, `nan`, `ones`, `true`, `zeros`, `linspace`, `logspace`, `rand`, `randi` and `randn`, to pre-allocate and initialise GPU memory. These can be invoked by calling `gpuArray.constructor` where `constructor` is replaced by the function call. For example,
```matlab
noiseGpu=gpuArray.randn(3072, 'single');
```
creates a 3072×3072 array of normally distributed pseudorandom numbers with zero mean and standard deviation one. As with the corresponding standard MATLAB functions, the last function argument specifies the array element type (in this case `single`), and if omitted it defaults to `double`. While this is normally not a problem when working on modern CPUs, it is worth bearing in mind that NVIDIA's consumer GPUs are often several times faster at processing single precision floating point numbers (single), compared to double precision (double) or integers (int32). This means that where double precision is not crucial, it is a good habit to declare arrays on the GPU as single precision. As an alternative, the first seven static constructors listed above can be called through their corresponding MATLAB function by appending the argument list with 'gpuArray'. E.g.,
```matlab
zerosGpu=zeros(3072, 'int32', 'gpuArray');
```
creates a `gpuArray` object containing 3072×3072 32-bit integers (int32) initialised to zero. When calling these functions, an alternative to explicitly specifying the type is using the 'like' argument. This creates an array of the same type as the argument following the 'like' argument, i.e.
```matlab
onesGpu=ones(3072, 'like', zerosGpu);
```
creates a gpuArray object of int32 values initialised to one, whereas
```matlab
onesWsp=ones(3072, 'like', im);
```
creates a standard MATLAB array of single values initialised to one. This can be useful when creating new variables in functions that are meant to run both on the CPU and the GPU where we have no a priori knowledge of the input type. For a gpuArray object to be able to hold complex numbers, this has to be explicitly specified upon construction, either by using the 'complex' argument when creating it directly on the GPU or by explicit casting when copying non-complex data, e.g. `gpuArray(complex(im))`. To inspect the properties of a GPU array we can use the standard MATLAB functions such as `size`, `length`, `isreal` etc. In addition to these, the function `classUnderlying` returns the class underlying the GPU array (since `class` will just return `gpuArray`) while `existsOnGPU` returns true if the argument is a GPU array that exists on the GPU and is accessible.
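As a small illustration (a sketch reusing `imGpu` from above; the commented values are only indicative), these inspection functions are called just like their CPU counterparts:
```matlab
size(imGpu)             % dimensions of the underlying array, here 3072x3072
classUnderlying(imGpu)  % underlying element type, here 'single'
isreal(imGpu)           % true, since the data are not complex
existsOnGPU(imGpu)      % true while the array is resident on the GPU
```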
Once our image data are in GPU memory, we have two options for manipulating them: either we can use the sub-set of the built-in MATLAB functions (including some functions available in toolboxes) that run on the GPU, or we can write our own functions using only element-wise operations and launch them through `arrayfun` or `bsxfun`. In the first case, we use normal MATLAB syntax with the knowledge that any of these functions are automatically executed on the GPU as long as at least one argument is of type `gpuArray`. Using `imGpu` and `randNoiseGpu` defined above we can create a new, noisy image on the GPU by typing:
```matlab
noisyImGpu=imGpu+0.2+0.3*noiseGpu;
```
A list of the GPU-enabled MATLAB functions available on the current system, together with all static constructors of `gpuArray`, can be displayed by typing `methods('gpuArray')`. For MATLAB 2013b, the list comprises around 200 standard functions plus any additional functions in installed toolboxes [2]. For example, using the GPU-enabled function `imnoise` from the IPT, the same result as above can be obtained through:
```matlab
noisyImGpu2=imnoise(imGpu, 'gaussian', 0.2, 0.09);
```
(where a variance of 0.09 equals a standard deviation of 0.3). Another, potentially more useful, GPU-enabled function from the IPT is `imfilter`. Using `imGpu` from earlier
```matlab
sobelFilter=fspecial('sobel');
filteredImGpu=imfilter(imGpu, sobelFilter);
```
filters the image using a horizontal Sobel filter. Note that `sobelFilter` is an ordinary 2D MATLAB array, `[1 2 1; 0 0 0; -1 -2 -1]`, but since `imGpu` is a GPU array, the GPU-
enabled version of `imfilter` is automatically called and the output, `filteredImGpu`, will be a GPU array.
The second option for manipulating GPU arrays directly from MATLAB is by calling our own functions through the built-in `bsxfun` or `arrayfun` functions. As before, if any of the input arguments to the function is a GPU array, the calculation will automatically be carried out on the selected GPU. Thus, a third way of producing a noisy version of `imGpu` would be to first create the function `addAndOffset` that performs an element-wise addition of two images and adds a constant offset:
```matlab
function result=addAndOffset(im1, im2, offset)
result=im1+im2+offset;
end
```
and then calling
```matlab
noisyImGpu3=arrayfun(@addAndOffset, imGpu, 0.3*noiseGpu, 0.2);
```
The benefit of writing functions and launching them through `bsxfun` or `arrayfun` compared to calling MATLAB functions directly on GPU arrays is a potential speedup for large functions. This is because in the former case, the whole function can automatically be compiled to a GPU function, called CUDA kernel, resulting in a single launch of GPU code (although the overhead for compiling the function will be added to the first call). In contrast, in the latter case, each operation has to be launched in sequence using the precompiled kernel functions available in MATLAB. However, when running on a GPU, `arrayfun` and `bsxfun` are limited to element-wise operations. In a typical image processing application, this means that each pixel is unaware of its neighbours, which limits the use to functions where pixels are processed independently of one another. As a result, many image filters cannot be implemented in this way, in which case we are left either to use built-in functions as described earlier, or to write our own kernel functions as described in the next part. Further, since we are constrained to element-wise manipulations, the number of built-in functions at our disposal inside our function is somewhat limited. For a complete list of the available built-in functions, as well as some further limitations when using `bsxfun` and `arrayfun` with GPU arrays, see the PCT documentation [12].
Before moving on to the next part we should stress that since GPUs are built to process large amounts of data in parallel, there is no guarantee that running code on a GPU instead of a CPU will always result in a speedup. Although image processing algorithms provide good candidates for substantial speedups, this characteristic of the GPU means that vectorisation of code and simultaneous processing of large amounts of data (i.e. avoiding loops wherever possible) becomes even more crucial than in ordinary MATLAB programs. Further, GPU memory latency and bandwidth often limit the performance of GPU code. This can be alleviated by ensuring that, as far as possible, data that are operated on at the same time are stored nearby in memory. Since arrays are stored in a sequential column-major order in MATLAB, this means avoiding random memory-access patterns where possible and organising our data so that we
mostly operate on columns rather than on rows. Finally, when evaluating the performance of our GPU code we should use the function `gputimeit`. It is called in the same manner as the regular MATLAB function `timeit`, i.e., it takes as argument a function, which itself does not take any arguments, and times it, but is guaranteed to return the accurate execution time for GPU code (which `timeit` is not). If this is not feasible, the code section we want to time can be sandwiched between `tic` and `toc`, as long as we add a call to `wait(gpuDevice)` just before the `toc`. This ensures that the time is measured only after execution on the currently selected GPU has finished. (Otherwise MATLAB continues to execute ensuing lines of GPU-independent code, like `toc`, asynchronously without waiting for the GPU calculation to finish). Since the MATLAB profiler only measures CPU time, we need to employ a similar trick to get accurate GPU timings when profiling: if we sandwich the lines or blocks of GPU code we want to time between two calls to `wait(gpuDevice)`, the execution time reported for the desired line or block plus the time taken by the second call to `wait` gives the correct timing.
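As an illustration (a sketch reusing `imGpu` and `sobelFilter` from the earlier examples), the two timing approaches look like this:
```matlab
% Accurate GPU timing with gputimeit
tFilter = gputimeit(@() imfilter(imGpu, sobelFilter));
% Equivalent tic/toc pattern: wait for the GPU to finish before stopping the clock
tic;
filteredImGpu = imfilter(imGpu, sobelFilter);
wait(gpuDevice);
tFilter2 = toc;
```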
3.2. Calling CUDA kernel functions from MATLAB
By using the techniques described in the previous part we can use MATLAB to perform many of our standard image processing routines on the GPU. However, it does not allow us to test our own CUDA-implemented algorithms or use existing ones in our MATLAB programs, nor does it provide a means to explicitly control GPU resources, such as global and shared memory. In this part we demonstrate how this can be remedied by creating a `CUDAKernel` object from a kernel function written in CUDA. Instructions on how to write CUDA code are well beyond the scope of this chapter, and therefore this part assumes some previous basic knowledge of this language.
A `CUDAKernel` object required to launch a CUDA kernel function from MATLAB is constructed by calling the static constructor `parallel.gpu.CUDAKernel`. The constructor takes three MATLAB string arguments: the path to a `.ptx` file containing the kernel code, the interface of the kernel function, and the name of the desired entry point. For the first argument, a `.ptx` file with the same name as the source file, containing the corresponding code compiled into pseudo-assembly language, is automatically generated when using the NVIDIA CUDA Compiler (NVCC) with the flag `--ptx` to compile a `.cu` file (if it contains one or more kernel functions). (Note that if using an integrated development environment, you might have to instruct it not to delete `.ptx` files when finishing the build; for example Visual Studio 2010 requires that the flag `--keep` is used.) The second argument can be either the `.cu` file corresponding to the `.ptx` file specified in the first argument, from which the argument list of the desired kernel function can be derived, or the explicit argument list itself. The latter is useful when the `.cu` file is not available, or when the argument list contains standard data types that have been renamed through the use of the `typedef` keyword. The third argument specifies which kernel function in the `.ptx` file to use, and although NVCC mangles function names similar to a C++ compiler, the names in the `.ptx` file are guaranteed to contain the original function name. MATLAB uses substring matching when searching for the entry point and it is therefore often enough to provide the name of the original kernel function (see the exceptions discussed below).
Let us assume that we have access to `myFilters.cu` containing several kernel functions named `myFilter1`, `myFilter2`, etc., and its corresponding `myFilters.ptx`. Then
```matlab
gpuFilter1=parallel.gpu.CUDAKernel('myFilters.ptx', 'myFilters.cu', 'myFilter1');
```
creates a `CUDAKernel` object called `gpuFilter1` that can be used to launch the CUDA kernel function `myFilter1` from MATLAB. If we further assume that `myFilter1` is declared as
```c
__global__ void myFilter1(const float *imIn, float *imOut, float parameter)
```
the second argument above, 'myFilters.cu', could equally be replaced by the string 'const float *, float *, float'. In some cases, care has to be taken when specifying the entry point. For example, if `myFilter2` is a templated function instantiated for more than one template argument, each instance of the resulting function will have a name containing the string 'myFilter2'. Likewise, if another kernel function called `myFilter1_v2` is declared in the same `.cu` file, specifying 'myFilter1' as the third argument of `parallel.gpu.CUDAKernel` becomes ambiguous. In these cases, we should provide the mangled function names, which are given during compilation with NVCC in verbose mode, i.e. with `--ptxas-options=-v` specified. The full mangled name of the kernel used by a `CUDAKernel` object is stored in the object property `EntryPoint`, and can be obtained by e.g. `gpuFilter1.EntryPoint`.
Once a `CUDAKernel` object has been created, we need to specify its launch parameters which is done through the `ThreadBlockSize`, `GridSize` and `SharedMemorySize` object properties. Thus,
```matlab
gpuFilter1.ThreadBlockSize=[32 8 1];
gpuFilter1.GridSize=[96 384 1];
gpuFilter1.SharedMemorySize=4*32*8;
```
sets the block size to 32×8 threads, the grid size to 96×384 blocks and the shared memory size per block to 4×32×8=1024 bytes, enough to hold 256 single or int32 values, or 128 double values. In total this will launch 3072×3072 threads, one per pixel of our sample image. A fourth, read-only property called `MaxThreadsPerBlock` holds the upper limit for the total number of threads per block that we can use for the kernel function. If the kernel function is dependent on constant GPU memory, this can be set by calling `setConstantMemory`, taking as the first parameter the `CUDAKernel` object, as the second parameter the name of the constant memory symbol and as the third parameter a MATLAB array containing the desired data. For example, we can set the constant memory declared as `__constant__ float myConstantData[128]` in `myFilters.cu` and needed in `myFilter1` by calling:
```matlab
setConstantMemory(gpuFilter1, 'myConstantData', sqrt(single(0:127)));
```
To execute our kernel function we call `feval` with our `CUDAKernel` object as the first parameter, followed by the input parameters for our kernel. For input parameters corresponding to arguments passed by value in the CUDA kernel (here: scalars), MATLAB scalars are normally used (although single element GPU arrays also work), whereas for pointer type arguments, either GPU arrays or MATLAB arrays can be used. In the latter case, they are automatically copied to the GPU. A list of supported data types for the kernel arguments can be found in the PCT documentation [13]. In general these are the C/C++ standard types (along with their corresponding pointers) that have MATLAB counterparts, with the addition of `float2` and `double2` that map to MATLAB's complex `single` and `double`, respectively. `CUDAKernel` objects have three additional, read-only properties related to input and output. `NumRHSArguments` and `MaxNumLHSArguments` respectively hold the expected number of input arguments and the maximum number of output arguments that the object accepts, and `ArgumentTypes` holds a cell of strings naming the expected MATLAB input types. Each type is prefixed with either `in` or `inout` to signal whether it is input only (corresponding to an argument passed by value or a constant pointer in CUDA) or combined input/output (corresponding to a non-constant pointer in CUDA).
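For instance (an illustrative sketch; the exact strings returned by `ArgumentTypes` depend on the kernel signature), the interface of `gpuFilter1` defined earlier can be inspected as follows:
```matlab
gpuFilter1.NumRHSArguments     % number of expected inputs, here 3
gpuFilter1.MaxNumLHSArguments  % maximum number of outputs, here 1
gpuFilter1.ArgumentTypes       % cell array of strings describing each expected input
```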
To function in a MATLAB context, a call to a CUDA kernel through a `CUDAKernel` object must support output variables. Therefore, pointer arguments that are not declared `const` in the kernel declaration are seen as output variables and, if there is more than one, they are numbered according to their order of appearance in the declaration. This means that calling `gpuFilter1` above produces one output, whereas `gpuFilter2` created from
```c
__global__ void myFilter2(const float *imIn, int *imInterm, float *imOut)
```
produces two outputs. With this information we can now call the `myFilter1` kernel function through
```matlab
gpuRes1=gpuArray.zeros(3072, 'single');
gpuRes1=feval(gpuFilter1, imGpu, gpuRes1, sqrt(2));
```
Similarly, we can call `myFilter2` through
```matlab
gpuInterm=gpuArray.zeros(3072, 'int32');
gpuRes2=gpuArray.zeros(3072, 'single');
[gpuInterm, gpuRes2]=feval(gpuFilter2, imGpu, gpuInterm, gpuRes2);
```
The output from a `CUDAKernel` object is always a GPU array, which means that if the corresponding input is a MATLAB array it will be created automatically. Consider, for example, a kernel declared along the following lines (the exact declaration is assumed here; its single non-const pointer acts as combined input and output):
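```c
// Assumed declaration: one non-const (input/output) pointer and a scalar parameter
__global__ void myFilter3(float *im, float parameter)
```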
with its corresponding `CUDAKernel` object `gpuFilter3`. Since `im` from our previous examples is a MATLAB array, calling
```matlab
gpuRes3=feval(gpuFilter3, im, 3.14);
```
automatically copies im to the GPU and creates a new gpuArray object called gpuRes3 to hold the result.
### 3.3. MEX functions and GPU programming
In the previous part we saw how, by running our own CUDA kernels directly from MATLAB, we can overcome some of the limitations present when working only with built-in MATLAB functionality. However, we are still (in release 2013b) limited to using kernel functions that take only standard type arguments, and we can access neither external libraries, such as the NVIDIA Performance Primitives, the NVIDIA CUDA Fast Fourier Transform or the NVIDIA CUDA Random Number Generation libraries, nor the GPU’s texture memory with its spatial optimised layout and hardware interpolation features. Further, we need to have an NVIDIA GPU, be writing our code in CUDA and have access to the PCT to use the GPU in our MATLAB code. In this section we look at how we can use MEX functions to partly or fully circumnavigate these limitations. This again assumes previous experience of GPU programming and some knowledge of how to write and use MEX functions. A good overview of the latter can be found in the MATLAB documentation [14].
The first option, which removes the technical restrictions but still requires access to the PCT (running on a 64-bit platform) and a GPU from NVIDIA, is to write MEX functions directly containing CUDA code. The CUDA code itself is written exactly as it would be in any other application, and can either be included in the file containing the MEX function entry point or in a separate file. Although this process is well documented in the PCT documentation [15] and through the PCT examples [16], we briefly describe it here for consistency. The main advantage of this approach over the one described later is that it enables us to write MEX functions that use GPU arrays as input and output through the underlying C/C++ object mxGPUArray. As all MEX input and output, GPU arrays are passed to MEX functions as pointers to mxArray objects. The first step is therefore to call either mxGPUCreateFromMxArray or mxGPUCopyFromMxArray on the pointer to the mxArray containing the GPU data, in order to obtain a pointer to an mxGPUArray. In the former case, the mxGPUArray becomes read-only, whereas in the latter case the data is copied so that the returned mxGPUArray can be modified. (mxGPUCreateFromMxArray and mxGPUCopyFromMxArray also accept pointers to mxArray objects containing CPU data, in which case the data is copied to the GPU regardless of the function used.) We can now obtain a raw pointer to device memory that can be passed to CUDA kernels by calling mxGPUGetDataReadOnly (in the case of read-only data) or mxGPUGetData (otherwise) and explicitly casting the returned pointer to the correct type. Information about the number of dimensions, dimension sizes, total number of
elements, type and complexity of the underlying data of an \texttt{mxGPUArray} object can be further obtained through the functions \texttt{mxGPUGetNumberOfDimensions}, \texttt{mxGPUGetDimensions}, \texttt{mxGPUGetNumberOfElements}, \texttt{mxGPUGetClassID}, and \texttt{mxGPUGetComplexity}. We can also create a new \texttt{mxGPUArray} object through \texttt{mxGPUCreateGPUArray}, which allocates and, if we want, initialises memory on the GPU for our return data. With this we are in a position where we can treat input from MATLAB just as any other data on the GPU and perform our desired calculations. Once we are ready to return our results to MATLAB we call either \texttt{mxGPUCreateMxArrayOnCPU} or \texttt{mxGPUCreateMxArrayOnGPU} to obtain a pointer to an \texttt{mxArray} object that can be returned to MATLAB through \texttt{plhs}. The former copies the data back to the CPU making the MEX function return a standard MATLAB array whereas in the latter case the data stays on the GPU and a GPU array is returned. Finally, we should call \texttt{mxGPUDestroyGPUArray} on any \texttt{mxGPUArray} objects that we have created. This deletes them from the CPU and, unless they are referenced by another \texttt{mxArray} object (as will be the case if they are returned through \texttt{plhs}), frees the corresponding GPU memory. Note that there are several other functions in the \texttt{mxGPU} family to examine, duplicate, create and copy \texttt{mxGPUArray} objects, and in particular for working with complex arrays, that work in a similar way and which are described in the PCT documentation [17].
For the above to work, we need to include \texttt{mxGPUArray.h}, in addition to \texttt{mex.h}, in our source file. The source file has to have the extension .cu and it should contain a call to the function \texttt{mxInitGPU} before launching any GPU code in order to initialise the MATLAB GPU library. Provided that the environment variable \texttt{MW_NVCC_PATH} is set to the NVCC folder path and that a copy of the PCT version of \texttt{mexopts.bat} or \texttt{mexopts.sh} (matlabroot\toolbox\distcomp\gpu\extern\src\mex\xxx64) is located in the same folder as the source code, we can compile our CUDA containing MEX functions from MATLAB in the usual way using the \texttt{mex} command. If using external libraries, these also have to be provided, which can normally be done by passing the full library path, including file name, to the \texttt{mex} command after the \texttt{.c}, \texttt{.cpp} or \texttt{.cu} file path.
A bare-bone MEX function calling the kernel function \texttt{myFilter1} from earlier, which takes into account the considerations above but is stripped of any boundary or argument checking, follows:
```c
#include "mex.h"
#include "gpu\mxGPUArray.h"
__global__ void myFilter1(const float *imIn, float *imOut, float parameter)
{
// Kernel code
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
mxGPUArray const *gpuArrayIn;
mxGPUArray *gpuArrayOut;
float const *devIn;
float *devOut;
mxInitGPU();
gpuArrayIn = mxGPUCreateFromMxArray(prhs[0]);
devIn = (float const *)(mxGPUGetDataReadOnly(gpuArrayIn));
gpuArrayOut = mxGPUCreateGPUArray(mxGPUGetNumberOfDimensions(gpuArrayIn),
mxGPUGetDimensions(gpuArrayIn), mxGPUGetClassID(gpuArrayIn),
mxGPUGetComplexity(gpuArrayIn), MX_GPU_DO_NOT_INITIALIZE);
devOut = (float *)(mxGPUGetData(gpuArrayOut));
const mwSize *dim = mxGPUGetDimensions(gpuArrayIn);
int height = (int)(dim[0]);
int width = (int)(dim[1]);
dim3 block(32, 8, 1);
dim3 grid((height+block.x-1)/block.x, (width+block.y-1)/block.y, 1);
float param = *(float *)(mxGetData(prhs[1]));
myFilter1<<<grid, block>>>(devIn, devOut, param);
plhs[0] = mxGPUCreateMxArrayOnGPU(gpuArrayOut);
mxGPUDestroyGPUArray(gpuArrayOut);
mxGPUDestroyGPUArray(gpuArrayIn);
}
```
After compilation, assuming the above code is found in mexFilter1.cu, we can call the MEX function like this:
```matlab
gpuRes3=mexFilter1(imGpu, single(2.72));
```
Note the explicit type conversion necessary to convert the second argument to single to match the input type. Note also that, depending on the compiler and system settings, in order to have mwSize correctly identified as a 64-bit type, we might need to use the -largeArrayDims flag when compiling using the mex command.
The rest of this part will be dedicated to describing how we can call GPU code from MATLAB
even if we do not have access to the PCT or write our code in CUDA. Since without the PCT,
the mex command is not set up to compile anything except standard C/C++ code, we have two
alternatives to achieve what we want. The only drawback with these, compared to when using
mxGPUArray from the PCT is that it is harder to have data in GPU memory persist when
returning to MATLAB between calls to MEX functions. (This can still be achieved by explicitly
casting a GPU memory pointer to an integer type of correct length which is returned to
MATLAB. The integer is then passed to the next MEX function which casts it back to the correct
pointer type. However, the problem with this approach lies in eliminating the risk of memory
leaks; although solutions for this exist, they are beyond the scope of this chapter.) This means
that unless we go through any extra effort, we are left either to perform all our GPU calculations
from the same MEX function before returning control to MATLAB or suffer the penalty associated with copying our data back and forth between each call. In many cases, however, this may provide less of a limitation than one might initially think, especially when using MATLAB to test GPU code under development or using MATLAB as a front-end to existing code libraries.
The first alternative is to compile our GPU code, regardless of in which language it is written, into a static library using our external compiler, and then to call this library from a MEX function that we compile using the `mex` command. Since our MEX function cannot call GPU functions directly, a small C/C++ wrapper function has to be written around our GPU code. A wrapper for the `myFilter1` CUDA kernel, which we can place either in the same file as the CUDA code or in a separate `.cu` file, could look like this (again, error checking has been omitted for brevity):
```c
void callMyFilter1(const float *imIn, float *imOut, float param, int height, int width)
{
float *devIn;
cudaMalloc((void**)&devIn, height*width*sizeof(float));
cudaMemcpy(devIn, imIn, height*width*sizeof(float), cudaMemcpyHostToDevice);
float *devOut;
cudaMalloc((void**)&devOut, height*width*sizeof(float));
dim3 block(32, 8, 1);
dim3 grid((height+block.x-1)/block.x, (width+block.y-1)/block.y, 1);
myFilter1<<<grid, block>>>(devIn, devOut, param);
cudaMemcpy(imOut, devOut, height*width*sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(devIn);
cudaFree(devOut);
}
```
Once the static library, normally a `.lib` file under Windows or a `.a` file under Linux and Mac OS, has been created, we can write a MEX function calling the wrapper:
```c
#include "mex.h"
void callMyFilter1(const float *, float *, float, int, int);
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
int height = mxGetM(prhs[0]);
int width = mxGetN(prhs[0]);
const float *imIn = (float *)(mxGetData(prhs[0]));
float param = (float)(mxGetScalar(prhs[1]));
plhs[0] = mxCreateNumericMatrix(height, width, mxSINGLE_CLASS, mxREAL);
float *imOut = (float *)(mxGetData(plhs[0]));
callMyFilter1(imIn, imOut, param, height, width);
}
```
Note that it is normally better, especially if using a pre-existing library, to use #include to include the corresponding header files rather than, as in the example above, to manually enter the function prototype. We should now be able to compile this wrapper using the mex command, remembering to pass the path to our own library as well as to the necessary GPU runtime library. For CUDA, this is cudart.lib on Windows, libcudart.so on Linux, or libcudart.dylib on Mac OS. Thus, assuming that the code above is found in myFilter1Mex.cpp and that the library is called myLibrary.lib, on Windows the call would look like:
mex myFilter1Mex.cpp 'library_path\myLibrary.lib' 'cuda_path\cudart.lib'
The second alternative is to build the MEX function directly in our external compiler, without going through the mex command. By doing this, the whole process can be carried out in one step and, if we wish, we are free to keep all our code in a single file. The main advantage of this approach occurs if we are developing or using an existing GPU library which we would like to call from MATLAB, for example for testing purposes. In such a case we can add the MEX file to our normal build routine so that every time we rebuild our library we automatically get the MEX function to call it from MATLAB.
A MEX function is a dynamically linked shared library which exports a single entry point called mexFunction. Hence, by mimicking the steps carried out by the mex command (found in mexopts.bat or mexopts.sh) when calling the external compiler, we can replicate this from outside MATLAB. We can view the exact steps carried out on a particular system by calling the mex command in verbose mode, i.e. using the -v flag. Detailed information on how to build MEX functions for Windows and UNIX-like systems (Linux and Mac OS) can also be found in the MATLAB documentation [18]. Here we look at a minimal example from a Windows-centric point of view, although the procedure should be similar on other systems. First, we need to specify that we want to build a dynamically linked shared library, called a dynamic-link library on Windows, and give it the correct file extension, e.g. .mexw64; second, we have to provide the compiler with the path to our MEX header files, normally mex.h; third, we have to pass the libraries needed to build our MEX function, called libmx.lib, libmex.lib and libmat.lib on Windows, to the linker together with their path; and finally, as mentioned above, we need to export the function named mexFunction. In Visual Studio 2010, all of the above steps are done in the Project Properties page of the project in question. Under Configuration Properties-> General, we set the Target Extension to .mexw64 and the Configuration Type to Dynamic Library (.dll). For the header files we add $(MATLAB_ROOT)\extern\include; to Include Directories under Configuration Properties-> VC++ Directories. For the libraries, we add libmx.lib;libmex.lib;libmat.lib; to Additional Dependencies under Configuration Properties-> Linker-> Input and $(MATLAB_ROOT)\extern\lib\win64\microsoft; to Additional Library Directories under Configuration Properties-> Linker-> General. Finally, we add /export:mexFunction to Additional Options under Configuration Properties-> Linker-> Command Line. In the above steps it is assumed that the variable MATLAB_ROOT is set to the path of the MATLAB root, otherwise we have to change $(MATLAB_ROOT) to, for example, C:\Program Files\MATLAB\2013b. The code for the MEX example, finally, would look something like this:
```c
#include <cuda.h>
#include "mex.h"
__global__ void myFilter1(const float *imIn, float *imOut, float parameter)
{
// Kernel code
}
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, mxArray const *prhs[])
{
int height = mxGetM(prhs[0]);
int width = mxGetN(prhs[0]);
const float *imIn = (float *)(mxGetData(prhs[0]));
float param = (float)(mxGetScalar(prhs[1]));
plhs[0] = mxCreateNumericMatrix(height, width, mxSINGLE_CLASS, mxREAL);
float *imOut = (float *)(mxGetData(plhs[0]));
float *devIn;
cudaMalloc((void**)&devIn, height*width*sizeof(float));
cudaMemcpy(devIn, imIn, height*width*sizeof(float),
cudaMemcpyHostToDevice);
float *devOut;
cudaMalloc((void**)&devOut, height*width*sizeof(float));
dim3 block(32, 8, 1);
dim3 grid((height+block.x-1)/block.x, (width+block.y-1)/block.y, 1);
myFilter1<<<grid, block>>> (devIn, devOut, param);
cudaMemcpy(imOut, devOut, height*width*sizeof(float),
cudaMemcpyDeviceToHost);
cudaFree(devIn);
cudaFree(devOut);
}
```
In the case of an existing code library, we can add a new component (such as a new project to an existing solution in Visual Studio) containing a MEX function calling our desired GPU functions so that it is built directly with the original library without any additional steps.
4. Conclusion
In conclusion, MATLAB is a useful tool for prototyping, developing and testing image processing algorithms and pipelines. It provides the user with the option of either using the functions of the IPT or leveraging the capabilities of a high-level programming language combined with many built-in standard functions to create their own algorithms. When high performance or processing of large amounts of data is required, the computational power of a GPU can be exploited. At this point, there are three main options for image processing on GPU in MATLAB: i) we can stick entirely to MATLAB code, making use of the built-in, GPU-enabled functions in the IPT and the PCT as well as our own GPU functions built from element-wise operations only; ii) we can use the framework provided by the PCT to call our own CUDA kernel functions directly from MATLAB; iii) we can write a MEX function in C/C++ that can be called from MATLAB and which in turn calls the GPU code of our choice.
Acknowledgements
The authors would like to acknowledge the European Commission FP7 ENTERVISION programme, grant agreement no.: 264552
Author details
Antonios Georgantzoglou*, Joakim da Silva and Rajesh Jena
*Address all correspondence to: ag718@cam.ac.uk
Department of Oncology, University of Cambridge, Cambridge, UK
References
Chapter 2
Low-Level Image Processing
As regards obstacles, the shortest distance between two points can be a curve.
Bertolt Brecht
Abstract This chapter covers some basic concepts of low-level image processing. It introduces fundamental methods for primary image processing tasks, namely contrast enhancement, image smoothing, and edge detection. The chapter also introduces methods of function optimization for searching for the optimal configuration of edge points.
2.1 Introduction
In the previous chapter, we have introduced the concept of low-level, medium-level, and high-level digital image processing. In low-level processing tasks a digital image is used as input and another digital image is obtained as output (e.g., an image improved for the visualization). In high-level processing the outcome is a description of the content of the input image. In the medium-level processing some features are obtained from the input image, such as edges or regions.
Different operators are adopted for low-level processing. Usually, we distinguish among the following operators:
1. Point operators that produce a single output pixel by processing each pixel independently of the other pixels.
2. Local operators that produce a single output pixel by processing a neighborhood of that pixel.
3. Global operators that produce a single output pixel by processing the entire image.
Generally the aim of low-level operators is to improve the visual quality of the input image by means of enhancement or noise removal processes. In this chapter we introduce two fundamental image processing tasks that are contrast enhancement and edge detection. To this aim, some notations are given in the following.
Let $f(x, y)$ be a gray-level image of $M \times N$ pixels with $L$ gray levels. We introduce the following definitions:
- the level scale or dynamic range of image $f(x, y)$ is the range $[a, b]$ of values such that $a \leq f(x, y) \leq b$ for each $(x, y)$;
- the histogram $h(i), i = 0, \ldots, L - 1$ of image $f(x, y)$ denotes the occurrence frequency of each level.
In Listing 2.1, we provide a simple applet to compute the histogram of a gray-level image. The applet can be executed using the HTML code listed in 2.2. To run the applet it is necessary to create a Java project with the files HistogramApplet.java and HistogramApplet.html, then include in the same directory an image named im.png to be processed.
In the RGB color space, individual histograms of each component can be computed. In [5] we find a plugin to compute the histogram of each R, G, B component given an input color image.
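As a small sketch (not the plugin referenced in [5]; it assumes the packed ARGB pixel array grabbed in Listing 2.1), per-channel histograms can be accumulated as follows:
```java
// Per-channel histograms from packed ARGB pixel values (illustrative sketch)
int[] histR = new int[256];
int[] histG = new int[256];
int[] histB = new int[256];
for (int p : pixels) {
    histR[(p >> 16) & 0xff]++; // red component
    histG[(p >> 8) & 0xff]++;  // green component
    histB[p & 0xff]++;         // blue component
}
```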
Listing 2.1 HistogramApplet.java: a Java applet for histogram visualization.
```java
import java.awt.*;
import java.awt.image.*;
import java.applet.Applet;
public class HistogramApplet extends Applet {
private Image image;
private ImageCanvas imageCanvas;
private Panel panel;
private TextArea text;
public void init() {
String image_file = getParameter("IMAGEFILE");
image = getImage(getDocumentBase(), image_file);
while (image.getWidth(this)<0);
Dimension imageSize = new Dimension(
image.getWidth(this), image.getHeight(this));
imageCanvas = new ImageCanvas(image, imageSize);
int[] pixels = ImageCanvas.grabImage(image, imageSize);
panel = new Panel(new GridLayout(1,2,10,10));
text = new TextArea(20,5);
panel.add(imageCanvas); panel.add(text);
add(panel);
text.setText((new Histogram(pixels)).toString());
}
}
class Histogram {
private int histo[] = new int[256];
// Constructor assumed here: it builds the histogram from the grabbed ARGB pixels,
// using the average of the R, G and B components as the gray level
public Histogram(int[] pixels) {
for (int p : pixels) {
int grey = (((p >> 16) & 0xff) + ((p >> 8) & 0xff) + (p & 0xff)) / 3;
histo[grey]++;
}
}
public String toString() {
String text = "";
for(int i=0; i<256; i++) {
text += i+":"+histo[i]+'\n';
}
return text;
}
}
```
public class ImageCanvas extends Canvas {
static final int MIN_WIDTH = 64;
static final int MIN_HEIGHT = 64;
private Image image;
private Dimension size;
public ImageCanvas(Image img, Dimension dim) {
super();
image = img;
size = dim;
}
public Dimension getMinimumSize() {
return new Dimension(MIN_WIDTH, MIN_HEIGHT);
}
public Dimension getPreferredSize() {
return new Dimension(size);
}
public void paint(Graphics g) {
g.drawImage(image, 0, 0, getBackground(), this);
}
static public int[] grabImage(Image image, Dimension size) {
int[] data = new int[size.width * size.height];
PixelGrabber pg = new PixelGrabber(
image, 0, 0, size.width, size.height, data, 0, size.width);
try {
pg.grabPixels();
} catch (InterruptedException e) {
System.err.println("ImageSampler: interrupted while grabbing pixels");
return null;
}
if ((pg.status() & ImageObserver.ABORT) != 0) {
System.err.println("ImageSampler: pixel grab aborted or errored");
return null;
}
return data;
}
}
Generally, to define each low-level operator, a mapping \( T \) that transforms an input image \( f(x, y) \) into an output image \( g(x, y) \) has to be defined over some neighborhood of each pixel. Namely
\[
g(x, y) = T(f(x, y))
\]
(2.1)
where \( T \) is a linear or nonlinear function defined on the dynamic range \([a, b]\).
### 2.2 Contrast Enhancement
The contrast of an image refers to the range of gray levels used in the image—the dynamic range. It refers to the intensity variation of the pixels, defined by the minimum and maximum intensity value. Contrast resolution is the ability to distinguish between differences in intensity. For example, low contrast image values may be concentrated near a few values of the gray scale (e.g., mostly dark, or mostly bright, or mostly medium values). One definition of image contrast is the following:
\[
C = \frac{S_A - S_B}{S_A + S_B}
\]
where \( S_A \) and \( S_B \) are intensity average values computed on pixels of two different regions \( A \) and \( B \) (for example background and object).
Low contrast images can result from poor illumination, lack of dynamic range in the imaging sensor, or even wrong set up during image acquisition. A fundamental low-level task is to improve the contrast in an image, by means of contrast enhancement operators.
To improve the contrast it is necessary to transform the levels of the image into the range of all the levels available for visualization (typically the range \([0, 255]\)). Specifically, a contrast stretching, that means highlighting a specific range of gray levels in an image, is performed. The idea behind contrast stretching is to increase
the dynamic range in the image being processed. Moreover, to enlarge the dynamic range it is necessary to interpolate between successive levels. Figure 2.1 shows an example.
Fig. 2.1 a A RGB image and its brightness histogram. b The enhanced image and its histogram. c The enhanced and interpolated image and its histogram
2.2.1 Gray-Level Transformation
Some methods for contrast enhancement are based on gray-level transformation and histogram modification. These are point operators that are applied to a neighborhood reduced to (1 × 1) pixel. Hence Eq. (2.1) can be expressed in the form \( l' = T(l) \) where \( l \) and \( l' \) denote the input pixel value and the output pixel value, respectively. Since the mapping \( T(\cdot) \) denotes a point operator, it is independent of the pixel coordinates and it is the same for all the image pixels. Hence, each output pixel depends only on the input pixel having the same coordinates. These operators may be expressed by means of lookup tables.
Gray-level transformation operators can be divided into two main classes: linear operators and nonlinear operators. In the following we give some examples of both classes.
**Linear Contrast Stretching**
This transformation enhances the dynamic range by linearly stretching the original gray-level range \([a, b] \subset [0, 255]\) to the range \([0, 255]\). The transformation is defined as
\[
l' = T(l) = 255 \frac{(l - a)}{(b - a)}
\]
(2.2)
where \( a \leq l \leq b \).
Generally the linear transformation from the range \([a, b]\) to the range \([a', b']\) is
\[
l' = T(l) = (l - a) \frac{(b' - a')}{(b - a)} + a'
\]
(2.3)
where \( a \leq l \leq b \).
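As an illustrative sketch (not one of the book's listings), Eq. (2.2) can be implemented with a lookup table and applied to an 8-bit ImageJ ImageProcessor; the variables `a`, `b` and `ip` are assumed to be defined, and levels outside $[a, b]$ are simply clamped here:
```java
// Lookup table implementing Eq. (2.2); levels outside [a, b] are clamped
int[] lut = new int[256];
for (int l = 0; l < 256; l++) {
    if (l <= a) {
        lut[l] = 0;                                    // below the input range
    } else if (l >= b) {
        lut[l] = 255;                                  // above the input range
    } else {
        lut[l] = Math.round(255f * (l - a) / (b - a)); // linear stretching
    }
}
ip.applyTable(lut);                                    // apply the point operator
```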
**Linear Contrast Stretching with Clipping**
This transformation is used when \([a, b] \supset [0, 255]\). If the number of levels outside the range \([0, 255]\) is small, these levels are clipped in the following manner: levels \( l \leq 0 \) are set to 0, levels \( l \geq 255 \) are set to 255. For all the other levels, the Eq. (2.2) is applied.
**Logarithmic Transformation**
This transformation is defined as
\[
l' = c \log(1 + |l|)
\]
(2.4)
with \( c > 0 \). This transformation is used to compress the dynamic range so as to enhance details related to low levels. For example, it is used to visualize the Fourier spectrum of an image [11].
2.2 Contrast Enhancement
Power-Law Transformation
This transformation is defined as
\[ l' = c \cdot l^{\gamma} \] (2.5)
where \( c \) and \( \gamma \) are positive constant values. By varying the value of \( \gamma \) different functions can be obtained to compress or to expand the dynamic range of gray levels. Conventionally, the exponent in the power-law function is referred to as \textit{gamma}. The power-law transformations are useful to perform gamma correction in the visualization of images on a monitor or generally they are useful for general-purpose contrast manipulation [6].
Sigmoid Transformation
This transformation is defined as:
\[ S(x) = \frac{1}{1 + e^{-\gamma(x-c)}} \] (2.6)
where the value of \( c \) indicates the abscissa of the inflection point of the function and the parameter \( \gamma \) controls the contrast (values greater than 5 result in an enhancement of the contrast). Figure 2.2 shows a plot of the S-function with \( c = 0.2 \) and \( \gamma = 15 \). By applying the S-function with different values of \( \gamma \) and \( c \) we obtain different contrast enhancement results.
In Fig. 2.3 we show some examples of contrast modification using the plugin given in Listing 2.3 that provides different S-functions.
Fig. 2.2 A plot of the S function with \( c = 0.2 \) and \( \gamma = 15 \)
Fig. 2.3 Some examples of contrast modification using different S-functions.
(a) Original image (the brightness component of Fig. 2.1) and its histogram.
(b) Contrasted image obtained using the S-function with $c = 0.2$ and $\gamma = 15$.
(c) Contrasted image obtained by applying the S-function with $c = 0.2$ and $\gamma = 24$.
(Histogram statistics reported in the figure panels, each over 18522 pixels: (a) mean 44.364, standard deviation 16.908, range 0–109, mode 44; (b) mean 123.015, standard deviation 54.347, range 0–255, mode 13; (c) mean 120.528, standard deviation 70.551, range 0–255, mode 2.)
Listing 2.3 Sigmoid_Correction.java: Java plugin for contrast enhancement using the sigmoid function.
```java
/**
* Contrast enhancement by the following sigmoid function:
* bb = 1 / (1 + exp(GAMMA*(c-aa))).
*
* Different values for the parameters c and GAMMA can be chosen
*
* Author: Ignazio Altomare
* Date: 4/11/2010
*/
import ij.ImagePlus;
import ij.plugin.filter.PlugInFilter;
import ij.process.ImageProcessor;
import ij.gui.GenericDialog;
import ij.*;
import ij.gui.*;
import ij.process.*;
import java.awt.*;
import java.awt.event.*;
import javax.swing.*;
import javax.swing.event.*;
import ij.text.*;
public class Sigmoid_Correction extends WindowAdapter
implements PlugInFilter, ChangeListener, ActionListener {
private int w;
private int h;
private ImagePlus im_sig;
private ImageProcessor ip_orig, ip_sig;
private byte[] im;
private ImageWindow w_sig;
// variables
private int K = 256;
private int aMax = K - 1;
private float GAMMA_ini=15f;
private float c_ini=0.5f;
// window for visualizing the sigmoid function
private JFrame windowSig;
private PlotPanel graphicSig;
// button for applySig e resetSig
private JButton applySig;
private JButton resetSig;
// labels for C and Gamma
private JLabel C_label;
private JLabel Gamma_label;
// sliders for the values C and Gamma
private JSlider C_slider;
private JSlider Gamma_slider;
public int setup(String arg, ImagePlus img) {
return DOES_8G;
}
public void run(ImageProcessor ip) {
w = ip.getWidth();
h = ip.getHeight();
ip_orig=ip;
// create a copy of the image
im_sig = NewImage.createByteImage("Sigmoid Correction",w,h,1,
NewImage.FILL_BLACK);
ip_sig = (im_sig.getProcessor()).convertToByte(true);
ip_sig.copyBits(ip,0,0,Blitter.COPY);
// get pixel values
im = (byte[]) ip_sig.getPixels();
// process
this.process();
// show the sigmoid window
this.showSig();
w_sig = new ImageWindow(im_sig);
w_sig.addWindowListener(this);
im_sig.updateAndDraw();
}
private void process(){
// create a lookup table for the mapping function
int[] Fgc = new int[K];
for (int a = 0; a < K; a++) {
double aa = (double) a / (double)aMax; // scale to [0,1]
double bb = 1/(1+Math.exp(GAMMA_ini*(c_ini-aa)));
// scale back to [0,255]
int b = (int) Math.round(bb * aMax);
Fgc[a] = b;
}
ip_sig.applyTable(Fgc); // modify the image
}
private ImagePlus plotSig(){
float[] x = new float[256];
float[] y = new float[256];
for(int i=0; i<256; i++){
x[i]=(float)i/(float)aMax;
y[i]=(float)(1.0/(1+Math.exp(GAMMA_ini*(c_ini-x[i]))));
}
Plot p = new Plot("Sigmoid Correction","","",x,y);
p.setLimits(0.0,1.0,0.0,1.0);
p.setLineWidth(2);
return p.getImagePlus();
}
private void showSig(){
// create buttons
applySig=new JButton("Apply");
applySig.addActionListener(this);
resetSig=new JButton("Reset");
resetSig.addActionListener(this);
//create panels
JPanel panelSigmoid=new JPanel(new GridLayout(2,2));
JPanel panelApply_Reset=new JPanel();
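// PlotPanel is not a standard Swing/ImageJ class: it is presumably an auxiliary
// panel provided with the book's accompanying code; it displays the plot image
// and exposes an updateImage() method used below.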
graphicSig = new PlotPanel(this.plotSig().getImage());
//set borders of panel
panelSigmoid.setBorder(BorderFactory.createTitledBorder("Sigmoid Correction"));
//create labels
C_label=new JLabel();
Gamma_label=new JLabel();
this.setLabelSig();
//add labels to panel
panelSigmoid.add(C_label);
panelSigmoid.add(Gamma_label);
//create sliders
C_slider=new JSlider(JSlider.HORIZONTAL);
Gamma_slider=new JSlider(JSlider.HORIZONTAL);
this.setSliderSig();
C_slider.addChangeListener(this);
Gamma_slider.addChangeListener(this);
//add slider to panel
panelSigmoid.add(C_slider);
panelSigmoid.add(Gamma_slider);
//add Apply and Reset buttons
panelApply_Reset.add(applySig);
panelApply_Reset.add(resetSig);
//create window for the Sigmoid function
windowSig = new JFrame("Sigmoid Correction");
windowSig.setSize(700,550);
windowSig.setLocation(300,200);
windowSig.setDefaultCloseOperation(JFrame.DISPOSE_ON_CLOSE);
windowSig.setLayout(new FlowLayout());
Container contentPane=windowSig.getContentPane();
contentPane.add(graphicSig);
contentPane.add(panelSigmoid);
contentPane.add(panelApply_Reset);
windowSig.setVisible(true);
}
private void setLabelSig(){
C_label.setText("c = " + c_ini);
Gamma_label.setText("Gamma = " + GAMMA_ini);
}
private void setSliderSig(){
C_slider.setMinimum(1);
C_slider.setMaximum(10);
C_slider.setValue((int)(c_ini*10));
Gamma_slider.setMinimum(1);
Gamma_slider.setMaximum(255);
Gamma_slider.setValue((int)GAMMA_ini);
}
private void resetSig(){
c_ini=0.5f;
GAMMA_ini=15;
C_slider.setMinimum(1);
C_slider.setMaximum(10);
C_slider.setValue((int)(c_ini*10));
Gamma_slider.setMinimum(1);
Gamma_slider.setMaximum(255);
Gamma_slider.setValue((int)GAMMA_ini);
}
public void actionPerformed(ActionEvent e){
Object source=e.getSource();
if(source==applySig){
ip_sig.copyBits(ip_orig,0,0,Blitter.COPY);
im = (byte[]) ip_sig.getPixels();
this.process();
im_sig.updateAndDraw();
}
if(source==resetSig){
this.resetSig();
setSliderSig();
setLabelSig();
graphicSig.updateImage(plotSig().getImage());
}
}
public void stateChanged(ChangeEvent e){
Object source=e.getSource();
if(source==C_slider){
c_ini = (float)C_slider.getValue()/(float)10;
setSliderSig();
setLabelSig();
graphicSig.updateImage(plotSig().getImage());
}
if(source==Gamma_slider){
GAMMA_ini = Gamma_slider.getValue();
setSliderSig();
setLabelSig();
graphicSig.updateImage(plotSig().getImage());
}
}
}
```
2.2.2 Thresholding
Another way to perform contrast stretching is thresholding. A threshold \( t \) is defined so that levels \( l \leq t \) are set to 0, while levels \( l > t \) are set to 255. In this way a binary image is obtained having values \{0, 255\} or \{0, 1\} (Fig. 2.4).
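A lookup-table sketch of this thresholding (producing the \{0, 255\} version; naming is ours) is:

```java
// Sketch: binary thresholding with threshold t; levels <= t become 0, levels > t become 255.
public class Thresholding {
    public static int[] thresholdLUT(int t) {
        int[] lut = new int[256];
        for (int l = 0; l < 256; l++) {
            lut[l] = (l <= t) ? 0 : 255;
        }
        return lut; // e.g. ip.applyTable(Thresholding.thresholdLUT(128))
    }
}
```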
2.2.3 Histogram Transformation
Histograms are the basis for numerous spatial domain processing techniques. Histograms are simple to calculate and also lend themselves to economic hardware implementations, thus making them a popular tool for real-time image processing.
The histogram represents global information about an image: all the pixels having a particular value \( i \) contribute to the \( i \)-th bin of the histogram, that is, \( h(i) = n_i \), where \( n_i \) is the number of pixels having value \( i \). The presence of peaks in a histogram may reveal bright or dark regions, or regions having low or high contrast. The modification of the histogram produces a different distribution of gray levels, hence histogram manipulation can be used effectively for image enhancement. The modification of a histogram is defined by means of a point transformation \( T: j \rightarrow k \) such that if the level \( j \) has frequency \( h(j) \) then the transformed level \( k \) has frequency \( g(k) \), where \( h(\cdot) \) and \( g(\cdot) \) are the initial histogram and the transformed one, respectively.

Fig. 2.4 a LENA image. b Image obtained by thresholding only the brightness channel
**Equalization**
Equalization or normalization is the transformation of the level distribution into a uniform distribution in which the frequency of each transformed level is approximately constant, i.e., \( g(i) \approx \text{constant} \) for \( i = 0, \ldots, L_{\text{max}} \), where \( L_{\text{max}} \) is the maximum gray level. Since equalization provides a histogram that is almost uniform, it improves the image by increasing the contrast and removing regions that are too bright or too dark.
Let \( f(x, y) \) be a gray-level image of \( n = M \times N \) pixels with \( L_{\text{max}} + 1 \) gray levels and let \( h(i) \) be its histogram. We define the cumulative histogram of \( f \) as
\[
h_c(i) = \sum_{j=0}^{i} h(j) = \sum_{j=0}^{i} n_j
\]
for \( i = 0, \ldots, L_{\text{max}} \). If the histogram \( h \) is uniform then its cumulative function \( h_c \) is a line. Hence the equalization of the histogram \( h \) can be computed by imposing the cumulative histogram \( h_c \) to be linear. To this aim we consider the following equation:
\[
\frac{i}{h_c(i)} = \frac{L_{\text{max}}}{n} \quad i = 0, \ldots, L_{\text{max}}
\]
from which we obtain
\[
i = L_{\text{max}} \frac{h_c(i)}{n} = L_{\text{max}} \frac{\sum_{j=0}^{i} h(j)}{n} = L_{\text{max}} \frac{\sum_{j=0}^{i} n_j}{n}
\]
where \( n \) is the number of pixels and \( n_j \) is the occurrence number of the level \( j \).
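The mapping above translates directly into code; the following sketch works on a plain array of gray levels and is independent of ImageJ (the class and method names are illustrative choices):

```java
// Sketch: histogram equalization of a gray-level image stored as int values in [0, lMax].
public class HistogramEqualization {
    public static int[] equalize(int[] pixels, int lMax) {
        int n = pixels.length;
        int[] h = new int[lMax + 1];
        for (int p : pixels) h[p]++;                       // histogram h(i)
        long[] hc = new long[lMax + 1];                    // cumulative histogram h_c(i)
        hc[0] = h[0];
        for (int i = 1; i <= lMax; i++) hc[i] = hc[i - 1] + h[i];
        int[] out = new int[n];
        for (int k = 0; k < n; k++)                        // i' = lMax * h_c(i) / n
            out[k] = (int) Math.round(lMax * (double) hc[pixels[k]] / n);
        return out;
    }
}
```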
Appendix A provides reference to a Java plugin \([5]\) useful to evaluate the histogram of a color image.
### 2.3 Image Smoothing
Smoothing, also called blurring, is a simple and frequently used image processing operation, employed to ‘blur’ images and remove detail and noise. A blurring process can attenuate the abrupt transitions of the gray levels between a pixel and its neighbors (random noise) or the irrelevant details associated with a small number of pixels.
Generally to perform a smoothing operation we apply a filter to the image, by means of a local operator. A local operator produces a value for each pixel \((x, y)\) of the output image \( g \) computed in a neighborhood or window \( w \) (Fig. 2.5) centered in the pixel \((x, y)\) of the input image \( f \) by the following equation:
\[
g = T(f, w)
\]
In Fig. 2.5 a filter is visualized as a window of coefficients sliding across the image, that is, the image is explored in a fixed sequence (for example, from left to right and from top to bottom). The function \( T \) can be linear or nonlinear. The most common types of filter are linear: the output value \( g(x, y) \) is determined as a weighted sum of input pixel values \( f(x + i, y + j) \). Local operators based on the convolution operation can be formally described by means of the theory of linear systems, namely the Fourier transform and the convolution product theorem.
Fig. 2.5 Window or mask centered at the pixel \( f(x, y) \)
Linear image smoothing is a local operator based on a convolution of the image with a matrix \( h \) of proper dimension \( L \times L \), called mask or kernel, where \( L \) is an odd value. More formally, given an image \( f(x, y) \) of \( M \times N \) pixels and an \( L \times L \) spatial mask \( h(x, y) \), we define \( l = \lfloor L/2 \rfloor \); the following equation describes the convolution product in the spatial domain between the image \( f \) and the mask \( h \), with origin in the center of the mask:
\[
g(x, y) = h \otimes f = \sum_{i=-l}^{l} \sum_{j=-l}^{l} h(i, j)f(x + i, y + j) \quad \text{for} \quad x = 0, \ldots, M - 1, \quad y = 0, \ldots, N - 1
\]
There are many kinds of filters, depending on the mask used in the convolution equation. In the following we mention the most widely used.
**Mean Filter**
A simple process for image smoothing consists in locally computing the mean value for each pixel. This can be obtained by means of the convolution of the input image with the mask in Fig. 2.6 (lowpass spatial filtering). The mask is a square matrix of coefficients used to compute a new value starting from the neighborhood of the examined pixel; it is also called a filter in the spatial domain. The multiplying factor is needed to normalize the weights to 1, so that the output range is the same as the input range. The effect of the convolution operator is to compute each output pixel as the mean value of the pixels in its \( L \times L \) neighborhood.
Fig. 2.6 3 × 3 mean filter
\[
h = \frac{1}{9} \times \begin{bmatrix} 1 & 1 & 1 \\ 1 & 1 & 1 \\ 1 & 1 & 1 \end{bmatrix}
\]
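As an illustrative sketch, the convolution product defined above can be applied with this mean kernel as follows; the clamping of coordinates at the image border is one possible choice, not prescribed by the text.

```java
// Sketch: spatial convolution of a gray-level image with an L x L mask (L odd).
// Border handling clamps coordinates to the image; the kernel origin is the mask center.
public class SpatialConvolution {
    public static double[][] convolve(double[][] f, double[][] h) {
        int M = f.length, N = f[0].length, L = h.length, l = L / 2;
        double[][] g = new double[M][N];
        for (int x = 0; x < M; x++) {
            for (int y = 0; y < N; y++) {
                double sum = 0.0;
                for (int i = -l; i <= l; i++) {
                    for (int j = -l; j <= l; j++) {
                        int xx = Math.min(M - 1, Math.max(0, x + i));
                        int yy = Math.min(N - 1, Math.max(0, y + j));
                        sum += h[i + l][j + l] * f[xx][yy];
                    }
                }
                g[x][y] = sum;
            }
        }
        return g;
    }

    // 3 x 3 mean filter of Fig. 2.6: all coefficients equal to 1/9.
    public static double[][] meanKernel3x3() {
        double[][] h = new double[3][3];
        for (double[] row : h) java.util.Arrays.fill(row, 1.0 / 9.0);
        return h;
    }
}
```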
The Gaussian smoothing operator is a two-dimensional convolution operator that is used to blur images and remove details and noise. It uses kernels having the shape of a two-dimensional Gaussian function:
$$G(x, y, \sigma) = \left( \frac{1}{2\pi\sigma^2} \right) \exp\left( -\frac{x^2 + y^2}{2\sigma^2} \right)$$
where $\sigma$ is the standard deviation and $r = \sqrt{x^2 + y^2}$ is the radius (distance) from the center. The degree of smoothing is determined by the value of $\sigma$. Larger values of $\sigma$ require larger convolution kernels in order to be accurately represented. Figure 2.7 shows a two-dimensional Gaussian function and the corresponding mask.
About 99.7% of the values drawn from a Gaussian distribution lie within three standard deviations ($3\sigma$) of the mean; this fact is known as the three-sigma rule. For this reason, the Gaussian smoothing eliminates the influence of points farther than $3\sigma$ from the current pixel in the one-dimensional case and $6\sqrt{2}\sigma$ in the two-dimensional case (the central lobe of the two-dimensional Gaussian function has the value $2\sqrt{2}\sigma$). A Java plugin for the two-dimensional Gaussian filter is available at [7]. As an example, Fig. 2.8 shows the effect of Gaussian smoothing on the Lena image.
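A kernel with this shape can be generated directly from the Gaussian formula; in the sketch below the mask half-width is taken as $\lceil 3\sigma \rceil$, a common rule of thumb rather than the choice made by the plugin in [7].

```java
// Sketch: normalized L x L Gaussian kernel; L is derived from sigma so that the
// mask covers roughly +/- 3 sigma around the center (a common, not mandatory, choice).
public class GaussianKernel {
    public static double[][] create(double sigma) {
        int half = (int) Math.ceil(3 * sigma);
        int L = 2 * half + 1;
        double[][] k = new double[L][L];
        double sum = 0.0;
        for (int x = -half; x <= half; x++) {
            for (int y = -half; y <= half; y++) {
                double v = Math.exp(-(x * x + y * y) / (2 * sigma * sigma));
                k[x + half][y + half] = v;
                sum += v;
            }
        }
        for (int i = 0; i < L; i++)          // normalize so the coefficients sum to 1
            for (int j = 0; j < L; j++)
                k[i][j] /= sum;
        return k;  // can be applied, for example, with the convolution sketch given earlier
    }
}
```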
### 2.4 Edge Detection
Edges represent abrupt changes or discontinuities in an amplitude attribute of an image such as luminance, surface orientation, color and so on. Edges characterize object boundaries and are usually defined as curves separating two regions having different average values of their characteristics. The causes of the region dissimilarity may be due to some factors such as the geometry of the scene, radiometric characteristics of the surface, illumination, and so on. If the regions are sufficiently
homogeneous, the transition between two adjacent regions may be detected by analyzing the discontinuities along gray levels.
Edge detection is a fundamental problem in image analysis and computer vision. It is the process to locate and identify sharp discontinuities in an image giving boundaries between different regions. This boundary detection is the first step in many computer vision edge-based tasks such as face recognition, obstacle detection, target recognition, image compression, and so on. Edge detection is a local operator based on a convolution of the image with a matrix $h$ of dimension $L \times L$, called mask or kernel, where $L$ is an odd value.
An edge is characterized by the following features:
- **Edge normal**: the unit vector in the direction of maximum intensity change.
- **Edge direction**: the unit vector along the edge (perpendicular to the edge normal).
- **Edge position or center**: the image position at which the edge is located.
- **Edge strength or magnitude**: local image contrast along the normal. Generally a pixel is an edge pixel if its strength exceeds a predefined threshold value.
An edge detection method detects the pixels candidate to be points of the boundary of an object or a region. To derive a boundary of an object all the edge pixels of that boundary should be grouped. This can be done by border following algorithms or grouping algorithms.
Since edges may not be represented by perfect discontinuities, the quality of detected edges is highly dependent on noise, lighting conditions, objects of similar intensities, and the density of edges in the scene. Regarding this problem it should be noted that even though noise may not be visible in the original image, it is highlighted in derivatives, especially in second derivatives. Hence edge detection, being based on derivatives, is highly affected by noise. Some noise effects can be reduced by thresholding. For example, we could define a point in an image as an edge point if its first derivative is greater than a specified threshold. By doing so, we automatically assess which significant gray-level transitions can be considered as edges. Another problem arises when the edge is located on a soft discontinuity. A solution to this problem is proposed in Chap. 8.
The most common edge detection methods are the Gradient operator based on first derivatives, the Laplacian and the LoG (Laplacian of a Gaussian) operators based on second derivatives. Figure 2.9 shows how the magnitude of the first derivative can be used to detect the presence of an edge in an image. The sign of the second derivative can be used to determine whether an edge pixel lies on the dark or the light side of an edge. The zero crossings of the second derivative provide a powerful way of locating edges in an image.
Generally an edge detection method involves three steps
1. Smoothing to reduce the noise;
2. Applying edge enhancement, that is a local operation that extracts all image pixels candidate to be edge points;
3. Applying edge localization (thresholding) to select the edge points among all the candidate points.
Steps 1. and 2. can be implemented by convolving the input image with a proper mask so as to obtain the gradient image. In the third step the edge points are detected, for example by looking for maximum and minimum magnitude values for the first derivative operators. These operators analyze the distribution of the gradient values in the neighborhood of a given pixel and determine if the pixel has to be classified as an edge point on the basis of threshold values. The results of these edge detectors are very sensitive to the threshold value. These operators require high computational time and hence cannot be used in real-time applications.
**Gradient Operator**
Given an image \( f(x, y) \), its gradient is defined by
\[
\nabla f(x, y) = \begin{bmatrix}
\frac{\partial f(x, y)}{\partial x} \\
\frac{\partial f(x, y)}{\partial y}
\end{bmatrix}
\]
The magnitude of the gradient is given by
\[ m(x, y) = |\nabla f(x, y)| = \left( \left( \frac{\partial f(x, y)}{\partial x} \right)^2 + \left( \frac{\partial f(x, y)}{\partial y} \right)^2 \right)^{\frac{1}{2}} \]
The direction of the gradient is given by
\[ \alpha(x, y) = \tan^{-1} \left( \frac{\frac{\partial f(x, y)}{\partial x}}{\frac{\partial f(x, y)}{\partial y}} \right) \]
The Gradient operator may be implemented as a convolution with the masks shown in Figs. 2.10 and 2.11.
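As a sketch, using the standard 3 × 3 Sobel masks as estimates of the partial derivatives, the gradient magnitude image can be computed as follows (the border handling and the naming are our own choices):

```java
// Sketch: gradient magnitude m(x, y) using the Sobel masks as derivative estimates.
public class SobelGradient {
    private static final int[][] SX = { {-1, 0, 1}, {-2, 0, 2}, {-1, 0, 1} }; // d/dx
    private static final int[][] SY = { {-1, -2, -1}, {0, 0, 0}, {1, 2, 1} }; // d/dy

    public static double[][] magnitude(double[][] f) {
        int M = f.length, N = f[0].length;
        double[][] m = new double[M][N];
        for (int x = 1; x < M - 1; x++) {
            for (int y = 1; y < N - 1; y++) {
                double gx = 0, gy = 0;
                for (int i = -1; i <= 1; i++) {
                    for (int j = -1; j <= 1; j++) {
                        gx += SX[i + 1][j + 1] * f[x + i][y + j];
                        gy += SY[i + 1][j + 1] * f[x + i][y + j];
                    }
                }
                m[x][y] = Math.sqrt(gx * gx + gy * gy); // border pixels are left at 0
            }
        }
        return m;
    }
}
```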
**Laplacian Operator**
In many applications it is of particular interest to construct derivative operators, which are isotropic, i.e., rotation invariant. This means that rotating the image \( f \) and applying the operator gives the same result as applying the operator on \( f \) and then rotating the result. In other words, if the operator is isotropic then equally sharpened edges are enhanced in any direction. One of these isotropic operators is the Laplacian operator, defined as
\[ \nabla^2 f(x, y) = \frac{\partial^2 f(x, y)}{\partial x^2} + \frac{\partial^2 f(x, y)}{\partial y^2} \]
This can be implemented using the mask of Fig. 2.12. If \( f(x, y) \) is not constant or it does not vary linearly then the Laplacian of \( f \) has a zero crossing, i.e., a sign change crossing the \( x \) axis.
**Laplacian of a Gaussian**
Using second-order derivatives, the edge localization step is based on the extraction of zero-crossing points which indicate a sign change crossing the \( x \)-axis. Since the second-order derivative is very sensitive to noise, a filtering function is required. In [8] a Gaussian function is used to smooth the image hence deriving the operator called Laplacian of a Gaussian (LoG). The Gaussian smoothing operator is a


two-dimensional convolution operator that is used to blur images and remove details and noise. It uses kernels that represent the shape of a two-dimensional Gaussian function
$$G(x, y, \sigma) = \left[ \frac{1}{2\pi\sigma^2} \right] \exp \left( -\frac{x^2 + y^2}{2\sigma^2} \right)$$
The LoG operator based on this Gaussian function is defined as
$$\text{LoG}(x, y, \sigma) = c \left[ \frac{x^2 + y^2}{\sigma^2} - 1 \right] \exp \left( -\frac{x^2 + y^2}{2\sigma^2} \right)$$
where $c$ is a factor that normalizes to 1 and the value of $\sigma$ determines for each pixel $(x, y)$ the number of points that influence the evaluation of the Laplacian in $(x, y)$. A significant problem of the LoG operator is that the localization of edges with an asymmetric profile by zero-crossing points introduces a bias which increases with the smoothing effect of filtering [1].
### 2.4.1 Canny Operator
An interesting solution to avoid the dependence of detected edges on noise was proposed by J. Canny in [2], who defined an optimal operator for edge detection including three criteria: good detection, good localization, identification of single edge point (a given edge in the image should be marked only once).
Let $f(x, y)$ be a gray-level image of $M \times N$ pixels and $G(x, y)$ a Gaussian filter. The Canny operator performs the following steps:
1. First the noise is filtered out from the image. A suitable Gaussian filter is used for this task: Gaussian smoothing can be performed using a convolution product $f_s(x, y) = g(x, y) \otimes f(x, y)$. Some parameters have to be fixed for this operator, such as the standard deviation $\sigma$ of the Gaussian filter. The width of the mask must be chosen carefully, since it is directly proportional to the localization error. Since the Gaussian smoothing eliminates the influence of the points farther than $3(2\sqrt{2}\sigma)$ from the current pixel, the mask size must be equal to $6\sqrt{2}\sigma$ for a fixed value of $\sigma$.
2. The second step consists of computing the gradient of \( f \) by means of the Sobel masks along \( x \) (columns) and \( y \) (rows) directions. Edge strength is found out by taking the gradient of the image.
3. The third step finds the edge direction using the gradient in \( x \) and \( y \) directions. For each pixel \((x, y)\) we evaluate the gradient strength \( m(x, y) \) and the gradient direction \( \alpha(x, y) \), where \( m \) and \( \alpha \) are matrices having the same size as the image \( f(x, y) \). A non-maxima suppression algorithm is applied that follows the edge direction and suppresses any pixel value that is not to be considered as an edge point. That is, for each pixel \((x, y)\) we consider the gradient direction \( \alpha(x, y) \) and check if \( m(x, y) \) has a local maximum in that direction. Usually a small number of directions is considered. For example, the four directions \((0^\circ, 90^\circ, 45^\circ, -45^\circ)\) of a \((3 \times 3)\) window centered at the pixel \((x, y)\) may be considered to produce an initial edge map \( S(x, y) \).
4. The last step uses double thresholding to eliminate false edges (see the sketch below). Two thresholds \( t_1 < t_2 \) are selected, typically with a ratio of 2 or 3 between them. Pixels of the edge map having a gradient magnitude \( m(x, y) \) greater than \( t_2 \) are definitively labeled as edge pixels. If a point \((x, y)\) has \( m(x, y) < t_2 \) and is also connected to points already labeled as edge points, then \( m(x, y) \) is compared with \( t_1 \): if \( m(x, y) > t_1 \) then the point \((x, y)\) is definitively labeled as an edge point. All the other points are not labeled as edge points.
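The double-thresholding step can be sketched as a hysteresis pass over the gradient magnitude: strong pixels (above \( t_2 \)) are accepted immediately, and weaker pixels (above \( t_1 \)) are accepted only when they are 8-connected to an already accepted pixel. The iterative propagation below is one simple way to implement the connectivity test; it is not taken from [2] or [4].

```java
// Sketch: double thresholding with hysteresis. Pixels with m > t2 are edges;
// pixels with t1 < m <= t2 become edges only if 8-connected to an edge pixel.
public class HysteresisThreshold {
    public static boolean[][] apply(double[][] m, double t1, double t2) {
        int M = m.length, N = m[0].length;
        boolean[][] edge = new boolean[M][N];
        for (int x = 0; x < M; x++)
            for (int y = 0; y < N; y++)
                if (m[x][y] > t2) edge[x][y] = true;       // strong edges
        boolean changed = true;
        while (changed) {                                   // propagate to connected weak pixels
            changed = false;
            for (int x = 1; x < M - 1; x++) {
                for (int y = 1; y < N - 1; y++) {
                    if (!edge[x][y] && m[x][y] > t1) {
                        for (int i = -1; i <= 1 && !edge[x][y]; i++)
                            for (int j = -1; j <= 1; j++)
                                if (edge[x + i][y + j]) { edge[x][y] = true; changed = true; break; }
                    }
                }
            }
        }
        return edge;
    }
}
```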
An ImageJ plugin that implements the Canny algorithm can be found at [4]. An application example is shown in Fig. 2.13. We can observe how an appropriate choice of the parameters of the Canny operator may produce very thin edges.
**Fig. 2.13** a Original image. b Image obtained by applying the gradient in \((x, y)\) directions. c Image after thresholding of the brightness of the image b. d Image obtained from a after applying Canny operator with \( \sigma = 1, t_1 = 2.5, t_2 = 7.5 \).
2.4.2 Optimization-Based Operators
In the previous sections, we have seen that detection of edges usually involves two stages. The first one is an edge enhancement process that requires the evaluation of derivatives of the image, making use of gradient or Laplacian operators. Methods such as thresholding or zero crossing produce an edge map that contains the pixels that are candidates to be labeled as edge points. In the second stage, pixels of the edge map are selected and combined into contours using processes such as boundary detection, edge linking, and grouping of local edges [11, 13].
This last phase can be viewed as a search of the optimal configuration of those pixels that better approximate the edges. More precisely let us consider an image $F = \{ f(x, y); 0 \leq x \leq M - 1, 0 \leq y \leq N - 1 \}$ and an edge configuration $S = \{ s(x, y); 0 \leq x \leq M - 1, 0 \leq y \leq N - 1 \}$ where $s(x, y) = 1$ if $(x, y)$ is an edge pixel, $s(x, y) = 0$ otherwise. Therefore an edge could be considered as one of the possible paths in the universe of the pixels of the image $F$. If we define a function $T(S)$ evaluating the edge $S$, the searching of the best edge configuration can be accomplished by means of an optimization method that minimizes/maximizes the function $T(S)$.
In other words, the edge detection problem can be formulated as one of optimization where the evaluation function depends on the local edge structure. Since the search space for the optimal solution is extremely large due to the number of possible configurations of the $M \times N$ pixels of the image, a ‘blind’ search would be fully inefficient. Then optimization methods are necessary which take into account the geometric and topological constraints of the problem. In this sense some methods have been introduced such as graph searching, relaxation, and simulated annealing [9, 10, 12].
In [3] optimization techniques known as Genetic Algorithms are proposed for the search of the optimal edge. The peculiarities of these algorithms are the robustness in the application to different classes of problems and the natural parallel implementation. When using a genetic algorithm for optimization, a solution is encoded as a string of genes to form a chromosome representing an individual. In [3] an individual is an edge configuration $S$ represented by a string of $M \times N$ bits. Each bit encodes the presence (or not) of an edge pixel in the image $F$. The approach consists essentially of two phases: evaluation of the likelihood of a pixel to be an edge pixel and boundary detection by means of genetic algorithms. An objective function $T$ is supplied which assigns a fitness value to each edge configuration $S$. This function evaluates the cost of $S$ as the sum of the costs of each pixel $(x, y)$ in $S$. The assumptions are that the edges should tend to be continuous, thin and of sufficient length; moreover the edges should be perpendicular to the gradient at each pixel. The cost function $T$ evaluates at each point the deviation from the previous assumptions by computing a linear combination of five weighted factors: fragmentation, thickness, local length, region similarity, and curvature. These factors capture the local nature of the edges and are evaluated in a $(w \times w)$ window centered on each pixel $(x, y)$ using the values of the configuration $S$ and a likelihood map $L$ based on the gradient (amplitude and
direction). The pixels in this window constitute the neighbor of the central pixel. The genetic algorithm, starting from an initial population (i.e., a collection of possible solutions) iteratively produces new generations of individuals (i.e., potential solutions) using the operators of reproduction, crossover, and mutation. Since the problem is the minimization of the objective function $T(S)$, each individual $S$ of the population must reproduce itself in proportion to the inverse of its function $T(S)$. The iterative optimization process ends when the mean value of the objective function $T$ does not change, within a tolerance value, between two consecutive generations.
References
Fuzzy Logic for Image Processing
A Gentle Introduction Using Java
Caponetti, L.; Castellano, G.
2017, XIV, 138 p. 61 illus., 33 illus. in color., Softcover
ISBN: 978-3-319-44128-3
|
{"Source-Url": "https://www.springer.com/cda/content/document/cda_downloaddocument/9783319441283-c2.pdf?SGWID=0-0-45-1588704-p180189884", "len_cl100k_base": 9602, "olmocr-version": "0.1.49", "pdf-total-pages": 24, "total-fallback-pages": 0, "total-input-tokens": 54942, "total-output-tokens": 11783, "length": "2e13", "weborganizer": {"__label__adult": 0.0004935264587402344, "__label__art_design": 0.004001617431640625, "__label__crime_law": 0.00049591064453125, "__label__education_jobs": 0.0010738372802734375, "__label__entertainment": 0.00019443035125732425, "__label__fashion_beauty": 0.00029540061950683594, "__label__finance_business": 0.0002582073211669922, "__label__food_dining": 0.0005397796630859375, "__label__games": 0.0008101463317871094, "__label__hardware": 0.004894256591796875, "__label__health": 0.0009737014770507812, "__label__history": 0.00069427490234375, "__label__home_hobbies": 0.0002211332321166992, "__label__industrial": 0.0008869171142578125, "__label__literature": 0.0004451274871826172, "__label__politics": 0.0003604888916015625, "__label__religion": 0.0007748603820800781, "__label__science_tech": 0.329345703125, "__label__social_life": 0.0001056194305419922, "__label__software": 0.0146636962890625, "__label__software_dev": 0.63720703125, "__label__sports_fitness": 0.0003974437713623047, "__label__transportation": 0.0007338523864746094, "__label__travel": 0.0003108978271484375}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 40542, 0.03309]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 40542, 0.88784]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 40542, 0.77502]], "google_gemma-3-12b-it_contains_pii": [[0, 1384, false], [1384, 3763, null], [3763, 4975, null], [4975, 6642, null], [6642, 6967, null], [6967, 8970, null], [8970, 10316, null], [10316, 10938, null], [10938, 12620, null], [12620, 13975, null], [13975, 15447, null], [15447, 16683, null], [16683, 18227, null], [18227, 20617, null], [20617, 22735, null], [22735, 24551, null], [24551, 26831, null], [26831, 28528, null], [28528, 30389, null], [30389, 32386, null], [32386, 34540, null], [34540, 37978, null], [37978, 40363, null], [40363, 40542, null]], "google_gemma-3-12b-it_is_public_document": [[0, 1384, true], [1384, 3763, null], [3763, 4975, null], [4975, 6642, null], [6642, 6967, null], [6967, 8970, null], [8970, 10316, null], [10316, 10938, null], [10938, 12620, null], [12620, 13975, null], [13975, 15447, null], [15447, 16683, null], [16683, 18227, null], [18227, 20617, null], [20617, 22735, null], [22735, 24551, null], [24551, 26831, null], [26831, 28528, null], [28528, 30389, null], [30389, 32386, null], [32386, 34540, null], [34540, 37978, null], [37978, 40363, null], [40363, 40542, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 40542, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 40542, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 40542, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 40542, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 40542, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 40542, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 40542, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, 
false], [5000, 40542, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 40542, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, true], [5000, 40542, null]], "pdf_page_numbers": [[0, 1384, 1], [1384, 3763, 2], [3763, 4975, 3], [4975, 6642, 4], [6642, 6967, 5], [6967, 8970, 6], [8970, 10316, 7], [10316, 10938, 8], [10938, 12620, 9], [12620, 13975, 10], [13975, 15447, 11], [15447, 16683, 12], [16683, 18227, 13], [18227, 20617, 14], [20617, 22735, 15], [22735, 24551, 16], [24551, 26831, 17], [26831, 28528, 18], [28528, 30389, 19], [30389, 32386, 20], [32386, 34540, 21], [34540, 37978, 22], [37978, 40363, 23], [40363, 40542, 24]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 40542, 0.0]]}
|
olmocr_science_pdfs
|
2024-11-25
|
2024-11-25
|
d2d1dbdd99f8da4058c25537265b2f2479140144
|
Histogram Guided Interactive Query Evaluation
Donko Donjerkovic
Raghu Ramakrishnan
Technical Report #1419
August 2000
Histogram Guided Interactive Query Evaluation
Donko Donjerkovic, Raghu Ramakrishnan
Department of Computer Sciences
University of Wisconsin–Madison
Abstract
The data analysis process is interactive. Analysts are more interested in seeing a subset of query answers quickly than in seeing all answers after a long waiting period. To effectively support such interactive queries, we propose new join execution strategies that minimize the time to produce the first N answers. Our techniques deliver early result tuples at a significantly higher rate than traditional pipelined join plans, as demonstrated by experiments. This improvement is a result of better memory utilization, which is in turn made possible by exploiting database statistics to prioritize tuples that are the largest contributors to the join result. To incorporate these new techniques into a cost-based optimizer, we derive cost formulas and suggest an optimization strategy that seeks to minimize the response time for early query answers. We also show that system statistics can be exploited, surprisingly enough, to significantly improve the total execution time of traditional blocking operators such as Hybrid Hash join and hash-based group-by.
1 Introduction
It is widely recognized that information analysis tools should provide quick response times, since they are used in an iterative manner. Analysis usually starts with broad queries that are subsequently refined based on the initial answers. Database systems used as backends of such tools must, therefore, be able to provide interactive response times as well.
Data analysts typically examine large databases in search of common trends. For example, one might compare total sales on a monthly basis. Two common methods for detecting large scale trends are: (1) Aggregate operations such as sum, count, and average, and (2) Data visualization. While it is evident how aggregating data reduces information complexity, data visualization does this as well, since large distributions can be presented in a graph where data trends may be evident at a glance. Both of these methods can be made interactive, as explained next.
Online aggregation techniques produce approximate aggregate answers with increasing precision over time [8, 6]. The objective of an online aggregation system is not to minimize the time to produce the final answer, but to minimize the time to achieve a small confidence interval. Because the confidence interval is proportional to $1/\sqrt{n}$, where $n$ is the number of processed tuples, the basic objective of online aggregation is equivalent to the objective of minimizing time to produce a certain number of answers. On the other hand, database visualization systems, such as DEVise [10], can be interactive by displaying tuples as soon
as they are produced. This is, of course, under the assumption that answers to a query are being produced as soon as the query is issued. It follows that, in order to be useful for interactive visualization, database systems must provide an option of optimizing queries for the first few (N) answers. Some commercial systems support such optimization, mostly by choosing pipelined plans with Nested Loops joins, as discussed in the section on related work (Section 6).
As described, optimizing for N answers is needed in online aggregation and data visualization environments. Querying mode in which the emphasis is on producing a subset of answers quickly is called partial or interactive querying. In most scenarios, optimizing for N answers forces an optimizer to use a pipeline of Nested Loops joins. Unfortunately, Nested Loops joins are inefficient, mostly due to the non-locality of page references. For example, when memory is lacking, every index lookup in the Index Nested Loops (INL) join can result in one I/O operation. Because some form of an INL join is required for fast delivery of the first block of answers, we exploit the available system statistics to significantly improve memory utilization of the INL join.
We now consider the following motivating example.
**Example 1.1** Consider a business intelligence query that explores the distribution of sale prices by various drug manufacturers, based on point of sale data. (This example is motivated by real world data that we use to evaluate our technique.) The sales table has price and *productId* attributes, among several others. The product table has *productId* (primary key with an index) and *manufacturer* attributes, among other fields. Scaled down examples of sales and product tables are shown in Figures 1 and 2, respectively. Notice that the most common *productId* in the sales table is 1.
Suppose we want to execute the query that asks for (*price, manufacturer*) pairs:
```
select price, manufacturer
from sales s, products p where s.productId = p.productId
```
The system needs to perform an equi-join on *productId* between Sales and Products tables. A blocking join operator, such as hash join, is not suitable in an interactive environment due to a large initial “think” time. Index Nested Loops join, with Products as the inner table, is a better option since it will immediately start producing answers. However, assume that just two pages of memory are available for the join, one for the inner table and one for the outer table. (We use such small numbers since a more realistic scenario would be harder to
follow.) Suppose the number of tuples per page is two for both relations, and that a user wants to see five answer tuples as soon as possible. We now calculate the total number of I/Os needed to produce five answers using INL join. There are five I/Os on Products table, one for each index lookup needed to match the first five tuples in Sales. In addition, there are three I/Os on Sales, as we need to read three pages sequentially before we reach the fifth tuple (two tuples per page). This brings the total to eight I/Os before the five answer tuples are produced.
To produce the five answer tuples quicker, we propose the following strategy. Identify one page from Products (since that is the memory available) that is most frequently referenced by Sales. This is clearly the first page of Products (with tuples 1 and 2), since it has five references, compared with three references to the second page. Scan the sales table, and at the same time produce answers for tuples that have a match on this page. In this case we have four I/Os on Sales (since the whole outer must be scanned to produce five answers) and one I/O on Products, a total of five I/Os. By better utilizing the available memory we were able to reduce the number of I/Os from eight to five. Savings are more impressive when more realistic numbers are used, as illustrated in the experimental section (Section 3).
The main contribution of the work described in this paper is to propose and demonstrate effectiveness of techniques that use tuple frequencies for better memory utilization. Tuple frequencies are readily available in current database systems as a part of system statistics, usually in forms of histograms [14].
Histograms and other statistical summaries, such as random samples and wavelets, have been proposed and used in database systems primarily for query optimization. To the best of our knowledge, this is the first time that the use of distribution summaries is shown to significantly improve the performance of query execution.
The rest of the paper is organized as follows: Section 2 describes techniques for performance improvements of partial query evaluation. Experimental evaluation follows in Section 3. We then propose an optimization framework for partial query evaluation in Section 4, which takes into account properties of the new operator evaluation methods. Even though our work was primarily motivated by inefficiencies of partial query evaluation, we found that database statistics can be used to speed up the computation of all query answers also, as discussed in Section 5. Finally, we discuss related work, outline future work and conclude.
2 Statistics Guided Partial Query Evaluation
In this section, we focus on partial query evaluation for which blocking relational operators are not applicable, as explained in the introductory section (1). Our discussion is focused on common foreign key joins, but the results are easily applicable to general equi-joins as well. We first describe how statistical information helps minimize the I/O cost to produce the first N answers in single foreign key joins, and then extend the ideas to address multiple foreign key joins.
2.1 Single Foreign Key Joins
Our discussion in this section is applicable to INL and Simple Nested Loops joins. As illustrated in Example 1.1, Nested Loops joins have poor locality of memory references on
the inner relation when the available memory is not sufficient. In that example, we illustrated a strategy to improve memory utilization. We now summarize our observations. We propose to use a multi-phase join algorithm. The first join phase consists of two operations:
1. We identify the set of pages of the inner relation that has the most references in the outer relation. Tuples that occupy that set of pages can be defined by a relational selection on the join column which we call the join filter. (See Section 2.1.1.)
2. We sequentially scan the outer relation; if a tuple satisfies the join filter, an index lookup is performed and an answer is produced, otherwise the tuple is skipped.
If more answer tuples are desired, the next join phase is performed, which differs from the previous only in the join filter; it now includes the set of pages that are most frequently referenced and do not satisfy any previous filter.
We call such a join operation Filtered Index Nested Loops join. Note that our algorithm partitions the inner relation into disjoint blocks, and joins each block with the entire outer relation. A conceptual comparison between the traditional INL join and the filtered version is depicted in Figure 3. We note that index lookups of a Filtered INL join are limited to the set of join values satisfying the join filter. As a consequence, the set of referenced pages is limited. Because some outer tuples are rejected, i.e., no index lookup is attempted, additional scans over the outer relation may be required. On the other hand, a traditional INL join requires only one scan of the outer relation, and it repeatedly references all the pages of the inner relation. The major difference between these two algorithms is the usage of database statistics. While a traditional INL algorithm makes no use of database statistics, a Filtered INL join uses frequency information to define the memory content in order to maximize the number of early answers. We reiterate that none of the traditional join
operators take advantage of readily available database statistics. Most of these operators could be enhanced by filtering. Perhaps the cleanest example would be the Filtered Block Nested Loops join. This algorithm is identical to the Block Nested Loops join except that a block would always contain the (next) most frequently referenced pages in the outer relation, not just a contiguous set of pages on disk. A disadvantage of Filtered Block Nested Loops join is that it requires a full scan of the outer relation before any answers are produced, which is why we focus on the INL join instead.
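A minimal sketch of one phase of the Filtered INL join is given below; the scan, index, and filter interfaces are hypothetical placeholders for whatever access methods the executor exposes, and are not part of the paper.

```java
// Sketch of one phase of a Filtered Index Nested Loops join (names and interfaces
// are hypothetical; they stand in for the executor's scan/index access methods).
import java.util.Iterator;
import java.util.function.Predicate;

public class FilteredINLPhase<Outer, Inner> {
    public interface Index<K, V> { Iterator<V> lookup(K key); }

    /** Runs one phase: scan the outer, probe the inner only for tuples that pass the filter. */
    public long run(Iterable<Outer> outerScan,
                    java.util.function.Function<Outer, Integer> joinKey,
                    Predicate<Integer> joinFilter,           // covers the most referenced inner pages
                    Index<Integer, Inner> innerIndex,
                    java.util.function.BiConsumer<Outer, Inner> emit,
                    long answersWanted) {
        long produced = 0;
        for (Outer o : outerScan) {
            Integer k = joinKey.apply(o);
            if (!joinFilter.test(k)) continue;               // skip: handled in a later phase
            for (Iterator<Inner> it = innerIndex.lookup(k); it.hasNext(); ) {
                emit.accept(o, it.next());
                if (++produced >= answersWanted) return produced;
            }
        }
        return produced;                                     // phase exhausted; build the next filter
    }
}
```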
The key idea in join filtering is to construct a filter such that it contains the current set of the most frequently referenced pages. We discuss how to do this next.
2.1.1 Constructing a Join Filter for Index Nested Loops Join
As indicated before, a join filter ideally contains a set of the most referenced inner pages that remain to be joined. Therefore, to create an ideal join filter one would need to order pages of the inner relation by the number of references. In a realistic scenario, it would be too expensive to construct an exact join filter, so we consider the following approximations:
1. Maintain (small) histograms [14] to represent approximate reference counts for each page.
2. Use existing histograms on tuple values, together with some information about the tuple layout on pages in order to infer the number of page references. This solution is probably less efficient than the first alternative, but, it does not require creation of special purpose histograms.
In our experiments, we used the second alternative, as outlined next. Suppose that a filter can have $M$ pages and that the index used is clustered on the join column. We use a histogram on the outer’s join column. To create a join filter that includes $M$ pages we proceed as follows:
1. Find the histogram bucket with the (next) largest height.
2. Include the bucket bounds into the filter, extended up to the nearest page boundaries.
We repeat these steps until the size of the join filter is $M$ pages. The bucket extension in step 2 makes the filter more efficient by including the tuples that share the same page. We note that filter construction is fast since it is based on histograms, which are small in size. An example of a histogram and a corresponding join filter is shown in Figure 4.
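The construction can be sketched as follows. The `Bucket` and `Range` types, the page arithmetic, and the assumption that join-key order maps linearly onto pages of the clustered inner relation are simplifications made for this example.

```java
// Sketch: build a join filter (a set of value ranges) from an equi-depth-style
// histogram by repeatedly taking the highest remaining bucket until the filter
// covers roughly M pages. Bucket/Range and the page arithmetic are illustrative.
import java.util.*;

public class JoinFilterBuilder {
    public record Bucket(int lo, int hi, long count) {}
    public record Range(int lo, int hi) {}

    public static List<Range> build(List<Bucket> histogram, int maxPages,
                                    int tuplesPerPage, int minKey) {
        List<Bucket> byHeight = new ArrayList<>(histogram);
        byHeight.sort(Comparator.comparingLong(Bucket::count).reversed());
        List<Range> filter = new ArrayList<>();
        int pagesCovered = 0;
        for (Bucket b : byHeight) {
            if (pagesCovered >= maxPages) break;
            // Extend the bucket bounds to the nearest page boundaries
            // (clustered index and linear key-to-page mapping assumed).
            int lo = minKey + ((b.lo() - minKey) / tuplesPerPage) * tuplesPerPage;
            int hi = minKey + (((b.hi() - minKey) / tuplesPerPage) + 1) * tuplesPerPage - 1;
            filter.add(new Range(lo, hi));
            pagesCovered += (hi - lo + 1) / tuplesPerPage;
        }
        return filter;   // expressible as a disjunct of BETWEEN predicates, as in Section 3.2
    }
}
```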
To create a filter for an unclustered relation, one would need to consult the histogram and an unclustered index to see which pages are frequently referenced. Alternatively, one could only consult the histogram and assume that each tuple is on a separate page.
In general, a join filter can be expressed as a disjunct of range selections. In practice, since histograms assume uniform frequency within a bucket, many entire buckets will be included in a filter. Therefore, the size of a filter will be about the same size as a histogram. Since histograms are typically small, testing for filter satisfiability in step 2 of a Filtered INL join phase is quick. Our techniques can be easily adapted to any other statistical summary.
2.1.2 I/O Cost For Filtered Index Nested Loops Join
In this subsection we derive an analytical expression for the I/O cost for the first phase of a Filtered INL join, and then generalize it to include all phases. Since filtered joins will be used when optimizing for N answers, the cost formula must be a function of N, the target number of answers. The input parameters that determine the cost are:
**M**, the number of memory pages allocated to the inner relation, which is equal to the size of the filter.
**h(M)**, the selectivity of the current join filter on the outer relation, which depends on the size of the filter and on the join column distribution.
**|S|**, the number of pages in the outer relation.
**\{S\}**, the number of tuples in the outer relation.
We first estimate the number of I/Os on the inner relation in the first phase of a join. This number is at most M, however it depends on N. Initially, almost all index lookups will result in I/O operations. Later on, there will be almost no I/O, as all the pages satisfying the join filter will be in memory. Denote the number of I/Os on the inner relation by \(P_R\). For each output tuple produced, the probability of an I/O on the inner relation is join value dependent. Pages containing more frequent join values are more likely to be in memory than other pages, assuming the LRU replacement policy. At this point we use the simplifying assumption that any tuple within the filter has the same frequency of occurrence in the outer relation. This assumption is a reasonable approximation, as experimentally verified in Section 3.6, and it makes derivation of an analytical expression possible. Under this assumption, the likelihood of a page fault on the inner relation when an output tuple is produced is: \(\frac{M-P_R}{M}\). That is, there are \(M - P_R\) unoccupied slots in memory of size \(M\), and referring to any of them will result in a page fault. This can be expressed as a differential equation:
\[
\frac{dP_R}{dN} = \frac{M - P_R}{M}
\]
It can be easily checked that the solution for this equation is:
\[
P_R = M(1 - e^{-\frac{N}{M}})
\]
As discussed before, for a small \(N\), \(P_R \approx N\), while for large \(N\), \(P_R \approx M\).
Next, we estimate the number of I/Os for the outer relation. To produce $N$ output tuples we need to read $N/h(M)$ outer tuples (as only the ones satisfying the filter are passed on). The number of I/Os per each outer tuple is $|S|/\{S\}$. It follows that the number of I/O operations on the outer relation, $P_S$, is:
$$P_S = \frac{N}{h(M)} \frac{|S|}{\{S\}} \tag{1}$$
This brings the total number of I/Os in the first phase ($P = P_R + P_S$) to:
$$P = M(1 - e^{-\frac{N}{M}}) + \frac{N}{h(M)} \frac{|S|}{\{S\}} \tag{2}$$
The maximum $N$ produced in the first phase of a filtered join is $\{S\}h(M)$. If a larger $N$ is needed, the second join phase must be performed, which is identical to the first phase, only with a different join filter. The total cost is the sum of the costs of all the join phases. In practice, the number of phases will be small, because a blocking operator is likely to be more efficient than a multi-phase Filtered INL join.
### 2.1.3 Optimal Join Filter
Our underlying objective is to minimize the cost to produce $N$ answers by a join operator. We approximate the cost measure by the number of I/O operations; the inclusion of CPU cost would be straightforward. The question we consider is this: What join filter should be used to minimize the number of I/O operations, given that all other parameters are fixed? It is easy to see that Eq. (2) is minimized when $h(M)$ is maximized. Finally, we clearly see the reason for selecting the most frequent join values in the outer relation—such a filter minimizes the cost of a filtered join, under the given memory constraints.
### 2.1.4 Optimal Memory for a Filtered Join
In a typical database system, memory is a precious resource and it is therefore important to know what the maximum memory requirement of a join operation is. To find the optimal memory needed by a filtered join, we need to minimize Eq. (2) with respect to $M$. To find the minimum of the cost function (Eq. (2)) we can use a standard function minimization algorithm such as Golden Section Search [20]. Of course, the minimum memory required by a Filtered INL join is just one page ($M = 1$).
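As a sketch, Eq. (2) and the search for the memory size that minimizes it can be written down directly; here a simple linear scan over candidate values of $M$ replaces the Golden Section Search mentioned above, and $h(M)$ is supplied as a function since it depends on the histogram.

```java
// Sketch: I/O cost of the first phase of a Filtered INL join (Eq. 2) and a simple
// search for the memory size M that minimizes it. A golden-section search could be
// substituted for the linear scan below.
import java.util.function.IntToDoubleFunction;

public class FilteredJoinCost {
    /** Eq. (2): P = M(1 - e^{-N/M}) + (N / h(M)) * (|S| / {S}). */
    public static double cost(int M, long N, double hOfM, long outerPages, long outerTuples) {
        double inner = M * (1.0 - Math.exp(-(double) N / M));
        double outer = (N / hOfM) * ((double) outerPages / outerTuples);
        return inner + outer;
    }

    /** Returns the M in [1, maxM] with the smallest predicted cost. */
    public static int optimalMemory(long N, IntToDoubleFunction h,
                                    long outerPages, long outerTuples, int maxM) {
        int bestM = 1;
        double bestCost = Double.MAX_VALUE;
        for (int M = 1; M <= maxM; M++) {
            double c = cost(M, N, h.applyAsDouble(M), outerPages, outerTuples);
            if (c < bestCost) { bestCost = c; bestM = M; }
        }
        return bestM;
    }
}
```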
### 2.2 Multiple Foreign Key Joins
If individual joins comprising the execution pipeline are independent from each other, then maximizing the tuple rate for each join will maximize the output rate of the query. However, if correlations exist between join columns in different joins, then creating filters to maximize each individual join rate may not maximize the overall query rate, as illustrated by the following example.
**Example 2.1** Consider a three way foreign key join of tables: A with two columns $A_1$ and $A_2$, shown in Figure 5, B with one column, shown in Figure 6, and C with one column, shown in Figure 7. Join conditions are $A_1 = B_1$ and $A_2 = C_1$. Notice that there is a large
correlation between columns $A_1$ and $A_2$; high values of $A_1$ are associated with high values of $A_2$. Suppose that a join filter may have only one tuple. Considered independently, the join filter on table B would consist of value 1 (the most frequent $A_1$ value) and the join filter on table C would consist of value 5 (the most frequent $A_2$ value). However, the pair (1, 5) does not even exist in relation A, and therefore these two filters combined would produce no results! To create a good filter, one should filter pairs of $B$ and $C$ values that are the most frequent in relation $A$. In this case it is the pair (2, 4). □
If multiple join attributes are correlated, a multidimensional join filter must be produced for such a group of attributes. A multidimensional join filter is comprised of tuples that have the most frequent combinations of correlated attributes. To capture attribute dependences, we need multidimensional histograms [15]. The technique for constructing and using multidimensional filters is the same as the one for single attribute filters. Hence we do not discuss the correlated join case further in this paper.
3 Experimental Evaluation of Partial Join Execution
3.1 Testing Environment
We ran our experiments on an NT 4.0 workstation with 128 MB of memory and a 400 MHz Intel Pentium II processor. To measure the impact of filtered joins in an unbiased environment we decided to use a commercial database system rather than a simulation or an experimental system. We conducted our experiments on IBM's DB2 Universal Database version 6.1, configured with 4KB pages. To eliminate buffering by the NT file system, we set the DB2 registry variable NTNOCACHE = yes. This way we could control the amount of memory used by the system by altering the size of the DB2 buffer pool. The amount of memory available to all algorithms was the same.
3.2 Join Methods Compared
1: Index Nested Loops (INL) join. A clustered index was built on the foreign key table. To make the DB2 select an INL join we used the OPTIMIZE FOR N ROWS clause. We used $N = 10$, but any small number would have the desired effect.
2: Filtered Index Nested Loops join. Since we could not modify the join processing of the DB2, we achieved the effect of filtering by rewriting the SQL query with additional
filter predicates. Actual values of the filter predicates were obtained from an externally maintained histogram. For example, to simulate the effect of a filter with the most frequent values in the interval from 10 to 20, we can rewrite the join query from Example 1.1 as follows:
    select price, manufacturer
    from sales s, products p
    where s.productId = p.productId and s.productId between 10 and 20
Before loading the data into the database, we built a compressed histogram [14], which is an enhanced version of the widely-used equi-depth histogram. The size of a histogram was fixed to a typical value of 500 Bytes. (Using a larger histogram, or higher quality algorithms, such as V-optimal [14] or Wavelets [16], would produce even better filters.)
3: Sort merge join. This represents the best DB2 timing when the query is optimized for all answers.
3.3 Data Sets
3.3.1 Real Data Set
We used point of sale data of a Japanese drugstore chain Pharma [7], consisting of two tables: Sales and Products (introduced in Example 1.1). To execute the query from Example 1.1, the system needs to perform an equi-join on productId between Sales and Products. The cardinality of Products and Sales are 27,215 and 893,402, while the tuple sizes are 96 and 110 bytes respectively. Since it was not possible to determine the number of inner tuples per page (the DB2 manual provides only an approximate formula), our simple policy is that if a tuple is selected to be in a filter, at least 10 neighboring tuples are selected as well. This reflects an assumption that 10 neighbors of a particular tuple are likely to be on the same page, which is reasonable since the number of inner tuples per page is 40. Performance results are not very sensitive to this number, as long as it is much smaller than the number of tuples per page, and the results are good enough to illustrate the gains of filtering.
3.3.2 Synthetic Data Set
Conceptually, in terms of tables and schemas, the synthetic data set is the same as the real data set. However, it differs in the distribution of the join values, which were varied throughout the experiment.
Join attribute values form clusters, which are positioned according to Zipf distribution [22]. Zipf distribution is known to accurately model skew in real-life data. Each cluster is bell shaped, with the same standard deviation (width) but with a variable number of points (size). The number of points within a cluster was also distributed according to a Zipf distribution. The summary of parameters, along with the default values used in experiments, are shown in Table 1.
The cardinality of the outer and inner relations are 1,000,000 and 100,000 respectively, while the tuple sizes are 100 bytes in both relations.
| Parameter | Value |
|---|---|
| Number of clusters | 500 |
| Skew in the cluster sizes | 1 |
| Skew in the cluster positions | 0 |
| Standard deviation within a cluster | 100 |
| Total distribution width | 100,000 |
Table 1: Synthetic data parameters with values.
3.4 Experimental Results with Real Data Set
3.4.1 Time Dependency
We measured times of answer tuples arrivals for the query from Example 1.1. Results, when the buffer size is 250 pages, are shown in Figure 8. The horizontal line represents the Sort Merge join, characterized by a pause of about 80 seconds before the first answer tuple is delivered. This is unacceptable for an interactive user who may prefer to obtain some answers immediately. As shown by the dashed line, the INL join achieves this. However, in case users are interested in seeing any significant fraction of the complete result set (more than 4% in this case) they will have to wait for a much longer time than that required for the Sort Merge join. The Filtered INL join has desirable characteristics of both previous methods: quick response time and efficient delivery of a larger number of answers. We note that some items in the sales data are sold much more frequently than others. Filtered join takes advantage of this fact by retaining such items in memory, which results in a large performance advantage.
3.4.2 Memory Dependency
We vary the number of memory pages allocated to the join and show the time needed to produce 6% of the answer set in Figure 9. This figure demonstrates that histogram filtering is effective over a large range of memory sizes (up to the full size of the inner relation). However, the advantage over the traditional INL join is less pronounced for higher memory sizes. This is expected since the INL join incurs far fewer I/Os when given enough memory.
3.5 Experimental Results with Synthetic Data Sets
3.5.1 Time Dependency
The early tuple delivery rate is shown in Figure 10, and it looks much like the corresponding figure for the drugstore data (Figure 8). The Filtered INL join runs significantly faster than the traditional version, since the most frequently referenced tuples are found in the buffer pool. The Sort Merge join blocks for about 120 seconds but delivers answers rapidly from there on. Due to excessive thrashing, the execution time of the INL join exceeds that of the Sort Merge join after 1% of the answer tuples are returned.
Figure 8: Elapsed time vs. percent of output produced (real data set).
Figure 9: Time to produce 6% of output vs. buffer pages (real data set).
Figure 10: Elapsed time vs. percent of output produced (synthetic data set).
Figure 11: Time to produce 5% of data vs. data skew (synthetic data set).
Figure 12: Time to produce 5% of data vs. buffer size (synthetic data set).
3.5.2 Skew Dependency
In Figure 11 we show the time needed to produce 5% of the answers for different skews of the cluster sizes in the outer relation. The differences between algorithms are less pronounced for the higher skew because the LRU replacement policy of the buffer manager effectively keeps the most frequent values in the cache, the same task that is achieved by the join filter.
3.5.3 Memory Dependency
In Figure 12 we show the time needed to produce 5% of the answers for different buffer sizes. This figure demonstrates that the differences between filtered and traditional joins are more pronounced when less memory is available. The same trend was observed and explained in the real data set experiment (Section 3.4). It is interesting to note that the Filtered INL join may not always be faster when given more memory. This emphasizes the importance of finding the optimal memory allocation by minimizing Eq. (2) as described in Section 2.1.4.
3.6 Simulation of Filtered Join I/O Operations
In this section we verify the plausibility of an approximation used in Section 2.1.2. We compare the total number of I/Os predicted by formula (2) against simulation results for the synthetic data set described in Section 3.3.2. The results, presented in Figure 13, show that the two curves agree closely; the discrepancy is mostly confined to the early stages of the join, while the memory is being loaded with the inner tuples that satisfy the join filter. The total number of I/Os is dominated by the outer relation, whose contribution is linear in the number of answers produced (see Eq. (2)). Notice that the number of I/Os on the inner relation is bounded by $M$, the memory size, so once all the inner tuples are brought into memory (1,000 pages in this case), the cost curve becomes a straight line, as evident from Figure 13.
4 Optimizing Queries For N Answers
As we have seen from the experimental section (Section 3), filtered joins are faster than standard join methods when only a small fraction of the result tuples is required. Consequently, an optimizer should create plans involving Filtered Index Nested Loops joins when optimizing for N answers. Current query optimizers, however, optimize for all answers, so we describe how a standard dynamic-programming-based optimizer [17] can be modified to optimize queries for N answers. We note that the best plan for N answers must be identical to the best plan for all answers when N is very large. We review some basic concepts in query optimization next.
A query can be represented as a tree of relational operators such as select, project, and join. Such relational operators are called logical operators. Physical operators are specific algorithms to evaluate logical operators, such as Hash join, Index scan, etc. A plan is a tree of physical operators. There are many plans that are logically equivalent but differ in cost. A traditional optimizer takes a tree of logical operators and generates a logically equivalent plan with the minimum cost to produce all answers.
In this section we focus on join optimization, since it is the most difficult aspect of query optimization. A standard optimizer builds join plans bottom up, starting from one relation subplans. It then uses already generated subplans as building blocks for larger plans. If two logically equivalent plans are generated, the more expensive one is eliminated, i.e., it is not considered as a part of any larger plan.
As indicated before, our task is to modify an optimizer to generate plans with minimum cost for obtaining N answers. We propose to maintain plans optimized for partial answers, in addition to traditional plans optimized for all answers. To distinguish between two types of plans, we mark each plan as either (1) optimized for all answers or (2) optimized for partial answers. For plans optimized for partial answers, we introduce a new physical property called optimization cardinality; it is the number of answers that a plan is optimized for. The optimization cardinality $N_X$ for a plan $X$ must be such that, on average, N query answers would be produced if the cardinality of $X$ was $N_X$. Let $T$ be the cardinality of the query result, and $T_X$ be the cardinality of the plan $X$. Assuming that the tuples contributing to the final answers are uniformly spread throughout table $X$, we have $\frac{N_X}{N} = \frac{T_X}{T}$, from which
it follows that:
$$N_X = N \frac{T_X}{T}.$$ \hspace{1cm} (3)
Note that the optimization cardinality may be any number between 1 and the total plan cardinality.
In addition to traditional optimization rules, the following rules are needed for partial query optimization:
1. We need to create all possible single relation plans and mark them as optimized for partial answers. The optimization cardinality of these plans is given by (3). To evaluate the cost of a single relation plan for partial answers, we count only the work needed to produce the number of answers given by (3). For example, the cost of a File scan plan with optimization cardinality 1 is just 1 I/O.
2. To build a larger plan using a blocking join operator, both input plans must be optimized for all answers. This is because a blocking operator, by definition, requires both of its entire inputs before any answer tuple can be produced. We mark the resulting plan as optimized for all answers. \(^1\)
3. When building larger plans using Nested Loops joins, the inner plan must be optimized for all answers, since the entire inner relation is required before any answer tuple may be produced. If the outer input is marked as optimized for all answers, we mark the resulting plan the same. If the outer input is marked as optimized for partial answers, we proceed as follows: (1) Evaluate the cost of the join as if the cardinality of the outer subplan is equal to its optimization cardinality, (2) determine the resulting optimization cardinality using Eq. (3), and (3) mark the resulting plan as optimized for partial answers.
4. If a plan optimized for a partial number of answers is more expensive than a logically equivalent plan optimized for all answers, it may be eliminated. This is due to the fact that the cost of a plan may only decrease with the smaller optimization cardinality. Note that a plan optimized for all answers may not be eliminated, even if it is more expensive, since more efficient blocking algorithms are applicable to such plans.
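A minimal sketch of the bookkeeping implied by Rules 1–4 is given below; the Plan structure, cost function, and cardinality estimates are illustrative placeholders, not the actual optimizer interfaces:

```python
# Rough sketch of the plan bookkeeping implied by Rules 1-4 (all cost
# functions and cardinalities below are illustrative placeholders, not the
# optimizer's real cost model).
from dataclasses import dataclass
from typing import Optional

@dataclass
class Plan:
    tables: frozenset               # which base tables the plan joins
    cost: float                     # estimated cost
    cardinality: float              # T_X, estimated output cardinality
    partial: bool                   # True if optimized for partial answers
    opt_card: Optional[float] = None  # N_X, only meaningful when partial

def optimization_cardinality(n, plan_card, result_card):
    """Eq. (3): N_X = N * T_X / T, clamped to [1, T_X]."""
    return max(1.0, min(plan_card, n * plan_card / result_card))

def nested_loops(outer, inner_all, n, result_card, join_card, cost_fn):
    """Rule 3: the inner plan must be optimized for all answers; a partial
    outer yields a partial result plan with a new optimization cardinality."""
    cost = cost_fn(outer, inner_all)
    if not outer.partial:
        return Plan(outer.tables | inner_all.tables, cost, join_card, False)
    return Plan(outer.tables | inner_all.tables, cost, join_card, True,
                optimization_cardinality(n, join_card, result_card))

def prune(plans):
    """Rule 4: drop a partial plan if some equivalent all-answer plan is
    cheaper; never drop an all-answer plan in favour of a partial one."""
    best_full = min((p.cost for p in plans if not p.partial),
                    default=float("inf"))
    return [p for p in plans if not p.partial or p.cost < best_full]
```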
To illustrate the search process when optimizing for \(N\) answers we present the following example.
**Example 4.1** Consider a three way join between tables A, B, and C, as described in Example 2.1. Suppose that a plan optimized for 10 answers is needed. Single table subplans considered by an optimizer are A, B, and C. (We use table names to denote subplans, at the risk of confusing tables and plans; however, the meaning is always clear from the context.) Because we need to optimize for 10 final answers, an optimizer will also consider plans A', B', and C', where the prime (') stands for a subplan optimized for partial answers. Since both of our joins are foreign key joins, the optimization cardinality for all single relation plans is 10, the final optimization cardinality (according to Eq. (3)). Two table subplans considered by an optimizer are AB and AC (since we don't consider cross products) and
\(^1\)We could also cost a blocking join for a partial number of answers but the savings will be small since most of the execution time is the "think" time.
the corresponding \((AB)'\) and \((AC)'\) plans. The best plan for group AB is the cheaper of \(\text{AnyJoin}(A, B)\) and \(\text{AnyJoin}(B, A)\). The possible solutions for \((AB)'\) are \(\text{NestedLoops}(A', B)\) and \(\text{NestedLoops}(B', A)\), according to Rule 3. If \((AB)'\) is more expensive than AB, it is pruned (not considered as a part of any larger plan), according to Rule 4. Analogous statements are true for plans AC and \((AC)'\). The final plans are ABC and \((ABC)'\). The best plan for the group ABC is the cheaper of \(\text{AnyJoin}(AB, C)\) and \(\text{AnyJoin}(AC, B)\), since we consider only left-deep trees. The best plan for group \((ABC)'\) is the cheaper of \(\text{NestedLoops}((AB)', C)\) and \(\text{NestedLoops}((AC)', B)\). The final plan is the cheaper of ABC and \((ABC)'\). □
5 Using Filters to Reduce Total Execution Time
In this section, we demonstrate that using statistics may also reduce the total execution time of blocking operators. We achieve significant cost reduction in Hybrid Hash join and group-by operators when the distribution of the join column is skewed.
5.1 Hybrid Hash Join
Hybrid Hash join algorithm (see [18] for an overview) works by partitioning inner and outer relations. In the first phase of the algorithm, the inner (smaller) table is partitioned into a number of memory sized partitions, which are written to temporary files on disk, and one memory resident partition. During the second phase, the outer table is hashed and tuples that hash into the memory resident partition are immediately probed and joined, while the other tuples are written to their partitions on disk. In the third phase, the matching disk resident partitions are joined. Since the outer tuples that hash into disk resident partitions have to be written to disk and read back in, the more tuples that hash into the memory resident partition, the smaller the number of I/O operations. To reduce the I/O, the algorithm uses all available memory in the first phase to make the memory resident partition as large as possible.
Consider a foreign key join where the foreign key table is chosen as inner. To save on I/O in the outer table, the number of outer table tuples that hash into the memory resident partition should be maximized. This means that the candidate tuples for the memory resident partition should have the most matches in the outer table. Therefore, we suggest using database statistics to find the most frequent values of the join column in the outer table and storing these values in the memory resident partition. We illustrate our strategy in Figure 14. The figure shows traditional Hybrid Hash join partitioning, which is based on hash values and therefore uniform, and Filtered Hybrid Hash join partitioning, which is characterized by a large first partition of the outer table, since it contains the most frequent join values. Because the first partition of the outer table is not written to disk, the larger it is, the smaller the number of I/O operations. We demonstrate gains of this strategy by analyzing the cost formula for Hybrid Hash join.
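Before turning to the cost analysis, the following sketch illustrates this partitioning choice; the histogram, relation layout, and function names are assumptions made for illustration:

```python
# Illustrative sketch of the partitioning choice described above: instead of
# letting the hash function decide which build (inner) tuples stay memory
# resident, pin the build tuples whose join values are most frequent in the
# probe (outer) table.  Statistics come from a histogram; names are made up.
from collections import Counter

def choose_memory_resident(build_rows, probe_key_histogram, memory_tuples):
    """Split build_rows into (memory_resident, spilled_to_disk).

    build_rows          : list of (join_key, payload) tuples
    probe_key_histogram : Counter of join-key frequencies in the probe table
    memory_tuples       : how many build tuples fit in memory
    """
    # Rank build rows by how often their key occurs on the probe side.
    ranked = sorted(build_rows,
                    key=lambda row: probe_key_histogram.get(row[0], 0),
                    reverse=True)
    return ranked[:memory_tuples], ranked[memory_tuples:]

# Toy example: key 'a' dominates the probe side, so its build tuples stay in memory.
hist = Counter({"a": 900, "b": 50, "c": 5})
build = [("a", 1), ("b", 2), ("c", 3), ("a", 4)]
in_mem, spilled = choose_memory_resident(build, hist, memory_tuples=2)
print([k for k, _ in in_mem])   # ['a', 'a']
```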
Let \(R\) denote the build relation and \(S\) the probe relation in the Hybrid Hash join algorithm, and let \(|R|\) and \(|S|\) denote the respective numbers of pages while \(\{R\}\) and \(\{S\}\) denote the respective numbers of tuples. Furthermore, let \(MIPS\) denote the CPU speed of the machine, \(T_{I/O}\) denote the average I/O service time, \(\psi\) denote the selectivity of the join, and \(f_R\) and \(f_S\) denote the fractions of the respective relations that belong to the memory resident partition.
Then the total cost of the Hybrid Hash is [21]:
\[
\text{I/O Cost} = T_{I/O}\left[(3 - 2f_R)|R| + (3 - 2f_S)|S|\right], \tag{4}
\]
\[
\begin{aligned}
\text{CPU Cost} = \frac{1}{MIPS}\Big\{ & I_{I/O}\left[(3 - 2f_R)|R| + (3 - 2f_S)|S|\right] \\
& + I_{move}\left[(3 - 2f_R)\{R\} + (3 - 2f_S)\{S\}\right] \\
& + I_{join}\,\psi\{R\}\{S\} + I_{hash}\left[(2 - f_R)\{R\} + (2 - f_S)\{S\}\right] \\
& + I_{search}\left(\{R\} + \{S\}\right)\Big\}, \tag{5}
\end{aligned}
\]
where the various \( I \)'s stand for the instruction path lengths of the corresponding operations. The value of \( f_R \) is \( M/|R| \), where \( M \) is the memory allocated to the join. Clearly, the cost is a decreasing function of \( f_S \), and \( f_S \) is maximized when the memory resident partition of \( R \) contains the join attribute values that are the most frequent in \( S \).
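A quick numeric check of Eq. (4), with arbitrary illustrative parameters, confirms that the I/O cost falls as \(f_S\) grows:

```python
# Quick numeric check of Eq. (4) (parameters are arbitrary, for illustration
# only): the I/O cost falls linearly as f_S, the fraction of probe tuples
# hashing into the memory-resident partition, grows.
def hybrid_hash_io_cost(pages_R, pages_S, f_R, f_S, t_io=0.01):
    return t_io * ((3 - 2 * f_R) * pages_R + (3 - 2 * f_S) * pages_S)

for f_S in (0.1, 0.3, 0.6, 0.9):
    print(f_S, round(hybrid_hash_io_cost(5_000, 50_000, f_R=0.2, f_S=f_S), 1))
# Larger f_S (more frequent probe values matched in memory) -> fewer I/Os.
```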
For an experimental evaluation of these ideas we used synthetic data generated as described in experimental section (Section 3) with data parameters shown in Table 2 and cost parameters shown in Table 3. The memory given to the join operator is 40 MB. In general, the smaller the memory the larger the impact of filtering. Results of experiments are shown in Figure 15. As expected, using a filter to define the content of the memory resident partition is especially beneficial for high skew data, where the time saving is about 25% for this typical data set. This is because a large percentage of the outer relation is matched to the memory resident partition.
5.2 Hash-Based Group-By Operator
A hash-based implementation of the group-by operator is analogous to the Hybrid Hash algorithm. Results for a certain number of groups are aggregated in memory, while tuples from the other groups are hashed to appropriate partitions that are then grouped in memory during a second pass. We propose to determine the largest groups from database statistics and to make sure that such groups are aggregated in memory. This strategy, which is illustrated in Figure 17, minimizes the number of tuples passed to disk. The exact cost formula for the group-by operator is a simplified version of Equations (4) and (5) with no $R$ terms and no $I_{join}$ term. The experimental results are shown in Figure 16; the memory allocated to the group-by operator is 10 MB. It can be seen that the relative benefits of filtering are even larger here than in the Hybrid Hash join, since there are no filter-independent $R$ terms. We note that our technique can reduce the execution time by half for this typical data set.
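The following hedged sketch outlines the filtered group-by idea; partition handling and the aggregate function (SUM) are simplifications chosen for illustration:

```python
# Hedged sketch of the filtered group-by idea: groups known (from statistics)
# to be largest are aggregated in memory, all other tuples are hashed to disk
# partitions for a second pass.  Partition handling is stubbed with lists.
from collections import defaultdict

def filtered_group_by(rows, large_groups, num_partitions=4):
    """rows: iterable of (group_key, value); large_groups: keys expected to
    be the most frequent, kept memory resident on the first pass."""
    in_memory = defaultdict(float)           # running aggregates (SUM here)
    partitions = [[] for _ in range(num_partitions)]

    for key, value in rows:                  # first pass over the input
        if key in large_groups:
            in_memory[key] += value          # aggregate immediately
        else:
            partitions[hash(key) % num_partitions].append((key, value))

    for part in partitions:                  # second pass, one partition at a time
        agg = defaultdict(float)
        for key, value in part:
            agg[key] += value
        in_memory.update(agg)
    return dict(in_memory)

print(filtered_group_by([("x", 1), ("x", 2), ("y", 5)], large_groups={"x"}))
# {'x': 3.0, 'y': 5.0}
```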
6 Related Work
Optimizing queries for interactive response time is a topic of considerable interest for the database community. To achieve interactive response time for select-project-join queries,
some commercial systems have extended SQL with a clause that instructs the optimizer to minimize the time to produce the first N answers. Examples are IBM DB2 Universal Database's OPTIMIZE FOR N ROWS clause [2] and Microsoft SQL Server 7.0's OPTION FAST N clause [3]. When this clause is present, the optimizer typically chooses a pipelined plan, provided that the value of N is small enough. There is no publicly available documentation on how the feature was implemented; however, we obtained some high-level information from [11] and [5] that indicates certain similarities with the approach outlined in Section 4.
Optimization for the first answers is also considered in [9]; however, the main contribution there was to reduce wasted effort in join pipelining. The authors observed that a pipeline of reductive join operators could be more efficient if the next tuple was always drawn from the first join operator whose tuple failed to match. We, on the other hand, consider foreign key (non-reductive) joins, for which this work does not apply. We are also primarily concerned with a single join, not a pipeline of joins, even though our work applies to a series of joins as well.
Ideas about obtaining a certain number of query answers quickly were also presented in [19]. The emphasis of that paper is on heuristic query rewriting and decomposition into subqueries that can be evaluated on demand. The decomposition strategy is based on system configuration and past user behavior.
The online aggregation framework [8, 6] is closely related to our work, since its objective is also to achieve interactive response times. However, the focus of the work on online aggregation (and in particular Ripple joins) is to produce output with desired (random) statistical properties. In contrast, our work is focused on producing answers in the shortest possible time.
Previous work on partial query evaluation [13, 4, 1] is largely orthogonal to ours. The TOP N operator, which is the main focus of these papers, is conceptually a sequence of operators starting with SORT and followed by OPTIMIZE FOR N and STOP AFTER N. Our work is not concerned with the SORT operator, because with it the problem would reduce to a TOP N query.
A distinguishing feature of our work is the use of system statistics to guide query execution. This idea was not proposed nor evaluated in any of the previous work, to the best of our knowledge.
7 Future Work
One direction for future work is to explore the potential gains that filtering may offer in a wide-area network, where bandwidth is scarce. For example, when transferring a relation over a slow wide-area network in distributed join processing, one might first ship the data satisfying the join filter. This way, the scarce bandwidth is used efficiently, as we first ship the data that will produce the maximal number of answers. This strategy is similar in nature to Bloom filters [12].
More research needs to be done to address the issue of answer correlation when using filters to answer partial queries. Users see the results that satisfy the filter but are not aware of the filter's existence and its influence on the early results. In other words, the first tuples produced under a filter are not a random sample of the complete answer set, and assuming so may mislead the user. A solution to this problem is to verify that the initial answers are random by maintaining information about attribute correlations. For example, if a
query asks for attribute A and the filter is on attribute B, the filtering technique will not impose any specific ordering of answers only if there is no correlation between A and B.
8 Conclusion
We explored the idea of using system statistics to improve the execution speed of major relational operators such as join and group-by. Our experimental results indicate that significant performance gains are possible in both partial and full execution of those operators. Filtered join and group-by algorithms can be easily incorporated into a traditional cost based database optimizer, and can naturally be included into an online aggregation system. We believe that histogram filtering is an important step towards achieving interactive response times for complex, ad hoc, decision support queries.
References
1 Introduction
Model checking [23] is a highly efficient technique for the computer-aided verification of computer systems such as integrated circuits, network protocols, and software. Model checking has long made the transition from research into practice and is routinely used by companies like Intel, Microsoft, or Amazon. Intel, for example, replaced testing with verification for the core execution cluster in their design of the Intel Core i7 processor [46] and, recently, the initial boot code in data centers at Amazon Web Services (AWS) has been model checked to be memory safe [27]. The key advantage of model checking is that it is an automatic method: given a system description $M$ and a logical specification $\varphi$ of a desired behavioral property, the model checker automatically determines whether or not $M$ satisfies $\varphi$. If the system design is erroneous, the model checker generates a counterexample in the form of a specific execution of $M$ that violates $\varphi$. While finding the counterexample is completely automatic, model checking typically provides very little assistance in actually understanding the counterexample and its underlying design flaw. Model checkers typically output the counterexample in the form of a detailed listing that contains the complete state information for every step of a computation that leads to the violation. Understanding all this data is already difficult for small designs and, for more complex systems and specifications, quickly becomes a daunting task.
In this paper, we present a visualization system that aids the analyst in understanding the counterexamples found by the model checker. The visualization views communicate the core aspects of the model checking result. Specifically, we introduce graphical representations of binary values for improving pattern recognition, color encoding for better indicating related aspects, visually enhanced textual descriptions, as well as extensive cross-view highlighting mechanisms. Further, through an underlying causal analysis of the counterexample, we are also able to identify values that contributed to the violation and use this knowledge for both improved encoding and highlighting. Finally, the analyst can modify both the hyperproperty specification and the system directly, which supports understanding the error leading to the counterexample as well as iterating on the provided system and specification. We ran multiple case studies with HYPERVIS and tested it with domain experts in qualitative feedback sessions. The participants' positive feedback confirms the considerable improvement over the manual, text-based status quo and the value of the tool for explaining hyperproperties.
A typical example is observational determinism, which requires that two executions of the system that agree on the public inputs should not show any difference on the public outputs. Consequently, the model checker searches for such a pair of executions that differ in their public output values. Our goal is to help the analyst understand the violation of the hyperproperty by visualizing the relationship between the individual system executions, as well as the relationship to the system description and the logical formula. To support this, we have implemented the interactive tool HYPERVIS (Fig. 1), which follows a multiple coordinated views approach [21, 72]. We provide five interconnected views. The hyperproperty specification is shown as a logical formula in the formula view, the system as a state machine in the graph view, and the executions over time in a table-like trace view and in a more compact timeline view. Additionally, there is a textual explication in the explanation view.
The fundamental challenge is that the connections between the different views and the relevance of their individual components is not known in advance, but rather must be deduced specifically for the hyperproperty of interest. We address this challenge with an automated causal analysis of the counterexample, where we identify those elements of the different views that directly contribute to the violation of the specification. The textual explication in the explanation view is directly based on this analysis. In all other views, the relevant elements can be directly highlighted. By incorporating easy-to-parse value encodings and clear color mappings alongside interactive mechanisms such as linked highlighting and debugger-like functionalities, we support the analyst in recognizing the counterexample’s characteristics and in relating its different components. Finally, after the cause of the violation is understood, the analyst can correct the system and the specification directly within the interface through integrated editing functionalities.
HYPERVIS is the result of an interdisciplinary effort and a highly iterative design process which included joint brainstorming sessions and discussions between visualization and model checking experts. The results and insights from this joint effort are presented in this paper. Specifically, we contribute: (1) an in-depth analysis of challenges, (2) the design of visualization and interaction concepts enabling the visual analysis of model checking results, (3) the realization of these concepts with HYPERVIS as a web-based tool alongside integrated editing facilities, and (4) insights from applying our tool in multiple case studies and from user feedback sessions with 6 participants. In summary, our work contributes to a class of visualization solutions that aims at visually explaining complex and abstract computing concepts.
2 Working with Hyperproperties
To support the analysis of counterexamples, we first need to understand the involved components, current workflows, and prevalent challenges. Therefore, we will first describe the formal objects utilized during the model checking process on a toy information-flow control problem, where a system needs to satisfy observational determinism (Sect. 2.1). Then, we will detail the current workflow using a slightly bigger example (Sect. 2.2) before outlining the resulting challenges for analyzing counterexamples (Sect. 2.3).
2.1 Example: Verifying Observational Determinism
In general, the considered objects include the system model $M$, the counterexample executions $\pi$ and $\pi'$, and the hyperproperty specification $\phi$. In this simplified example, our model $M$ (Fig. 2) is prone to leak a secret $s$ via the publicly observable outputs $o_1$ and $o_2$ to an attacker. The underlying security lattice considers the secret $s$ to be a confidential input that should not be visible to any observer, while the input $i$ and the outputs $o_1$ and $o_2$ are publicly observable. The model $M$ can be represented as a finite state machine, where the current state determines the system's output, and the transitions of the finite state machine are labelled with the inputs to the system. All inputs and outputs are binary values; thus, they are either present or absent. When executing such a system, the present inputs and outputs are observed over multiple time steps. The system in Fig. 2 cycles in the first state, outputting nothing, until an input $i$ is present. Depending on whether a secret $s$ is also given, the system then either outputs $o_2$ or $o_1$ indefinitely. If an attacker now happens to observe two executions of the system where the outputs are different although the input $i$ was the same on both executions, they can infer the secret $s$ at this time step.
The specification $\phi$ that we would like to verify for the system $M$ is given as a HYPERLTL formula [24], a linear-time temporal logic for hyperproperties that can relate multiple executions. For the example above, we would like to require observational determinism, which is formalized in HYPERLTL as follows: $\forall \pi\, \forall \pi'.\ \Box(i_\pi \leftrightarrow i_{\pi'}) \rightarrow \Box\big((o_{1,\pi} \leftrightarrow o_{1,\pi'}) \land (o_{2,\pi} \leftrightarrow o_{2,\pi'})\big)$. The formula quantifies universally ($\forall$) over two traces $\pi$ and $\pi'$. The temporal modality $\Box$ means “globally”, i.e., the formula $\Box \phi$ requires the subformula $\phi$ to hold at every point in time. The given formula thus states that for all trace pairs $\pi$ and $\pi'$ it must hold that when the observable inputs are the same at every point in time, the respective observable outputs must also be equal. Given the model and formula, a model checker would now provide two specific executions where at a given time step the outputs differ while the inputs are equal.
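To make the example concrete, the following sketch models a three-state Moore machine with the behavior described for Fig. 2 (the exact transition structure is our assumption based on the prose) and searches for a pair of bounded input sequences that violates observational determinism:

```python
# A minimal sketch (the concrete transition structure is an assumption based
# on the description of Fig. 2): a three-state Moore machine that leaks the
# secret s through o1/o2, plus a brute-force check of observational
# determinism over all bounded input sequences.
from itertools import product

# States: 0 = waiting, 1 = emits o1 forever, 2 = emits o2 forever.
OUTPUT = {0: frozenset(), 1: frozenset({"o1"}), 2: frozenset({"o2"})}

def step(state, inputs):
    if state != 0:
        return state                       # absorbing output states
    if "i" in inputs:
        return 2 if "s" in inputs else 1   # the secret decides the branch
    return 0

def run(input_seq):
    """Return the sequence of output sets for a finite input sequence."""
    state, outputs = 0, []
    for inputs in input_seq:
        outputs.append(OUTPUT[state])      # Moore machine: output of current state
        state = step(state, inputs)
    return outputs

# Search for a counterexample to observational determinism: equal public
# input i at every step, but different public outputs at some step.
alphabet = [frozenset(c) for c in (set(), {"i"}, {"s"}, {"i", "s"})]
for t1, t2 in product(product(alphabet, repeat=3), repeat=2):
    same_i = all(("i" in a) == ("i" in b) for a, b in zip(t1, t2))
    if same_i and run(t1) != run(t2):
        print("violation:", t1, t2, run(t1), run(t2))
        break
```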
2.2 Current Workflow
With the growing complexity of both the system and the specification, model checking of hyperproperties quickly becomes complicated. We demonstrate the current workflow and the corresponding challenges by invoking a model checker for hyperproperties on a more involved example. To this end, we consider a system that arbitrates the access of two processes to a shared resource. Both processes can request access to their critical section (using req), where they can interact with the shared resource, and the arbiter grants the access (with grant) while ensuring mutual exclusion, i.e., only one of the processes can enter its critical section at any given time. The arbiter guarantees that every request will eventually be answered while not giving out spurious grants, i.e., every grant will have been requested before. The finite state machine for this system is sketched on the right in Fig. 3. We want to check whether the arbiter is symmetric, i.e., whether a pair of traces $\pi$ and $\pi'$ with symmetric requests at every step (i.e., $\text{req}_{0,\pi} \leftrightarrow \text{req}_{1,\pi'}$ and vice versa) also receives the grants symmetrically. This hyperproperty checks if any of the processes has an unfair advantage and is favoured when granting access to the critical section. The corresponding HYPERLTL formula expressing symmetry is noted on the left in Fig. 3. The sketched system grants access asymmetrically: if both processes request initially, process 0 is always granted first (Fig. 4).
The symmetry specification and the model of the arbiter can be given to a HYPERLTL model checking tool, such as MCHYPER [51], which are typically command-line based. The provided system models are usually considered as hardware specifications that could be implemented, e.g., on a chipset. Consequently, the model checkers also consume low-level circuit representation like AIGER [10], encoding the system as an And-Inverter Graph. This representation is hard to read for human developers, who often sketch the system by hand in a more visual way (Fig. 3) and realize the models using hardware description languages such as VERILOG [45], which are then compiled down to AIGER. Given such a system description and a hyperproperty, the model checker then tries to find a counterexample, i.e., a set of system
Fig. 2. A simple system leaking a secret $s$ through the observable outputs $o_1$ and $o_2$.
Fig. 4. (a) Instance of a counterexample produced by MCHyper (excerpt), here for the system and formula described in Fig. 3. (b) Handwritten notation of the provided output in a table-like format. The marked outputs in the second row do not fulfill the requirements of the formula.
evaluations that together violate the HYPERLTL formula.
If a violation occurred, a counterexample is reported in a textual representation where each line gives a variable's value on a given trace at a given time step (Fig. 4). This representation is hard to grasp: even for smaller counterexamples, the output consists of a few hundred lines (140 lines for this arbiter example), rendering it almost impossible to quickly understand the violation. Consequently, system designers might write down the values in a table-like representation (Fig. 4b). Only then can they start to relate formula, system, and the counterexample executions with each other in order to identify and understand the violation of the specification.
2.3 Challenges
This whole process quickly becomes cumbersome and poses multiple challenges. First of all, hyperproperties can express arbitrarily complex relations across traces and time, making it hard to recognize the patterns in the executions that violate such a hyperproperty. Further, analysts need to identify which subformulas were relevant (i.e., violated) and which parts can be ignored. Finally, relating the formula, the system, and the counterexample executions to each other is currently an entirely manual task.
3 Background & Related Work
We first provide formal details of the model checking problem of hyperproperties. Secondly, we elaborate on the importance of visualization methods to better understand abstract models or processes by giving an overview of related work. Finally, we discuss existing work for editing formula and graph representations.
3.1 Model Checking of Hyperproperties
Model checking [23] answers the following question: Given a system description $M$ and a specification $\phi$, formally describing the desired property, does $M$ satisfy $\phi$? More specifically in the context of hyperproperties, we require that the set of executions of $M$ satisfies the hyperproperty. For the interested reader, we will define these concepts formally in the following.
The system description $M$ is typically provided as a finite Moore state machine, formally defined as a tuple $(S, s_0, I, O, \tau, \lambda)$ with: $S$: a finite set of states; $s_0$: the initial state; $I$: the input alphabet; $O$: the output alphabet; $\tau: S \times I \rightarrow S$: a transition function; and $\lambda: S \rightarrow O$: an output labeling. Figure 2, for example, depicts a finite Moore state machine with three states. The input alphabet contains the variables $i$ and $s$ and the output alphabet contains the variables $o_1$ and $o_2$. Edges of the state machine (arrows) are labeled with the inputs and states (circles) are labeled with the system's output. An execution (trace) of a model $M$ is an infinite sequence of sets of atomic propositions $AP$ through the state machine, where $AP = I \cup O$. An example trace of the model in Figure 2 is $\{i, s\}\,(\{o_2\})^\omega$. In the first position of the trace (corresponding to the initial state and first input), there is no output but the inputs $i$ and $s$. As defined by the transition function, we proceed from the initial state to state 2, where we reside indefinitely, outputting $o_2$ without receiving a further input. The notation $(\{o_2\})^\omega$ denotes that $\{o_2\}$ is repeated infinitely often. Formally, the set of all traces over a set of atomic propositions is thus $(2^{AP})^\omega$, i.e., the set of such infinite sequences over atomic propositions. The set of traces of a system model $M$, denoted by $Traces(M)$, is a subset of $(2^{AP})^\omega$.
Formally, a hyperproperty $H$ is a set of sets of traces; that is, it defines all trace sets that comply with the hyperproperty. If the trace set of a system model $M$ is not an element of the hyperproperty, i.e., if $Traces(M) \notin H$, then the system does not satisfy the hyperproperty. In this case, a counterexample is provided by the model checker, i.e., a set of system traces that together violate the hyperproperty.
The desired behavior of the system is provided in a formal specification language such as HyperLTL, a temporal logic for hyperproperties. In HyperLTL, variables are interpreted as atomic propositions which can be connected with either Boolean operators (e.g., equivalence $\leftrightarrow$, implication $\rightarrow$, or disjunction $\lor$) or temporal operators. The most prominent temporal operators are globally $\Box$ (where $\Box\phi$ requires $\phi$ to hold at all times) and eventually $\Diamond$ (where $\Diamond\phi$ means that $\phi$ will hold at some point in time); further operators include until ($\mathcal{U}$), release ($\mathcal{R}$), and next ($\bigcirc$). As an example, consider again the HyperLTL formula from Sect. 2.1
$\forall \pi\, \forall \pi'.\ \Box(i_\pi \leftrightarrow i_{\pi'}) \rightarrow \Box\big((o_{1,\pi} \leftrightarrow o_{1,\pi'}) \land (o_{2,\pi} \leftrightarrow o_{2,\pi'})\big)$. HyperLTL formulas start with a quantifier prefix introducing universally ($\forall$) or existentially ($\exists$) quantified trace variables ($\pi$ and $\pi'$), followed by a formula $\psi$ in the body (here $\Box(i_\pi \leftrightarrow i_{\pi'}) \rightarrow \Box((o_{1,\pi} \leftrightarrow o_{1,\pi'}) \land (o_{2,\pi} \leftrightarrow o_{2,\pi'}))$). Within the body, the atomic propositions are indexed with trace variables to indicate which trace they refer to (e.g., $i_{\pi'}$). For a formal definition of the semantics and more examples of hyperlogics, we refer the interested reader to [24, 25].
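As an illustration only (real HyperLTL semantics is defined over infinite traces), the body of the observational-determinism formula can be evaluated over a pair of finite traces as a nested predicate:

```python
# A rough sketch of evaluating the quantifier-free body of the
# observational-determinism formula over a *pair of finite traces*.
# Real HyperLTL semantics is over infinite traces; this bounded
# evaluation is only an illustration.
def globally(pred, t1, t2):
    return all(pred(a, b) for a, b in zip(t1, t2))

def obs_determinism(trace_pi, trace_pi2):
    """Bounded check of □(i_π ↔ i_π') → □(o1_π ↔ o1_π' ∧ o2_π ↔ o2_π')."""
    same_inputs = globally(lambda a, b: ("i" in a) == ("i" in b),
                           trace_pi, trace_pi2)
    same_outputs = globally(lambda a, b: ("o1" in a) == ("o1" in b)
                                      and ("o2" in a) == ("o2" in b),
                            trace_pi, trace_pi2)
    return (not same_inputs) or same_outputs

# Each trace position is the set of atomic propositions that hold there.
pi  = [{"i", "s"}, {"o2"}, {"o2"}]
pi2 = [{"i"},      {"o1"}, {"o1"}]
print(obs_determinism(pi, pi2))   # False: equal inputs, differing outputs
```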
With HyperVIS, we visualize hyperproperty counterexamples returned by a hyperproperty model checker [26, 30, 31]. We use MCHyper [31], which builds on ABC [14]. MCHyper takes as inputs a hardware circuit, specified in the AIGER format [9, 10], and a HyperLTL formula. MCHyper solves the model checking problem by computing the self-composition [5] of the system. If the system violates the HyperLTL formula, MCHyper returns a counterexample. This counterexample is a set of traces through the original system that together violate the HyperLTL formula. Depending on the type of violation, this counterexample can then be used to debug the circuit or refine the specification iteratively.
3.2 Visualization and Explication of Formal Methods
In recent years, research has started to investigate more intensively how complex and abstract algorithms and models can be visualized and interactively explored, and, thus, be made more transparent. Most prominently, this includes work within the area of explainable artificial intelligence (XAI) [40, 98, 92], but it also extends to related fields such as formal methods [32]. For example, proof attempts have been visualized by SATVIS [34] and an improved version of the Z3 Axiom Profiler [74]. They visually represent proof attempts from the VAMPIRE theorem prover and the Z3 SMT solver, respectively, in order to support users and developers of the tools in understanding the results.
Textual Explications One instance of textual explications are automatically generated facts based on the underlying data [81]. Typically, machine learning algorithms extract facts which are then verbalized using natural language generation (NLG) [57, 65, 80]. These facts can then aid interpreting a visualization by verifying the viewer’s thoughts and pointing at potentially overlooked aspects [81]. The generated facts can be shown as a single caption for a chart [78] or be provided as a collection of statements next to the visualization [38]. Applications in the areas of student-teacher communication [60], XAI [42, 77], and supporting safe handovers in cyber-physical systems [84] further indicate their practical benefits for interpreting visualizations.
Visualizing Counterexamples Visually representing counterexamples for trace properties, e.g., for LTL, is a known challenge for
which various approaches have already been presented. Techniques, such as state diagrams [3, 15, 45, 58], sequence diagrams [16, 35, 58], and variable tables [45, 58, 68], convert the counterexample and the system model to more readable formats. The model view [16, 35, 55] takes a different route by mimicking the counterexample and providing a step-wise navigation. Further, visualization approaches for such counterexamples with single executions have been considered for various domains and applications [11, 12, 44, 69]. Additionally, the established model checker UPPAAL [55] visualizes timed automata for real-time systems, allowing for interacting with simulations of the system.
Approaches for supporting the analysis of counterexamples include minimizing [33] and explaining counterexamples [7], as well as investigating several system executions simultaneously [11, 39, 75]. Multiple works explore how individual counterexamples can be visualized and explained, e.g., for function block diagrams [44, 68], with the newest version of MODCHK [68] being highly related to HYPERVIS. MODCHK provides a causality analysis [7] which delivers an over-approximation of a set of causes. In contrast, HYPERVIS produces minimal explanations using a more efficient explanation algorithm. Further approaches to identifying the causes of a trace property violation [59] have been implemented, for instance, in the EXPLAIN [58] tool, which has been incorporated into multiple model checkers [19, 20, 22].
Visualizing Parallel Executions Research on distributed systems has examined how to visualize multiple, parallel executions of a system. Two examples are Oddity [63, 87] and ShiViz [28]. Oddity consists of an interactive visual debugger and is part of the DSLabs framework, which introduces a model checker for distributed systems. ShiViz is a web tool that uses space-time diagrams to visualize the execution of a distributed system. In particular, these diagrams highlight the communication between components and the partial ordering between events that happen across components (the happens-before relationship). However, while related investigations have been conducted, the specific case of visualizing model checking results of hyperproperties has not been considered.
3.3 Editing Formulas and Systems
Modeling systems that fulfill certain specifications is an iterative process of checking, correcting, and refining both the specifications and the system models. Therefore, editing the formula and the system are essential parts of the workflow. In the following, we present existing work providing techniques for efficiently editing them.
Formula Editing Established online tools like Wolfram Alpha [86] feature advanced text editors that facilitate writing mathematical notation. Specifically for formula editing, most interfaces provide a real-time preview of the formula, translated from the markup language used for writing the mathematical expressions. The most common markup languages for mathematical input are LaTeX [77, 79] as well as OpenMath and MathML [77, 58]. Via markup alternatives or special-character keyboards, WYSIWYG approaches can allow users without knowledge of the markup language to still write the desired mathematical expressions [54, 71]. Such visual interfaces can also support focusing on specific formula parts by collapsing selected subformulas [18, 49]. Finally, more experimental interfaces are starting to provide handwriting and speech recognition capabilities [29, 54, 85].
System Editing System models are usually edited in a hardware description language like VERILOG [43] within an integrated development environment. As an alternative to these textual representations, the systems can also be modeled through finite state machines [16, 61, 70]. These models can then be visually edited, e.g., by adding, relocating and removing nodes or edges from the node-link diagram [33, 73]. Lightweight versions of such editing are already provided within commercial tools for general diagram editing, such as Stateflow [4].
4 HYPERVIS: Visualizing Model Checking Results
Based on the identified challenges of analyzing model checking results (Sect. 2.3), we iteratively developed HYPERVIS. In this section, we will first recap the main components of the considered counterexamples (Sect. 4.1) and outline our set design goals (Sect. 4.2). Then, as the main part, we will present our visualization design (Sect. 4.3), including its interaction concepts. This is followed by the description of the considered editing and debugging facilities (Sect. 4.4). Finally, we provide further insights into the design process as well as the actual implementation (Sect. 4.5). The tool is provided online at (imld.de/hypervis).
4.1 Components of Counterexamples
Strictly speaking, a counterexample to a hyperproperty is only the set of executions that is returned by the model checking tool. However, for the remainder of this work, we consider a counterexample to comprise the formula and system provided by the analyst as well. Thus, it consists of three main components: the system, the formula, and the specific executions. In addition, we introduce explanations as a fourth component, indicating and explicating relevant parts of the violation.
The system describes the hardware circuit as a transition system with states providing the outputs and transitions implementing state changes based on inputs. Since the system is a hardware circuit, states are internally represented by latches, i.e., sub-circuits that can preserve information. Together, all available variables, i.e., outputs, inputs, and latches, are the atomic propositions. The formula can be represented as a syntax tree over propositional and temporal operators, where the leaves are atomic propositions indexed with a specific trace (or execution).
Here, a formula typically describes relations on pairs of executions, i.e., two instances of the system. Each execution is representing values of atomic propositions for every time step. Notably, these executions can be infinite and contain a lasso (or loop), which marks subsequent time steps that are repeated infinitely.
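A possible data model for these components is sketched below; the field names are our own and do not reflect HYPERVIS' internal format. The lasso is handled by unrolling the loop on demand:

```python
# Hedged sketch of a data model for the counterexample components described
# above (field names are our own, not HyperVis' internal format): each
# execution stores per-step valuations plus an optional lasso start, which
# lets us unroll the infinite suffix on demand.
from dataclasses import dataclass, field
from typing import Dict, List, Optional

@dataclass
class Execution:
    name: str                               # e.g. "pi" or "pi'"
    steps: List[Dict[str, bool]]            # valuation of atomic props per step
    lasso_start: Optional[int] = None       # index where the loop begins

    def value_at(self, prop: str, t: int) -> bool:
        """Valuation at (possibly unrolled) time step t."""
        if t < len(self.steps):
            return self.steps[t][prop]
        if self.lasso_start is None:
            raise IndexError("step beyond a finite execution")
        loop_len = len(self.steps) - self.lasso_start
        return self.steps[self.lasso_start + (t - self.lasso_start) % loop_len][prop]

@dataclass
class Counterexample:
    formula: str                            # HyperLTL formula as text
    executions: List[Execution]
    relevant: List[tuple] = field(default_factory=list)  # (trace, prop, step) from causal analysis

ex = Execution("pi", [{"req0": True, "grant0": False},
                      {"req0": False, "grant0": True}], lasso_start=1)
print(ex.value_at("grant0", 5))  # True: step 5 falls inside the unrolled loop
```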
Through a causal analysis of the counterexample, we are able to identify which atomic propositions in the formula contributed to its overall violation and are, therefore, relevant for the counterexample. For the explanations, we extract textual explications of the most top level subformulas with temporal operators. Depending on the actual top level operator, these subformulas can either be satisfied or violated (e.g., in case of an ‘implies’ → operator, the premise has to be satisfied while the conclusion is then violated). For an analyst, all mentioned components and elements are relevant for understanding the counterexample in general, reasoning about why it can occur, and identifying possible corrections to either the formula or system.
4.2 Design Goals
When starting the design process, we identified multiple design goals that a tool for visually analyzing hyperproperty counterexamples should fulfill. The goals DG1–DG5 describe desired visualization aspects, while DG6–DG7 outline more general tool characteristics.
DG1: Build Upon Familiar Presentations. As illustrated in Fig. 3 and Fig. 4 analysts often sketch the system or list the executions in a certain way. We aim to foster an intuitive understanding of the views by building upon these typical representations, but extending them with more effective encoding strategies.
DG2: Support Recognizing Trace Relations. In many cases, a specific combination of absent or present atomic propositions must be identified and compared across the executions. We aim to simplify such pattern recognition within the executions.
DG3: Relating Components. A major challenge for analysts is relating the different components to each other, e.g., mapping back atomic propositions in the execution to corresponding subformulas or to taken transitions in the system. Thus, the tool should support the analyst in mentally linking elements across views.
DG4: Provide Guidance for Identifying Violations. Counterexamples can quickly become overwhelming, with a multitude of variables or time steps being involved. Our goal is to support analysts in identifying the relevant elements that led to the violation and, thus, in understanding the model checking result.
DG5: Enable Editing of Formula and System. Due to their complexity, formulas and systems can easily contain small but hard to recognize bugs leading to a counterexample. For this, the tool should provide integrated functionalities for fixing such issues.
DG6: Provide a Holistic Interface. Model checking of hyperproperties is a multi-step process; from providing the input, to analyzing the counterexample, to iterating the specification or system. Thus, a tool should consolidate these steps within one interface.
DG7: Avoid Setup Efforts. The tools used for model checking are often command-line based and implemented with different dependencies. We aim at avoiding the setup effort for the analyst and providing a unifying ready-to-use tool.
In the following, we detail how we addressed these design goals within the visualization design of HYPERVIS’ views.
4.3 Visualization Design
Guided by the described design goals, we developed HYPERVIS and its general interface including five visualization views. The focus in this section lies on how we visualize the counterexample components specifically as well as efficiently guide the analysis.
4.3.1 Visualizing a Counterexample: Provided Views
For HYPERVIS, we developed five different views; the formula view, graph view, trace view, timeline view, and explanation view. In the following, their design is detailed.
Formula View The HYPERLTL formula provided by the user is transformed into a representation using the actual logical and temporal operator symbols (DG1). Internally, the formula is in a hierarchical structure; this structure is indicated with bars below the formula string. The bars allow emphasizing the different subformula levels, with the uppermost bars representing the atomic propositions and the lowest bar (marked in blue) the entire formula. Hovering over the bars emphasizes the corresponding subformula (Fig. 5a), simplifying recognizing the formula structure and the corresponding brackets. The stated atomic propositions always relate to one specific trace. To simplify distinguishing which proposition corresponds to which trace, we introduced fixed colors for the traces and added labels to the propositions, i.e., either \( @\pi \) or \( @\pi' \). These trace colors are re-used in all views.
Graph View The system is visualized as a Moore transition system, i.e., a graph with the states as nodes and the transitions as edges (Fig. 5b). Following this convention, the set of present outputs in a given state is printed into the node label, e.g., \( \text{emergency} \) in state S3. If an output is absent, its value is false. Further, we show symbolic transitions, i.e., edges can be labeled with formulas expressing specific input combinations, such as logical conjunctions (e.g., \( \text{up} \land \neg \text{bound} \)). The graph can be freely zoomed and panned.
Trace View As previously described, analysts typically transform the textual output of the model checker into a table-like format, thus creating an overview of all atomic propositions and their values on the traces across time steps. Our trace view builds upon that (DG1) and prints the atomic propositions per trace as columns and the time steps as rows (Fig. 5c). The values themselves are binary, thus they are either true when a variable was present or false when it was absent. We propose to replace the common notation of the values as 1 and 0 with a graphical representation: a filled rectangle \( \blacksquare \) represents present variables and a hollow rectangle \( \square \) absent variables. This representation simplifies recognizing patterns of occurring values in and across traces (DG2).
Small icons before the proposition name indicate its type, i.e., either output, input, or latch. The propositions are sorted first by type and then alphabetically. Controls in the view head allow for hiding a proposition type. In addition to the atomic propositions, we also show a numbered state indicator (e.g., S3) in the first column of each trace. These state indicators are abstractions of the latches, which together encode the current state. The time steps are labeled as T0, T1, and so forth. Further, if a lasso (see Sect. 4.1) is present in the counterexample, it is indicated with gray borders at the respective time steps.
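The value encoding of the trace view can be mimicked in a few lines; this sketch simply prints filled and hollow rectangles in a table-like layout:

```python
# Tiny sketch of the value encoding used in the trace view: binary values are
# rendered as filled/hollow rectangles instead of 1/0, which makes repeating
# patterns across time steps easier to spot.
FILLED, HOLLOW = "\u25a0", "\u25a1"   # ■ and □

def render_trace(steps, props):
    """steps: list of dicts prop->bool; props: column order."""
    lines = ["    " + " ".join(f"{p:>5}" for p in props)]
    for t, step in enumerate(steps):
        cells = " ".join(f"{FILLED if step[p] else HOLLOW:>5}" for p in props)
        lines.append(f"T{t:<3}" + cells)
    return "\n".join(lines)

print(render_trace(
    [{"req0": True, "req1": True, "grant0": False},
     {"req0": False, "req1": False, "grant0": True}],
    ["req0", "req1", "grant0"]))
```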
Explanation View The explanation view shows a verbal summary of the counterexample alongside statements on the topmost subformulas relevant to the found violation (Fig. 5e). The basis of this is an automated causal analysis of the counterexample, which extracts a minimal set of subformulas that contributed to the overall violation at one or more time steps. With this information, we can relate the subformulas to specific values at specific time steps and derive a textual statement. The statements' structure is always the same: first, the temporal operator of the subformula is stated, followed by a list of involved atomic propositions. For each proposition, it is indicated at which time step it became relevant, how the values relate to each other across traces, and whether the values were always true or false. This information is provided as inline or word-sized representations seamlessly integrated into the textual description. Further, each statement is assigned a unique color, allowing for indicating it in other views (DG3). For example, as visible in Fig. 5d, the timeline view shows bars at the bottom, hinting at which time steps a subformula was relevant.
Timeline View So far, the view design was influenced by common ways to write down the counterexample. The timeline view, however, is a new visualization that aims to provide a more compact representation of the executions (Fig. 5d). Similarly to the trace view, it shows the specific values of the atomic propositions, but with time mapped horizontally. By omitting the atomic proposition labels, the rectangles indicating present or absent variables are placed directly next to each other. This allows for further improved pattern recognition, either across traces or across time steps (DG2); for example, two four-variable patterns that differ only in their second variable are immediately distinguishable. The label of the represented proposition can be accessed by hovering over the rectangle; the order is equal to the order in the trace view.
In an earlier iteration, the view was intended to emphasize diverging behaviors of the executions for one variable, e.g., showing when they were in different states or read different values for an atomic proposition. We opted to develop the view further into its more compact format while also showing the atomic propositions. To still indicate diverging executions, the state indicator (e.g., S0) is colored black if both executions are in the same state and colored according to the trace color when they diverge (e.g., S2 and S1). Finally, in the case of a present lasso, an indicator at the time steps is provided.
Fig. 5. Views of HYPERVIS: (a) formula view, here with hover of a subformula; (b) graph view showing the system as a Moore transition system, here zoomed in; (c) trace view providing for both executions \( \pi \) and \( \pi' \) the values of all atomic propositions across all time steps; (d) timeline view showing the executions in a compact format; and (e) explanation view with textual statements on the counterexample, here with one relevant subformula.
A complete statement is shown in Fig. 5e. For $\text{bound}$ the value was $true$ and equivalent on both traces ($\pi = \pi'$) at T2, while $\text{emergency}$ was unequal on both traces ($\pi \neq \pi'$) at T3. These statements can help analysts to quickly grasp the essential aspects of the violation and locate the time steps and atomic propositions of interest. Currently, the provided explanations for the atomic propositions can indicate found equivalences of traces across $n$ time steps as well as consistent values across time steps (DG2). However, as the checked formulas can describe arbitrary relations of atomic propositions across traces, not all relevant patterns are currently recognized and expressed. Similarly, we only provide statements for subformulas at the top two levels; thus, more nested formulas might not be verbalized adequately.
Interface Arrangement By default, the views are arranged in a simple 2-column grid, with formula, explanation, and trace view being placed in a wider column on the left, and timeline and graph view on the right (Fig. 1). However, as the space requirement of the views can vary heavily between counterexamples, the interface also supports arranging the views differently. For example, if a formula becomes rather long, it is placed in full width on top. Similarly, if counterexamples involve many time steps, trace view and timeline view are given more space. In general, the goal is to provide all views within the initial viewport and avoid scrolling as much as possible. Finally, analysts can also manually collapse or maximize views.
4.3.2 Analyzing a Counterexample: Interactive Guidance
For analyzing the counterexample, we provide further interactive mechanisms fostering the comprehension of the counterexample’s specifics. These mechanisms include an explicit highlighting of relevant elements, linked highlighting across views, as well as a debugger-like stepping through the counterexample. These dynamic functionalities of HYPERVIS are also shown and explained in the accompanying video.
Highlighting Relevant Elements As stated in the context of the explanation view, we are identifying the subformulas that contributed to the overall violation at specific time steps. This knowledge is not only used for the explanation statements but also to indicate the relevant elements across all views (DG4). To activate this indication, the explanation view features a ‘Highlight’ toggle button (Fig. 5e). Upon activation, as in Fig. 6a and Fig. 1, the non-relevant subformulas in the formula view are grayed out, as are the non-taken states and transitions of the executions in the graph view. Similarly, non-relevant values in the trace view and timeline view are shown less opaque while relevant values are emphasized. Further, a filter button next to the highlight button allows for removing non-relevant elements from the views.
We also relate the relevant elements to the provided statements in the explanation view (DG3). Specifically, we identify which atomic proposition is part of which statement, i.e., in which subformula it occurs. Further, we propose to use the statements’ assigned color for highlighting: the rectangles representing binary values are colored accordingly, as are the bars indicating the atomic propositions.
Linked Highlighting In general, all views react to hovering over displayed elements, e.g., subformulas, states, or time steps. Hovering also results in a linked highlighting across views, i.e., the corresponding elements in other views are also highlighted (DG3). Only in a few cases, elements are shown in exactly the same way in other views. For example, subformulas in the formula view may occur again in the explanation view. As in most cases elements appear slightly differently, e.g., formula view and timeline view show atomic propositions differently, the correspondence is not immediately apparent and is then indicated through the linked highlighting (DG3).
Specifically, hovering over a trace indicator (i.e., $\pi$ or $\pi'$) in either view highlights the execution in the graph view, i.e., all taken transitions and states are colored in the trace color. Vice versa, hovering a state or transition highlights instances in the executions where this state and the inputs of the transition were present. Hovering over a time step highlights the corresponding row or column in trace and timeline view, the relevant subformulas at this time step, and the executions’ current states and transitions taken next. The atomic propositions in formula and explanation view allow for highlighting the corresponding labels in the trace view and, if applicable, the specific values that were relevant at certain time steps in both trace and timeline view (DG4).
Stepping through a Counterexample It is important to understand the sequence of events that lead to the violation. Therefore, we enable stepping through the counterexample in a debugger-like fashion (Fig. 6b). Through control buttons provided in the interface header, the analyst can move forward and backward. For the current step, a stronger visual highlight is used (Fig. 6b), with the time step colored in blue and relevant subformulas further emphasized. For the graph view, we color the states and transitions in the respective trace colors; if they share the same state or transition, blue is used. The same effect can also be achieved by selecting a time step in the trace view or timeline view. Further, when stepping through, the highlight is permanent and can be used in combination with the highlighting of relevant elements as well as the linked highlighting triggered on hover.
4.4 Tool Functionalities & Editing Facilities
Following DG6, we provide one unified interface that allows for performing model checking, analyzing the counterexamples, and iterating specification and system within it. In the following, we describe the tool functionalities of HYPERVIS as well as its editing facilities.
**Tool Functionalities** We extended HYPERVIS with tool functionalities allowing for using it in a productive way for many model checking projects simultaneously. Among others, this includes functionalities for (re-)loading projects, re-running model checks, or managing different versions of them. The project manager provides access to all model checking projects, i.e., loaded systems and specifications checked, in a sidebar widget. The different projects can have multiple versions (here, marked with the latest modification timestamp), helping analysts to quickly jump to older iterations. For each version, multiple checks can be created, i.e., multiple hyperproperties that a system should fulfill. The versions of the projects can be manually created, but are also automatically introduced when editing a formula or system. In case of faulty edits that result in an error thrown by the model checker, a [...]
As mentioned before, HYPERVIS supports the editing of multiple separate formulas for a single system, potentially allowing to split up complex hyperproperties or to test very different specifications.
**Editing of the System** For editing the system model, changes can be made either through visual editing or by changing the original text input. The visual editing could involve providing a special mode allowing for, e.g., drawing edges, rerouting existing ones, or creating nodes. At the same time, some users might still prefer to directly textually edit the originally provided VERILOG definition. However, as providing such editing support is not straightforward, HYPERVIS currently only features a mock-up editing of the system. Specifically, the challenge comes with the representation of the system as a hardware circuit in the AIGER format [10]. These AIGER files are automatically generated from definitions implemented in VERILOG and are hardly readable by humans. For displaying the graph view, a DOT representation is generated from AIGER; however, the transformation comes with information loss and is therefore not invertible. To sidestep this, an intermediate format for automata could be used, which can be transformed from or to AIGER without information loss and also more easily changed in a programmatic way. For now, we allow the editing of the DOT notation, which updates the shown graph view to illustrate the intended functionality but cannot trigger an updated model check.
**4.5 Design Process & Implementation**
In order to develop HYPERVIS, we followed an iterative design process
within an interdisciplinary team. This team consists of formal methods
researchers on the one hand and HCI as well as visualization researchers
on the other hand. While not end-users, the first group are domain
experts for model checking of hyperproperties, knowing the challenges
and main goals. In the following, we detail the design phases as well
as the current implementation of HYPERVIS.
**4.5.1 Design Phases**
The first phase involved introducing the visualization researchers to the
domain of model checking and hyperproperties in order to establish a
common understanding of the current processes and present challenges.
Afterward, we jointly developed a first click prototype illustrating a
possible interface visualizing the found counterexamples. Then, the first
implementations of the visualizations were realized in a web prototype
alongside a parser consuming the output file of the model checking
generated by MCHYPER. Within this process, it became apparent
that plainly representing the counterexample would be insufficient and
that it would be essential to extract the relevant bits of
the violation and present them to the analyst. At this stage, a first
version of the causal analysis algorithm was developed alongside early
highlighting mechanisms. This enabled testing various case studies, and
thus incorporating further improvements into the visualization design.
On the tool side, we started to develop approaches for editing the
formula and system as well as the general structure of the tool interface,
e.g., providing access to the project list, their versions, or loading new
ones. With these tool aspects implemented, we ran a first feedback
session with 3 participants and collected comments on the interface.
This feedback allowed us to iterate, e.g., the menu structures, button
icons and labels, or features of the inline formula editor. The result of
the overall design process is the current version of HYPERVIS.
**4.5.2 Implementation as Web Tool**
HYPERVIS is implemented as a web-based tool, featuring a NODE.JS-
based [67] backend and JavaScript-based frontend. In the frontend, the
views are implemented with plain HTML or SVG. Except for the
explanation view, the rendering of all views is controlled by D3.JS [13].
To support the linked highlighting, custom events were introduced that
are sent and consumed by the views. For the formula editing, we incor-
porate the MATHQUILL library [76]. For translating the formula and
producing the Polish notation that MCHYPER requires, we use SPOT.
Graph editing is not yet fully implemented. To illustrate
the general possibility, we provide an embedded CODEMIRROR [41]
editor to change the DOT representation of the system.
The backend is responsible for managing the model checking pipeline. Based on the analyst’s inputs, it calls the MCHYPER Python
tool before handing over the found counterexample to our own Python
script extracting the relevant subformulas. It computes a minimal set of
variable and time step pairs, which cause the violation. In addition, this
script also writes all required information into a JSON file. In parallel, a separate script is used to generate the DOT representation based on
the AIGER file. As this generation might not terminate in a reasonable
time for larger systems, the graph is not available for some
counterexamples. After parsing these generated outputs in the NODE.JS server, the
data is provided to the frontend. For each project, the results are stored
in a folder structure, allowing to quickly reload the counterexamples
later on and implement the versioning concept. The communication
between the frontend and backend is based on HTTP requests.
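The following Python sketch outlines these pipeline stages; the script names, command-line invocations, and file layout are placeholders chosen for illustration and do not correspond to the actual scripts:

```python
# Rough sketch of the described backend pipeline: run the model checker,
# extract the relevant (variable, time step) pairs, try to build a DOT graph,
# and store everything per project. All tool and script names are placeholders.
import json
import subprocess
from pathlib import Path

def run_check(project_dir: Path, system_aag: Path, formula_file: Path) -> Path:
    # 1. Model checking (placeholder invocation of an MCHYPER-like tool).
    cex = subprocess.run(
        ["python3", "mchyper.py", str(system_aag), str(formula_file)],
        capture_output=True, text=True, check=True).stdout

    # 2. Causal analysis: compute a minimal set of variable/time step pairs
    #    (assumes the placeholder script prints its result as JSON).
    analysis = subprocess.run(
        ["python3", "extract_causes.py"],
        input=cex, capture_output=True, text=True, check=True).stdout

    # 3. DOT generation from the AIGER circuit; skipped if it takes too long.
    dot = None
    try:
        dot = subprocess.run(
            ["python3", "aiger_to_dot.py", str(system_aag)],
            capture_output=True, text=True, check=True, timeout=60).stdout
    except subprocess.TimeoutExpired:
        pass

    # 4. Persist the results so the project can be reloaded later.
    result = {"counterexample": cex, "analysis": json.loads(analysis), "dot": dot}
    out_file = project_dir / "result.json"
    out_file.write_text(json.dumps(result, indent=2))
    return out_file
```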
The HYPERVIS tool is hosted online but can also be locally run,
either as a Docker container or by fully installing it and all its dependen-
cies. In general, we envision the usage as an online tool as the primary
usage style, which then also allows for avoiding setup efforts (DG7).
**5 Validating HYPERVIS**
In this section, we validate HYPERVIS by discussing multiple case
studies and reporting on user feedback sessions. Both illustrate that HY-
PERVIS indeed advances the state-of-the-art significantly by helping to
quickly identify the violations in counterexamples of hyperproperties.
**5.1 Case Studies**
Here, we detail two selected case studies: In the first one, we visualize
the results of model checking information-flow properties on an open
source implementation of the I2C bus protocol. In the second case
study, we take a look at one of the core building blocks of such bus
implementations: mutual exclusion protocols.
**CS1: I2C Bus Protocol** The I2C bus protocol coordinates the
communication between multiple components in a master-slave hier-
archy and is widely used in practice. As it has no security features,
this has led to exploits, for example, in smart cards of German public
health insurance companies [83]. The implementation used in this case
study is taken from OpenCores [66]. Its AIGER circuit consists of
254 latches plus 86 input and output variables. Typically, this protocol
consists of a master, one controller, and several slaves, where the master
communicates to the slaves while the controller ensures properties like
mutual exclusion. If information has to be sent over the bus,
the master addresses the slaves with a designated address bit. In this
case study, we visualized the result of model checking the following
information-flow policy: The information which slave the master is ad-
dressing should not be identifiable from the bus’ output. This property
is violated, but the counterexample is highly complex (e.g., it is not
possible to generate a state graph). Still, the visualizations provided by
HYPERVIS help to understand the violation.
**CS2: Mutual Exclusion** The explanation view directly tells the analyst why the overall formula is violated. When the highlighting button is pressed, HyperVis pinpoints the atomic propositions, time steps, and subformulas that caused the violation in the trace view, the timeline view, and the formula view (Fig. 1). In the formula view, for example, the subformula \(\text{grant}_0^{\pi} \leftrightarrow \text{grant}_1^{\pi'}\) is highlighted in the conclusion because only this subformula is needed to understand why the symmetry specification is violated: In the counterexample, \(\text{grant}_0^{\pi} \leftrightarrow \text{grant}_1^{\pi'}\) holds at time step 1 while \(\text{grant}_1^{\pi} \leftrightarrow \text{grant}_0^{\pi'}\) does not hold at that time step. Highlighting relevant subformulas decreases the number of subformulas that the analyst needs to consider when trying to understand the counterexample. This illustrates that HyperVis fulfills DG4, providing guidance for understanding the formula violation.
After the violation is identified, the bug in the system needs to be found. Since DG3 is supported through the linked highlighting of elements and the highlight button, the graph view is restricted to the relevant states for the counterexample executions. This feature again allows the analyst to focus their attention on the most relevant aspects. By using HyperVis to explicitly step through the time steps, one observes that both executions represent the same system trace, thereby violating the symmetry in the grants. The solution to achieve symmetry is to add a new input to the system that allows giving grants symmetrically when both processes send requests simultaneously [59].
5.2 User Feedback Sessions
We conducted feedback sessions with domain users to better assess the merit of our tool for them. In the following, we first describe the study design before reporting on the received feedback.
5.2.1 Study Design
Participants We recruited six participants (age M=27.5 yrs, SD=3.33 yrs; 1 female, 5 male) who have significant knowledge of model checking and hyperproperties. On average, participants rated their theoretical expertise on model checking with 4.5 out of 5 and on hyperproperties with 4.0.
Apparatus The sessions were conducted remotely through a video call with screen sharing. We hosted the latest version of the tool online and provided participants with the link. Two investigators moderated the videotaped sessions, and a third one was taking notes. Participants were asked to follow a think-aloud protocol, i.e., verbally phrasing their thoughts and actions while interacting with the tool.
Procedure After a short welcome and general introduction, participants were asked to provide consent for the video recording. Then, we outlined the procedure and think-aloud protocol before starting the demonstration of HyperVis via screen sharing, introducing all views and their functionalities. Afterward, participants were asked to open the tool, start screen sharing from their end, and analyze three provided examples (detailed below). For each example, we provided a short introduction on the specification and system and then asked them to reason about the counterexample. Further, for the first example, participants had to propose a fix for a corrupt system, while in the second, they had to edit the formula to a working version. For the last example, they had to identify the “needle in the haystack”. After working on each example, we asked them to reflect on the interface and which views they found helpful in the specific context. Lastly, we concluded the session with an open discussion and provided them with a link to our questionnaire. Sessions lasted one hour on average.
Provided Examples For the demonstration of HyperVis, we used an arbiter example similar to the one described in CS2. Further, we prepared three examples for the hands-on part: The first two consider a straightforward drone system that is supposed to increase the drone’s height when it reads an up input, and to go into an emergency state when a bound input is read. In the first version, the specification stated that equal bound inputs must result in equal emergency outputs in the next time step; however, the specification was violated due to an incorrect transition in the system. Participants had to identify this issue and verbally provide a fix. The counterexample is visible in Fig. 6a and Fig. 6b. In the second version, the fixed system was used, but now with a different but incorrect formula. Participants had to pinpoint this issue and, this time, edit the formula in HyperVis. The counterexample is shown in Fig. 6c. Lastly, the third example was a larger counterexample involving 29 time steps and 50 atomic propositions, where a mutual exclusion specification was violated. Due to the system’s size, the graph view was not available. We asked participants to describe the violation in their own words and did not ask them for any fixes.
5.2.2 Results
Overall, all participants (P1–P6) were able to work with HYPERVIS without major issues and considered the tool to be useful for experts and novices. Our two main insights are: 1) Our proposed interface allows analysts to quickly identify the violations in counterexamples and provides valuable guidance for understanding the underlying issue. 2) Personal preferences and the different analysis workflows influence how the different views were used by the participants.
All participants were able to correctly identify the violations and issues in the provided examples within the given time. We could observe that the trace view was used as a central component within the analysis process, providing detailed information on the executions, while the linked highlighting allowed for seeing the corresponding information in the other views. For the formula view, all participants (P1–P6) explicitly stated that they were intrigued by the hierarchy indicators below the formula. Similarly, the graph view proved to be of great value, particularly when the traces were highlighted while stepping through the counterexample. The comments and ratings also showed that the colors used for traces and statements were appreciated for relating the different components. Finally, the explanation highlighting was considered “extremely important” (P3) for understanding the counterexample and identifying the relevant pieces for the violation (all Ps).
At the same time, not all views were used to the same extent. For example, while most participants (P2–P6) found the textual explanations “extremely good” (P6) and used them as a starting point for understanding the violation, participant P1 preferred working with the other views. Similarly, while participants P2+P5 only briefly used the stepping-through mechanism, the others found it very helpful and used it extensively. The timeline view was intensively utilized by participants P2 and P4–P6, particularly for comparing traces and recognizing specific patterns. At the same time, P1 used the trace view more extensively, while P3 used the timeline view only for the larger example.
While working with HYPERVIS, participants also provided multiple suggestions for various improvements. One commonly stated shortcoming was the missing graph view for more extensive examples. Further, P5+6 would prefer some indication of the explanation statements in the graph view as well. As participant P4 intensively used the stepping-through functionalities, he proposed to improve the coloring of nodes and edges in the graph view when both executions are overlapping. Participants P4+6 suggested activating the explanation highlight by default. Some extended filter options were also proposed, e.g., P5 suggested the filtering of single atomic propositions, while P2 proposed to allow for hiding time steps. For the formula editing, multiple possible improvements were stated, e.g., better highlighting of corresponding brackets (P5+P6), a semantic check (P4+P5), or separating the LaTeX input and rendered formula (P2). Still, the formula editor was appreciated, with P4 stating that it is “something that we needed for a long time”.
6 Discussion
The positive feedback that we received emphasizes that there is a clear need for visualization and analysis interfaces within the formal methods domain. We found that the most important aspect when working on visualization solutions within this space is to have access to the specific knowledge that is involved in the rather abstract and formalized concepts. From a visualization perspective, the incorporated encoding strategies or interactive mechanisms are mostly already known. However, when applied and combined in the right way, they become extremely helpful. Importantly, as it was also commented in our study, such a solution is not only an improvement for domain experts, but can also support novices in understanding the underlying principles.
Consequently, our work is in line with other efforts to provide explanations and intuition for abstract or black-box-like processes [12][82][83]. However, in this area, work around explainable AI [40] has received the most attention in recent years, while formal methods themselves are only rarely considered. This is particularly interesting for two reasons: (1) Formal methods, and especially model checking, are largely built around mathematical and logical representations that are consumed in command-line tools, while visual representations remain underestimated. Therefore, there is a big potential for making the concepts more accessible by using visualizations. Further, their mathematical nature requires a rigorous treatment of the visualized elements, which poses special challenges to the visualization design. (2) Formal methods also play an important role for AI in general, and when trying to provide explanations of the computations of an AI agent. For example, Marabou [47] is a recently introduced framework for verifying properties of deep neural networks (e.g., robustness, which is in general a hyperproperty) and providing counterexamples. However, as it is command-line based and does not provide an explanation of the counterexample, users have to cope with the same problems described in this work. The visualization approaches presented here might be directly applicable to many tools in the area of formal methods.
In the light of these considerations, HYPERVIS can be seen as a first foundation for explaining hyperproperties and counterexamples. As the immediate next steps, the suggested improvements from the user feedback sessions can be incorporated. For the editing facilities, this includes the general possibility for modifying the system plus a visual editing mode. This could also allow for providing a stand-alone editing mode with improved live previews of formula and system. For the analysis of counterexamples themselves, an interesting addition would be support for adding annotations or storing derived insights from them. In this context, it can also be considered to automatically track the analysis history or provenance [88] and allow analysts to review it.
Currently, HYPERVIS is focused on visualizing a specific counterexample to a hyperproperty. However, on the one hand, the need for visually representing hyperproperties can also occur independently from violated specifications, i.e., for correctly implemented systems and specified properties. While it is always possible to generate a so-called witness for a proved hyperproperty by negating the specification, the found witness is one of many possible ones and might not adequately represent the underlying hyperproperty. On the other hand, the challenging but promising interactive synthesis problem potentially benefits from the presented visualizations. Synthesis, by definition, constructs a correct implementation directly from the provided specification, making the model checking process superfluous. Here, it would be beneficial to visualize the iterative synthesis process (i.e., how the system was derived) as well as the proposed implementation itself. The visualization and interaction designs presented here can guide the development of such novel hyperproperty visualization tools.
7 Conclusion
In this paper, we presented concepts for visually analyzing counterexamples to hyperproperties as well as for editing the provided formula and system. As demonstrated through case studies and attested by user feedback, our HYPERVIS tool notably improves the analysis and understanding of the counterexamples. At the core of this is the targeted usage of encoding strategies and interactive mechanisms that pointedly represent the different aspects and help to guide the analyst to the relevant information in the example. In particular, the right combination of seemingly simple measures, such as color encoding, linked highlighting, and relevance indication, can allow experts to quickly recognize the violation cause and also novices to understand the complex relations in the first place. Notably, the key to such solutions is the understanding of the domain, which in this case enabled us to embed the causal analysis of the counterexample and to automatically derive textual explanations and corresponding highlights. The provided editing facilities support fixing the identified issues, turning HYPERVIS into a valuable tool for analyzing hyperproperties. With this, we contribute a foundation for explaining and visualizing hyperproperties in general and hope to inspire further visualization solutions for more formal methods concepts.
Acknowledgments
We thank Weizhou Luo for his valuable support during the overall project duration. This work was funded by DFG grant 389792660 as part of TRR 248 (CPEC), by the DFG as part of Germany's Excellence Strategy EXC 2050/1 (Project ID 390969704, Cluster of Excellence “Centre for Tactile Internet” (CeTI) of TU Dresden), by the European Research Council (ERC) Grant OSARES (No. 683300), and by the German Israeli Foundation (GIF) Grant No. I-1513-407/2019.
|
{"Source-Url": "https://export.arxiv.org/pdf/2108.03698", "len_cl100k_base": 12845, "olmocr-version": "0.1.50", "pdf-total-pages": 11, "total-fallback-pages": 0, "total-input-tokens": 38419, "total-output-tokens": 13552, "length": "2e13", "weborganizer": {"__label__adult": 0.00035881996154785156, "__label__art_design": 0.0013427734375, "__label__crime_law": 0.0003261566162109375, "__label__education_jobs": 0.0016813278198242188, "__label__entertainment": 0.00015616416931152344, "__label__fashion_beauty": 0.0002149343490600586, "__label__finance_business": 0.0004439353942871094, "__label__food_dining": 0.0003709793090820313, "__label__games": 0.00083160400390625, "__label__hardware": 0.0026397705078125, "__label__health": 0.0005230903625488281, "__label__history": 0.0005974769592285156, "__label__home_hobbies": 0.00018799304962158203, "__label__industrial": 0.0008935928344726562, "__label__literature": 0.0004498958587646485, "__label__politics": 0.0003578662872314453, "__label__religion": 0.00061798095703125, "__label__science_tech": 0.3212890625, "__label__social_life": 0.00012695789337158203, "__label__software": 0.018890380859375, "__label__software_dev": 0.646484375, "__label__sports_fitness": 0.00028014183044433594, "__label__transportation": 0.0007624626159667969, "__label__travel": 0.0002200603485107422}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 63755, 0.0141]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 63755, 0.534]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 63755, 0.90841]], "google_gemma-3-12b-it_contains_pii": [[0, 2713, false], [2713, 10906, null], [10906, 19011, null], [19011, 27684, null], [27684, 34953, null], [34953, 41868, null], [41868, 49451, null], [49451, 54668, null], [54668, 63755, null], [63755, 63755, null], [63755, 63755, null]], "google_gemma-3-12b-it_is_public_document": [[0, 2713, true], [2713, 10906, null], [10906, 19011, null], [19011, 27684, null], [27684, 34953, null], [34953, 41868, null], [41868, 49451, null], [49451, 54668, null], [54668, 63755, null], [63755, 63755, null], [63755, 63755, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 63755, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 63755, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 63755, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 63755, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 63755, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 63755, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 63755, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 63755, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 63755, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 63755, null]], "pdf_page_numbers": [[0, 2713, 1], [2713, 10906, 2], [10906, 19011, 3], [19011, 27684, 4], [27684, 34953, 5], [34953, 41868, 6], [41868, 49451, 7], [49451, 54668, 8], [54668, 63755, 9], [63755, 63755, 10], [63755, 63755, 11]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 63755, 0.0]]}
|
olmocr_science_pdfs
|
2024-11-30
|
2024-11-30
|
902aae7f2abf33f82fce3e86982347ab336fadfc
|
## CONTENTS
1 Introduction
  1.1 Improvements and New Features
2 Installing the Desktop Synchronization Client
  2.1 Installation Wizard
3 Using the Synchronization Client
  3.1 Systray Icon
    3.1.1 Configuring ownCloud Account Settings
    3.1.2 Adding New Accounts
  3.2 Sharing From Your Desktop
  3.3 Activity Window
  3.4 General Window
  3.5 Using the Network Window
  3.6 Using the Ignored Files Editor
4 Advanced Usage
  4.1 Options
  4.2 Configuration File
  4.3 ownCloud Command Line Client
    4.3.1 Credential Handling
5 The Automatic Updater
  5.1 Basic Workflow
    5.1.1 Windows
    5.1.2 Mac OS X
    5.1.3 Linux
  5.2 Preventing Automatic Updates
    5.2.1 Preventing Automatic Updates in Windows Environments
    5.2.2 Preventing Automatic Updates in Mac OS X Environments
    5.2.3 Preventing Automatic Updates in Linux Environments
6 Appendix A: Building the Client
  6.1 Getting Source Code
  6.2 Linux
  6.3 Mac OS X
  6.4 Windows Development Build
  6.5 Windows Installer Build (Cross-Compile)
  6.6 Generic Build Instructions
7 Appendix B: History and Architecture
Available for Windows, Mac OS X, and various Linux distributions, the ownCloud Desktop Sync client enables you to:
- Specify one or more directories on your computer that you want to synchronize to the ownCloud server.
- Always have the latest files synchronized, wherever they are located.
Your files are always automatically synchronized between your ownCloud server and local PC.
**Note:** Because of various technical issues, desktop sync clients older than 1.7 will not be allowed to connect and sync with the ownCloud 8.1+ server. It is highly recommended to keep your client updated.
### 1.1 Improvements and New Features
The 2.1 release of the ownCloud desktop sync client has many new features and improvements. (See the complete changelog.)
- Improved appearance on HiDPI screens
- Improved error messages
- Several fixes/improvements to the sharing dialog
- Several fixes/improvements to the server activity tab
- Allow changeable upload chunk size in owncloud.cfg
- Forget password on explicit sign-out
- Windows: Fix deleting and replacing of read-only files
- Share with internal ownCloud users from your desktop
- Separate views for server activity, sync activity, and errors
- Don’t re-upload *.eml files if size and checksum are unchanged
- Improved upload/download progress indicator
You can download the latest version of the ownCloud Desktop Synchronization Client from the ownCloud download page. There are clients for Linux, Mac OS X, and Microsoft Windows.
Installation on Mac OS X and Windows is the same as for any software application: download the program and then double-click it to launch the installation, and then follow the installation wizard. After it is installed and configured the sync client will automatically keep itself updated; see The Automatic Updater for more information.
Linux users must follow the instructions on the download page to add the appropriate repository for their Linux distribution, install the signing key, and then use their package managers to install the desktop sync client. Linux users will also update their sync clients via package manager, and the client will display a notification when an update is available.
Linux users must also have a password manager enabled, such as GNOME Keyring or KWallet, so that the sync client can login automatically.
You will also find links to source code archives and older versions on the download page.
### 2.1 Installation Wizard
The installation wizard takes you step-by-step through configuration options and account setup. First you need to enter the URL of your ownCloud server.
Enter your ownCloud login on the next screen.
On the Local Folder Option screen you may sync all of your files on the ownCloud server, or select individual folders.
The default local sync folder is `ownCloud`, in your home directory. You may change this as well.
When you have completed selecting your sync folders, click the Connect button at the bottom right. The client will attempt to connect to your ownCloud server, and when it is successful you’ll see two buttons: one to connect to your ownCloud Web GUI, and one to open your local folder. It will also start synchronizing your files.
Click the Finish button, and you’re all done.
## 3 Using the Synchronization Client
The ownCloud Desktop Client remains in the background and is visible as an icon in the system tray (Windows, KDE), status bar (Mac OS X), or notification area (Linux).
The status indicator uses overlay icons to indicate the current status of your synchronization. The green circle with the white checkmark tells you that your synchronization is current and you are connected to your ownCloud server.
The blue icon with the white semi-circles means synchronization is in progress.
The yellow overlay icon with the parallel lines tells you your synchronization has been paused. (Most likely by you.)
The gray icon with three white dots means your sync client has lost its connection with your ownCloud server.
When you see a white circle with the letter “i”, that is the informational icon; click it to see what it has to tell you.
The red circle with the white “x” indicates a configuration error, such as an incorrect login or server URL.
3.1 Systray Icon
A right-click on the systray icon opens a menu for quick access to multiple operations.
This menu provides the following options:
- Quick access to your accounts
- Sync status
- Recent Changes, showing latest activities
- Settings
- Help menu
- An option to log in or log out of all of your accounts at once
- Quit ownCloud, logging out and closing the client
A left-click on your systray icon opens the desktop client to the account settings window.
3.1.1 Configuring ownCloud Account Settings
At the top of the window are tabs for each configured sync account, and three others for Activity, General and Network settings. On your account tabs you have the following features:
• Connection status, showing which ownCloud server you are connected to, and your ownCloud username.
• An **Account** button, which contains a dropdown menu with **Add New**, **Log In/Log Out**, and **Remove**.
• Used and available space on the server.
• Current synchronization status.
• **Add Folder Sync Connection** button, which is active only when you have removed synchronization on an account (see **Remove Sync** below).
The little button with three dots (the overflow menu) that sits to the right of the sync status bar offers four additional options:
• Open Folder
• Choose What to Sync
• Pause Sync / Resume Sync
• Remove folder sync connection
**Open Folder** opens a file explorer window displaying the client-side folder that is being synced.
**Choose What to Sync** opens the folder sync tree view. Use this to sync all or only some of the folders in the folder tree.
**Pause Sync** pauses sync operations without making any changes to your account. It will continue to update file and folder lists, without downloading or updating files. To stop all sync activity use **Remove Sync**.
**Resume Sync** resumes sync operations.
**Remove Sync** removes the sync connection without removing the account. This stops all sync activity, including file and folder list updates. If you want to synchronize the folder tree again then click the **Add Folder Sync Connection** button, and re-select the folder tree that you want to sync.
---
**Note:** ownCloud does not preserve the mtime (modification time) of directories, though it does update the mtimes on files. See **Wrong folder date when syncing** for discussion of this.
### 3.1.2 Adding New Accounts
You may configure multiple ownCloud accounts in your desktop sync client. Simply click the **Account > Add New** button on any account tab to add a new account, and then follow the account creation wizard. The new account will appear as a new tab in the settings dialog, where you can adjust its settings at any time. Use **Account > Remove** to delete accounts.
### 3.2 Sharing From Your Desktop
The ownCloud desktop sync client integrates with your file manager: Finder on Mac OS X, Explorer on Windows, and Nautilus on Linux. (Linux users must install the *owncloud-client-nautilus* plugin.) You can create share links, and share with internal ownCloud users the same way as in your ownCloud Web interface.
Right-click your systray icon, hover over the account you want to use, and left-click “Open folder [folder name]” to quickly enter your local ownCloud folder. Right-click the file or folder you want to share to expose the share dialog, and click **Share with ownCloud**.
The share dialog has all the same options as your ownCloud Web interface.
Use **Share with ownCloud** to see who you have shared with, and to modify their permissions, or to delete the share.
3.3 Activity Window
The Activity window contains the log of your recent activities, organized over three tabs: **Server Activities**, which includes new shares and files downloaded and deleted, **Sync Protocol**, which displays local activities such as which local folders your files went into, and **Not Synced** shows errors such as files not synced.
3.4 General Window
The General window has configuration options such as **Launch on System Startup**, **Use Monochrome Icons**, and **Show Desktop Notifications**. This is where you will find the **Edit Ignored Files** button, to launch the ignored files editor, and **Ask confirmation before downloading folders larger than** [folder size].
3.5 Using the Network Window
The Network settings window enables you to define network proxy settings, and also to limit download and upload bandwidth.
3.6 Using the Ignored Files Editor
You might have some local files or directories that you do not want to backup and store on the server. To identify and exclude these files or directories, you can use the **Ignored Files Editor** (General tab.)
For your convenience, the editor is pre-populated with a default list of typical ignore patterns. These patterns are contained in a system file (typically `sync-exclude.lst`) located in the ownCloud Client application directory. You cannot modify these pre-populated patterns directly from the editor. However, if necessary, you can hover over any pattern in the list to show the path and filename associated with that pattern, locate the file, and edit the `sync-exclude.lst` file.
[Screenshot: the Ignored Files Editor, with a global “Sync hidden files” option and the list of files ignored by patterns (e.g., `.filepart`, `.part`, `.crdownload`, `.unison*`, `csync_timedif.ctmp`, `.csync_journal.db`, `.csync_journal.db.ctmp`), each with an “Allow Deletion” checkbox. Files or folders matching a pattern will not be synchronized; items where deletion is allowed will be deleted if they prevent a directory from being removed, which is useful for meta data.]
**Note:** Modifying the global exclude definition file might render the client unusable or result in undesired behavior.
Each line in the editor contains an ignore pattern string. When creating custom patterns, in addition to being able to use normal characters to define an ignore pattern, you can use wildcard characters for matching values. As an example, you can use an asterisk (*) to identify an arbitrary number of characters or a question mark (?) to identify a single character.
Patterns that end with a slash character (/) are applied to only directory components of the path being checked.
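For example, a few custom entries (these are illustrative additions, not part of the pre-populated list) could look like this:

```
*.tmp
backup?.txt
build/
```

The first line ignores all files ending in `.tmp`, the second matches names such as `backup1.txt`, and the trailing slash in the third restricts the pattern to directories.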
**Note:** Custom entries are currently not validated for syntactical correctness by the editor, so you will not see any warnings for bad syntax. If your synchronization does not work as you expected, check your syntax.
Each pattern string in the list is preceded by a checkbox. When the check box contains a check mark, in addition to ignoring the file or directory component matched by the pattern, any matched files are also deemed “fleeting metadata” and removed by the client.
In addition to excluding files and directories that use patterns defined in this list:
- The ownCloud Client always excludes files containing characters that cannot be synchronized to other file systems.
- Files that cause individual errors three times during a synchronization are also removed. However, the client provides the option of retrying a synchronization three additional times on files that produce errors.
For more detailed information see _Ignored Files_.
4.1 Options
You have the option of starting your ownCloud desktop client with the `owncloud` command. The following options are supported:
```
owncloud -h or owncloud --help
```
Displays all command options.
The other options are:
- `--logwindow` Opens a window displaying log output.
- `--logfile <filename>` Write log output to the file specified. To write to stdout, specify - as the filename.
- `--logdir <name>` Writes each synchronization log output in a new file in the specified directory.
- `--logexpire <hours>` Removes logs older than the value specified (in hours). This command is used with `--logdir`.
- `--logflush` Clears (flushes) the log file after each write action.
- `--confdir <dirname>` Uses the specified configuration directory.
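For example, to start the client with a separate configuration directory, write one log file per synchronization run, and discard logs older than a day, a call could look like this (the paths are illustrative):

```
owncloud --confdir $HOME/.config/owncloud-test --logdir /tmp/owncloud-logs --logexpire 24
```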
4.2 Configuration File
The ownCloud Client reads a configuration file. You can locate this configuration file as follows:
- **On Linux distributions:** `$HOME/.local/share/data/ownCloud/owncloud.cfg`
- **On Microsoft Windows systems:** `%LOCALAPPDATA%\ownCloud\owncloud.cfg`
- **On Mac OS X systems:** `$HOME/Library/Application Support/ownCloud`
The configuration file contains settings using the Microsoft Windows .ini file format. You can overwrite changes using the ownCloud configuration dialog.
**Note:** Use caution when making changes to the ownCloud Client configuration file. Incorrect settings can produce unintended results.
You can change the following configuration settings (these must be placed under the `[ownCloud]` section):
- `remotePollInterval` (default: 30000) – Specifies the poll time for the remote repository in milliseconds.
- `maxLogLines` (default: 20000) – Specifies the maximum number of log lines displayed in the log window.
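For example, to poll the server every 60 seconds and limit the log window to 5000 lines, the relevant part of `owncloud.cfg` could look like this (the values are illustrative):

```
[ownCloud]
remotePollInterval=60000
maxLogLines=5000
```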
4.3 ownCloud Command Line Client
The ownCloud Client packages contain a command line client, owncloudcmd, that can be used to synchronize ownCloud files to client machines.
owncloudcmd performs a single sync run and then exits the synchronization process. In this manner, owncloudcmd processes the differences between client and server directories and propagates the files to bring both repositories to the same state. Contrary to the GUI-based client, owncloudcmd does not repeat synchronizations on its own. It also does not monitor for file system changes.
To invoke owncloudcmd, you must provide the local and the remote repository URL using the following command:
```
owncloudcmd [OPTIONS...] sourcedir owncloudurl
```
where sourcedir is the local directory and owncloudurl is the server URL.
Other command line switches supported by owncloudcmd include the following:
- `--user, -u [user]` Use user as the login name.
- `--password, -p [password]` Use password as the password.
- `--non-interactive` Do not prompt for questions.
- `--silent, -s` Inhibits verbose log output.
- `--trust` Trust any SSL certificate, including invalid ones.
- `--httpproxy http://[user@pass:]<server>:<port>` Uses server as HTTP proxy.
- `--nonshib` Uses Non Shibboleth WebDAV Authentication
- `--davpath [path]` Overrides the WebDAV Path with path
- `--exclude [file]` Exclude list file
- `--unsyncedfolders [file]` File containing the list of unsynced folders (selective sync)
- `--max-sync-retries [n]` Retries maximum n times (defaults to 3)
- `-h` Sync hidden files, do not ignore them
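For example, a single non-interactive synchronization run with credentials passed as options could look like this (user name, password, and paths are placeholders):

```
owncloudcmd --user carla --password secret --non-interactive \
    $HOME/ownCloud https://server/owncloud/remote.php/webdav/
```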
4.3.1 Credential Handling
owncloudcmd uses the credentials of the GUI synchronization client. If no client is configured, or if you choose to use a different user to synchronize, you can specify the user password setting with the usual URL pattern. For example:
```
$ owncloudcmd / https://carla:secret@server/owncloud/remote.php/webdav/
```
To synchronize the ownCloud directory Music to the local directory media/music, through a proxy listening on port 8080, and on a gateway machine using IP address 192.168.178.1, the command line would be:
```
$ owncloudcmd --httpproxy http://192.168.178.1:8080 \
    $HOME/media/music \
    https://server/owncloud/remote.php/webdav/Music
```
owncloudcmd will prompt for the user name and password, unless they have been specified on the command line or `--non-interactive` has been passed.
The Automatic Updater ensures that you always have the latest features and bugfixes for your ownCloud synchronization client.
The Automatic Updater updates only on Mac OS X and Windows computers; Linux users only need to use their normal package managers. However, on Linux systems the Updater will check for updates and notify you when a new version is available.
**Note:** Because of various technical issues, desktop sync clients older than 1.7 will not be allowed to connect and sync with the ownCloud 8.1+ server. It is highly recommended to keep your client updated.
### 5.1 Basic Workflow
The following sections describe how to use the Automatic Updater on different operating systems.
#### 5.1.1 Windows
The ownCloud client checks for updates and downloads them when available. You can view the update status under **Settings -> General -> Updates** in the ownCloud client.
If an update is available, and has been successfully downloaded, the ownCloud client starts a silent update prior to its next launch and then restarts itself. Should the silent update fail, the client offers a manual download.
**Note:** Administrative privileges are required to perform the update.
#### 5.1.2 Mac OS X
If a new update is available, the ownCloud client opens a pop-up dialog to alert you of the update and request that you update to the latest version. Due to its use of the Sparkle framework, this is the default process for Mac OS X applications.
#### 5.1.3 Linux
Linux distributions provide their own update tools, so ownCloud clients that use the Linux operating system do not perform any updates on their own. The client will inform you (**Settings -> General -> Updates**) when an update is available.
5.2 Preventing Automatic Updates
In controlled environments, such as companies or universities, you might not want to enable the auto-update mechanism, as it interferes with controlled deployment tools and policies. To address this case, it is possible to disable the auto-updater entirely. The following sections describe how to disable the auto-update mechanism for different operating systems.
5.2.1 Preventing Automatic Updates in Windows Environments
Users may disable automatic updates by adding this line to the [General] section of their owncloud.cfg files:
```plaintext
skipUpdateCheck=true
```
Windows administrators have more options for preventing automatic updates in Windows environments by using one of two methods. The first method allows users to override the automatic update check mechanism, whereas the second method prevents any manual overrides.
To prevent automatic updates, but allow manual overrides:
1. Edit these Registry keys:
(a) (32-bit-Windows) \HKEY_LOCAL_MACHINE\Software\ownCloud\ownCloud
(b) (64-bit-Windows) \HKEY_LOCAL_MACHINE\Software\Wow6432Node\ownCloud\ownCloud
2. Add the key `skipUpdateCheck` (of type DWORD).
3. Specify a value of 1 to the machine.
To manually override this key, use the same value in `HKEY_CURRENT_USER`.
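As an illustration, the same key can also be created from an administrative command prompt with the built-in `reg` tool (use the `Wow6432Node` path listed above on 64-bit Windows):

```
reg add "HKEY_LOCAL_MACHINE\Software\ownCloud\ownCloud" /v skipUpdateCheck /t REG_DWORD /d 1
```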
To prevent automatic updates and disallow manual overrides:
**Note:** This is the preferred method of controlling the updater behavior using Group Policies.
1. Edit this Registry key:
\HKEY_LOCAL_MACHINE\Software\Policies\ownCloud\ownCloud
2. Add the key `skipUpdateCheck` (of type DWORD).
3. Specify a value of 1 to the machine.
**Note:** Enterprise branded clients (see Building Branded ownCloud Clients) have different key names, which are set in ownBrander using the Application Vendor and Application Name fields.
### 5.2.2 Preventing Automatic Updates in Mac OS X Environments
2. Locate and open the following file:
```
/Library/Preferences/com.owncloud.desktopclient.plist
```
3. Add a new root level item of type `bool`.
4. Name the item `skipUpdateCheck`.
5. Set the item to `true`.
Alternatively, you can copy the file `owncloud.app/Contents/Resources/deny_autoupdate_com.owncloud.desktopclient.plist` to `/Library/Preferences/com.owncloud.desktopclient.plist`.
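If you prefer the command line, the same setting can be written with the standard `defaults` utility; this is a minimal sketch that assumes the preference domain matches the plist path shown above:
```shell
# Writes skipUpdateCheck=true into /Library/Preferences/com.owncloud.desktopclient.plist
sudo defaults write /Library/Preferences/com.owncloud.desktopclient skipUpdateCheck -bool YES
```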
### 5.2.3 Preventing Automatic Updates in Linux Environments
Because the Linux client does not provide automatic updating functionality, there is no need to remove the automatic-update check. However, if you want to disable the update check anyway, edit your desktop client configuration file, `$HOME/.local/share/data/ownCloud/owncloud.cfg`, and add this line to the `[General]` section:
```
skipUpdateCheck=true
```
APPENDIX A: BUILDING THE CLIENT
This section explains how to build the ownCloud Client from source for all major platforms. You should read this section if you want to develop for the desktop client.
Note: Build instructions are subject to change as development proceeds. Please check the version for which you want to build.
These instructions are updated to work with version 2.1 of the ownCloud Client.
### 6.1 Getting Source Code
The *Generic Build Instructions* pull the latest code directly from GitHub, and work on Linux, Mac OS X, and Windows.
See the next section for instructions on getting source code from Linux packages.
### 6.2 Linux
You may wish to use source packages for your Linux distribution, as these give you the exact sources from which the binary packages are built. These are hosted on the ownCloud repository from OBS. Go to the Index of repositories to see all the Linux client repos.
1. **At the bottom of the page for each distribution** is a “Grab binary packages directly” section. These contain source RPMs for CentOS, RHEL, Fedora, SLES, and openSUSE.
To get the .deb source packages add the source repo for your Debian or Ubuntu version, like this example for Debian 8 (run as root):
```
echo 'deb-src http://download.opensuse.org/repositories/isv:/ownCloud:/desktop/Debian_8.0/ /' >> /etc/apt/sources.list.d/owncloud-client.list
```
2. **Install the dependencies using the following commands for your specific Linux distribution:**
- **Debian/Ubuntu:** `apt-get update; apt-get build-dep owncloud-client`
- **openSUSE/SLES:** `zypper ref; zypper si -d owncloud-client`
- **Fedora/CentOS/RHEL:** `yum install yum-utils; yum-builddep owncloud-client`
3. **Follow the *Generic Build Instructions*, starting with step 2.**
### 6.3 Mac OS X
In addition to needing XCode (along with the command line tools), developing in the Mac OS X environment requires extra dependencies. You can install these dependencies through MacPorts or Homebrew. These dependencies are required only on the build machine, because non-standard libs are deployed in the app bundle.
The tested and preferred way to develop in this environment is through the use of HomeBrew. The ownCloud team has its own repository containing non-standard recipes.
To set up your build environment for development using HomeBrew:
1. Add the ownCloud repository using the following command:
```shell
brew tap owncloud/owncloud
```
2. Install any missing dependencies:
```shell
brew install $(brew deps owncloud-client)
```
3. Add Qt from brew to the path:
```shell
export PATH=/usr/local/Cellar/qt5/5.x.y/bin:$PATH
```
where `5.x.y` is the current version of Qt 5 that brew has installed on your machine.
4. For compilation of the client, follow the **Generic Build Instructions**.
5. Install the Packages package creation tool.
6. In the build directory, run `admin/osx/create_mac.sh <build_dir> <install_dir>`. If you have a developer signing certificate, you can specify its Common Name as a third parameter (use quotes) to have the package signed automatically.
**Note:** Contrary to earlier versions, ownCloud 1.7 and later are packaged as a *pkg* installer. Do not call “make package” at any time when compiling for OS X, as this will build a disk image, and will not work correctly.
### 6.4 Windows Development Build
If you want to test some changes and deploy them locally, you can build natively on Windows using MinGW. If you want to generate an installer for deployment, please follow **Windows Installer Build (Cross-Compile)** instead.
1. Get the required dependencies:
• Make sure that you have CMake and Git.
• Download the Qt MinGW package. You will use the MinGW version bundled with it.
• Download an OpenSSL Windows Build (the non-“Light” version)
2. Get the QtKeychain sources as well as the latest versions of the ownCloud client from Git as follows:
```shell
git clone https://github.com/frankosterfeld/qtkeychain.git
git clone git://github.com/owncloud/client.git
```
3. Open the Qt MinGW shortcut console from the Start Menu
4. Make sure that OpenSSL’s bin directory as well as your qtkeychain source directories are in your PATH. This will allow CMake to find the library and headers, as well as allow the ownCloud client to find the DLLs at runtime:
```bash
set PATH=C:\<OpenSSL Install Dir>\bin;%PATH%
set PATH=C:\<qtkeychain Clone Dir>;%PATH%
```
5. Build qtkeychain directly in the source directory so that the DLL is built in the same directory as the headers to let CMake find them together through PATH:
```bash
cd <qtkeychain Clone Dir>
cmake -G "MinGW Makefiles" .
mingw32-make
cd ..
```
6. Create the build directory:
```bash
mkdir client-build
cd client-build
```
7. Build the client:
```bash
cmake -G "MinGW Makefiles" ../client
mingw32-make
```
**Note:** You can try using ninja to build in parallel using `cmake -G Ninja ../client` and `ninja` instead.
**Note:** Refer to the Generic Build Instructions section for additional options.
The ownCloud binary will appear in the bin directory.
### 6.5 Windows Installer Build (Cross-Compile)
Due to the large number of dependencies, building the client installer for Windows is currently only officially supported on openSUSE, by using the MinGW cross compiler. You can set up any currently supported version of openSUSE in a virtual machine if you do not have it installed already.
In order to make setup simple, you can use the provided Dockerfile to build your own image.
1. Assuming you are in the root of the ownCloud Client’s source tree, you can build an image from this Dockerfile like this:
```bash
cd admin/win32/docker
docker build . -t owncloud-client-win32:<version>
```
Replace `<version>` by the version of the client you are building, e.g. 2.1 for the release of the client that this document describes. If you do not wish to use docker, you can run the commands in RUN manually in a shell, e.g. to create your own build environment in a virtual machine.
**Note:** Docker images are specific to releases. This one refers to 2.1. Newer releases may have different dependencies, and thus require a later version of the docker image. Always pick the docker image that fits the release of the client you are building.
2. From within the source tree, run the docker instance:
```bash
docker run -v "$PWD:/home/jenkins/client" owncloud-client-win32:<version> \
    admin/win32/docker/build.sh $(id -u)
```
This runs the build, creates an NSIS-based installer, and runs the tests. You will find the resulting binary in a newly created build-win32 subfolder.
If you do not wish to use docker, and ran the RUN commands above in a virtual machine, you can run the indented commands in the lower section of build.sh manually in your source tree.
4. Finally, you should sign the installer to avoid warnings upon installation. This requires a Microsoft Authenticode certificate and uses `osslsigncode` to sign the installer:
```bash
osslsigncode -pkcs12 $HOME/.codesign/packages.pfx -h sha256 \
-pass yourpass \
-n "ACME Client" \
-i "http://acme.com" \
-ts "http://timestamp.server/" \
-in ${unsigned_file} \
-out ${installer_file}
```
For `-ts`, use the URL of the time stamping server provided by your CA along with the Authenticode certificate. Alternatively, you may use the official Microsoft `signtool` utility on Microsoft Windows, as sketched below.
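A rough `signtool` equivalent might look like this (certificate file, password, timestamp URL, and installer name are placeholders; adjust to your environment):
```
signtool sign /f packages.pfx /p yourpass /fd sha256 /tr http://timestamp.server/ /td sha256 installer.exe
```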
If you’re familiar with docker, you can use the version of `osslsigncode` that is part of the docker image.
### 6.6 Generic Build Instructions
Building the desktop sync client has become easier than in previous versions: CSync, the sync engine library of the client, is now part of the client source repository rather than a separate module.
To build the most up-to-date version of the client:
1. Clone the latest versions of the client from Git as follows:
```bash
git clone git://github.com/owncloud/client.git
cd client
git submodule init
git submodule update
cd ..
```
2. Create the build directory:
```bash
mkdir client-build
cd client-build
```
3. Configure the client build:
```bash
cmake -DCMAKE_BUILD_TYPE="Debug" ../client
```
**Note:** You must use absolute paths for the `include` and `library` directories.
Note: On Mac OS X, you need to specify `-DCMAKE_INSTALL_PREFIX=target`, where target is a private location, i.e. in parallel to your build dir by specifying `../install`.
4. Call `make`.
The owncloud binary will appear in the `bin` directory.
5. (Optional) Call `make install` to install the client to the `/usr/local/bin` directory.
The following cmake parameters are known:
- `-DQTKEYCHAIN_LIBRARY=/path/to/qtkeychain.dylib -DQTKEYCHAIN_INCLUDE_DIR=/path/to/qtkeychain/`: Used for stored credentials. When compiling with Qt5, the library is called `qt5keychain.dylib`. You need to compile QtKeychain with the same Qt version.
- `-DWITH_DOC=TRUE`: Creates doc and manpages when running `make`; also adds install statements, providing the ability to install them using `make install`.
- `-DCMAKE_PREFIX_PATH=/path/to/Qt5.2.0/5.2.0/yourarch/lib/cmake/`: Builds using Qt5.
- `-DBUILD_WITH_QT4=ON`: Builds using Qt4 (even if Qt5 is found).
- `-DCMAKE_INSTALL_PREFIX=path`: Sets an install prefix. This is mandatory on Mac OS X.
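For example, a configure step on Mac OS X that combines several of these options might look like the following; all paths are placeholders to adapt to your system:
```bash
cmake -DCMAKE_BUILD_TYPE="Debug" \
      -DCMAKE_INSTALL_PREFIX=../install \
      -DCMAKE_PREFIX_PATH=/path/to/Qt5.2.0/5.2.0/yourarch/lib/cmake/ \
      ../client
```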
APPENDIX B: ARCHITECTURE
ownCloud provides desktop sync clients to synchronize the contents of local directories from computers, tablets, and handheld devices to the ownCloud server.
Synchronization is accomplished using csync, a bidirectional file synchronizing tool that provides both a command line client and a library. A special module for csync was written to synchronize with the ownCloud built-in WebDAV server.
The ownCloud Client software is written in C++ using the Qt Framework. As a result, the ownCloud Client runs on Linux, Windows, and Mac OS X.
### 7.1 The Synchronization Process
The process of synchronization keeps files in two separate repositories the same. When synchronized:
- If a file is added to one repository it is copied to the other synchronized repository.
- When a file is changed in one repository, the change is propagated to any other synchronized repository.
- If a file is deleted in one repository, it is deleted in any other.
It is important to note that the ownCloud synchronization process does not use a typical client/server system where the server is always master. This is a major difference between the ownCloud synchronization process and other systems like a file backup, where only changes to files or folders and the addition of new files are propagated, but these files and folders are never deleted unless explicitly deleted in the backup.
During synchronization, the ownCloud Client checks both repositories for changes frequently. This process is referred to as a sync run. In between sync runs, the local repository is monitored by a file system monitoring process that starts a sync run immediately if something was edited, added, or removed.
### 7.2 Synchronization by Time versus ETag
Until the release of ownCloud 4.5 and ownCloud Client 1.1, the ownCloud synchronization process employed a single file property – the file modification time – to decide which file was newer and needed to be synchronized to the other repository.
The modification timestamp is part of the file's metadata. It is available on every relevant filesystem and is the typical indicator for a file change. Modification timestamps do not require special action to create and have a general meaning. One design goal of csync is to not require a special server component, which is why csync was chosen as the backend component.
To compare the modification times of two files from different systems, csync must operate on a common time base. Before ownCloud Client version 1.1.0, csync therefore required both repositories to run on exactly the same time, which was achieved through the use of enterprise-standard NTP time synchronization on all machines.
Because this timing strategy is rather fragile without NTP, ownCloud 4.5 introduced a unique number for each file that changes whenever the file changes. Although this number is unique, it is not a hash of the file. Instead, it is a randomly chosen number that is transmitted in the ETag field. Because the number changes whenever the file changes, it can reliably be used to determine whether one of the files has changed and, thereby, to launch a synchronization run.
**Note:** ownCloud Client release 1.1 and later requires file ID capabilities on the ownCloud server. Servers that run with release earlier than 4.5.0 do not support using the file ID functionality.
Before the 1.3.0 release of the Desktop Client, the synchronization process might create false conflict files if the clocks deviated: the original and changed files conflicted only in their timestamps, not in their content. This behaviour was changed to use a binary check to determine whether the files actually differ.
Like files, directories also hold a unique ID that changes whenever one of the contained files or directories is modified. Because this is a recursive process, it significantly reduces the effort required for a synchronization cycle, because the client only analyzes directories with a modified ID.
The following table outlines the different synchronization methods used, depending on server/client combination:
<table>
<thead>
<tr>
<th>Server Version</th>
<th>Client Version</th>
<th>Sync Methods</th>
</tr>
</thead>
<tbody>
<tr>
<td>4.0.x or earlier</td>
<td>1.0.5 or earlier</td>
<td>Time Stamp</td>
</tr>
<tr>
<td>4.0.x or earlier</td>
<td>1.1 or later</td>
<td>n/a (incompatible)</td>
</tr>
<tr>
<td>4.5 or later</td>
<td>1.0.5 or earlier</td>
<td>Time Stamp</td>
</tr>
<tr>
<td>4.5 or later</td>
<td>1.1 or later</td>
<td>File ID, Time Stamp</td>
</tr>
</tbody>
</table>
We strongly recommend using ownCloud Server release 4.5 or later when using ownCloud Client 1.1 or later. Using an incompatible time stamp-based synchronization mechanism can lead to data loss in rare cases, especially when multiple clients are involved and one utilizes a non-synchronized NTP time.
### 7.3 Comparison and Conflict Cases
As mentioned above, during a sync run the client must first detect whether one of the two repositories has changed files. On the local repository, the client traverses the file tree and compares the modification time of each file with an expected value stored in its database. If the values differ, the client determines that the file has been modified in the local repository.
**Note:** On the local side, the modification time is a good attribute to use for detecting changes, because the value does not depend on time shifts and such.
For the remote (that is, ownCloud server) repository, the client compares the ETag of each file with its expected value. Again, the expected ETag value is queried from the client database. If the ETag is the same, the file has not changed and no synchronization occurs.
In the event a file has changed on both the local and the remote repository since the last sync run, it can not easily be decided which version of the file is the one that should be used. However, changes to any side will not be lost. Instead, a conflict case is created. The client resolves this conflict by creating a conflict file of the older of the two files and saving the newer file under the original file name. Conflict files are always created on the client and never on the server. The conflict file uses the same name as the original file, but is appended with the timestamp of the conflict detection.
### 7.4 Ignored Files
The ownCloud Client supports the ability to exclude or ignore certain files from the synchronization process. Some system wide file patterns that are used to exclude or ignore files are included with the client by default and the ownCloud Client provides the ability to add custom patterns.
By default, the ownCloud Client ignores the following files:
- Files matched by one of the patterns defined in the Ignored Files Editor
- Files containing characters that do not work on certain file systems (`\`, `/`, `:`, `?`, `*`, `"`, `>`, `<`, `|`).
- Files starting with `.csync_journal.db`, as these files are reserved for journalling.
If a pattern is selected using a checkbox in the Ignored Files Editor (or if a line in the exclude file starts with the character `]` directly followed by the file pattern), files matching the pattern are considered fleeting meta data. These files are ignored and removed by the client if found in the synchronized folder. This is suitable for meta files created by some applications that have no sustainable meaning.
If a pattern ends with a forward slash (`/`), only directories are matched; the pattern is applied only to the directory components of file paths.
To match filenames against the exclude patterns, the standard Unix C library function `fnmatch` is used. It checks the filename against the specified pattern using standard shell wildcard pattern matching. For more information, please refer to the OpenGroup website.
The path that is checked is the relative path under the sync root directory.
Pattern and File Match Examples:
<table>
<thead>
<tr>
<th>Pattern</th>
<th>File Matches</th>
</tr>
</thead>
<tbody>
<tr>
<td>~$*</td>
<td>~$foo, ~$example.doc</td>
</tr>
<tr>
<td>fl?p</td>
<td>flip, flap</td>
</tr>
<tr>
<td>moo/</td>
<td>map/moo/, moo/</td>
</tr>
</tbody>
</table>
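For illustration only, a custom exclude file combining these rules could contain lines like the following; the patterns themselves are hypothetical. The `]` prefix marks fleeting meta data and a trailing `/` restricts the pattern to directories:
```
*.bak
~$*
]Thumbs.db
cache/
```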
### 7.5 The Sync Journal
The client stores the ETag number in a per-directory database, called the journal. This database is a hidden file contained in the directory to be synchronized.
If the journal database is removed, the ownCloud Client's CSync backend rebuilds the database by comparing the files and their modification times. Make sure that both server and client use the correct (NTP-synchronized) time before restarting the client after a database removal.
Pressing F5 while in the Account Settings Dialog enables you to “reset” the journal. This function can be used to recreate the journal database.
Note: We recommend that you use this function only when advised to do so by ownCloud support staff.
7.6 Custom WebDAV Properties
In the communication between client and server, a couple of custom WebDAV properties were introduced. They are either needed for sync functionality or have a positive effect on synchronization performance.
This chapter describes the additional XML elements that the server returns in response to a successful PROPFIND request on a file or directory. The elements are returned in the namespace `oc`.
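For illustration, these properties can be requested explicitly with a WebDAV PROPFIND, for example via curl. Server URL and credentials below are placeholders, and the `oc` namespace URI shown is the one commonly used by ownCloud servers:
```shell
curl -u user:password -X PROPFIND -H "Depth: 0" -H "Content-Type: text/xml" \
  --data '<?xml version="1.0"?>
<d:propfind xmlns:d="DAV:" xmlns:oc="http://owncloud.org/ns">
  <d:prop><oc:id/><oc:permissions/><oc:size/></d:prop>
</d:propfind>' \
  https://yourserver.com/owncloud/remote.php/webdav/somefile.txt
```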
### 7.7 Server Side Permissions
The XML element `<oc:permissions>` represents the permission- and sharing state of the item. It is a list of characters, and each of the chars has a meaning as outlined in the table below:
<table>
<thead>
<tr>
<th>Code</th>
<th>Resource</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>S</td>
<td>File or Folder</td>
<td>is shared</td>
</tr>
<tr>
<td>R</td>
<td>File or Folder</td>
<td>can share (includes reshare)</td>
</tr>
<tr>
<td>M</td>
<td>File or Folder</td>
<td>is mounted (like on DropBox, Samba, etc.)</td>
</tr>
<tr>
<td>W</td>
<td>File</td>
<td>can write file</td>
</tr>
<tr>
<td>C</td>
<td>Folder</td>
<td>can create file in folder</td>
</tr>
<tr>
<td>K</td>
<td>Folder</td>
<td>can create folder (mkdir)</td>
</tr>
<tr>
<td>D</td>
<td>File or Folder</td>
<td>can delete file or folder</td>
</tr>
<tr>
<td>N</td>
<td>File or Folder</td>
<td>can rename file or folder</td>
</tr>
<tr>
<td>V</td>
<td>File or Folder</td>
<td>can move file or folder</td>
</tr>
</tbody>
</table>
Example:
```
<oc:permissions>RDNVCK</oc:permissions>
```
### 7.8 File- or Directory Size
The XML element `<oc:size>` represents the file- or directory size in bytes. For directories, the size of the whole file tree underneath the directory is accumulated.
Example:
```
<oc:size>2429176697</oc:size>
```
### 7.9 FileID
The XML element `<oc:id>` represents the so-called file ID. It is a non-volatile string ID that stays constant as long as the file exists; it does not change when the file content changes or when the file is renamed or moved.
Example:
```
<oc:id>00000020oc5cfy6qqizm</oc:id>
```
APPENDIX C: TROUBLESHOOTING
The following two general issues can result in failed synchronization:
- The server setup is incorrect.
- The client contains a bug.
When reporting bugs, it is helpful if you first determine what part of the system is causing the issue.
8.1 Identifying Basic Functionality Problems
**Performing a general ownCloud Server test:** The first step in troubleshooting synchronization issues is to verify that you can log on to the ownCloud web application. To verify connectivity to the ownCloud server, try logging in via your Web browser.
If you are not prompted for your username and password, or if a red warning box appears on the page, your server setup requires modification. Please verify that your server installation is working correctly.
**Ensure the WebDAV API is working:** If all desktop clients fail to connect to the ownCloud Server, but access using the Web interface functions properly, the problem is often a misconfiguration of the WebDAV API.
The ownCloud Client uses the server's built-in WebDAV access. Verify that you can log on to ownCloud's WebDAV server. To verify connectivity with the ownCloud WebDAV server:
- Open a browser window and enter the address to the ownCloud WebDAV server.
For example, if your ownCloud instance is installed at http://yourserver.com/owncloud, your WebDAV server address is http://yourserver.com/owncloud/remote.php/webdav.
If you are prompted for your username and password but, after providing the correct credentials, authentication fails, please ensure that your authentication backend is configured properly.
**Use a WebDAV command line tool to test:** A more sophisticated test method for troubleshooting synchronization issues is to use a WebDAV command line client and log into the ownCloud WebDAV server. One such command line client, called cadaver, is available for Linux distributions. You can use this application to further verify that the WebDAV server is running properly, for example using PROPFIND calls.
As an example, after installing the cadaver app, you can issue the propget command to obtain various properties pertaining to the current directory and also verify WebDAV server connection.
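A quick session might look like this; replace the URL and credentials with your own:
```shell
# Connect to the WebDAV endpoint, then query the properties of the current directory
cadaver https://yourserver.com/owncloud/remote.php/webdav/
dav:/owncloud/remote.php/webdav/> propget .
```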
8.2 Isolating other issues
Other issues can affect synchronization of your ownCloud files:
- If you find that the results of the synchronizations are unreliable, please ensure that the folder to which you are synchronizing is not shared with other synchronization applications.
- Synchronizing the same directory with ownCloud and other synchronization software such as Unison, rsync, Microsoft Windows Offline Folders, or other cloud services such as DropBox or Microsoft SkyDrive is not supported and should not be attempted. In the worst case, it is possible that synchronizing folders or files using ownCloud and other synchronization software or services can result in data loss.
- If you find that only specific files are not synchronized, the synchronization protocol might be having an effect. Some files are automatically ignored because they are system files, other files might be ignored because their filename contains characters that are not supported on certain file systems. For more information about ignored files, see _ignored-files-label.
- If you are operating your own server, and use the local storage backend (the default), make sure that ownCloud has exclusive access to the directory.
Note: The data directory on the server is exclusive to ownCloud and must not be modified manually.
- If you are using a different file backend on the server, you can try to exclude a bug in the backend by reverting to the built-in backend.
- If you are experiencing slow upload/download speed or similar performance issues be aware that those could be caused by on-access virus scanning solutions, either on the server (like the files_antivirus app) or the client.
8.3 Log Files
Effectively debugging software requires as much relevant information as can be obtained. To assist the ownCloud support personnel, please try to provide as many relevant logs as possible. Log output can help with tracking down problems and, if you report a bug, log output can help to resolve an issue more quickly.
8.3.1 Obtaining the Client Log File
To obtain the client log file:
1. Open the ownCloud Desktop Client.
2. Press F12 on your keyboard.
The Log Output window opens.
3. Click the ‘Save’ button.
The Save Log File window opens.
4. Navigate to a location on your system where you want to save your log file.
5. Name the log file and click the ‘Save’ button.
The log file is saved in the location specified.
Alternatively, you can launch the ownCloud Log Output window using the `--logwindow` command. After issuing this command, the Log Output window opens to show the current log. You can then follow the same procedures mentioned above to save the log to a file.
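For example, to open the log window when starting the client:
```
owncloud --logwindow
```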
8.3.2 Saving Files Directly
The ownCloud client enables you to save log files directly to a predefined file or directory. This is a useful option for troubleshooting sporadic issues as it enables you to log large amounts of data and bypasses the limited buffer settings associated with the log window.
To save log files to a file or a directory:
1. To save to a file, start the client using the `--logfile <file>` command, where `<file>` is the filename to which you want to save the file.
2. To save to a directory, start the client using the `--logdir <dir>` command, where `<dir>` is an existing directory.
When using the `--logdir` command, each sync run creates a new file. To limit the amount of data that accumulates over time, you can specify the `--logexpire <hours>` command. When combined with the `--logdir` command, the client automatically erases saved log data in the directory that is older than the specified number of hours.
As an example, to define a test where you keep log data for two days, you can issue the following command:
```
owncloud --logdir /tmp/owncloud_logs --logexpire 48
```
8.3.3 ownCloud server Log File
The ownCloud server also maintains an ownCloud-specific log file. This log file must be enabled through the ownCloud Administration page, where you can also adjust the log level. We recommend setting the log level to a verbose level such as Debug or Info.
You can view the server log file using the web interface or you can open it directly from the file system in the ownCloud server data directory.
8.3.4 Webserver Log Files
It can be helpful to view your webserver's error log file to isolate any ownCloud-related problems. For Apache on Linux, the error logs are typically located in the `/var/log/apache2` directory. Some helpful files include the following:
• error_log – Maintains errors associated with PHP code.
• access_log – Typically records all requests handled by the server; very useful as a debugging tool because the log line contains information specific to each request and its result.
You can find more information about Apache logging at http://httpd.apache.org/docs/current/logs.html.
8.4 Core Dumps
On Mac OS X and Linux systems, and in the unlikely event the client software crashes, the client is able to write a core dump file. Obtaining a core dump file can assist ownCloud Customer Support tremendously in the debugging process.
To enable the writing of core dump files, you must define the `OWNCLOUD_CORE_DUMP` environment variable on the system.
For example:
`OWNCLOUD_CORE_DUMP=1 owncloud`
This command starts the client with core dumping enabled and saves the files in the current working directory.
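On Linux you may additionally need to raise the core file size limit in the shell from which you start the client; a minimal sketch:
```bash
# Allow core files of unlimited size in the current shell
ulimit -c unlimited
# Start the client with core dumping enabled; dump files land in the current working directory
OWNCLOUD_CORE_DUMP=1 owncloud
```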
**Note:** Core dump files can be fairly large. Before enabling core dumps on your system, ensure that you have enough disk space to accommodate these files. Also, due to their size, we strongly recommend that you properly compress any core dump files prior to sending them to ownCloud Customer Support.
FAQ
Issue:
Some files are continuously uploaded to the server, even when they are not modified.
Resolution:
It is possible that another program is changing the modification date of the file.
If the file uses the .eml extension, Windows automatically and continually changes all such files, unless you remove \\HKEY_LOCAL_MACHINE\SOFTWARE\Microsoft\Windows\CurrentVersion\PropertySystem\PropertyHandler from the Windows Registry.
GLOSSARY
**mtime**, **modification time**, **file modification time**: File property used to determine whether the server's or the client's file is more recent. Standard procedure in oCC 1.0.5 and earlier; used by oCC 1.1 and later only when no sync database exists and files already exist in the client directory.
**ownCloud Server**: The server counterpart of the ownCloud Client as provided by the ownCloud community.
**ownCloud Sync Client**, **ownCloud Client**: Name of the official ownCloud syncing client for desktop, which runs on Windows, Mac OS X and Linux. It uses the CSync sync engine for synchronization with the ownCloud server.
**unique id**, **Etag**: ID assigned to every file starting with ownCloud server 4.5 and submitted via the HTTP ETag. Used to check whether files on client and server have changed.
WRENCH: A Framework for Simulating Workflow Management Systems
Henri Casanova, Suraj Pandey, James Oeth, Ryan Tanaka, Frédéric Suter,
Rafael Ferreira da Silva
To cite this version:
HAL Id: hal-01948162
https://inria.hal.science/hal-01948162
Submitted on 7 Dec 2018
WRENCH: A Framework for Simulating Workflow Management Systems
Henri Casanova*, Suraj Pandey*, James Oeth§, Ryan Tanaka*, Frédéric Suter†, Rafael Ferreira da Silva§
*Information and Computer Sciences, University of Hawaii, Honolulu, HI, USA
§Information Sciences Institute, University of Southern California, Marina Del Rey, CA, USA
†IN2P3 Computing Center, CNRS, Villeurbanne, France
{henric,surajp,ryan}@hawaii.edu, {rafsilva,oeth}@isi.edu, frederic.suter@cc.in2p3.fr
Abstract—Scientific workflows are used routinely in numerous scientific domains, and Workflow Management Systems (WMSs) have been developed to orchestrate and optimize workflow executions on distributed platforms. WMSs are complex software systems that interact with complex software infrastructures. Most WMS research and development activities rely on empirical experiments conducted with full-fledged software stacks on actual hardware platforms. Such experiments, however, are limited to hardware and software infrastructures at hand and can be labor- and/or time-intensive. As a result, relying solely on real-world experiments impedes WMS research and development. An alternative is to conduct experiments in simulation.
In this work we present WRENCH, a WMS simulation framework, whose objectives are (i) accurate and scalable simulations; and (ii) easy simulation software development. WRENCH achieves its first objective by building on the SimGrid framework. While SimGrid is recognized for the accuracy and scalability of its simulation models, it only provides low-level simulation abstractions and thus large software development efforts are required when implementing simulators of complex systems. WRENCH thus achieves its second objective by providing high-level and directly re-usable simulation abstractions on top of SimGrid. After describing and giving rationales for WRENCH’s software architecture and APIs, we present a case study in which we apply WRENCH to simulate the Pegasus production WMS. We report on ease of implementation, simulation accuracy, and simulation scalability so as to determine to which extent WRENCH achieves its two above objectives. We also draw both qualitative and quantitative comparisons with a previously proposed workflow simulator.
Index Terms—Scientific Workflows, Workflow Management Systems, Simulation, Distributed Computing
I. INTRODUCTION
Scientific workflows have become mainstream in support of research and development activities in numerous scientific domains [1]. Consequently, several Workflow Management Systems (WMSs) have been developed [2]–[7] that allow scientists to execute workflows on distributed platforms that can accommodate executions at various scales. WMSs handle the logistics of workflow executions and make decisions regarding resource selection, data management, and computation scheduling, the goal being to optimize some performance metric (e.g., latency [8], [9], throughput [10], [11], jitter [12], reliability [13]–[15], power consumption [16], [17]). WMSs are complex software systems that interact with complex software infrastructures and can thus employ a wide range of designs and algorithms.
In spite of active WMS development and use in production, which has entailed solving engineering challenges, fundamental questions remain unanswered in terms of system designs and algorithms. Although there are theoretical underpinnings for most of these questions, theoretical results often make assumptions that do not hold with production hardware and software infrastructures. Further, the specifics of the design of a WMS can impose particular constraints on what solutions can be implemented effectively, and these constraints are typically not considered in available theoretical results. Consequently, current research that aims at improving and evolving the state of the art, although sometimes informed by theory, is mostly done via “real-world” experiments: designs and algorithms are implemented, evaluated, and selected based on experiments conducted for a particular WMS implementation with particular workflow configurations on particular platforms. As a corollary, from the WMS user’s perspective, quantifying accurately how a WMS would perform for a particular workflow configuration on a particular platform entails actually executing that workflow on that platform.
Unfortunately, real-world experiments have limited scope, which impedes WMS research and development. This is because they are confined to application and platform configurations available at hand, and thus cover only a small subset of the relevant scenarios that may be encountered in practice. Furthermore, exclusively relying on real-world experiments makes it difficult or even impossible to investigate hypothetical scenarios (e.g., “What if the network had a different topology?”, “What if there were 10 times more compute nodes but they had half as many cores?”). Real-world experiments, especially when large-scale, are often not fully reproducible due to shared networks and compute resources, and due to transient or idiosyncratic behaviors (maintenance schedules, software upgrades, and particular software configurations). Running real-world experiments is also time-consuming, thus possibly making it difficult to obtain statistically significant numbers of experimental results. Real-world experiments are driven by WMS implementations that often impose constraints on workflow executions. Furthermore, WMSs are typically not monolithic but instead reuse CyberInfrastructure (CI) components that impose their own overheads and constraints on workflow execution. Exploring what lies beyond these constraints via real-world executions,
e.g., for research and development purposes, typically entails unacceptable software (re-)engineering costs. Finally, running real-world experiments can also be labor-intensive. This is due to the need to install and execute many full-featured software stacks, including actual scientific workflow implementations, which is often not deemed worthwhile for “just testing out” ideas.
An alternative to conducting WMS research via real-world experiments is to use simulation, i.e., implement a software artifact that models the functional and performance behaviors of software and hardware stacks of interest. Simulation is used in many computer science domains and can address the limitations of real-world experiments outlined above. Several simulation frameworks have been developed that target the parallel and distributed computing domain [18]–[34]. Some simulation frameworks have also been developed specifically for the scientific workflow domain [11], [35]–[40].
We claim that advances in simulation capabilities in the field have made it possible to simulate WMSs that execute large workflows on large-scale platforms accessible via diverse CI services in a way that is accurate (via validated simulation models), scalable (fast execution and low memory footprint), and expressive (ability to describe arbitrary platforms, complex WMSs, and complex software infrastructure). In this work, we build on the existing open-source SimGrid simulation framework [33], [41], which has been one of the drivers of the above advances and whose simulation models have been extensively validated [42]–[46], to develop a WMS simulation framework called WRENCH [47]. More specifically, this work makes the following contributions:
1) We justify the need for WRENCH and explain how it improves on the state of the art.
2) We describe the high-level simulation abstractions provided by WRENCH that (i) make it straightforward to implement full-fledged simulated versions of complex WMS systems; and (ii) make it possible to instantiate simulation scenarios with only few lines of code.
3) Via a case study with the Pegasus [2] production WMS, we evaluate the ease-of-use, accuracy, and scalability of WRENCH, and compare it with a previously proposed simulator, WorkflowSim [35].
This paper is organized as follows. Section II discusses related work. Section III outlines the design of WRENCH and describes how its APIs are used to implement simulators. Section IV presents our case study. Finally, Section V concludes with a brief summary of results and a discussion of future research directions.
II. RELATED WORK
Many simulation frameworks have been developed for parallel and distributed computing research and development. They span domains such as HPC [18]–[21], Grid [22]–[24], Cloud [25]–[27], Peer-to-peer [28], [29], or Volunteer Computing [30]–[32]. Some frameworks have striven to be applicable across some or all of the above domains [33], [34]. Two conflicting concerns are accuracy (the ability to capture the behavior of a real-world system with as little bias as possible) and scalability (the ability to simulate large systems with as few CPU cycles and bytes of RAM as possible). The aforementioned simulation frameworks achieve different compromises between these two concerns by using various simulation models. At one extreme are discrete event models that simulate the “microscopic” behavior of hardware/software systems (e.g., by relying on packet-level network simulation for communication [48], on cycle-accurate CPU simulation [49] or emulation for computation). In this case, the scalability challenge can be handled by using Parallel Discrete Event Simulation [50], i.e., the simulation itself is a parallel application that requires a parallel platform whose scale is at least commensurate to that of the simulated platform. At the other extreme are analytical models that capture “macroscopic” behaviors (e.g., transfer times as data sizes divided by bottleneck bandwidths, compute times as numbers of operations divided by compute speeds). While these models are typically more scalable, they must be developed with care so that they are accurate. In previous work, it has been shown that several available simulation frameworks use macroscopic models that can exhibit high inaccuracy [43].
A number of simulators have been developed that target scientific workflows. Some of them are stand-alone simulators [11], [35]–[37]. Others are integrated with a particular WMS to promote more faithful simulation and code reuse [38], [39] or to execute simulations at runtime to guide on-line scheduling decisions made by the WMS [40].
The authors in [39] conduct a critical analysis of the state-of-the-art of workflow simulators. They observe that many of these simulators do not capture the details of underlying infrastructures and/or use naive simulation models. This is the case with custom simulators such as that in [36], [37], [40]. But it is also the case with workflow simulators built on top of generic simulation frameworks that provide convenient user-level abstractions but fail to model the details of the underlying infrastructure, e.g., the simulators in [11], [35], [38], which build on the CloudSim [25] or GroudSim [24] frameworks. These frameworks have been shown to lack in their network modeling capabilities [45]. As a result, some authors readily recognize that their simulators are likely only valid when network effects play a small role in workflow executions (i.e., when workflows are not data-intensive).
To overcome the above limitations, in [39] the authors have improved the network model in GroudSim and also use a separate simulator, DISSECT-CF [27], for simulating cloud infrastructures accurately. Both [39] and [27] acknowledge that the popular SimGrid [33], [34] simulation framework offers compelling capabilities, both in terms of scalability and simulation accuracy. But one of their reasons for not considering SimGrid is that, because it is low-level, using it to implement a simulator of a complex system, such as a WMS and the CI services it uses, would be too labor-intensive. In this work, we address this issue by developing a simulation framework that provides convenient, reusable, high-level abstractions but that builds on SimGrid so as to
benefit from its scalable and accurate simulation models. Furthermore, unlike [38], [39], we do not focus on integration with any specific WMS. The argument in [39] is that stand-alone simulators, such as that in [35], are disconnected from real-world WMSs because they abstract away much of the complexity of these systems. Instead, our proposed framework does capture low-level system details (and simulates them well thanks to SimGrid), but provides high-level enough abstractions to implement faithful simulations of complex WMSs with minimum effort, which we demonstrate via a case study with the Pegasus WMS [2].
Also related to this work is previous research that has not focused on providing simulators or simulation frameworks per se, but instead on WMS simulation methodology. In particular, several authors have investigated methods for injecting realistic stochastic noise in simulated WMS executions [35], [51]. These techniques can be adopted by most of the aforementioned frameworks, including the one proposed in this work.
III. WRENCH
A. Objective and Intended Users
WRENCH’s objective is to make it possible to study WMSs in simulation in a way that is accurate (faithful modeling of real-world executions), scalable (low computation and memory footprints on a single computer), and expressive (ability to simulate arbitrary WMS, workflow, and platform scenarios with minimal software engineering effort). WRENCH is not a simulator but a simulation framework that is distributed as a C++ library. It provides high-level reusable abstractions for developing simulated WMS implementations and simulators for the execution of these implementations. There are two categories of WRENCH users:
1. Users who implement simulated WMSs – These users are engaged in WMS research and development activities and need an “in simulation” version of their current or intended WMS. Their goals typically include evaluating how their WMS behaves over hypothetical experimental scenarios and comparing competing algorithm and system design options. For these users, WRENCH provides the WRENCH Developer API (described in Section III-D) that eases WMS development by removing the typical difficulties involved when developing, either in real-world or in simulation mode, a system comprised of distributed components that interact both synchronously and asynchronously. To this end, WRENCH makes it possible to implement a WMS as a single thread of control that interacts with simulated CI services via high-level APIs and must react to a small set of asynchronous events.
2. Users who execute simulated WMSs – These users simulate how given WMSs behave for particular workflows on particular platforms. Their goals include comparing different WMSs, determining how a given WMS would behave for various workflow configurations, comparing different platform and resource provisioning options, determining performance bottlenecks, engaging in pedagogic activities centered on distributed computing and workflow issues, etc. These users can develop simulators via the WRENCH User API (described in Section III-E), which makes it possible to build a full-fledged simulator with only a few lines of code.
Users in the first category above often also belong to the second category. That is, after implementing a simulated WMS these users typically instantiate simulators for several experimental scenarios to evaluate their WMS.
B. Software Architecture Overview
Figure 1 depicts WRENCH’s software architecture. At the bottom layer is the Simulation Core, which simulates low-level software and hardware stacks using the simulation abstractions and models provided by SimGrid (see Section II-C). The next layer implements simulated CI services that are commonly found in current distributed platforms and used by production WMSs. At the time of this writing, WRENCH provides services in 4 categories: compute services that provide access to compute resources to execute workflow tasks; storage services that provide access to storage resources for storing workflow data; network monitoring services that can be queried to determine network distances; and data registry services that can be used to track the location of (replicas of) workflow data. Each category includes multiple service implementations, so as to capture specifics of currently available CI services used in production. For instance, in its current version WRENCH provides a “batch-scheduled cluster” compute service, a “cloud” compute service, and a “bare-metal” compute service. The above layer in the software architecture consists of simulated...
WMSs, which interact with CI services using the WRENCH Developer API (see Section III-D). These WMS implementations, which can simulate production WMSs or WMS research prototypes, are not included as part of the WRENCH distribution, but implemented as stand-alone projects. One such project is the simulated Pegasus implementation for our case study in Section IV. Finally, the top layer consists of simulators that configure and instantiate particular CI services and particular WMSs on a given simulated hardware platform, that launch the simulation, and that analyze the simulation outcome. These simulators use the WRENCH User API (see Section III-E). Here again, these simulators are not part of WRENCH, but implemented as stand-alone projects.
C. Simulation Core
WRENCH’s simulation core is implemented using SimGrid’s S4U API, which provides all necessary abstractions and models to simulate computation, I/O, and communication activities on arbitrary hardware platform configurations. These platform configurations are defined by XML files that specify network topologies and endpoints, compute resources, and storage resources [52].
At its most fundamental level, SimGrid provides a Concurrent Sequential Processes (CSP) model: a simulation consists of sequential threads of control that consume hardware resources. These threads of control can implement arbitrary code, exchange messages via simulated network, can perform computation on simulated (multicore) hosts, and can perform I/O on simulated storage devices. In addition, SimGrid provides a virtual machine abstraction that includes a migration feature. Therefore, SimGrid provides all the base abstractions necessary to implement the classes of distributed systems that are relevant to scientific workflow executions. However, these abstractions are low-level and a common criticism of SimGrid is that implementing a simulation of a complex system requires a large software engineering effort. A WMS executing a workflow using several CI services is a complex system, and WRENCH builds on top of SimGrid to provide high-level abstractions so that implementing this complex system is not labor-intensive.
We have selected SimGrid for WRENCH for the following reasons. SimGrid has been used successfully in many distributed computing domains (cluster, peer-to-peer, grid, cloud, volunteer computing, etc.), and thus can be used to simulate WMSs that execute over a wide range of platforms. SimGrid is open source and freely available, has been stable for many years, is actively developed, has a sizable user community, and has provided simulation results for over 350 research publications since its inception. SimGrid has also been the object of many invalidation and validation studies [42]–[46], and its simulation models have been shown to provide compelling advantages over other simulation frameworks in terms of both accuracy and scalability [33]. Finally, most SimGrid simulations can be executed in minutes on a standard laptop computer, making it possible to perform large numbers of simulations quickly with minimal compute resource expenses.
To the best of our knowledge, among comparable available simulation frameworks (as reviewed in Section II), SimGrid is the only one to offer all the above desirable characteristics.
D. WRENCH Developer API
With the Developer API, a WMS is implemented as a single thread of control that executes according to the pseudo-code blueprint shown in Algorithm 1. Given a workflow to execute, a WMS first gathers information about all the CI services it can use to execute the workflow (lines 2-3). Examples of such information include the number of compute nodes provided by a compute service, the number of cores per node and the speed of these cores, the amount of storage space available in a storage service, the list of hosts monitored by a network monitoring service, etc. Then, the WMS iterates until the workflow execution is complete or has failed (line 4). At each iteration it gathers dynamic information about available resources and services if needed (line 5). Examples of such information include currently available capacities at compute or storage services, current network distances between pairs of hosts, etc. Then, if desired, the WMS can submit pilot jobs [53] to compute services that support them, if any (line 6). Based on resource information and on the current state of the workflow, the WMS can then make whatever scheduling decisions it sees fit (line 7). It then enacts these decisions by interacting with appropriate services (line 8). For instance, it could decide to submit a “job” to a compute service to execute a ready task on some number of cores and copy all produced files to some storage service, or it could decide to just copy a file between storage services and then update a data registry service to keep track of the location of this new file replica. It is the responsibility of the developer to implement all decision-making algorithms employed by the WMS. At the end of the iteration, the WMS simply waits for a workflow execution event to which it can react if need be (line 9). The most common events are job completions/failures and data transfer completions/failures.
The WRENCH Developer API provides a rich set of methods to analyze the workflow and to interact with CI services to execute it. These methods were designed based on current and envisioned capabilities of state-of-the-art WMSs. We refer the reader to the WRENCH Web site [47] for more information on how to use this API and for the full API documentation.
Algorithm 1 Blueprint for a WMS execution
1: procedure MAIN(workflow)
2: Obtain list of available services
3: Gather static information about the services
4: while workflow execution has not completed/failed do
5: Gather dynamic service/resource information
6: Submit pilot jobs if needed
7: Make data/computation scheduling decisions
8: Interact with services to enact decisions
9: Wait for and react to the next event
10: end while
11: return
12: end procedure
The key objective of this API is to make it straightforward to implement a complex system, namely a full-fledged WMS that interacts with diverse CI services. We achieve this objective by providing simple solutions and abstractions to handle well-known challenges when implementing a complex distributed system (whether in the real world or in simulation), as explained hereafter.
SimGrid provides simple point-to-point communication between threads of control via a mailbox abstraction. One of the recognized strengths of SimGrid is that it employs highly accurate and yet scalable network simulation models. However, unlike some of its competitors, it does not provide any higher-level simulation abstractions, meaning that distributed systems must be implemented essentially from scratch, with many message-based interactions. All message-based communication is abstracted away by WRENCH, and although the simulated CI services exchange many messages with the WMS and among themselves, the WRENCH Developer API only exposes higher-level interactions with services (“run this job”, “move this data”) and only requires that the WMS handle a few events. The WMS developer thus completely avoids the need to send and receive (and thus orchestrate) network messages.
Another challenge when developing a system like a WMS is the need to handle asynchronous interactions. While some service interactions can be synchronous (e.g., “are you up?”, “tell me your current load”), most need to be asynchronous so that the WMS retains control. The typical solution is to maintain sets of request handles and/or to use multiple threads of control. To free the WMS developer from these responsibilities, WRENCH provides already-implemented “managers” that can be used out of the box to take care of asynchronicity.
A WMS can instantiate such managers, which are independent threads of control. Each manager transparently interacts with CI services, maintains a database of pending requests, provides a simple API to check on the status of these requests, and automatically generates workflow execution events. For instance, a WMS can instantiate a “job manager” through which it will create and submit jobs to compute services. It can at any time check on the status of a job, and the job manager interacts directly (and asynchronously) with compute services so as to generate “job done” or “job failed” events to which the WMS can react. In our experience developing simulators from scratch using SimGrid, the implementation of asynchronous interactions with simulated processes is a non-trivial development effort, both in terms of amount of code to write and difficulty to write this code correctly. We posit that this is one of the reasons why some users have preferred using simulation frameworks that provide higher-level abstractions than SimGrid but offer less attractive accuracy and/or scalability features. WRENCH provides such higher-level abstractions to the WMS developers, and as a result implementing a WMS with WRENCH can be straightforward.
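As an illustration of this style of development, the sketch below shows the core loop of a hypothetical WMS written against the Developer API. It follows the blueprint of Algorithm 1 and the SomeWMS example used later in Figure 2; the class and method names are paraphrased from the text and the WRENCH documentation, so they should be read as an approximation rather than the exact API.

```cpp
// A minimal WMS main loop (sketch): greedily submit every ready task as a one-task job.
int SomeWMS::main() {
  // The job manager is an independent thread of control that hides all
  // asynchronous, message-based interactions with compute services.
  auto job_manager = this->createJobManager();
  auto compute_service = *(this->getAvailableComputeServices().begin());

  while (not this->getWorkflow()->isDone()) {
    for (auto const &task : this->getWorkflow()->getReadyTasks()) {
      // One job per ready task; an empty map means default file locations.
      auto job = job_manager->createStandardJob(task, {});
      job_manager->submitJob(job, compute_service);
    }
    // Block until the next workflow execution event (e.g., a job completion
    // or failure generated by the job manager) and react to it.
    this->waitForAndProcessNextEvent();
  }
  return 0;
}
```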
Finally, one of the challenges when developing a WMS is failure handling. It is expected that compute, storage, and network resources, as well as the CI services that use them, can fail throughout the execution of the WMS. SimGrid has the capability to simulate arbitrary failures via availability traces. Furthermore, failures can occur due to the WMS implementation itself, e.g., if it fails to check that the operations it attempts are actually valid, or if concurrent operations initiated by the WMS work at cross purposes. WRENCH abstracts away all these failures as C++ exceptions that can be caught by the WMS implementation, or caught by a manager and passed to the WMS as workflow execution events. Regardless, each failure exposes a failure cause, which encodes a detailed description of the failure. For instance, after initiating a file copy from one storage service to another, a “file copy failed” event sent to the WMS would include a failure cause that could specify that when trying to copy file x from storage service y to storage service z, storage service z did not have sufficient storage space. Other example failure causes could be that a network error occurred when storage service y attempted to receive a message from storage service z, or that service z was down. All CI services implemented in WRENCH simulate well-defined failure behaviors, and the failure handling capabilities afforded to simulated WMSs can enable more sophisticated fault-tolerance strategies than are currently implemented, or even possible, in real-world WMSs. But more importantly, the amount of code that needs to be written for failure handling in a simulated WMS is minimal.
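For example, a WMS could handle a failed file copy along the following lines. This is a sketch we provide for illustration: the exception type, the getCause() accessor, and the data movement manager method are paraphrased from the text and documentation and may differ in detail from the actual API, and the data_movement_manager member is assumed to have been created elsewhere in the WMS.

```cpp
#include <iostream>

// Sketch: synchronously copy a file between two storage services and report
// the detailed failure cause if the copy fails (e.g., insufficient space on
// the destination, a simulated network error, or a service that is down).
void SomeWMS::copyWithDiagnostics(wrench::WorkflowFile *file,
                                  std::shared_ptr<wrench::StorageService> src,
                                  std::shared_ptr<wrench::StorageService> dst) {
  try {
    this->data_movement_manager->doSynchronousFileCopy(file, src, dst);
  } catch (wrench::WorkflowExecutionException &e) {
    // Every WRENCH failure carries a failure cause with a human-readable description.
    std::cerr << "File copy failed: " << e.getCause()->toString() << std::endl;
  }
}
```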
Given the above, WRENCH makes it possible to implement a simulated WMS with very little code and effort. The example WMS implementation provided with the WRENCH distribution, which is simple but functional, is under 200 lines of C++ (once comments have been removed). See more discussion of the effort needed to implement a WMS with WRENCH in the context of our Pegasus case study (Section IV).
E. WRENCH User API
With the User API one can quickly build a simulator, which typically follows these steps:
1. Instantiate a platform based on a SimGrid XML platform description file;
2. Create one or more workflows;
3. Instantiate services on the platform;
4. Instantiate one or more WMSs telling each what services are at its disposal and what workflow it should execute starting at what time;
5. Launch the simulation; and
6. Process the simulation outcome.
The above steps can be implemented with only a few lines of C++. An example WRENCH simulator is shown in Figure 2, which uses a WMS implementation (called SomeWMS) that has already been developed using the WRENCH Developer API (see previous section). After initializing the simulation (lines 5-6), the simulator instantiates a platform (line 8) and a workflow (lines 10-11). A workflow is defined as a set of computation tasks and data files, with control and data dependencies between tasks. Each task can also have a priority, which can then be taken into account by a WMS for scheduling purposes. Although the workflow can be defined purely programmatically, in this example the workflow is imported from
int main(int argc, char **argv) {
// Declare and initialize a simulation
wrench::Simulation simulation;
simulation.init(argc, argv);
// Instantiate a platform
simulation.instantiatePlatform("my_platform.xml");
// Instantiate a workflow
wrench::Workflow workflow;
workflow.loadFromDAX("my_workflow.dax", "1000Gf");
// Instantiate a storage service
auto storage_service = simulation.add(new wrench::SimpleStorageService("storage_host", pow(2, 50)));
// Instantiate a compute service (a 4-node batch-scheduled cluster with a 1TiB scratch space)
auto batch_service = simulation.add(new wrench::BatchService("batch_login", {"node1", "node2", "node3", "node4"}, pow(2, 40),
{{wrench::BatchServiceProperty::SIMULATED_WORKLOAD_TRACE_FILE, "load.swf"},
{wrench::BatchServiceProperty::BATCH_SCHEDULING_ALGORITHM, "easy_bf"}}));
// Instantiate a compute service (a 4-host cloud platform that does not support pilot jobs)
auto cloud_service = simulation.add(new wrench::CloudService("cloud_gateway", {"host1", "host2", "host3", "host4"}, pow(2, 42),
{{wrench::CloudServiceProperty::SUPPORTS_PILOT_JOBS, "false"}}));
// Instantiate a data registry service
auto data_registry_service = simulation.add(new wrench::FileRegistryService("my_desktop"));
// Instantiate a network monitoring service (using the Vivaldi algorithm)
auto network_monitoring_service = simulation.add(new wrench::NetworkProximityService("my_desktop",
{"batch_login", "cloud_gateway", "my_desktop"},
{{wrench::NetworkProximityServiceProperty::NETWORK_PROXIMITY_SERVICE_TYPE, "vivaldi"}}));
// Stage a workflow input file at the storage service
simulation.stageFile(workflow.getFileByID("input_file"), storage_service);
// Instantiate a WMS...
auto wms = simulation.add(new wrench::SomeWMS({batch_service, cloud_service}, {storage_service},
{network_monitoring_service}, {data_registry_service}, "my_desktop"));
// ... and assign the workflow to it, to be executed one hour into the simulation
wms->addWorkflow(&workflow, 3600);
// Launch the simulation
simulation.launch();
// Retrieve task completion events
auto trace = simulation.getOutput().getTrace<wrench::SimulationTimestampTaskCompletion>();
// Determine the completion time of the last task that completed
double completion_time = trace[trace.size() - 1]->getContent()->getDate();
}
Fig. 2: Example fully functional WRENCH simulator. Try-catch clauses are omitted.
a workflow description file in the DAX format [54]. At line 13 the simulator creates a storage service with 1PiB capacity accessible on host storage_host. This and other hostnames are specified in the XML platform description file. At line 17 the simulator creates a compute service that corresponds to a 4-node batch-scheduled cluster. The physical characteristics of the compute nodes node[1-4] are specified in the platform description file. This compute service has a 1TiB scratch storage space. Its behavior is customized by passing a couple of property-value pairs to its constructor: it will be subject to a background load as defined by a trace in the standard SWF format [55], and its batch queue will be managed using the EASY Backfilling scheduling algorithm [56]. The simulator then creates a second compute service (line 22), which is a 4-host cloud service, customized so that it does not support pilot jobs. Two helper services are instantiated: a data registry service so that the WMS can keep track of file locations (line 26), and a network monitoring service that uses the Vivaldi algorithm [57] to measure network distances between the two hosts from which the compute services are accessed (batch_login and cloud_gateway) and the my_desktop host, which runs these helper services and the WMS (line 28). At line 34, the simulator specifies that the workflow data file input_file is initially available at the storage service. It then instantiates the WMS and passes to it all available services (line 36), and assigns the workflow to it (line 39). The crucial call is at line 41, where the simulation is launched and the simulator hands off control to WRENCH. When this call returns the workflow has either completed or failed. Assuming it has completed, the simulator then retrieves the ordered set of task completion events (line 43) and performs some (in this example, trivial) mining of these events (line 45).
For brevity, the example in Figure 2 omits try/catch clauses.
IV. CASE STUDY: SIMULATING A PRODUCTION WMS
In this section, we present a WRENCH-based simulator of a state-of-the-art WMS, Pegasus [2], as a case study for evaluation and validation purposes.
Pegasus is being used in production to execute workflows for dozens of high-profile applications in a wide range of scientific domains [2]. Pegasus provides the necessary abstractions for scientists to create workflows and allows for transparent execution of these workflows on a range of compute platforms including clusters, clouds, and national cyberinfrastructures. During execution, Pegasus translates an abstract resource-independent workflow into an executable workflow, determining the specific executables, data, and computational resources required for the execution. Workflow execution with Pegasus includes data management, monitoring, and failure handling, and is managed by HTCondor DAGMan [55]. Individual workflow tasks are managed by a workload management framework, HTCondor [59], which supervises task executions on local and remote resources.
A. Implementing Pegasus with WRENCH
Since Pegasus relies on HTCondor, we first implemented the HTCondor services as simulated core CI services, which together form a new Compute Service that exposes the WRENCH Developer API. This makes HTCondor available to any WMS implementation that is to be simulated using WRENCH, and it will be included in the next WRENCH release as part of the growing set of simulated core CI services provided by WRENCH.
HTCondor is composed of six main service daemons (startd, starter, schedd, shadow, negotiator, and collector). In addition, each host on which one or more of these daemons is spawned must also run a master daemon, which controls the execution of all other daemons (including initialization and completion). The bottom part of Figure 3 depicts the components of our simulated HTCondor implementation, where daemons are shown in red-bordered boxes. Our simulator implements the three fundamental HTCondor services, each consisting of a particular set of daemons, depicted in the bottom part of Figure 3 as borderless white boxes. The Job Execution Service consists of a startd daemon, which adds the host on which it is running to the HTCondor pool, and of a starter daemon, which manages task executions on this host. The Central Manager Service consists of a collector daemon, which collects information about all other daemons, and of a negotiator daemon, which performs task/resource matchmaking. The Job Submission Service consists of a schedd daemon, which maintains a queue of tasks, and of several instances of a shadow daemon, each of which corresponds to a task submitted to the HTCondor pool for execution.
Given the simulated HTCondor implementation above, we then implemented the simulated Pegasus WMS, including the DAGMan workflow engine, using the WRENCH Developer API. This implementation instantiates all services and parses the workflow description file, the platform description file, and a Pegasus-specific configuration file. DAGMan orchestrates the workflow execution (e.g., a task is marked as ready for execution once all its parent tasks have successfully completed), and monitors the status of tasks submitted to the HTCondor pool using a pull model, i.e., task status is fetched from the pool at regular time intervals. The top part of Figure 3 depicts the components of our simulated Pegasus implementation (each shown in a red box).
By leveraging WRENCH’s high-level simulation abstractions, implementing HTCondor as a reusable core WRENCH service using the Developer API required only 613 lines of code. Similarly, implementing a simulated version of Pegasus, including DAGMan, was done with only 666 lines of code (127 of which are merely parsing simulation configuration files). These numbers include both header and source files, but exclude comments. We argue that the above corresponds to minor simulation software development efforts when considering the complexity of the system being simulated.
Service implementations in WRENCH are all parameterizable. For instance, as services use message-based communications it is possible to specify all message payloads in bytes (e.g., for control messages). Other parameters encompass various overheads, either in seconds or in computation volumes (e.g., task startup overhead on a compute service). In WRENCH, all service implementations come with default
values for all these parameters, but it is possible to pick custom values upon service instantiation. The process of picking parameter values so as to match a specific real-world system is referred to as simulation calibration. We calibrated our simulator by measuring delays observed in event traces of real-world workflow executions on the hardware/software infrastructures described in Section IV-B.
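Concretely, a calibrated service instantiation overrides the default property and message-payload values in its constructor. The sketch below illustrates the idea; BATCH_SCHEDULING_ALGORITHM and SIMULATED_WORKLOAD_TRACE_FILE appear in Figure 2, while the startup-overhead property, the message-payload identifier, and the constructor argument order are assumptions made for illustration and should be checked against the API documentation.

```cpp
// A calibrated batch service (sketch): property values measured from real-world
// traces are passed at instantiation time instead of the defaults.
auto calibrated_batch_service = simulation.add(new wrench::BatchService(
    "batch_login", {"node1", "node2", "node3", "node4"}, pow(2, 40),
    {{wrench::BatchServiceProperty::BATCH_SCHEDULING_ALGORITHM, "easy_bf"},
     {wrench::BatchServiceProperty::TASK_STARTUP_OVERHEAD, "0.42"}},           // seconds (assumed identifier)
    {{wrench::BatchServiceMessagePayload::STANDARD_JOB_DONE_MESSAGE_PAYLOAD,   // assumed identifier
      "1024"}}));                                                              // bytes
```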
The simulator code, details on the simulation calibration procedure, and experimental scenarios used in the rest of this section are all publicly available online [60].
B. Experimental Scenarios
We consider experimental scenarios defined by particular workflow instances to be executed on particular platforms. Due to the lack of publicly available detailed workflow execution traces (i.e., execution logs that include data sizes for all files, all execution delays, etc.), we have performed real workflow executions with Pegasus and collected raw, time-stamped event traces from these executions. These traces form the ground truth to which we can compare simulated executions. We consider these workflow applications:
- **1000Genome** [61]: A data-intensive workflow that identifies mutational overlaps using data from the 1000 genomes project in order to provide a null distribution for rigorous statistical evaluation of potential disease-related mutations. We consider a 1000Genome instance that comprises 71 tasks.
- **Montage** [2]: A compute-intensive astronomy workflow for generating custom mosaics of the sky. For this experiment, we ran Montage to process 1.5 and 2.0 square degree mosaics of 2MASS images. We refer to these configurations as Montage-1.5 and Montage-2.0, respectively. Montage-1.5, resp. Montage-2.0, comprises 573, resp. 1,240, tasks.
We use these platforms, deploying on each a submit node (which runs Pegasus, DAGMan, and HTCondor’s job submission and central manager services), four worker nodes (4 cores per node / shared file system), and a data node in the WAN:
- **ExoGENI**: A widely distributed networked infrastructure-as-a-service testbed representative of a “bare metal” platform. Each worker node is a 4-core 2.0GHz processor with 12GiB of RAM. The bandwidth between the data node and the submit node was ∼0.40 Gbps, and the bandwidth between the submit and worker nodes was ∼1.00 Gbps.
- **AWS**: Amazon’s cloud platform, on which we use two types of virtual machine instances: t2.xlarge and m5.xlarge. The bandwidth between the data node and the submit node was ∼0.44 Gbps, and the bandwidth between the submit and worker nodes on these instances were ∼0.74 Gbps and ∼1.24 Gbps, respectively.
C. Simulation Accuracy
To evaluate the accuracy of our simulator, we consider 3 particular experimental scenarios: 1000Genome on ExoGENI, Montage-1.5 on AWS-t2.xlarge, and Montage-2.0 on AWS-m5.xlarge. Each execution is repeated 5 times and the overall workflow execution times, or makespans, are recorded.
The third column in Table I shows average relative differences between actual and simulated makespans. We see that simulated makespans are close to actual makespans across the board (average relative error is below 5%). One of the key advantages of building WRENCH on top of SimGrid is that WRENCH simulators benefit from the high-accuracy network models in SimGrid, e.g., these models capture many features of the TCP protocol. And indeed, when comparing real-world and simulated executions we observe average relative error below 3% for data movement operations. The many processes involved in a workflow execution with Pegasus interact by exchanging (typically small) control messages. Our simulator simulates these interactions. For instance, each time an output file is produced by a task a data registry service is contacted so that a new entry can be added to its database of file replicas, which incurs some overhead due to a message exchange. When comparing real-world to simulated executions we observe average relative simulation error below 1% for these data registration overheads.
To draw comparisons with a state-of-the-art simulator, we repeated the above simulations using WorkflowSim [35]. WorkflowSim does not provide a detailed simulated HTCondor implementation, does not offer the same simulation calibration capabilities as WRENCH, and is built on top of the CloudSim simulation framework [25]. Nevertheless, we have painstakingly calibrated our WorkflowSim simulator so that it models the hardware and software infrastructures of our experimental scenarios as closely as possible. For each of the 3 experimental scenarios, we find that the relative average makespan percentage error is 12.09 ± 2.84, 26.87 ± 6.26, and 13.32 ± 1.12, respectively, i.e., from 4x up to 11x larger than the error values obtained with our WRENCH-based simulator. The reasons for the discrepancies between WorkflowSim and real-world results are twofold. First, WorkflowSim uses the simplistic network models in CloudSim (see discussion in Section II) and thus suffers from simulation bias w.r.t. data
<table>
<thead>
<tr>
<th>Workflow</th>
<th>Platform</th>
<th>Avg. Makespan Error (%)</th>
<th>Task submissions p-value distance</th>
<th>Task completions p-value distance</th>
</tr>
</thead>
<tbody>
<tr>
<td>1000Genome</td>
<td>ExoGENI</td>
<td>1.10 ± 0.28</td>
<td>0.06 ± 0.01</td>
<td>0.72 ± 0.06</td>
</tr>
<tr>
<td>Montage-1.5</td>
<td>AWS-t2.xlarge</td>
<td>4.25 ± 1.16</td>
<td>0.08 ± 0.01</td>
<td>0.12 ± 0.05</td>
</tr>
<tr>
<td>Montage-2.0</td>
<td>AWS-m5.xlarge</td>
<td>3.37 ± 0.46</td>
<td>0.11 ± 0.03</td>
<td>0.10 ± 0.01</td>
</tr>
</tbody>
</table>
TABLE I: Average simulated makespan error (%), and p-values and Kolmogorov-Smirnov (KS) distances for task submission and completion dates, computed for 5 runs of each of our 3 experimental scenarios.
transfer times. Second, WorkflowSim does not capture all the relevant details of the system and its execution. By contrast, implementing a fully detailed simulator with WRENCH can be done in a few hundred lines of code.
In our experiments we also record the submission and completion dates of each task, thus obtaining empirical cumulative distribution functions (ECDFs) of these dates for both real-world executions and simulated executions. To further validate the accuracy of our simulation results we apply Kolmogorov-Smirnov goodness-of-fit tests (KS tests) with the null hypothesis ($H_0$) that the real-world and simulation samples are drawn from the same distribution. The two-sample KS test (with a two-sided alternative hypothesis) results in a miss if the null hypothesis is rejected at the 5% significance level ($p$-value ≤ 0.05). Each test for which the null hypothesis is not rejected ($p$-value > 0.05) indicates that the simulated execution statistically matches the real-world execution. Table I shows $p$-values and KS distances for both task submission times and task completion times. The null hypothesis is not rejected, and we thus conclude that simulated workflow task executions statistically match real-world executions well. These conclusions are confirmed by visually comparing ECDFs. For instance, Figure 3 shows real-world and simulated ECDFs for sample runs of Montage-2.0 on AWS-m5.xlarge, with task submission, resp. completion, date ECDFs on the left-hand, resp. right-hand, side. We observe that the simulated ECDFs (“wrench”) track the real-world ECDFs (“pegasus”) closely. We repeated these simulations using WorkflowSim, and found that the null hypothesis is rejected for all 3 simulation scenarios. This is confirmed visually in Figure 4, where the ECDFs obtained from the WorkflowSim simulation (“workflowsim”) are far from the real-world ECDFs.
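For reference, the two-sample KS distance reported in Table I is the maximum vertical gap between the two ECDFs: $D_{n,m} = \sup_x \left| \hat{F}_{\mathrm{real},n}(x) - \hat{F}_{\mathrm{sim},m}(x) \right|$, where $\hat{F}_{\mathrm{real},n}$ and $\hat{F}_{\mathrm{sim},m}$ are the ECDFs of the $n$ real-world and $m$ simulated task dates; $H_0$ is rejected at the 5% level when the associated $p$-value is at most 0.05.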
Although KS tests and visual inspection of ECDFs validate that the WRENCH-simulated ECDFs match the real-world ECDFs statistically, these results do not distinguish between individual tasks. In fact, there are some discrepancies between real-world and simulated schedules. For instance, Figure 5 shows Gantt charts corresponding to the workflow executions shown in Figure 3, with the real-world execution on the left-hand side (“pegasus”) and the simulated execution on the right-hand side (“wrench”). Task executions are shown on the vertical axis, each shown as a line segment along the horizontal time axis, spanning the time between the task’s start time and the task’s finish time. Different task types, i.e., different executables, are shown with different colors. In this workflow, all tasks of the same type are independent and have the same priority. We see that the shapes of the yellow regions, for example, vary between the two executions. These variations are explained by implementation-dependent behaviors of the workflow scheduler. In many instances throughout workflow execution several ready tasks can be selected for execution, e.g., sets of independent tasks in the same level of the workflow. When the number of available compute resources, $n$, is smaller than the number of ready tasks, the scheduler picks $n$ ready tasks for immediate execution. In most WMSs, these tasks are picked as whatever first $n$ tasks are returned when iterating over the data structures in which task objects are stored. Building a perfectly faithful simulation of a WMS would thus entail implementing/using the exact same data structures as in the actual implementation. This could be labor intensive, or perhaps not even possible, depending on which data structures, languages, and/or libraries are used in that implementation. In the context of this Pegasus case study, the production implementation of the DAGMan scheduler uses a custom priority list to store ready tasks, while our simulated version stores workflow tasks in a std::map data structure indexed by task string IDs. Consequently, when the real-world scheduler picks the first $n$ ready tasks it typically picks different tasks than those picked by its simulated counterpart. This is the cause of the discrepancies seen in Figure 5.
D. Simulation Scalability
Table II shows average simulated makespans and simulation execution times for our 3 experimental scenarios. Simulations are executed on a single core of a MacBook Pro 3.5 GHz Intel Core i7 with 16GiB of RAM. For these scenarios, simulation times are more than 100x and up to 2500x shorter than real-world workflow executions. This is because SimGrid simulates computation and communication operations as delays computed based on computation and communication volumes using simulation models with low computational complexity.
To evaluate the scalability of our simulator, we use a workflow generator [62] to generate representative randomized configurations of the Montage workflow ranging from 1,000 up to 10,000 tasks. We generate 5 workflow instances for each number of tasks, and simulate the execution of these generated workflow instances on 128 cores (AWS-m5.xlarge with 32 4-core nodes). Figure 6 shows simulation time (left vertical axis) and maximum resident set size (right vertical axis) vs. the number of tasks in the workflow. Each sample point is the average over the 5 workflow instances (error bars are shown as well). As expected, both simulation time and memory footprint increase as workflows become larger. The memory footprint grows linearly with the number of tasks (simply due to the need to store more task objects). The simulation time grows faster initially, but then linearly beyond 7,000 tasks. We conclude that the simulation scales well, making it possible to simulate very large 10,000-task Montage configurations in under 40 minutes on a standard laptop computer.
Figure 6 also includes results obtained with WorkflowSim. We find that WorkflowSim has a larger memory footprint than our WRENCH-based simulator (by a factor $\sim$1.48 for 10,000-task workflows). However, WorkflowSim is faster than our WRENCH-based simulator (by a factor $\sim$1.81 for 10,000-task workflows), with roughly similar trends. The reason why WorkflowSim is faster is that it simply does not simulate many aspects of the execution. The downside, as seen in the previous section, is that its simulation results are inaccurate.
V. CONCLUSION
In this paper we have presented WRENCH, a simulation framework for building simulators of Workflow Management Systems. WRENCH implements high-level simulation abstractions on top of the SimGrid simulation framework, so as to make it possible to build simulators that are accurate, that can run scalably on a single computer, and that can be implemented with minimal software development effort. Via a case study for the Pegasus production WMS we have demonstrated that WRENCH achieves these objectives, and that it favorably compares to a recently proposed workflow simulator. The main finding is that with WRENCH one can implement an accurate and scalable simulator of a complex real-world system with a few hundred lines of code. WRENCH is open source and welcomes contributors. WRENCH is already being used for several research and education projects, and Version 1.1 was released in August 2018. We refer the reader to http://wrench-project.org for software, documentation, and links to related projects.
A short-term development direction is to use WRENCH to simulate the execution of current production WMSs (as was done for Pegasus in Section IV). Although we have designed WRENCH with knowledge of these WMSs and with the intent of making their implementations with WRENCH feasible, we expect that WRENCH APIs and abstractions will evolve once we set out to realize these implementations. Another development direction is the implementation of more CI service abstractions in WRENCH, e.g., a Hadoop Compute Service or specific distributed cloud Storage Services. From a research perspective, a future direction is that of automated simulation calibration. As seen in our Pegasus case study, even when using validated simulation models, the values of a number of simulation parameters must be carefully chosen in order to obtain accurate simulation results. This issue is not confined to WRENCH, but is faced by all distributed system simulators. In our case study we have calibrated these parameters manually by analyzing and comparing simulated and real-world execution event traces. While, to the best of our knowledge, this is the typical practice, what is truly needed is an automated calibration method. Ideally, this method would process a (small) number of (not too large) real-world execution traces for “training scenarios”, and compute a valid and robust set of calibration parameter values. An important research question will then be to understand to what extent these automatically computed calibrations can be composed and extrapolated to scenarios beyond the training scenarios.
Acknowledgments. This work is funded by NSF contracts #1642369 and #1642335, “S12-SSE: WRENCH: A Simulation Workbench for Scientific Workflow Users, Developers, and Researchers”, and by CNRS under grant #PICS07239. We thank Martin Quinson, Arnaud Legrand, and Pierre-François Dutot for their valuable help.
<table>
<thead>
<tr>
<th>Workflow</th>
<th>Platform</th>
<th>Avg. Workflow Makespan (s)</th>
<th>Avg. Simulation Time (s)</th>
</tr>
</thead>
<tbody>
<tr>
<td>1000Genome</td>
<td>ExoGENI</td>
<td>761.0 ± 7.93</td>
<td>0.3 ± 0.01</td>
</tr>
<tr>
<td>Montage-1.5</td>
<td>AWS-t2.xlarge</td>
<td>1,784.0 ± 137.67</td>
<td>8.3 ± 0.09</td>
</tr>
<tr>
<td>Montage-2.0</td>
<td>AWS-m5.xlarge</td>
<td>2,911.8 ± 48.80</td>
<td>28.1 ± 0.52</td>
</tr>
</tbody>
</table>
Fig. 6: Average simulation time (in seconds, left vertical axis) and memory usage (maximum resident set size in MiB, right vertical axis) vs. workflow size.
TABLE II: Simulated workflow makespans and simulation times averaged over 5 runs of each of our 3 experimental scenarios.
Interdisciplinary Programming Language Design
DRAFT - Distributed for Comments
Michael Coblenz, Jonathan Aldrich, Brad Myers, Joshua Sunshine
Carnegie Mellon University
Pittsburgh, PA, USA
mcoblenz,jonathan.aldrich,bam,sunshine@cs.cmu.edu
Abstract
Traditional programming language design approaches center around theoretical and performance-oriented evaluation. Recently, researchers have been considering a broader range of approaches to language design, including the use of quantitative and qualitative user studies to evaluate how different designs affect users. In this paper, we argue for an interdisciplinary approach that incorporates many different methods in the creation and design of programming languages. We show how the addition of user-oriented design techniques can be helpful at many different stages in the programming language design process.
1 Introduction
Since the beginning of computing, programmers have argued about how they should express their programs. Some argue that the language should closely match the way users think about problems [23]. Others focus on training programmers to think in a way that facilitates correct reasoning about programs, and then they design languages to match this style of reasoning.
The stakes in this debate are high: programming languages are the most basic tool used by programmers, and therefore have a major impact on the software development process. Empirical studies suggest that the choice of programming language can significantly impact software quality and security [47], as well as performance and programmer productivity [41]. Understanding how to design languages better could clearly improve the way we engineer software.
In this paper, we discuss the question of design process, arguing that the most commonly used approaches fail to consider a broad enough range of different kinds of evidence. In contrast, we propose a variety of different approaches that can be used to create and evaluate programming languages.
Languages, like other kinds of software projects, frequently follow an iterative design process, summarized in Fig. 1:
Figure 1. A typical design process with suggested methods
1. In the requirements elicitation and creation phase, the designer studies the application domain for the language. The designer creates a draft version of the language, likely including a language specification and language implementation.
2. In the evaluation phase, the designer evaluates how well the language fulfills its requirements.
After evaluation, the design process may repeat to address shortcomings that were identified. We use the word design to refer to the entire process, including requirements analysis, specification, implementation, and evaluation. We use the word creation to refer to the part of the process that includes specifying the language, including its syntax and semantics, as well as the implementation phase, because these phases are usually intertwined.
Language designers face unique challenges relative to designers of other kinds of tools. Programming language designs must meet a unique set of interdisciplinary constraints and objectives, including mathematical foundations, performance characteristics of the created software, the ability of individual programmers to work efficiently (i.e. usability), and the ability of teams to construct large-scale software effectively (i.e. software engineering). All these considerations may conflict. For example, mathematical modeling of a language’s design can be used to create a type-safe or memory-safe language, with significant software engineering benefits. However, implementing the necessary checks at run time imposes performance overhead, while implementing those checks at compile time can preserve performance but may make the language less usable. Unfortunately, language design is too often done in an ad-hoc way that ignores one or more disciplines that should inform it. For example, many languages are designed without user-centered evaluations [52], resulting in designs that may fulfill theoretical and
performance requirements but impose unnecessary burdens on their users.
In this paper, we argue that the large, complex design space of programming languages justifies treating language design as an engineering activity—one that makes principled tradeoffs among considerations from multiple disciplines. As with software development, language development should be iterative, and incorporate not just summative evaluation on completed designs but also formative methods during the design process itself. We show how we and other researchers have used a wide range of research and design methods to gain insight into how to design programming languages so that they are as effective as possible for programmers. Our account will emphasize human-centered methods, as these tend to receive less emphasis in the existing literature, but will also demonstrate synergies between these methods and traditional approaches such as type theory. Finally, we show how qualitative evaluation methods can complement quantitative methods to inform the search through the language design space.
Overall, we argue for an approach to language design that:
1. Uses a diverse array of complementary methods to address a variety of design questions and evaluate the design from a wide range of perspectives.
2. Prioritizes specific quality attributes of a language according to domain needs, rather than assuming that a particular set of attributes is best for all languages.
3. Strategically selects which methods to apply at each step in the design process.
We use the word interdisciplinary rather than multidisciplinary because it emphasizes the benefit of combining techniques and approaches into a unified method. In contrast, a multidisciplinary approach would emphasize using different approaches individually, perhaps by independent experts. The latter is insufficient because the lessons learned from one approach should affect both the process and the lessons learned from other approaches. For example, language theory should guide the set of designs that are tested with users, while studies of the software development process should affect which are the most important theorems to prove.
We describe in §2 the goals for programming language designs that we believe most designers have. Next (§3), we discuss current approaches to programming language design, and argue for a holistic approach that prominently includes human-centered methods. §4 explains why no single method is sufficient in general, and why combining multiple approaches is more likely to lead to high-quality designs. §5 describes the methods that we and others have found useful in the creation and evaluation of programming languages. §6 discusses approaches for choosing which methods to use, and §7 describes two particular language designs in which different design and evaluation methods complemented one another.
2 Desiderata of Programming Languages
Quality attributes [31] describe properties that are used to evaluate software systems. For example, the maintainability quality attribute refers to a class of scenarios pertaining to how easy or difficult it is for maintainers of a system to modify it. Design decisions frequently involve tradeoffs among different quality attributes. A strong, static type system may guarantee the absence of certain kinds of bugs (correctness), but if it is too hard to use, a non-safety-critical system may be better served by a less type-safe language (modifiability). Furthermore, it is impossible to evaluate a programming language without knowing what one wants to evaluate it for— that is, what should be optimized? And evaluation of the attainment of different quality attributes requires different evaluation methods. For example, measuring correctness can be evaluated with formal methods or with testing; learnability requires evaluation with novices; performance typically requires executing benchmarks.
Below, we show a partial list of relevant considerations; practically every software quality attribute may be affected by a language design. Our purpose in this section is to highlight how particular characteristics of programs relate to programming language design and how they trade off with each other. As a result, we argue that programming language designers should be intentional and explicit about their priorities, and they should select from a diverse set of methods according to their design goals.
2.1 Traditional goals
Correctness concerns the question of whether a particular program has specific desirable properties, such as adherence to a specification and the absence of certain classes of bugs. Languages typically support correctness through formal approaches such as type systems or proof systems. Researchers evaluate the correctness of the formalism using proofs of appropriate soundness theorems.
Performance (of the resulting code) is typically evaluated with benchmarks on the particular hardware of interest. Performance is amenable to benchmarking methods, which have been well-described for particular performance evaluation domains [22].
Expressiveness is the ability of a programmer to express their intent explicitly in the language.¹ For example, a type system adds expressiveness compared to an untyped language in the sense of communicating the programmer’s intended restrictions on the contents of variables that are annotated with types. However, that same type system may rule out certain desirable program structures, limiting expressiveness in a different sense. Some forms of expressiveness may come at a usability cost, e.g. because users are forced to express decisions that they would prefer to postpone or prefer not to express formally. Additional expressiveness may facilitate some kinds of modifiability (e.g. changing the type of a function may allow the compiler to find all the calls that need to be updated) while inhibiting others (e.g. a large modification cannot be even partially tested until it is completely done). Researchers typically evaluate the expressiveness of the formal system by giving examples of the properties it can verify on sample programs.
¹For a more formal definition that is consistent with the more intuitive arguments made in this paragraph, we point the interested reader to Felleisen [20].
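To make this trade-off concrete, consider a small C++ illustration (ours, not drawn from the cited work): a static type annotation communicates and enforces the programmer's intent, but the same discipline rules out structures that a dynamically typed language would accept directly.

```cpp
#include <string>
#include <variant>
#include <vector>

// The element type communicates intent: this container holds only temperatures,
// and the compiler rejects any call site that tries to insert something else.
std::vector<double> temperatures_celsius;

// Expressing a heterogeneous collection under the same static discipline requires
// extra machinery (a sum type), whereas an untyped list would allow it directly.
std::vector<std::variant<double, std::string>> readings_or_error_messages;
```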
**Speed of compiling** was a major concern when computers were less powerful, and remains important for providing quick feedback in IDEs and when compiling very large systems. The module structure of Go, for example, was designed to improve compilation times compared to C++ [44].
### 2.2 User-centered goals
**Understandability** is the property of how easy it is for a reader of the code of a program to understand it. Programmers typically spend far more time reading code than writing it, with one estimate suggesting a 10:1 ratio of time spent reading to writing [33]. Researchers and practitioners have proposed a variety of measures of and proxies for understandability. One approach involves user studies in which participants are given source code and asked to answer questions about program behavior. Some languages have been evaluated by observing that the designers were able to implement a particular program in fewer lines of code using their language than using prior languages, but it is not necessarily the case that shorter programs are always easier to understand or maintain than longer programs [2]. Our primary interest is in the ability of programmers to correctly and quickly answer important questions about their programs (in service of their goals).
**Ease of reasoning** is the user-focused analog of correctness: for each form of correctness desired of a program, one might ask how easy or difficult it is for users to show a particular program is correct. This question is answered much less often than the questions about correctness itself, but it can be evaluated through user experiments [15]. Arguably, if programmers cannot use the correctness features correctly, then the resulting code will not be correct.
**Modifiability** captures the ease of making particular changes to programs. Developers spend more time maintaining and evolving programs than creating them in the first place, so modifiability has long been a major concern in the field of software engineering. Parnas suggested that information hiding, in service of modifiability, should be the main criterion for decomposing systems into modules [42], and so module systems have been a central way that language designers support this goal.
Module systems can also support ease of reasoning, since when appropriate specifications are added to module boundaries, reasoning can be done more locally. Languages may need to support modifiability in other ways, too, since modular decomposition sometimes has a deleterious effect on performance. Small-scale modifiability can be evaluated with user studies, while modifiability at larger scales requires case studies or repository mining methodologies.
**Learnability** is an important practical concern for adoption: to what extent can existing programmers use the language with little training? It is often evaluated in studies with novices.
### 3 Current Perspectives on Language Design
For comparison and discussion purposes, we describe several existing approaches to language design. Programming languages are designed by various people in various contexts (e.g., in universities or corporations) for many purposes. Our intent here is to promote discussion and draw contrasts between important language design styles, recognizing that in practice individual language designers may (as we advocate) use a combination of the approaches below.
A **logician** is primarily concerned with developing logical systems that are relevant to computation. Viewing programming as the practice of writing correct programs — that is, programs that meet particular mathematical specifications — the logician is focused on concise, convenient, correct expression of algorithms. Programming is considered to be a task that is best suited to experts, who can be thoroughly trained in the appropriate mathematics so that they can write correct programs. Since programming is considered to be primarily a mathematical pursuit, the best language for programming is likely to be very close to the language of mathematics (and presumably close to the way the logician is thinking, as in the closeness of mapping heuristic [23]). Future discoveries of mathematical principles may lead to better programming languages — ones in which programs can be expressed more beautifully and with stronger guarantees of correctness.
The **industrialist** is interested in designing languages that are effective for writing large software systems in order to achieve various commercial goals. As such, performance and adoption (which depends on many different attributes, including learnability [34] and interoperability) are often priorities.
The **empiricist** views programming languages as critical tools for programmer performance. The focus is on using carefully-designed experiments to show concrete effects of specific design decisions on programmers’ success on programming tasks. The empiricist expects that by doing a large number of experiments, researchers will learn how language designs affect users; after gathering sufficient data, language designers will be able to make a large portion of their design decisions on the basis of experimental evidence.

4 Interdisciplinary Design

We view each of the stylized approaches above to be useful for language design, but we view them as being individually too limited. Instead, our approach is to combine many different methods according to the design goals of the language. A logical approach (the logician) forms a sound basis for language design. It is useful for quickly eliminating from consideration many designs that are not internally consistent. However, formal methods do not specify which of many different, sound languages will be best for programmers, who are people [35]. While formal methods can link language constructs to program properties, they cannot directly tell us which program properties are the most important, and therefore should be the focus of a type system or proof system design.

An industrial approach is practically useful for designing serviceable languages. As researchers, however, we focus on longer-term goals of improving productivity at scale, as unconstrained as possible by the market forces and technological experience of current programmers. This points to the need to consider software engineering theory to understand how language constructs such as modules affect the software development process as programs scale up in size.

In industry, risk aversion frequently results in the selection of well-proven techniques, such as object-oriented and imperative programming. It is not necessary to show that the design is the best possible one, since a high-quality design that is of practical use suffices. Knowing what aspects of the design contribute to or detract from programmer success may be of lower priority than creating a design quickly and cost-effectively in which it is practical to implement interesting, commercially-relevant systems. Over time, as the community gains experience with the language, the design will be modified to make writing certain programs more convenient. However, it will be difficult to fix major design flaws in a deployed language due to backwards compatibility constraints, so users will have to learn workarounds for deficiencies.

We find the empirical approach compelling for conducting summative evaluations of systems; traditional quantitative methods from social science can be applied effectively to show, for example, that certain static type systems have certain benefits over dynamic type systems [19]. However, summative evaluations are only useful on systems that are complete enough to withstand user tests, which can require significant engineering work; furthermore, of the thousands of design decisions involved in a particular programming language design, a particular experiment can only consider a small set of options. For example, a 2 x 2 factorial study considers two design options in each of two dimensions, and even this would require a large number of participants if one wants statistically significant results. In cases where design choices interact—something we have observed to be very common in language design—it quickly becomes impossible to evaluate the cross product of the possible choices. These interactions between design features make it difficult to go from study results to holistic language designs. In contrast, language designers need approaches that allow them to explore and evaluate a larger portion of the design space. Additional challenges include the difficulty of studying longer and more complex tasks in a controlled, laboratory setting; and the difficulty of recruiting a representative sample of software engineers and retaining them in a laboratory environment long enough to obtain results.

Medicine is another discipline that seeks to inform its recommendations with empirical data. Some researchers have argued that the programming languages community might look to the field of medicine for insight regarding appropriate evidence in scientific fields [52, 53]. Evidence-based medicine rests on three pillars: individual clinical expertise; external clinical evidence from systematic research (particularly from controlled trials, when available, when considering therapeutic options); and patient values, preferences, and characteristics [18, 48, 51]. Notably, controlled trials form only one component of three; the medical community considers other relevant aspects of a clinical situation when recommending treatment. Even if language designers were to use a medical approach, then, they would need to consider arguments beyond those which are directly supported by controlled experiments. However, although clinicians can typically choose to not recommend a treatment, this option is not available to programming language designers, whose closest moral equivalent might be to abandon the pursuit of language design (instead recommending that users use existing languages). Language designers are frequently forced to make decisions or recommendations lacking direct experimental evidence.

The number of design decisions involved in designing a particular programming language is immense; we hope that future work will analyze this space more formally, but our experience suggests that there are at least thousands of decisions that are made in the design of any given language. Consider, for example, high-level decisions such as what paradigms and type systems to use, medium-level decisions such as what control structures and modularity features to provide in the language, and lower-level decisions such as the concrete syntax and which reserved words to use. In practice, designers complete their work by making many decisions on the basis of prior successful systems and their own intuition and experience. Although orthogonality of constructs is one of the canonical recommendations for language designers [45, 49], it is our experience that many language design decisions are not orthogonal. We argue, then, that it is risky to combine the results of individual experiments without some more holistic evaluation that either provides evidence that the decisions are in fact orthogonal, or provides enough guidance about how the decisions interact to properly interpret the experimental results.
Instead of relying on exhaustive experimentation, then, we propose to use many different methods from the field of Design to triangulate when making design decisions; although a particular method might only suggest a particular region in the design space, we can narrow that region by obtaining further guidance from other methods. Although this approach lacks the statistical satisfaction of randomized controlled trials, it has the benefit of producing evidence grounded in real users that can be obtained practically and applied to a wide variety of different language designs. We show in §5 examples of how various techniques have been used to obtain insight about programming languages.
The perspective of the teacher is useful in the design of practical languages because languages that are difficult to learn are less likely to be adopted. Insights from pedagogy may also provide hints as to which approaches are more or less natural for users. However, languages that focus on pedagogical goals may not be ideal for creating large, complex systems; instead, a teacher’s focus is on teaching particular aspects of programming so that students can be effective when using other languages.
An important aspect of an interdisciplinary approach is that it enables the collection of detailed qualitative results regarding different designs. Rather than focusing on whether a particular design promotes faster task completion times compared to another, we seek to learn why [29]: when programmers are confused, what is the cause of the confusion? What concrete improvements can we make to the language, the programming environment, and the training materials to improve task performance?
We seek to use human-centered approaches broadly in order to first obtain lower-cost, qualitative knowledge about designs, and then to obtain quantitative results showing how new designs compare to existing ones. Our assumption is that we are likely to obtain a better design (one for which a quantitative evaluation is likely to show a superior result) if we take user data into account throughout the design process [36] rather than focusing the use of user-oriented methods only at the end of the process.
In general, the discipline of design is about creating tools that help people achieve their goals while considering practical constraints [12]. Design is applicable to large design spaces, such as that of programming languages, including in high-stakes situations. For example, an airplane cockpit is designed taking human factors into account in order to reduce error rates and improve airplane safety [63]. The design recommendations are drawn from a variety of sources, including human factors texts and industry standards. The aviation industry learns how to design safe cockpits with an interdisciplinary approach; it does not restrict itself to quantitative studies of pilots with candidate interfaces.
5 Methods
We divide the methods into those that are primarily oriented around eliciting and iterating on design ideas (without needing a prototype to evaluate) and those that are oriented around evaluation (requiring a prototype).
5.1 Methods for requirements and creation
Surveys are a useful way to assess opinions and experience among a large sample, for example for assessing whether a proposed problem is one that a large fraction of practitioners face, or for assessing which problems are the most important to solve from a practitioner’s point of view. Some researchers have also used surveys to get direct insight into programming language designs [60], but the results have been inconclusive regarding specific design guidance. Most surveys ask people what they believe, but in some cases people’s beliefs do not lead to designs that benefit users in practice. Furthermore, survey results can be difficult to interpret or clouded with noise. Sometimes, little verifiable information is known about participants, and there may be motives that detract from data validity (e.g. Mechanical Turk workers may want to complete the survey as fast as possible to maximize their hourly wage).
Interviews can be a valuable source of information for areas in which researchers can find experts. These can be a useful approach to quickly obtain knowledge about existing problems and their existing solutions. For example, we interviewed experienced software engineers and API designers to understand how practitioners use immutability in their software designs [16]; the insights led to a new tool, Glacier [15], which is designed around the needs of real users instead of around maximizing expressiveness. Glacier extends Java to support transitive class immutability, a kind of immutability that the interviewees expressed was useful in real software. Interviews are limited in external validity because it may be difficult or impossible to interview a representative sample of any particular population. The results strongly depend on the participants themselves as well as the skill of the interviewer in eliciting as much useful information as possible with minimal bias.
Corpus studies can show the prevalence of particular patterns in existing code, including patterns of bugs in bug databases. For example, Callau et al. [13] investigated the use of dynamic features in Smalltalk programs, Malayeri et al. [32] investigated whether programs might benefit from a language that supported structural subtyping, and we studied how Java programmers used exception handling features [26]. Corpus studies can show that a particular problem occurs often enough that it might be worth addressing; they can also show how broadly a particular solution applies to real-world programs, as in Unkel and Lam’s analysis of stationary fields in Java [62]. However, it can be difficult to obtain a representative corpus. For example, though GitHub contains many open source projects, they can be difficult to build; it can be difficult to sample in an unbiased way; and open source code may not be representative of closed source code.
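To make the mechanics of the method concrete, the sketch below (a toy example, not taken from any of the studies cited above) counts empty catch blocks across a directory of Java sources; a real corpus study would use a parser rather than a regular expression and would report per-project rates on a carefully sampled corpus.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.List;
import java.util.regex.Pattern;
import java.util.stream.Collectors;
import java.util.stream.Stream;

// Toy corpus query: count empty catch blocks in a tree of .java files.
public class EmptyCatchCounter {
    private static final Pattern EMPTY_CATCH =
            Pattern.compile("catch\\s*\\([^)]*\\)\\s*\\{\\s*\\}");

    public static void main(String[] args) throws IOException {
        Path root = Paths.get(args.length > 0 ? args[0] : ".");
        List<Path> javaFiles;
        try (Stream<Path> files = Files.walk(root)) {
            javaFiles = files.filter(f -> f.toString().endsWith(".java"))
                             .collect(Collectors.toList());
        }
        long total = 0;
        for (Path p : javaFiles) {
            String src = Files.readString(p);
            total += EMPTY_CATCH.matcher(src).results().count();
        }
        System.out.println("empty catch blocks: " + total);
    }
}
```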
Natural Programming [36] is a technique to elicit how people express solutions to problems without any special training. It aims to find out how people might “naturally” write programs. These approaches have been useful for HANDS [40], a programming environment for children, as well as professionally-targeted languages, such as blockchain programming languages [3]. However, the results are biased by participants’ prior experience and education, and results depend on careful choice of prompts to avoid biased language.
Rapid prototyping is commonly used in many different areas of HCI, and can be used for language design as well [35]. Low-fidelity prototypes, such as paper prototypes, can be used to obtain feedback from users on early-stage design ideas. Wizard-of-Oz testing involves an experimenter substituting for a missing or insufficient implementation. For example, when evaluating possible designs for a type system for a blockchain programming language, we gave participants brief documentation on a language proposal and asked them to do tasks in a text editor. Because there was no type-checker implemented, the experimenter gave verbal feedback when participants wrote ill-typed code. This allowed us to learn about the usability of various designs without the expense of implementing designs that were about to be revised anyway. However, low fidelity prototypes may differ in substantive ways from polished systems, misleading participants. The results depend on the skill and perspectives of the experimenter and the participants, resulting in limited external validity.
Programming language and software engineering theory provide a useful guide when considering the requirements for a programming language. For example, the guarantees that a transitive immutability system can provide in the areas of both security and concurrency—which have been well-established in the programming language theory literature—were key reasons that we chose this semantics for the Glacier type system [16]. Similarly, an understanding of how modularity affects modifiability from the software engineering literature [42] motivates the module systems present in many languages, and more recent theories about how software architecture [50] influences software development motivated our design of the ArchJava language [1].
However, theoretical guarantees that pertain to optional language features will not be obtained if the features are misunderstood or not used. Furthermore, guarantees can be compromised by bugs in unverified tool implementations.
5.2 Methods for evaluation
Qualitative user studies have been used to evaluate many different kinds of tools, including programming languages [16, 40], APIs [37], and development environments [28]. Some of these consist of usability analyses, in which participants are given tasks to complete with a set of tools and the experimenter collects data regarding obstacles the participants encounter while performing the tasks. Unlike randomized controlled trials, these are usually not comparative; that analysis is left to a future study. Instead, they focus on learning as much as possible in a short amount of time in order to test feasibility of a particular approach and improve the tool for a future iteration of the design process.
Another qualitative approach involves participatory design [10, 39], in which participants are asked to help explore the design space and analyze tradeoffs. The assumption is that domain experts are likely to give feedback and suggestions that are of practical use in designing tools. Unlike usability studies, which evaluate existing prototypes, this approach is formative, intended to help inform the exploration of a design space.
Qualitative user studies can also be used to understand a problem that a language design is intended to solve, and help to guide other research methods used to evaluate the eventual solution. We studied programmers solving protocol-related programming problems that were gleaned from real StackOverflow questions in order to understand the barriers developers face when using stateful libraries [57]. The results of the study were useful in developing a language and its associated tools, and produced a set of tasks that were used in a later user experiment. Because of the qualitative user study, we knew these tasks were the most time-consuming component of real-world programming problems, mitigating the most significant external threat to the validity of the user experiment.
Qualitative user studies are usually limited to short-duration tasks with participants that researchers can find. In practice, this sometimes limits the sizes of the programs that the tasks concern because larger programs typically require more sophisticated participants and more participant time. Although a typical qualitative user study might only take an hour or two per participant, even a small real-world programming task might take a day or more.
Case studies show expressiveness: a solution to a particular programming problem can be expressed in the language in question. Many case studies aim to show concision, observing that the solution is expressible with a short program, particularly in comparison to the length of a typical solution in a comparison language. Case studies are particularly helpful when the language imposes restrictions that might cause a reader to wonder whether the restrictions prevent application of the language to real problems.
Case studies can also be used to learn about how a programming language design works in practice. For example, we used exploratory case studies on ArchJava, an extension of Java with software architecture constructs, to learn about the strengths and limitations of the language design and to generate hypotheses about how the approach might affect the software engineering process.
Case studies have limited external validity because they necessarily only consider a small set of use cases (perhaps just one). As a result, the conclusions are biased by the selection of the cases. Furthermore, the results may not generalize to typical users, since the case studies may be done by expert users of the system under evaluation.
Expert evaluation methods, such as Cognitive Dimensions of Notations [23] and Nielsen’s Heuristic Analysis [38], provide a vocabulary for discussing design tradeoffs. Although they do not definitively specify what decision to make in any particular case because each option may trade off with another, they provide a validated mechanism for identifying advantages and disadvantages of various approaches. This approach has been widely used in the visual languages community. However, expert evaluation requires access to experts and a validated and relevant set of criteria. The traditional criteria, such as Cognitive Dimensions of Notations, have not yet been validated against traditional textual languages by showing that their results are correlated with quantitative experiments.
Performance evaluation, typically via benchmarks, is well-accepted for comparing languages and tools. Performance evaluation can be critical if it is relevant to the claims made about a language, but many popular languages are not as fast as alternatives (consider Python vs. C), so it is important to decide how much performance is required. SIGPLAN released a checklist [6] for empirical evaluations of programming languages; although it is titled simply “Empirical Evaluation Checklist,” it describes only performance evaluations. The checklist hints at limitations of this approach, such as mismatch between benchmark suite and real-world applications; an insufficient number of trials; and unrealistic input.
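To illustrate why such pitfalls matter (a deliberately naive, hypothetical harness, not an example from the checklist), the measurement below times a single trial on a small input; JIT warm-up and input choice can easily dominate whatever language-level effect one hoped to measure.

```java
// Deliberately naive micro-benchmark illustrating two checklist pitfalls:
// too few trials, and an input that may not resemble real workloads.
public class NaiveBenchmark {
    static long sumOfSquares(int n) {
        long s = 0;
        for (int i = 0; i < n; i++) s += (long) i * i;
        return s;
    }

    public static void main(String[] args) {
        int n = 1_000;                       // unrealistic input: real workloads are larger
        long start = System.nanoTime();
        long result = sumOfSquares(n);       // single trial: JIT warm-up dominates the timing
        long elapsed = System.nanoTime() - start;
        System.out.println(result + " in " + elapsed + " ns");
        // A credible evaluation would run many warm-up and measurement iterations,
        // report variance, and use inputs drawn from the intended application domain.
    }
}
```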
User experiments, also known as randomized controlled trials (RCTs), have been used to address a variety of programming language design questions, such as the benefits of C++ lambdas [61], static type systems [19], and typechecking [46]. In some ways, these represent the gold standard for summative evaluations. However, they do not always lead to insights that can be used to design or improve systems, and unless they are supplemented by theory (e.g., gleaned from qualitative studies), it can be difficult to be certain that results on a narrow problem studied in the laboratory will apply to a more complex real-world setting. For example, Uesbeck et al. discuss in what contexts their conclusions might apply [61], but not how one might improve C++ lambdas to retain possible advantages but mitigate identified shortcomings.
Formalism and proof are traditional tools for showing that a specific language design has particular properties, such as type soundness [43]. In many languages, a formal model provides key insight that inspires a new language design; in these cases, the formal analysis might be the first step in a language design. However, in other languages, a formalism serves primarily to provide a specification and a safety guarantee, in which case this work might be done much later.
A typechecker provides some safety guarantees once a program typechecks, but one must compare the difficulty of writing a type-correct program to the difficulty of obtaining safety some other way (for example, with runtime checks, at the cost of deferring verification to runtime) and to the option of not providing the guarantee at all. In some systems, safety guarantees are not necessary; for example, the consequences of a bug in a video game may be smaller than the consequences of a bug in avionics software.
Formal verification via tools such as Dafny [30] or Coq [7] can provide even stronger guarantees, likely at greater implementation cost. However, if the tools are too difficult to use, programmers may not obtain the guarantees because they may circumvent the tools (e.g., by implementing difficult procedures in a lower-level language) or because they may fail to complete their projects within their cost and time constraints.
In practice, there is typically a gap between what is actually specified in a formal specification of correctness and what is desired by the programmer. For example, a programmer may specify the correct output of a factorial function in a recursive way, implement the function iteratively to avoid overflowing the stack for large input, and leave the specification that the program shall not overflow the stack for input within the expressible range of machine-size integers unwritten.
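A minimal sketch of this kind of gap (a hypothetical example, with the stack-overflow concern appearing only as a comment because, as described above, it typically goes unwritten):

```java
// Hypothetical illustration of a specification/implementation gap.
public class Factorial {
    // Specification, stated recursively: fact(0) = 1, fact(n) = n * fact(n - 1).
    static long factSpec(long n) {
        return (n == 0) ? 1 : n * factSpec(n - 1);
    }

    // Implementation: iterative, precisely to avoid deep recursion on large inputs.
    static long fact(long n) {
        long result = 1;
        for (long i = 2; i <= n; i++) {
            result *= i;
        }
        return result;
    }

    public static void main(String[] args) {
        // The written specification relates fact to factSpec on small inputs...
        assert fact(10) == factSpec(10);
        // ...but the property that motivated the iterative implementation
        // ("no stack overflow for large n") is never written down as a specification.
        System.out.println(fact(20));
    }
}
```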
6 Strategy
Language designers must be strategic in their selection and ordering of methods. We propose applying methods according to likely risk reduction: prefer the techniques whose insights are most likely to reduce the risk of failing to achieve the design goals. Languages that are inspired by the desire to investigate how a particular logical system corresponds with computation are likely to start with a formal analysis. The design of a domain-specific language might start by using formative methods to understand the appropriate domain and its users. The domain-specific language design project may do the formal analysis last because although the designers want to show that their language is sound, they do not expect the results of the formal analysis to significantly impact the design of the language.
Research-oriented language designers may only use a small number of different methods in order to achieve the research objectives. Researchers are typically interested in languages that are novel along particular dimensions but standard along others; they should therefore use methods that target the novel aspects of their language rather than attempting to evaluate all aspects of their language designs. Application of various methods requires special expertise, and individual researchers will likely specialize in one or more methods. This offers an opportunity for collaboration to apply the appropriate methods to each language design. Production-oriented language designers may need a wider variety of methods because their designs must have high quality along many different axes. However, they may also be likely to make traditional choices in a wide variety of aspects of the design; as such, they may not be interested in design methods that address most of the aspects of the design. For example, designers of a new object-oriented language need not conduct evaluations on the usability of object-oriented programming, but they might conduct evaluations of their particular syntax and compiler error messages.
7 Examples
In this section, we show how combinations of the above methods have been helpful in two examples of programming language designs.
Typestate is a way of tracking the conceptual states of objects in a type system, ensuring that state-sensitive operations such as read on a File are not applied when the object is in an inappropriate state, such as closed [55]. Two of us were involved in a decade-long interdisciplinary research project that illustrates how different research methods complement one another in exploring language and type system support for typestate.2
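To make the idea concrete, the hypothetical class below enforces a File-like open/closed protocol with run-time checks; a typestate-aware type system rejects the same misuse at compile time instead. (The class and its methods are illustrative, not code from the project described here.)

```java
// Hypothetical illustration of an object protocol ("typestate").
public class ProtocolFile {
    private enum State { OPEN, CLOSED }
    private State state = State.CLOSED;

    public void open() {
        if (state == State.OPEN) throw new IllegalStateException("already open");
        state = State.OPEN;
    }

    // A typestate-aware checker would reject calls to read() unless the receiver
    // is statically known to be in the OPEN state.
    public int read() {
        if (state != State.OPEN) throw new IllegalStateException("read on closed file");
        return 0;  // stub: a real file would return data here
    }

    public void close() {
        state = State.CLOSED;
    }

    public static void main(String[] args) {
        ProtocolFile f = new ProtocolFile();
        f.open();
        f.read();    // fine: the file is OPEN
        f.close();
        // f.read(); // protocol violation: caught only at run time here,
        //           // but at compile time under a typestate type system
    }
}
```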
We wanted to know how common it is in practice to have state protocols, so we carried out a code corpus study identifying and classifying classes that define protocols in Java library and application code [5]. Our study was a bit unusual for corpus studies: while we used tools to identify code that might define protocols, because the definition of protocols includes the notion of abstract states, we had to manually examine each candidate identified by the tool to verify that it really defined a protocol. We found that at least 7% of types defined protocols.
2We present the work in a logical order; the actual research was done in an order that reflected the interests of students as well as our group’s ongoing exploration of different research methods.
StackOverflow tasks. This example shows that a properly-designed pairing of experiment and formative study can be much more convincing than either study in isolation.
Glacier is an extension to Java that supports transitive class immutability [15, 16]. We started with the question “What kinds of immutability should a programming language support, and how should it support them?” We began with a literature review to understand existing approaches. We found a progression of increasingly complex research systems [11, 24, 25, 27, 59] supporting increasing numbers of kinds of immutability, but little evidence regarding which of these were actually needed in practice. However, immutability is a frequently-discussed topic in the software industry, so it is an area where experts are likely to have well-formed opinions. Therefore, we conducted interviews of professional software engineers to see what kinds of evidence we could gather regarding the utility of different kinds of immutability approaches. These interviews suggested, among other things, that developers would like immutability to help in developing concurrent systems. Language theory tells us that a transitive immutability system could be effective for this and other identified goals, an observation also supported by the interviews themselves. Interested in evaluating the effect of supporting transitive immutability, we built a prototype (informed by a formal model) and conducted a small qualitative study comparing an existing research tool, IGJ [64], with our prototype, IGJ-T. We noted that our participants had difficulty understanding the error messages in IGJ, which resulted in part from the wide variety of scenarios that IGJ was designed to support, such as both class and object immutability.
To improve our chances of obtaining a system that people could use effectively, we focused on a simpler system, which supported only transitive, class-based immutability. We hoped that this point in the design space would result in a simple, usable system that expressed constraints that were relevant to real programs. We evaluated this hypothesis in a randomized, controlled experiment. We assigned ten participants to use plain Java with final and ten others to use our extension, Glacier, on code writing tasks. We found that most of the participants who only had final accidentally modified state in immutable objects, resulting in bugs. We also asked participants to use their assigned tools to specify immutability in a small codebase. We found that most of the participants who had Glacier could specify immutability correctly; in contrast, every final user made mistakes when attempting to enforce immutability. Both of these results were statistically significant.
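The contrast between the two conditions can be seen in a small sketch (hypothetical code; Glacier's actual checker and annotation are described in the cited papers): final fixes a reference but not the state reachable from it, which is exactly the kind of accidental mutation the final-only participants made.

```java
import java.util.ArrayList;
import java.util.List;

// Hypothetical sketch: why 'final' alone does not give immutability.
public class FinalVsImmutable {
    // 'final' only prevents reassignment of the reference;
    // the list it points to remains freely mutable.
    static final List<String> AUTHORS = new ArrayList<>();

    public static void main(String[] args) {
        AUTHORS.add("oops");          // compiles and runs: state behind a "constant" changed
        System.out.println(AUTHORS);  // prints [oops]
    }
}

// With Glacier, a class can instead be declared transitively immutable
// (roughly, an @Immutable annotation on the class), and the checker then
// rejects mutation of any state reachable from instances of that class.
```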
Finally, we conducted a case study applying Glacier to real-world systems. We were able to express the kinds of immutability used in a real Java spreadsheet implementation and in a Guava collections class (with one caveat for caching). On this basis, we argue that Glacier is likely to be applicable to a variety of real-world systems; its simplicity does not limit its utility to tasks in artificial lab studies. In contrast, we argue that its simplicity increases its value by providing usability so that programmers are able to use it effectively.
Glacier shows one example of how researchers can inform their research with qualitative methods, including interviews and qualitative lab studies, and then show the benefit of their tools in a quantitative lab study afterward. We were able to show a successful quantitative result after significant iterations and qualitative human-centered evaluations, arguably because our design had been informed by other research methods.
8 Conclusions
Every design method has limitations. We argue for the application of many different methods in the programming language design process: theoretical methods as well as quantitative and qualitative user-oriented methods. These user-oriented methods have been shown to be useful in the process of creating and evaluating programming languages and programming language extensions.
An individual researcher or language designer may not be familiar with the entire breadth of methods we promote. Instead, we recommend collaborative efforts, where researchers work together to apply theoretical, formative, and summative techniques in order to prove relevant properties, explore fruitful portions of the design space, and show that their designs benefit users in specific, quantifiable ways.
Acknowledgments
This material is based upon work supported by NSF grant CNS-1734138, NSF grant CNS-1423054, by NSA lablet contract H98230-14-C-0140, by the Software Engineering Institute, and by AFRL and DARPA under agreement #FA8750-16-2-0042. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of the sponsors.
Abstract. Four new synchronisation primitives (SEMAPHOREs, RESOURCES, EVENTS and BUCKETS) were introduced in the KRoC 0.8beta release of occam for SPARC (SunOS/Solaris) and Alpha (OSF/1) UNIX workstations [1][2][3]. This paper reports on the rationale, application and implementation of two of these (SEMAPHOREs and EVENTS). Details on the other two may be found on the web [4].
The new primitives are designed to support higher-level mechanisms of SHARING between parallel processes and give us greater powers of expression. They will also let greater levels of concurrency be safely exploited from future parallel architectures, such as those providing (virtual) shared-memory. They demonstrate that occam is neutral in any debate between the merits of message-passing versus shared-memory parallelism, enabling applications to take advantage of whichever paradigm (or mixture of paradigms) is the most appropriate.
The new primitives could be (but are not) implemented in terms of traditional channels, but only at the expense of increased complexity and computational overhead. The primitives are immediately useful even for uni-processors – for example, the cost of a fair ALT can be reduced from $O(n)$ to $O(1)$. In fact, all the operations associated with new primitives have constant space and time complexities; and the constants are very low.
The KRoC release provides an Abstract Data Type interface to the primitives. However, direct use of such mechanisms still allows the user to misuse them. They must be used in the ways prescribed (in this paper and in [4]) else their semantics become unpredictable. No tool is provided to check correct usage at this level.
The intention is to bind those primitives found to be useful into higher level versions of occam. Some of the primitives (e.g. SEMAPHOREs) may never themselves be made visible in the language, but may be used to implement bindings of higher-level paradigms (such as SHARED channels and BLACKBOARDS). The compiler will perform the relevant usage checking on all new language bindings, closing the security loopholes opened by raw use of the primitives.
The paper closes by relating this work with the notions of virtual transputers, microcoded schedulers, object orientation and Java threads.
1 Channels are not enough
An occam channel is a primitive combining communication and synchronisation. As a synchronisation primitive, it applies to two processes at a time. Some applications require many processes to synchronise before any can continue – for example, the barrier synchronisations used by common shared-memory parallel algorithms.
Multi-way synchronisation is a fundamental idea in CSP [5], but is not implemented in occam. The computational arrangements for allowing any of the synchronising processes to back off (which CSP allows) is even more costly than allowing both parties to back off during channel synchronisation. However, just as allowing only the receiver to back
off an offer to communicate enabled an efficient channel implementation in occam, a similarly drastic rule – allowing no parties to back off a multi-way synchronisation – makes possible an efficient implementation of the CSP EVENT. Does such a restriction still leave a useful primitive? Just as for occam channels, the answer seems to be yes.
A different way of looking at channels is that they provide a peg on which to hang a blocked process. If we have lots of processes we wish to suspend for some common reason (e.g. they are waiting on a common event or for some shared resource, access to which is restricted by some rules), we either have to have lots of channels on which to hang them (and, later, organise their release) or we put them on a timer queue. Neither of these may be convenient or computationally light.
What are needed are different kinds of peg on which we may hang arbitrary numbers of processes ... plus the ability to retrieve them one at a time (SEMAPHOREs and RESOURCES) ... or all at once (EVENTs and BUCKETS) ... or some other way ...
2 Abstract Data Types
Each new primitive is presented as an Abstract Data Type. Each is implemented as an occam2.1 DATA TYPE, together with a set of operations defined through INLINED occam2.1 PROCs and FUNCTIONS.
Full source code is provided in the KRoC 0.9beta occam system. Each primitive is accessed through a separate #INCLUDE file. KRoC releases may be found on the Internet Parallel Computing Archive:
<URL:http://www.hensa.ac.uk/parallel/occam/projects/occam-for-all/kroc/>
<URL:ftp://unix.hensa.ac.uk/pub/parallel/occam/projects/occam-for-all/kroc/>
Although users have visibility of the data structures used for each primitive, advantage must not be taken of this visibility. Components of the data structures must not be accessed directly by user programs. Instances of the primitives may only be operated on by calling the PROCs and FUNCTIONS provided.
3 SEMAPHOREs
3.1 SEMAPHORE Abstract Data Type
These implement classic counting semaphores:
- **DATA TYPE SEMAPHORE**
Users may declare their own SEMAPHORE variables and pass them as reference parameters. One SEMAPHORE should be declared to control access to each shared resource (which could be a data or channel structure). SEMAPHOREs must not be duplicated by assignment or communication through channels.
- **PROC initialise.semaphore (SEMAPHORE s, VAL INT count)**
Each SEMAPHORE must be initialised with this routine before it is used. The count value is the number of processes allowed simultaneous access to the shared resource. For exclusive access, set this to 1.
- **PROC claim.semaphore (SEMAPHORE s)**
Before accessing the shared resource, a process must call this routine to claim the associated SEMAPHORE. If there are less than count (where count is the value with which the SEMAPHORE was initialised) processes using the shared resource, this process will be allowed through – i.e. the call will return immediately. Otherwise, the process will be blocked and put on the queue of processes associated with the SEMAPHORE.
- **PROC release.semaphore (SEMAPHORE s)**
When a process has finished with the shared resource, it must call this routine to register its release of the associated SEMAPHORE. If there are processes waiting to claim that SEMAPHORE, the first process on that queue is re-scheduled – i.e. allowed through to use the resource.
3.2 The normal pattern of use
So, the normal pattern of use is:
... thing declaration (where thing is to be SHARED by many processes)
#PRAGMA SHARED thing -- suppress parallel usage checking
SEMAPHORE thing.s:
#PRAGMA SHARED thing.s -- suppress parallel usage checking
SEQ
  initialise.semaphore (thing.s, 1)   -- for exclusive access (for example)
  PAR
    ... process using thing
    ... another process using thing
    ... another process using thing
    ... another process using thing
    ... etc.
Within each `process using thing`, each use must be protected within a claim and release:
SEQ
  claim.semaphore (thing.s)
  ... now use thing
  release.semaphore (thing.s)
[Note: in the literature, claim is sometimes referred to as wait (or P) and release is sometimes called signal (or V).]
3.3 occam3 SHARED channels (via SEMAPHOREs)
The main motivation for implementing SEMAPHOREs is to support the occam3 SHARED channel [7]. This is a language construct to describe client-server applications, where multiple clients compete for exclusive access to a single server. In occam2, this has to be implemented through an array of channels (or channel-pairs for two-way interaction) over which the server performs a fair ALT. The problems with this are:
- the array of channels has to be declared and made visible to the server, which means that the number of clients has to be known at the point where the server is installed;
- the computational complexity of the server ALT is $O(n)$, where $n$ is the number of clients. For not very large $n$, especially in a hard real-time application, this can become prohibitive.
On the other hand, with the SEMAPHORE implementation of a SHARED channel:
- there is a fixed-sized space overhead (3 words), regardless of the number of clients;
- the computational complexity of setting up and closing down each client-server transaction is $O(1)$ – i.e. independent of the number of clients and the same order of magnitude as an ordinary context switch (sub-microsecond);
- the server does not know that the client end is SHARED – it sees an ordinary channel (or channel-pair). This means that a server may ALT over a set of SHARED (or ordinary) channels using normal mechanisms.
### 3.3.1 Client transactions over a SHARED channel
An occam3 SHARED channel (or channel-pair) connects any number of client processes
with a server process. To use the SHARED channel, the client process must first claim it:
```
CLAIM c
  ... use any of the channels within c
```
occam3 has a CHAN TYPE structure that allows us to group a collection of channels (each
with differing PROTOCOLs and directions of use) as fields in a record:
```
CHAN TYPE CONNECT       -- CONNECT is the user-chosen name for this channel type
  RECORD
    CHAN OF REQUEST request:
    CHAN OF REPLY reply:
:
SHARED CONNECT c:
```
So, a typical transaction might look like:
```
CLAIM c
  --{{  use any of the channels within c
  SEQ
    c[request] ! some.request
    c[reply] ? some.reply
    ...  follow-up questions and answers
  --}}
```
Note that any attempted use of $c$ outside a CLAIM body would be jumped on by the
compiler. occam3 also forbids any synchronisation attempts inside the CLAIM body other
than those involving $c$. In particular, a process is not allowed to accumulate resources
through nested CLAIMs (which eliminates the classic danger of deadlock through partially
acquired resources).
3.3.2 *SHARED channels in occam2.1 (client end)*
In *occam2.1*, the channel components need to be declared separately, together with a controlling semaphore:
CHAN OF REQUEST c.request:
#PRAGMA SHARED c.request -- suppress parallel usage checking
CHAN OF REPLY c.reply:
#PRAGMA SHARED c.reply -- suppress parallel usage checking
SEMAPHORE c.s:
#PRAGMA SHARED c.s -- suppress parallel usage checking
The client transaction becomes:
SEQ
  claim.semaphore (c.s)
  --{{  use any of the channels within c
  SEQ
    c.request ! some.request
    c.reply ? some.reply
    ...  follow-up questions and answers
  --}}
  release.semaphore (c.s)
3.3.3 *Server transactions over a SHARED channel*
At the server end, *occam3* establishes the client connection with an explicit:
GRANT c
  ... use any of the channels within c
As for the clients, the server is not allowed to use c outside a GRANT body and any attempt would be disallowed by the compiler. However, servers are allowed to make further synchronisations (e.g. CLAIMs or other GRANTS) within a GRANT body.
In this example, a transaction matching the client CLAIM might be:
GRANT c
  --{{  use any of the channels within c
  ... local declarations
  SEQ
    c[request] ? some.request
    ...  compute the correct response
    c[reply] ! some.reply
    ...  follow-up questions and answers
  --}}
3.3.4 \textit{SHARED channels in occam2.1 (server end)}
The \textit{occam2.1} implementation for this \texttt{GRANT} is null. It simply maps to:
\begin{verbatim}
--{{  use any of the channels within c
... local declarations
SEQ
  c.request ? some.request
  ...  compute the correct response
  c.reply ! some.reply
  ...  follow-up questions and answers
--}}
\end{verbatim}
Note that, provided each \texttt{CLAIM} opens with a communication to the server, \texttt{ALT}ing between the \texttt{SHARED} channel and any other \texttt{ALT} guard (\texttt{SHARED} or not) is immediately possible by the server. If the transaction opens with a communication in the other direction, a dummy request will need to be added to allow the server to \texttt{ALT}.
Finally, some transaction bodies may contain no communications at all! For example, the server may be a \texttt{SHARED} signal-handler (where the signal is raised by a client simply making a \texttt{CLAIM} with a \texttt{SKIP} body). In this case again, a dummy request will need to be added to synchronise the client with the server.
3.4 \textit{Dining Philosophers (via SEMAPHOREs)}
3.4.1 The classical \textsl{occam} model
Sometimes, \texttt{SEMAPHORE}s can be used to represent objects in their own right. For example, the forks in Dijkstra's classic Dining Philosophers system are simply binary \texttt{SEMAPHORE}s shared by the two philosophers whose place settings are on either side of the fork. In classic \textsl{occam}, they are simply modelled by a fork process such as:
\begin{verbatim}
PROC fork (CHAN OF BOOL left, right)
  WHILE TRUE
    ALT
      BOOL any:
      left ? any                -- philosopher left picks up fork
        left ? any              -- philosopher left puts down fork
      BOOL any:
      right ? any               -- philosopher right picks up fork
        right ? any             -- philosopher right puts down fork
:
\end{verbatim}
Similarly, the security guard (or butler), who only allows into the dining room up to four philosophers at a time, is a counting semaphore initialised to four. In classic \textsl{occam}, this is modelled:
\begin{verbatim}
PROC security ([5]CHAN OF BOOL down, up)
  VAL BYTE max IS 4:
  INITIAL BYTE n.sat.down IS 0:
  WHILE TRUE
    ALT i = 0 FOR 5             -- should be a 'fair' ALT
      ALT
        --{{  philosopher i wants to sit down
        BOOL any:
        (n.sat.down < max) & down[i] ? any
          n.sat.down := n.sat.down + 1
        --}}
        --{{  philosopher i wants to stand up
        BOOL any:
        up[i] ? any
          n.sat.down := n.sat.down - 1
        --}}
:
\end{verbatim}
A philosopher interacts with two forks and the security guard:
PROC philosopher (CHAN OF BOOL left, right,    -- forks
                  CHAN OF BOOL down, up)       -- security guard
  WHILE TRUE
    SEQ
      ...  think-a-while
      --{{  get past the security guard
      down ! TRUE
      --}}
      --{{  pick up the forks
      PAR
        left ! TRUE
        right ! TRUE
      --}}
      ...  eat-a-while
      --{{  put down the forks
      PAR
        left ! TRUE
        right ! TRUE
      --}}
      --{{  notify security you have finished
      up ! TRUE
      --}}
:
The college consists of 5 philosophers, 5 forks and the security guard:
PROC college ()
  [5]CHAN OF BOOL left, right, down, up:
  PAR
    security (down, up)
    PAR i = 0 FOR 5
      PAR
        philosopher (left[i], right[i], down[i], up[i])
        fork (left[i], right[(i + 1)\5])
:
3.4.2 The occam2.1 model using SEMAPHOREs
With real SEMAPHOREs, there is no need for the fork and security processes:
```
PROC college ()
  [5]SEMAPHORE fork:
  #PRAGMA SHARED fork           -- suppress parallel usage checking
  SEMAPHORE security:
  #PRAGMA SHARED security       -- suppress parallel usage checking
  SEQ
    SEQ i = 0 FOR 5
      initialise.semaphore (fork[i], 1)   -- exclusive use
    initialise.semaphore (security, 4)    -- allow four at a time
    PAR i = 0 FOR 5
      philosopher (fork[i], fork[(i + 1)\5], security)
:
```

where the philosopher still interacts with two forks and the security guard:

```
PROC philosopher (SEMAPHORE left, right,    -- forks
                  SEMAPHORE security)       -- security guard
  WHILE TRUE
    SEQ
      ...  think-a-while
      --{{  get past the security guard
      claim.semaphore (security)
      --}}
      --{{  pick up the forks
      PAR
        claim.semaphore (left)        -- pick up left fork
        claim.semaphore (right)       -- pick up right fork
      --}}
      ...  eat-a-while
      --{{  put down the forks
      PAR
        release.semaphore (left)      -- put down left fork (no wait)
        release.semaphore (right)     -- put down right fork (no wait)
      --}}
      --{{  notify security you have finished
      release.semaphore (security)
      --}}
:
```
The SEMAPHORE implementations of the forks and security guard give us *fair* sharing. No philosopher can get locked out indefinitely by un-thinking neighbours racing back to the dining room and grabbing forks. The SEMAPHORE implementations don’t need programming; they just need initialising! They give us more functionality and execute far faster than the original processes. However, their use in the above has nothing to do with occam3 SHARED channels, so such application requires care.
One common problem solved by \texttt{SHARED} channels is multiplexing data streams to single devices. For example, when animating the behaviour of a network of processes (for diagnostic or demonstration purposes), we want to print information to some display or file. Writing, installing and wiring up the necessary multiplexors to route the information coming from all the processes under inspection can be daunting ... we can't just put in print statements!
Or, at least, that used to be the case!! By making, for example, the screen channel \texttt{SHARED}, we can \textit{just-put-in-print-statements} and we can do it within any number of parallel processes and have full control over the atomicity of any particular message.
The dining philosophers' college (from the previous section) will compile and run without deadlock, but is somewhat unexciting to watch -- all the action is internal and we can't see it. The following modification instruments the college with a single reporting channel that is \texttt{SHARED} by all the philosophers:
\begin{verbatim}
PROC college (CHAN OF BYTE screen)
  #PRAGMA SHARED screen
  SEMAPHORE screen.s:
  #PRAGMA SHARED screen.s
  [5]SEMAPHORE fork:
  #PRAGMA SHARED fork
  SEMAPHORE security:
  #PRAGMA SHARED security
  SEQ
    SEQ i = 0 FOR 5
      initialise.semaphore (fork[i], 1)   -- exclusive use
    initialise.semaphore (security, 4)    -- allow four at a time
    initialise.semaphore (screen.s, 1)    -- exclusive use
    PAR i = 0 FOR 5
      philosopher (i, fork[i], fork[(i + 1)\5], security, screen, screen.s)
:
\end{verbatim}
Each philosopher can now report its state, at any time, to the screen channel (provided, of course, it remembers to claim and release the guarding \texttt{SEMAPHORE} correctly). A full version of this code, together with a much more exciting animation, may be found on:
\begin{verbatim}
<URL:http://www.hensa.ac.uk/parallel/occam/projects/occam-for-all/hlps/>
<URL:ftp://unix.hensa.ac.uk/pub/parallel/occam/projects/occam-for-all/hlps/>
\end{verbatim}
The animation was designed by a second year undergraduate (Nick Hollands) at Kent. The system contains some 52 processes (42 to 47 simultaneously active), with 25 driving the screen via a single \texttt{SHARED} channel.
4 EVENTS
4.1 Barrier synchronisation
4.1.1 SPMD barriers
Barrier synchronisation is a common primitive in many models of parallel computing – in some cases, it is an essential element. In SIMD parallelism, there is global synchronisation between all processors after every instruction. In the slightly more flexible SPMD model, there is still just one barrier on which all processors synchronise; however, the point at which synchronisation takes place is application dependent and has to be programmed explicitly (and, usually, cyclically).
For example, SPMD parallelism has the general form:
```plaintext
... shared global data
PAR i = 0 FOR n.processors
  WHILE TRUE                       -- one identical serial process per processor
    SEQ
      ... do something
      SYNC                         -- barrier: wait for all processors to get here
```
occam already imposes an implicit barrier synchronisation at the end of each PAR construct. This can be exploited to obtain the above model by moving the external PAR inside the serial control structure:
```plaintext
... shared global data
WHILE TRUE
  PAR i = 0 FOR n.processors
    ... do something
```
We now have a loop of parallel processes, each of which has to terminate, instead of a parallel set of loops that have to synchronise once per cycle. This would be disadvantageous if the start-up/shut-down overheads for parallel processes were large in comparison to their compute times, but this would not normally be the case for occam. A more serious problem arises if the processors use local state that has to survive the barrier:
```plaintext
... shared global data
PAR i = 0 FOR n.processors
  INITIAL INT x IS 0:              -- local state
  WHILE TRUE
    SEQ
      ... do something
      SYNC
```
This is, of course, a very common requirement. Bringing the parallelism inside the loop forces the set of local states into the global data space:
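For concreteness, a minimal sketch of the resulting arrangement (the array name x is ours):
```
... shared global data
[n.processors]INT x:               -- per-process state, now globally visible
SEQ
  ... initialise each x[i]
  WHILE TRUE
    PAR i = 0 FOR n.processors
      ... do something (using x[i])
```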
Such an arrangement is not very pretty and threatens unnecessary run-time overhead! It also breaks the natural object-oriented encapsulation of local state that occam processes normally provide.
So, we need to introduce the explicit SYNC primitive to regain simplicity.
4.1.2 MIMD named barriers
However, occam is a MIMD parallel language and we don’t want to be constrained by
SPMD thinking. In particular, we want to obtain a structured and dynamic form of
barrier synchronisation. For example, we want to allow our system to be composed
of multiple sets of processes, each set with its own local barriers. We also want the
flexibility of allowing the number of processes synchronising on any particular barrier
to grow and shrink at run-time.
To achieve this, we need to be able to name barriers and associate them with particular
sets of processes. A named barrier is simply a CSP event and its association with a set
of processes is just its inclusion in their alphabets. Barrier synchronisation is event
synchronisation, but with the restriction that processes cannot use it as a guard in a
choice operator (i.e. an ALT guard in occam terms). There is no semantic problem in
allowing the number of processes interested in an event to change dynamically.
There is no pragmatic problem either for an extended occam:
```
... shared global data
PAR i = 0 FOR n.processors EVENT e
  INITIAL BOOL running IS TRUE:
  WHILE running
    SEQ
      ... do something
      SYNC e                       -- named barrier
```
The named EVENT is declared explicitly by the above PAR construct¹. This is the only place where EVENTs can be declared. The EVENT is automatically in the alphabet of all components of the PAR (which means that when one component SYNCs on it, all components have to SYNC on it).
Since the EVENT is named, it can be passed as a parameter to PROCs that are called in the body of the declaring PAR. Among other benefits, this allows the separate compilation of processes that are later instanced to synchronise on any EVENTs the installer chooses.
¹ Declaring items in constructors already takes place in occam – for example, the control value i in this PAR. Some people say that channel declarations ought to be similarly bound to individual PARs.
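For illustration only, a separately compiled process might take the barrier as a parameter like this (worker is our own name, not part of the release):
```
PROC worker (VAL INT id, EVENT e)
  INITIAL BOOL running IS TRUE:
  WHILE running
    SEQ
      ... do something (possibly clearing running)
      SYNC e                       -- synchronise on the barrier passed in
:

... shared global data
PAR i = 0 FOR n.processors EVENT e
  worker (i, e)
```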
Note that processes sharing the same EVENT may terminate at different times – as can happen in the above example. Terminated processes do not block the barrier SYNCs of those that are still running. An elegant application of this principle is given later.
Since this is occam, we are not restricted to the replicated PAR of SPMD. For example, the following system has three different processes synchronising on the named barrier:
```plaintext
... shared global data
PAR EVENT e
  ... process A
  ... process B
  ... process C
```
We can also have different groups of processes synchronising on different barriers:
```plaintext
... shared global data
PAR
  PAR EVENT e
    ... process A
    ... process B
    ... process C
  PAR EVENT f
    ... process P
    ... process Q
    ... process R
```
4.1.3 Dynamic enrollment in barriers
Finally, an EVENT synchronising process may contain parallel sub-processes. Normal scoping rules imply that the sub-processes can see the EVENT and may, therefore, SYNC on it. A logical policy would be to say that the number of processes taking part in the barrier automatically grows for the duration of those sub-processes. However, it is more flexible to be able to specify which sub-processes include the existing EVENT in their alphabet (and are, therefore, obliged to SYNC if the barrier represented by the EVENT needs to be overcome during their lifetime). There are two ways to get this: introduce either a hiding operator or an enrolling operator into the PAR construct. There are arguments both ways but, for now, we prefer the positive approach:
```plaintext
... shared global data
PAR i = 0 FOR n.processors EVENT e
  IF
    need.more.parallelism (i)
      PAR j = 0 FOR more ENROLL e
        ... inner processes can (and, probably, better had) SYNC on e
    TRUE
      INITIAL BOOL running IS TRUE:        -- the original code
      WHILE running
        SEQ
          ... do something
          SYNC e                           -- named barrier
```
Alternatively, components of inner PARs may be enrolled individually in an outer EVENT:
```plaintext
... shared global data
PAR i = 0 FOR n.processors EVENT e
  IF
    need.more.parallelism (i)
      PAR
        ENROLL e
          ... process A (includes e in its alphabet)
        ENROLL e
          ... process B (includes e in its alphabet)
        ... process C (does not include e in its alphabet)
    TRUE
      INITIAL BOOL running IS TRUE:        -- the original code
      WHILE running
        SEQ
          ... do something
          SYNC e                           -- named barrier
```
Processes A and B in the above have to partake in SYNC e, but process C does not and cannot! Explicit enrollment means that un-enrolled EVENTS are automatically hidden from sub-components of the PAR and that any attempt to SYNC on them would be rejected by the compiler.
Finally, we note the equivalence in Figure 1.
[Figure 1: enrollment of an EVENT across an inner PAR (diagram)]
4.2 EVENT Abstract Data Type
The current KRoC release implements:
- **DATA TYPE EVENT**
Users may declare their own EVENT variables and pass them as reference parameters. They should only be declared in association with the PAR construct that sets up the processes that synchronise on them. EVENTs must not be duplicated by assignment or communication through channels.
Figure 2 (the occam2.x form appears on the left of the original table, with its occam2.1 realisation on the right) covers three cases; they are laid out here one after the other.

SPMD barrier -- occam2.x:

```
... shared global data
PAR i = 0 FOR n.processors EVENT e
  INITIAL BOOL running IS TRUE:
  WHILE running
    SEQ
      ... do something
      SYNC e                       -- named barrier
```

SPMD barrier -- occam2.1:

```
... shared global data
EVENT e:
#PRAGMA SHARED e
SEQ
  initialise.event (e, n.processors)
  PAR i = 0 FOR n.processors
    SEQ
      INITIAL BOOL running IS TRUE:
      WHILE running
        SEQ
          ... do something
          synchronise.event (e)
      resign.event (e)
```

MIMD barrier -- occam2.x:

```
... shared global data
PAR EVENT e
  ... process A
  ... process B
  ... process C
```

MIMD barrier -- occam2.1:

```
... shared global data
EVENT e:
#PRAGMA SHARED e
SEQ
  initialise.event (e, 3)
  PAR
    SEQ
      ... process A
      resign.event (e)
    SEQ
      ... process B
      resign.event (e)
    SEQ
      ... process C
      resign.event (e)
```

Dynamic enrollment -- occam2.x:

```
PAR ENROLL e
  ... process A
  ... process B
  ... process C
```

Dynamic enrollment -- occam2.1:

```
enroll.event (e, 2)                -- before the PAR
PAR
  SEQ
    ... process A
    resign.event (e)
  SEQ
    ... process B
    resign.event (e)
  SEQ
    ... process C
    resign.event (e)
enroll.event (e, 1)                -- after the PAR
```

Figure 2: Mappings to occam2.1 from SPMD, MIMD and dynamic barriers
- PROC initialise.event (EVENT e, VAL INT count)
Each EVENT must be initialised with this routine before starting the associated PAR construct. The count value is the number of processes in that PAR.
- PROC resign.event (EVENT e)
Each process in the associated PAR construct must execute this routine just before it terminates.
- PROC synchronise.event (EVENT e)
This may be called by any process in the associated PAR construct. The calling process will be blocked until all its sibling processes (in the PAR) have also called it or have resigned.
- PROC enroll.event (EVENT e, VAL INT count)
This needs to be called before and after nested PAR constructs, whose components are being enrolled on the EVENT. Before the PAR, the count value is one less than the number of components being enrolled. After the PAR, the count is one.
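Purely as an illustration of that counting rule (the component names are ours), enrolling three components of a nested PAR on an existing EVENT e might look like:
```
enroll.event (e, 2)                -- before the PAR: 3 components - 1
PAR
  SEQ
    ... component 0
    resign.event (e)
  SEQ
    ... component 1
    resign.event (e)
  SEQ
    ... component 2
    resign.event (e)
enroll.event (e, 1)                -- after the PAR: re-enroll the enclosing process
```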
4.3 Implementation of proposed barriers
Barrier synchronisation is normally intended to support physical concurrency. The syntax and semantics with which we have been experimenting are aimed at (virtual) shared-memory multi-processors. The current implementation is only for uni-processors, where event synchronisation is still a powerful tool for the management of processes.
Figure 2 shows the occam 2.1 implementation of these barriers via the EVENT primitives. Examples are given for SPMD, MIMD and for the dynamic enrollment of new processes in an existing barrier.
However, there is one loose end that needs to be nailed down! The last enrolled sub-process to resign should not really do so (as this may complete an external barrier that is not warranted). The last resignation should be folded with the subsequent re-enrollment and nothing should happen. With this prototype implementation, we don’t know when resigning whether we are the last resignation. With an implementation via the run-time kernel, we will have this information and the loose end can be tied.
4.4 A simple example
The following is a complete KRoC occam 2.1 program demonstrating a simple SPMD network of processes synchronising on an EVENT barrier. Each process is cyclic, waiting for a varying amount of time before synchronising once per cycle. Each process is a client of the SHARED screen channel, which is protected by a SEMAPHORE. Each process announces to the screen when it tries to synchronise and when it succeeds in synchronising.
```plaintext
#INCLUDE "semaphore.inc"
#INCLUDE "event.inc"
#USE "utils" -- in the course directory of the KRoC release

PROC event.test (CHAN OF BYTE keyboard, screen, error)
  #PRAGMA SHARED screen
  SEMAPHORE screen.s:
  #PRAGMA SHARED screen.s

  PROC client (VAL INT id, n.clients, EVENT e,
               SEMAPHORE out.s, CHAN OF BYTE out)
    INT n:
    SEQ
      n := id
      WHILE TRUE
        SEQ
          --{{  wait n seconds
          VAL INT seconds IS 1000000:
          TIMER tim:
          INT t:
          SEQ
            tim ? t
            tim ? AFTER t PLUS (n*seconds)
          --}}
          --{{  say ready to synchronise
          SEQ
            claim.semaphore (out.s)
            out.number (id, 0, out)
            out.string (" ready to synchronise*c*n", 0, out)
            release.semaphore (out.s)
          --}}
          synchronise.event (e)
          --{{  tell the world
          SEQ
            claim.semaphore (out.s)
            out.string ("==*c*n", 40, out)
            out.number (id, 0, out)
            out.string (" over the barrier ...*c*n", 0, out)
            release.semaphore (out.s)
          --}}
          n := (n.clients + 1) - n       -- simple variation for the timeout
  :

  VAL INT n.clients IS 10:
  EVENT e:
  #PRAGMA SHARED e
  SEQ
    initialise.semaphore (screen.s, 1)
    initialise.event (e, n.clients)
    PAR n = 0 FOR n.clients
      client (n + 1, n.clients, e, screen.s, screen)
:
```
It is hard to resist (and we don’t) turning this into higher-level occam2.x:
```
#USE "utils"
PROC event.test (CHAN OF BYTE keyboard, SHARED CHAN OF BYTE screen, error)
PROC client (VAL INT id, n.client, EVENT e, SHARED CHAN OF BYTE out)
INT n:
SEQ
n := id
WHILE TRUE
SEQ
... wait n seconds
--{{ say ready to synchronise
CLAIM out
SEQ
out.number (id, 0, out)
out.string (" ready to synchronise*c*n", 0, out)
--}}
SYNC e
--{{ tell the world
CLAIM out
SEQ
out.string ("=>", 40, out)
out.number (id, 0, out)
out.string (" over the barrier ...c*n", 0, out)
--}}
n := (n.client + 1) - n -- simple variation for the timeout
VAL INT n.client IS 10:
PAR n = 0 FOR n.client EVENT e
client (n + 1, n.client, e, screen)
```
The point is not the modest reduction in code length but the absence of special #PRAGMAs, explicit SEMAPHOREs and explicit SEMAPHORE and EVENT initialisation. It is these absences, the automatic initialisation (by the compiler) of all necessary primitives, the prevention (by the compiler) of any attempted EVENT duplication via assignment or communication, and the compiler-enforced use of the CLAIM mechanism for the SHARED channel that make the high-level bindings secure and, hence, very desirable.
4.5 A shared accumulator (and implicitly parallel recursive lazy functional occam)
occam2 has a formal denotational semantics in terms of the traces, failures and divergences model of CSP [8, 9]. This can be extended, in a natural way, to cover all the language extensions discussed here.
Otherwise, the state-of-the-art in parallel languages is somewhat bleak. There are no formal semantics on offer for lightweight threads libraries (even though they are becoming standardised) nor for Java threads (where, at least, the concept is bound into the language). Indeed, it is very difficult to find even informal descriptions – their semantics are mainly given by example.
Reference [10] describes ParC, a language extension for C giving explicit parallelism and synchronisation (with primitives not too far from those in the extended occam). In a section titled The Meaning of Parallelism, after analysing the issues for nearly one page, it reaches the following depressing conclusion:
“As a consequence, distinct execution of the same program may lead to different results, and even to different behaviours. For example, one execution may spawn many more activities than another, or one execution may terminate with a result while another enters an infinite loop. It is therefore impossible to specify the exact semantics of ParC programs. In the absence of formal semantics, we make do with a set of rules to guide the implementation of a ParC system.”
Such conclusions give no optimism for a sound engineering basis for parallel computing and raise fundamental questions as to its viability. Fortunately, the scientific insights of occam and CSP, along with those of BSP, are becoming available to a wider audience. Unfortunately, resistance to the concept of sound engineering is hard to overestimate in today’s mainstream computer culture.
Anyway, the example in this section is taken from [10] but expressed now in terms of occam. The problem is to add up the elements of an array in O(log n) time.
4.5.1 Natural barrier solution
The first solution is expressed in only modestly upgraded occam, making use of the natural barriers marking the termination of PAR constructs:
```
PROC sum ([]INT A)                 -- with implicit barrier synchronisation
  -- assume : (SIZE A) = (2**N), for some N >= 0
  -- spec   : A'[0] = SIGMA {A[i] | i = 0 FOR SIZE A}
  INITIAL INT n IS SIZE A:         -- INVARIANT:
  INITIAL INT stride IS 1:         --   (n*stride) = (SIZE A)
  WHILE n > 1
    SEQ
      n, stride := n >> 1, stride << 1
      PAR i = 0 STEP stride FOR n
        A[i] := A[i] + A[i + (stride >> 1)]    -- accumulate senior neighbour
:
```
The modest extensions are the INITIALising declarations (of occam3 and used previously), the STEP in the replicator (very convenient for numeric algorithms and straightforward to implement) and the variable number of replications in the PAR construct (no semantic problem, but a serious implementation one, although a unified virtual shared-memory address space simplifies matters considerably).
The algorithm is simple. In the first loop, a process is spawned for every even element in the array (stride is 2 and n is half the array size); each process adds into its element the value of its odd (senior) neighbour. In the second loop, a process is spawned for every fourth element (stride = 4), each of which accumulates the contents of the neighbour two elements (i.e. half-a-stride) above it; every fourth element now holds the sum of all the elements within its stride. This continues for log(n) cycles until stride reaches the size of the array (and n drops to 1); after which A[0] holds the complete sum and the loop terminates.
A slight drawback is that the array size must be a power of two. Other sizes could be handled but the simplicity of the code would be damaged.
Parallel security is easy to establish. Each parallel process updates element A[i], where each i is different (so no race-hazard there). The process updating A[i] uses the value in an element half-a-stride away. But no other process is looking at these half-stride values since they are all separated by a stride (so no race-hazard there). QED. Getting such proofs checked mechanically (e.g. by the compiler) looks possible and will ultimately be necessary.
Of course, the fine granularity of the parallelism in the above example would scuttle any hoped-for performance gain from current parallel architectures, although future designs (such as the ParaPC [11][12]) could lap it up. In the meantime, the example serves as a model for combining operations that are computationally more intensive than addition and that current architectures may be able to exploit.
4.5.2 Explicit barrier solution
The second solution commutes the PAR and the WHILE constructs, catering for those who fear the costs of starting up and shutting down processes. The result is a conventional SPMD algorithm with explicit barrier synchronisation, which occam can now comfortably express:
```
PROC sum ([]INT A)                 -- with explicit barrier synchronisation
  -- assume : (SIZE A) = (2**N), for some N >= 0
  -- spec   : A'[0] = SIGMA {A[i] | i = 0 FOR SIZE A}
  PAR i = 0 FOR SIZE A EVENT e
    INT accumulate IS A[i]:
    INITIAL INT n IS (i = 0) -> SIZE A, i:
    INITIAL INT stride IS 1:
    WHILE (n /\ 1) = 0             -- even (n)
      SEQ
        accumulate := accumulate + A[i + stride]
        n, stride := n >> 1, stride << 1
        SYNC e
:
```
We have sneaked conditional expressions into the language (since they simplify one of the initialising declarations above). The syntax used is just that already implemented for the occam configuration language in the SGS-Thomson Toolset:
```
boolean-expression -> expression, expression
```
This simply yields the first or second expression, depending on the value of the boolean-expression. Both expressions must, of course, yield the same type. Note that occam2 already can express the above:
```
[expression, expression][INT boolean-expression]
```
although it is not quite so understandable! The order of the expressions must be reversed since FALSE and TRUE map (under INT) to 0 and 1 respectively. Also, the current implementation will evaluate both expressions at run-time before discarding the unselected one (unless boolean-expression is constant).
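For example, the initialising declaration used in sum above could be written in today's occam2.1 as follows (our own transcription of the rule):
```
INITIAL INT n IS [i, SIZE A][INT (i = 0)]:   -- i.e. (i = 0) -> SIZE A, i
```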
The conditional expression is a distraction ... forget it! Please compare the new version of sum with the old. This time, processes are set up once for each element of the array (abbreviated locally to accumulate). The odd processes immediately terminate. That leaves the even processes adding their odd (senior) neighbours to themselves, exactly as before. At the end of each loop, the active processes synchronise on the barrier and half of them drop out. Recall that this does not prevent the remaining processes synchronising on their next loop. Event synchronisations continue until there is only one process left, which accumulates the final answer into A[0] and terminates. There are now no processes left, so the outer PAR construct terminates and the PROC returns.
Security against race-hazards is somewhat harder to prove than before. An elegant way to establish this would be to find some semantic-preserving transformations (that can be mechanised) to change the first version into the second. This is left as an exercise for the reader.
One optimising transformation on the second version that is too tempting to resist is as follows. Since the odd processes terminate without ever doing anything, don’t set them up in the first place! This is achieved simply by changing the PAR constructor:
```
PROC sum ([]INT A)                 -- with explicit barrier synchronisation
  ... same specification
  PAR i = 0 STEP 2 FOR (SIZE A) >> 1 EVENT e
    ... same replicated process (but only half as many of them!)
:
```
4.5.3 Recursive solution with only local synchronisations
Finally, for those who find barrier synchronisation a little unnatural, here is a taste of some much wilder ideas. The following code also sums its array in O(log n) time but:
- is (first order) functional, relying on the compiler and run-time system to extract the parallelism that is always implicit in occam expressions (which are free from side-effects and can, therefore, be executed in any order or concurrently);
- is recursive – however, implementation techniques that enable variable PAR replication also enable recursion;
- has no global synchronisations, only local synchronisations implied by (add) operators requiring the (two) processes computing their operands to terminate before they can operate;
- is efficient in that the (parallel) execution tree for small array fragments has been preset by standard loop unrolling – however, the implementation of table lookup at run-time needs to be made lazy for the way we have chosen to express the unravelled loop to work sensibly;
- has automatic parallel security, derived from the semantics of occam expressions;
- handles arrays of any size – not just powers of two.
It’s also pretty neat:
```occam
INT FUNCTION sum (VAL []INT A) IS
  -- spec : returns SIGMA {A[i] | i = 0 FOR SIZE A}
  ((SIZE A) <= 8) ->
    [A[0],                                              -- table of partial sums,
     A[0] + A[1],                                       -- unrolled for sizes 1..8
     A[0] + A[1] + A[2],
     A[0] + A[1] + A[2] + A[3],
     A[0] + A[1] + A[2] + A[3] + A[4],
     A[0] + A[1] + A[2] + A[3] + A[4] + A[5],
     A[0] + A[1] + A[2] + A[3] + A[4] + A[5] + A[6],
     A[0] + A[1] + A[2] + A[3] + A[4] + A[5] + A[6] + A[7]] [(SIZE A) - 1],
    sum ([A FOR (SIZE A) >> 1]) + sum ([A FROM (SIZE A) >> 1]):
```
Now, all we need is a ParaPC on which to run it.
5 Implementation overview and platform independence
The new primitives have been introduced as abstract data types and with no changes to the KRoC kernel. This means they will automatically run on any KRoC system.
The routines operating on the primitives are programmed as INLINE PROCs and use transputer ASM blocks. This means that, with certain restrictions, they will also run on real transputers (using standard occam Toolsets).
5.1 SEMAPHOREs
A SEMAPHORE is an occam2.1 RECORD with three fields: one holds a count and the others hold front and back pointers to a process queue. Processes are held on this queue using the same workspace link fields that hold them on the run-queue (a process can never be on a SEMAPHORE-queue and the run-queue at the same time). This means that no space needs to be reserved to manage this queue (other than the front and back pointers).
A process claiming a SEMAPHORE will be put on its queue (and blocked) if its count is zero. Otherwise the count is decremented.
A process releasing a SEMAPHORE will re-schedule the first process from its queue if that queue is not null. Otherwise, it increments the count.
Both these operations work in constant time.
5.1.1 Transputer restrictions
Scheduling of processes on and off the run-queue is managed using the normal transputer scheduling instructions – run-queue registers are not modified directly. This means they will be secure on a transputer even in the presence of high-priority process pre-emption (caused by transputer link, event or timer interrupts).
However, manipulation of the SEMAPHORE queues themselves (or their counters) can be corrupted by process pre-emption. No danger arises if all processes sharing the same SEMAPHORE also share the same transputer priority. Otherwise, the low-priority processes must protect their claims and releases by first popping into high-priority – for example:
```
PRI PAR
  claim.semaphore (s)
  SKIP
```
T9000 and ST20-derived transputers have extended instruction sets that include SEMAPHORE operations providing claim.semaphore and release.semaphore semantics (the wait and signal instructions respectively). If asked, we can provide a version of the SEMAPHORE abstract data type that exploits them (and the above work-around will not be necessary).
5.2 EVENTs
An EVENT is an occam2.1 RECORD with four fields: two integer counts (for the number of processes registered and the number that have not yet synchronised) and the front and back pointers to a process queue.
A process synchronising on an EVENT decrements its synchronisation count. If this has not reached zero, it attaches itself to the EVENT-queue and blocks. Otherwise, it releases all the processes on the EVENT-queue (by simply concatenating it on to the run-queue) and resets the synchronisation count back to the number currently registered. This is a constant time operation.
A process resigning an EVENT decrements both its registration count and its synchronisation count. If the latter has reached zero, it releases all the processes on the EVENT-queue (just like a synchronising process) and resets the synchronisation count. Again, this is a constant time operation.
A process enrolling on an EVENT just increments the registration and synchronisation counts equally. No rescheduling of processes takes place.
5.2.1 Transputer restrictions
Transputers have no atomic (non-preemptable) instructions for concatenating a process queue on to a run-queue. Therefore, to implement EVENT operations, we have been updating run-queue registers through a sequence of instructions. Transputer link, event and timer interrupts may preempt this sequence and try to schedule processes on to the same run-queue – with bad consequences!
Therefore, our EVENTs may be used safely on transputers provided:
- only low-priority processes use EVENTs and they protect synchronise.event and resign.event operations by popping into high-priority;
- only high-priority processes handle transputer link, event or timer interrupts. [NB: the term transputer event refers to the electronic assertion of its event pin by some external device, which has nothing to do with the EVENT primitive in this document.]
These are not severe restrictions – the second point above should properly be a design rule in any case. Of course, if the application had no need for interrupt handling (which implies that it is uni-processor), the first point can be ignored (unless, of course, the EVENT is shared between high and low priority processes).
6 Discussion
6.1 Summary
This document has described some new synchronisation primitives for occam and some higher-level language bindings that make them secure. The new primitives are directly usable within occam2.1 through the abstract data types released from KRoC 0.8beta onwards. The primitives complement, but do not replace, the traditional concept of channel communication for networks of synchronising processes. In particular, they provide an implementation for SHARED channels (as proposed for occam3), as well as a range of higher-level and relaxed forms of safely SHARED resource that will allow more parallelism to be extracted from applications. Details of these higher-level mechanisms for sharing will be reported separately.
6.2 Performance and optimisation
The released primitives have not yet been benchmarked, but we believe that none of the operations cost more in time than about two or three context-switches (i.e. between one and two micro-seconds on a SPARC-20). For portability, the prototypes have not been burnt into the KRoC kernel, but have been implemented with transputer instructions (which KRoC uses as an abstract intermediate code). It is straightforward to move the implementation of the primitives into the KRoC kernel and this will be done later for those that prove useful. This should reduce the overheads for each operation towards a single context-switch. It will also nail down the loose end currently exposing a minor security problem in EVENTs (when the number of processes engaged in a barrier synchronisation grows temporarily).
6.3 Virtual transputers
Working with virtual transputers has its benefits over working with real ones: modern micro-processors mean they run faster and we can experiment with new instructions with relative ease. For example, to move the primitives into the kernel, the abstract (or virtual) transputer machine used by KRoC will have to be extended with new instructions for the manipulation of process queues. We want to do this anyway for other reasons – such as a full implementation of PRI PAR (that allows any number of prioritised components).
When KRoC goes multi-priority and multi-processor, we want no constraints on our use of the primitives (such as are necessary with the existing microcode on real transputers). With control over the virtual architecture, this should be possible (and inexpensive) to arrange.
The long-term (medium-term?) architectural goal, for which the new primitives would give major benefit, is shared-memory (real or virtual) multi-processing. To support this further, we are investigating a richer form of EVENT that implements the contradictory-sounding non-blocking barrier synchronisation, recently proposed for MPI-2. This is a two-phase event synchronisation, where the first phase registers that the process is ready to synchronise (but doesn’t block) and the second phase does block (unless and until all the other processes in the barrier have registered their first phase). With the right algorithm, processes may be able to sail through most such barriers without ever blocking!
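As a sketch only (register.event and complete.event are hypothetical names, not part of any release), such a two-phase barrier might be used like this:
```
register.event (e)         -- phase one: announce readiness, do not block
... useful work that does not depend on the barrier
complete.event (e)         -- phase two: block only if some process has not yet registered
```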
We also want to extend the virtual instruction set to provide type information that will allow KRoC to target Java Byte Code. Additionally, this type information makes possible a Transputer Byte Code verifier that enables the distribution of occam processes as compact binaries (occlets) with the same (or better) object-level security as Java. Note that KRoC translates these Byte Codes to target object code for execution – it doesn’t interpret them at run-time. In that sense, it already provides just-in-time compilation (but we do realise there is a little bit more involved than that).
6.4 Microcode, methods and objects
The algorithms underlying the new synchronisation primitives correspond to new micro-code for the virtual transputer. Just like the real micro-code that implements channel synchronisation on real transputers, great care has to be taken in its design – these algorithms are significantly harder to get right than algorithms of a similar size at the occam application level.
The reason for this is that algorithms within occam processes are naturally object-oriented – in the literal sense of the term. By this, we mean that they directly express the behaviour of objects from their own point of view – not as a set of procedure calls that are made by (and, therefore, oriented towards) external agents. This is achieved through implementing objects as active processes that run concurrently with other objects, each with their own thread (or threads) of control. Of course, this is something for which occam was specifically designed.
Most object-oriented programming languages allow the encapsulation of object data and algorithms, but only provide for the expression of those algorithms through a set of passive methods (which are no different to procedures). Objects interact by calling each other’s methods and, so far as algorithm design is concerned, there is no paradigm shift from traditional procedural programming.
The problem is that expressing the behaviour of an object through a set of externally-called methods is unnatural (it’s literally not object-oriented!), and it gets especially hard in a multi-threaded (or multi-processing) environment. The reason is that the semantics of concurrently operated methods (even the synchronized methods of Java) do not compose. This means that in order to design/understand two methods of some object, we have to design/understand both at the same time. Of course, this gets worse the more methods an object contains and puts a limit on the complexity of system that can be designed in this way.
Unfortunately, we cannot build a system purely from active objects! An active object cannot directly interfere with another active object (this would break the principle of data abstraction and introduce all manner of semantic chaos) and it cannot directly call on it (because active objects are active and don’t provide passive facilities). So, we need some passive medium through which they can interact.
Fortunately, we only need a small variety of passive objects to construct this medium and these can be hidden in micro-code and/or burnt into a high-level language. Then, the system engineer only needs to work with active (truly object-oriented) objects, whose semantics do compose and put no limit on the complexity of the design.
Hence, we have occam and the virtual transputer, where the necessary (but hard to program) passive objects are its Channels, Semaphores, Resources, Events and Buckets and, hopefully, not too many more!
The most complex in this list is the Resource [4]. The non-compositional nature of the semantics of its implementation bites as we are obliged to think simultaneously about its claim and release algorithms — they cannot be understood individually. The same is true for all the other primitives, including the original occam Channel whose input and output methods (especially in the context of ALTs) are so elegant, but are completely interdependent. It is an immense relief we don’t have to understand this, or program like this, at the occam level.
6.5 Java threads and occam
These ideas were examined in the context of Java at the Java Threads Workshop, which took place at the University of Kent last September (1996). Java allows both passive and active objects at the user-program level, with passive being the default mechanism everyone learns first. Java threads are not based upon CSP, but on the earlier concept of monitors. Thread synchronisation can only be achieved through calling synchronized methods. These methods belong to passive objects and have to be programmed — which is hard.
The workshop has stimulated the development (by at least three research groups) of CSP class libraries that package a range of passive hard-to-program objects (like channels, shared channels, buffers, shared buffers, events and buckets). Multi-threaded Java applications can now be developed that interact with each other in the occam/CSP style and use only active easy-to-program objects. Details from this workshop can be found on:
<URL:http://www.hensa.ac.uk/parallel/groups/wotug/java/>
<URL:ftp://unix.hensa.ac.uk/pub/parallel/groups/wotug/java/>
6.6 If only ...
These ideas could have been introduced at low cost over ten years ago – they do not require any special hardware technology. They remain vital today because they enhance any multi-processing (or multi-threaded) technology that doesn’t have them – and that seems to include most everything. Now if only we had had them since 1985, ...
References
|
{"Source-Url": "https://kar.kent.ac.uk/21515/1/Higher_Levels_of_Process_Synchronisation.pdf", "len_cl100k_base": 13116, "olmocr-version": "0.1.53", "pdf-total-pages": 27, "total-fallback-pages": 0, "total-input-tokens": 65699, "total-output-tokens": 15153, "length": "2e13", "weborganizer": {"__label__adult": 0.0003070831298828125, "__label__art_design": 0.00028586387634277344, "__label__crime_law": 0.0002467632293701172, "__label__education_jobs": 0.0003979206085205078, "__label__entertainment": 6.395578384399414e-05, "__label__fashion_beauty": 0.00012230873107910156, "__label__finance_business": 0.00018870830535888672, "__label__food_dining": 0.00029277801513671875, "__label__games": 0.0005831718444824219, "__label__hardware": 0.0014133453369140625, "__label__health": 0.0003614425659179687, "__label__history": 0.0002675056457519531, "__label__home_hobbies": 9.41753387451172e-05, "__label__industrial": 0.0004296302795410156, "__label__literature": 0.00025582313537597656, "__label__politics": 0.00023365020751953125, "__label__religion": 0.0005326271057128906, "__label__science_tech": 0.027740478515625, "__label__social_life": 6.586313247680664e-05, "__label__software": 0.0060577392578125, "__label__software_dev": 0.958984375, "__label__sports_fitness": 0.0003032684326171875, "__label__transportation": 0.0006031990051269531, "__label__travel": 0.0002191066741943359}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 64047, 0.01594]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 64047, 0.26125]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 64047, 0.88777]], "google_gemma-3-12b-it_contains_pii": [[0, 184, false], [184, 3155, null], [3155, 5778, null], [5778, 7804, null], [7804, 9973, null], [9973, 11292, null], [11292, 13499, null], [13499, 14550, null], [14550, 16371, null], [16371, 18645, null], [18645, 20498, null], [20498, 22813, null], [22813, 24768, null], [24768, 26042, null], [26042, 33671, null], [33671, 36177, null], [36177, 37593, null], [37593, 39214, null], [39214, 41832, null], [41832, 44617, null], [44617, 47496, null], [47496, 49248, null], [49248, 52150, null], [52150, 54933, null], [54933, 58277, null], [58277, 61639, null], [61639, 64047, null]], "google_gemma-3-12b-it_is_public_document": [[0, 184, true], [184, 3155, null], [3155, 5778, null], [5778, 7804, null], [7804, 9973, null], [9973, 11292, null], [11292, 13499, null], [13499, 14550, null], [14550, 16371, null], [16371, 18645, null], [18645, 20498, null], [20498, 22813, null], [22813, 24768, null], [24768, 26042, null], [26042, 33671, null], [33671, 36177, null], [36177, 37593, null], [37593, 39214, null], [39214, 41832, null], [41832, 44617, null], [44617, 47496, null], [47496, 49248, null], [49248, 52150, null], [52150, 54933, null], [54933, 58277, null], [58277, 61639, null], [61639, 64047, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 64047, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 64047, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 64047, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 64047, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 64047, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 64047, null]], "google_gemma-3-4b-it_v2tag__is_public_order": 
[[0, 5000, false], [5000, 64047, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 64047, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 64047, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 64047, null]], "pdf_page_numbers": [[0, 184, 1], [184, 3155, 2], [3155, 5778, 3], [5778, 7804, 4], [7804, 9973, 5], [9973, 11292, 6], [11292, 13499, 7], [13499, 14550, 8], [14550, 16371, 9], [16371, 18645, 10], [18645, 20498, 11], [20498, 22813, 12], [22813, 24768, 13], [24768, 26042, 14], [26042, 33671, 15], [33671, 36177, 16], [36177, 37593, 17], [37593, 39214, 18], [39214, 41832, 19], [41832, 44617, 20], [44617, 47496, 21], [47496, 49248, 22], [49248, 52150, 23], [52150, 54933, 24], [54933, 58277, 25], [58277, 61639, 26], [61639, 64047, 27]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 64047, 0.07123]]}
|
olmocr_science_pdfs
|
2024-12-08
|
2024-12-08
|
b0c53c3369189d50eb1dc86b3447232968bee972
|
Ruby master - Feature #6284
Add composition for procs
04/12/2012 03:21 PM - pabloh (Pablo Herrero)
Status: Closed
Priority: Normal
Assignee: nobu (Nobuyoshi Nakada)
Target version:
Description
It would be nice to be able to compose procs like functions in functional programming languages:
to_camel = :capitalize.to_proc
add_header = ->val {"Title: " + val}
format_as_title = add_header << to_camel << :strip
instead of:
format_as_title = lambda {|val| "Title: " + val.strip.capitalize }
It's pretty easy to implement in pure ruby:
class Proc
  def << block
    proc { |*args| self.call( block.to_proc.call(*args) ) }
  end
end
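As a quick illustration of the proposed #<< (using the pure-Ruby definition above; the sample strings are arbitrary):
```ruby
to_camel = :capitalize.to_proc
add_header = ->(val) { "Title: " + val }

format_as_title = add_header << to_camel << :strip
format_as_title.call("  hello world  ")   #=> "Title: Hello world"
```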
Related issues:
Related to Ruby master - Feature #13600: yield_self should be chainable/compo... Rejected
Associated revisions
Revision a43e967b - 11/22/2018 05:51 AM - nobu (Nobuyoshi Nakada)
proc.c: Implement Proc#* for Proc composition
* proc.c (proc_compose): Implement Proc#* for Proc composition, enabling composition of Procs and Methods. [Feature #6284]
* test/ruby/test_proc.rb: Add test cases for Proc composition.
From: Paul Mucur mudge@mudge.name
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@65911 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Revision 65911 - 11/22/2018 05:51 AM - nobu (Nobuyoshi Nakada)
proc.c: Implement Proc#* for Proc composition
* proc.c (proc_compose): Implement Proc#* for Proc composition, enabling composition of Procs and Methods. [Feature #6284]
* test/ruby/test_proc.rb: Add test cases for Proc composition.
From: Paul Mucur mudge@mudge.name
Revision 3b7b7065 - 11/22/2018 05:51 AM - nobu (Nobuyoshi Nakada)
proc.c: Support any callable when composing Procs
- proc.c (proc_compose): support any object with a call method rather than supporting only procs. [Feature #6284]
- proc.c (compose): use the function call on the given object rather than rb_proc_call_with_block in order to support any object.
- test/ruby/test_proc.rb: Add test cases for composing Procs with callable objects.
- test/ruby/test_method.rb: Add test cases for composing Methods with callable objects.
From: Paul Mucur paul@altmetric.com
Revision 65913 - 11/22/2018 05:51 AM - nobu (Nobuyoshi Nakada)
proc.c: Support any callable when composing Procs
- proc.c (proc_compose): support any object with a call method rather than supporting only procs. [Feature #6284]
- proc.c (compose): use the function call on the given object rather than rb_proc_call_with_block in order to support any object.
- test/ruby/test_proc.rb: Add test cases for composing Procs with callable objects.
- test/ruby/test_method.rb: Add test cases for composing Methods with callable objects.
From: Paul Mucur paul@altmetric.com
Revision c71cc2db - 11/22/2018 05:51 AM - nobu (Nobuyoshi Nakada)
Proc#<< and Proc#>>
[Feature #6284]
git-svn-id: svn+ssh://ci.ruby-lang.org/ruby/trunk@65914 b2dd03c8-39d4-4d8f-98ff-823fe69b080e
Revision 65914 - 11/22/2018 05:51 AM - nobu (Nobuyoshi Nakada)
Proc#<< and Proc#>>
[Feature #6284]
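For reference, the composition operators that eventually shipped in Ruby 2.6 behave as follows (f >> g applies f first, f << g applies g first):
```ruby
f = proc { |x| x * 2 }
g = proc { |x| x + 1 }

(f >> g).call(3)   #=> 7   # g(f(3))
(f << g).call(3)   #=> 8   # f(g(3))
```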
History
#1 - 04/12/2012 11:25 PM - trans (Thomas Sawyer)
Or
format_as_title = ->(val) { add_header[to_camel[val.strip]] }
#2 - 04/12/2012 11:28 PM - trans (Thomas Sawyer)
Also, I think #+ is better.
#3 - 04/13/2012 02:02 AM - pabloh (Pablo Herrero)
trans (Thomas Sawyer) wrote:
Also, I think #+ is better.
I saw facets has some similar feature that uses #* instead, maybe because it looks a bit closer to Haskell's composition syntax. Nevertheless, I still like #<< better, it feels like you are "connecting" the blocks together.
#4 - 04/13/2012 06:26 AM - aprescott (Adam Prescott)
I would vote for #*. I think #<< is usually changing the left argument (in place).
#5 - 04/13/2012 06:53 AM - aprescott (Adam Prescott)
See also: http://web.archive.org/web/20101228224741/http://drmcawesome.com/FunctionCompositionInRuby
#6 - 04/13/2012 07:53 AM - mame (Yusuke Endoh)
- Status changed from Open to Assigned
- Assignee set to matz (Yukihiro Matsumoto)
#7 - 04/13/2012 08:59 AM - pabloh (Pablo Herrero)
aprescott (Adam Prescott) wrote:
See also: http://web.archive.org/web/20101228224741/http://drmcawesome.com/FunctionCompositionInRuby
Maybe #| could be a possibility. (Without implementing #> or #<).
But I find the article's proposition about the chaining order a bit misleading:
transform = add1 | sub3 | negate
For me that feels more like "piping" add1 to sub3 to negate, from left to right, not the other way around.
If we choose to take that path I think the following code would be a plausible implementation:
class Proc
def | block
proc { |*args| block.to_proc.call( self.call(*args) ) }
end
end
class Symbol
def | block
self.to_proc | block
end
end
#8 - 04/13/2012 02:56 PM - aprescott (Adam Prescott)
What about #* for composing traditionally (right to left) and #| for piping (left to right)? In mathematics, depending on the area and the subject, both ways are used, and some argue that "piping" is more natural than "precomposing". However, when functions are "piped", the arguments are usually on the left: (arguments)(function1 function2).
Update: i think having the both was a bad idea, it would be redundant.
#9 - 04/15/2012 05:10 PM - trans (Thomas Sawyer)
I agree, #* is appropriate for composition.
#10 - 04/16/2012 11:59 AM - pabloh (Pablo Herrero)
alexeymuranov (Alexey Muranov) wrote:
Update: i think having the both was a bad idea, it would be redundant.
I was going to say the same thing. Having both #* and #| is redundant and also a bit confusing, since #| doesn't really feel to be the opposite operation of #* at any context. We should choose one or the other but not both.
I still like #| (chaining from left to right) a bit better, but I rather have #* than nothing.
#11 - 10/27/2012 10:44 AM - matz (Yukihiro Matsumoto)
- Status changed from Assigned to Feedback
Positive about adding function composition. But we need method name consensus before adding it?
Is #* OK for everyone?
Matz.
#12 - 11/09/2012 06:56 PM - jballanc (Joshua Ballanco)
Might I humbly suggest #<-:
to_camel = :capitalize.to_proc
add_header = ->val {"Title: " + val}
format_as_title = add_header <- to_camel <- :strip
Seems to have a nice symmetry with #->
#13 - 11/09/2012 08:10 PM - rosenfeld (Rodrigo Rosenfeld Rosas)
I think "<-" reads better but I'm ok with "" as well.
#14 - 11/09/2012 08:23 PM - rohitarondekar (Rohit Arondekar)
I'm with Joshua, I think #<- reads a lot better.
#15 - 11/10/2012 01:25 AM - alexeymuranov (Alexey Muranov)
I think that the meaning of #<- would not be symmetric with the meaning of #->.
Also, in mathematics, arrows are more like relations than operations. When used to describe functions, usually function arguments go to the arrow's tail, function values to the arrow's head, and function's name, for example, goes on top of the arrow.
(In this sense Ruby's lambda syntax would look to me more natural in the form f = (a,b)->{ a + b } instead of f = ->(a,b){ a + b }.)
The main drawback of #* in my opinion is that it does not specify the direction of composition (is (f * g)(x) equal to f(g(x)) or to g(f(x))?), but since in Ruby function arguments are written on the right (f(g(x))), i think it can be assumed that the inner function is on the right and the outer is on the left.
Update : Just for reference, here is how it is done in Haskell : http://www.haskell.org/haskellwiki/Function_composition
#16 - 11/10/2012 02:42 AM - marcandre (Marc-Andre Lafortune)
+1 for #*
The symbol used in mathematics for function composition is a circle (∘); the arrows are for the definitions of functions (like lambdas) only, so #<- or whatever make no sense to me.
Finally, the f ∘ g(x) is defined as f(g(x)), so there is no argument there either.
#17 - 11/10/2012 12:06 PM - duerst (Martin Dürst)
marcandre (Marc-Andre Lafortune) wrote:
+1 for #*
The symbol used in mathematics for function composition is a circle (∘); the arrows are for the definitions of functions (like lambdas) only, so #<- or whatever make no sense to me.
Very good point.
Finally, the $f \circ g(x)$ is defined as $f(g(x))$, so there is no argument there either.
Not true. Depending on which field of mathematics you look at, $(f \circ g)(x)$ is either $f(g(x))$ or $g(f(x))$. The latter is in particular true in work involving relations, see e.g. http://en.wikipedia.org/wiki/Composition_of_relations#Definition.
Speaking from a more programming-related viewpoint, $f(g(x))$ is what is used e.g. in Haskell, and probably in many other functional languages, and so may be familiar to many programmers.
However, we should take into account that a functional language writes e.g. `reverse(sort(array))`, so it makes sense to define `reversort = reverse * sort` (i.e. $(f \circ g)(x)$ is $f(g(x))$). But in Ruby, it would be `array.sort.reverse`, so `reversort = sort * reverse` may feel much more natural (i.e. $(f \circ g)(x)$ is $g(f(x))$).
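To make the two readings concrete (plain lambdas only; no new operator assumed, so this is an illustration rather than part of the original comment):
```ruby
sort    = ->(a) { a.sort }
reverse = ->(a) { a.reverse }

# Haskell-style reading:        reversort = reverse * sort   (f * g means f(g(x)))
# method-chaining reading:      reversort = sort * reverse   (f * g means g(f(x)))
# either way the intended pipeline is:
reverse.call(sort.call([3, 1, 2]))   #=> [3, 2, 1]
```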
#18 - 11/10/2012 01:23 PM - phluid61 (Matthew Kerwin)
I agree that $(f \circ g)(x)$ is $g(f(x))$ is more intuitive from a purely programmatic point of view. It is "natural" for the operations to be applied left to right, exactly like method chaining.
Matthew Kerwin, B.Sc (CompSci) (Hons)
http://matthew.kerwin.net.au/
ABN: 59-013-727-651
"You'll never find a programming language that frees you from the burden of clarifying your ideas." - xkcd
#19 - 11/10/2012 06:23 PM - alexeymuranov (Alexey Muranov)
phluid61 (Matthew Kerwin) wrote:
I agree that $(f \circ g)(x)$ is $g(f(x))$ is more intuitive from a purely programmatic point of view. It is "natural" for the operations to be applied left to right, exactly like method chaining.
When functions are applied from left to right, the argument is usually (if not always) on the left. The form $(x)(fg)=((x)f)g$ may look awkward (though i personally used it in a math paper), so i think usually the "exponential" notation is preferred: $x^{fg} = (x^f)^g$, where $x^f$ corresponds to $f(x)$ in the usual notation.
With method chaining, IMO, the "main argument" of a method is the receiver, and it is on the left. Lambdas and Procs are not chained in the same way as method calls.
Update: I agree that the common syntax for calling functions (f(x) rather than (x)f) should not be an obstacle if Ruby decides to consistently multiply functions putting the inner on the left and the outer on the right. Another syntax for calling functions can be invented in the future, or rubists can learn to live with this inconsistency. For example, Ruby (or Matz) can decide to multiply lambdas with the inner on the left and the outer on the right, and add the following syntax:
```ruby
format_as_title = :strip.to_proc * :capitalize.to_proc * lambda { |val| "Title: " + val }
title = " over here " ^ format_as_title # instead of `title = format_as_title.call(" over here ")`
```
Update 2012-11-11: I was not clear what i meant by "multiplying from left to right". I meant to say: putting the inner function on the left and the outer on the right. I am correcting this phrase.
#20 - 11/10/2012 09:33 PM - rosenfeld (Rodrigo Rosenfeld Rosas)
In Math multiplication is always associative, even for matrix. I.e: $(A*B)*C == A*(B*C)$. If we use $*$ for $(\circ)$ (composition) it resembles multiplication. Function composition is analog to matrix multiplication which are commonly used for transformation compositions as well. In fact, function composition is also associative.
So, when representing $h = f \circ g$ as $h = f \ast g$ it makes sense to me (although Math preferring a different symbol for multiplication and composition is a good indication that we should consider this as well for Ruby - more on that later on). But Math representation is procedural, not object oriented. If we try to mix both approaches to fit Ruby philosophy this could lead to great confusion.
Ruby can be also used for procedural programming:
```ruby
sqrt = ->(n) { Math.sqrt n }
# Although I agree that (n)->{} would read more natural to me, just like in CoffeeScript
square_sum = ->(a, b) { a*a + b*b }
hypotenuse = sqrt * square_sum
5 == hypotenuse.call 3, 4 # equivalent to: sqrt.call square_sum.call 3, 4
```
This makes total sense to me using procedural notation. I'm not sure how would someone use this using some OO notation instead...
Now with regards to composition notation, I think a different notation could help those reading some code and trying to understand it. Suppose this method:
```ruby
def bad_name(bad_argument_name, b)
bad_argument_name * b # or bad_argument_name << b
end
```
You can't know beforehand if bad_argument_name is an array, a number or a proc/lambda. If we read this instead:
```ruby
def bad_name(bad_argument_name, b)
bad_argument_name <- b
end
```
we would then have a clear indication that bad_argument_name is probably a proc/lambda. I know the same argument could be used to differentiate `<<` between strings and arrays among other cases. But I think that function composition is conceptually much different from those other operations (concatenation, multiplication) than concatenation (`<<`) is for strings and arrays. In both cases we are concatenating but concatenation means different things for strings and arrays in non surprising ways.
But then using this arrow notation I would expect that `(a <- b)` would mean "a before b" `(b(a(...)))` while `(a -> b)` means "a after b" `(a(b(...)))`.
I find it a bit awful to use "hypotenuse = square_sum <- sqrt", although it is the way OO usually works (`square_sum.sqrt` - pseudo-code of course). But we would not be using "[4, 5].hypotenuse", but "hypotenuse.call 4, 5", right? So, since we're using procedural notation for procs/lambdas we should be thinking of procedural programming when deciding which operator to use.
I would really prefer to have lambda syntax as "double = <-{|n| n * 2}" and function composition as "hypotenuse = sqrt -> square_sum" (sqrt after square_sum). But since I don't believe the lambda syntax will ever change, let's try to see this over a different perspective.
Instead of reading `(a <- b)` as "a before b", I'll try to think of it as being "b applied to a" `(a(b(...)))`. This also make sense to me so I can easily get used to this. It would work the same way as "*" but there would be a clear indication that this refers to function composition rather than some generic multiplication algorithm.
Having said that, I'd like to confirm that I’m ok with either "*" or "<-" and I'd really like to have function composition as part of Ruby.
#21 - 11/24/2012 10:31 AM - mame (Yusuke Endoh)
- Target version changed from 2.0.0 to 2.6
#22 - 12/02/2012 04:53 AM - rits (First Last)
proc composition is not commutative, so the operator should:
1. not imply commutativity
2. not conceal the order of application
i.e. the operator should be visually asymmetrical with clear directionality
E.g. `<<, <<<, <->`
```
a << b << c = a(b(c(x)))
```
perhaps it also makes sense to have the other direction: `c >> b >> a = a(b(c(x)))`
#23 - 12/05/2012 12:13 PM - Anonymous
+1 to #.
+1 to rosenfeld's first 2 paragraphs (h = f ∘ g as h = f * g, and the matrix multiplication analogy).
+1 to "<-". Rationale: It is too easy invent a guitar with one more string. Furthermore, when it comes to operators, I consider design by jury a weak approach.
#24 - 12/05/2012 07:47 PM - rosenfeld (Rodrigo Rosenfeld Rosas)
I play a 7-string guitar and I can tell you that the extra string greatly improves our possibilities and it is pretty common in Samba and Choro Brazilian music styles:
http://www.youtube.com/watch?v=3mTdpRY6yMI
http://www.youtube.com/watch?v=NDXcVr1Pks (here we not only have a 7-string guitar but also a 10-string bandolim while the usual one has 8 strings)
I'm not against `*`. I just slightly prefer `<-` over `*`.
#25 - 12/05/2012 08:23 PM - alexeymuranov (Alexey Muranov)
rits (First Last) wrote:
```
proc composition is not commutative, so the operator should:
```
**#26 - 06/14/2015 04:55 PM - mudge (Paul Mucur)**
- File 0001-proc.c-Implement-Proc-for-Proc-composition.patch added
- File 0002-proc.c-Implement-Method-for-Method-composition.patch added
Attached patches for Proc#* and Method#* for Proc and Method composition, including test cases. Also raised as a pull request on GitHub at [https://github.com/ruby/ruby/pull/935](https://github.com/ruby/ruby/pull/935)
One thing that might be worth discussing is the necessity of the type checking: should it be possible to compose any callable object (rather than explicitly Procs and Methods)? The Ruby implementation in the description of this issue calls to_proc on the given argument, but we could also demand that the supplied argument responds to call, e.g. in pure Ruby:
```ruby
class Proc
  def *(g)
    proc { |*args, &blk| call(g.call(*args, &blk)) }
  end
end
```
vs.
```ruby
class Proc
  def *(g)
    proc { |*args, &blk| call(g.to_proc.call(*args, &blk)) }
  end
end
```
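The practical difference between the two variants only shows up when the right-hand operand is not already a Proc. Assuming one of the pure-Ruby definitions above has been applied, a quick hypothetical illustration:

```ruby
double = proc { |x| x * 2 }

(double * proc { |x| x + 1 }).call(3) #=> 8 under either variant

# Composing with a Symbol only works with the to_proc variant;
# the call-only variant raises NoMethodError because Symbol does not respond to #call.
(double * :succ).call(3) #=> 8 with the to_proc variant
```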
**#27 - 06/23/2015 02:47 PM - mudge (Paul Mucur)**
- File 0003-proc.c-Support-any-callable-when-composing-Procs.patch added
Attached patch to support composing with any object that responds to call (rather than raising a TypeError if the object was not a Proc or Method), e.g.
```ruby
class Foo
  def call(x, y)
    x + y
  end
end

f = proc { |x| x * 2 }
g = f * Foo.new
g.call(1, 2) #=> 6
```
**#28 - 06/29/2015 08:39 AM - mudge (Paul Mucur)**
Regarding the syntax: I also support * as the operator where f * g = f(g(x)) (as it seems close enough to the mathematical syntax already used by other languages such as Haskell and Idris) but if that is too divisive, we could choose a method name from the mathematical definition ([https://en.wikipedia.org/wiki/Function_composition](https://en.wikipedia.org/wiki/Function_composition)) instead:
The notation g ∘ f is read as "g circle f", or "g round f", or "g composed with f", "g after f", "g following f", or "g of f", or "g on f".
This opens up the following options:
- Proc#compose: f.compose(g) #=> f(g(x))
- Proc#after: f.after(g) #=> f(g(x))
- Proc#following: f.following(g) #=> f(g(x))
- Proc#of: f.of(g) #=> f(g(x))
- Proc#on: f.on(g) #=> f(g(x))
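For illustration, any of these names could be defined in plain Ruby with the same semantics as the proposed `#*`. A minimal, hypothetical sketch (not part of the attached patches):

```ruby
class Proc
  # f.compose(g).call(x) == f.call(g.call(x))
  def compose(g)
    proc { |*args, &blk| call(g.call(*args, &blk)) }
  end
  alias_method :after, :compose
end

f = proc { |x| x * 2 }
g = proc { |x| x + 1 }
f.compose(g).call(3) #=> 8
f.after(g).call(3)   #=> 8
```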
**#29 - 06/29/2015 09:32 PM - pabloh (Pablo Herrero)**
It would be nice to be able to compose functions in both directions, like in F#, where you can do g << f or g >> f; sadly this was rejected before.
I would settle to have Proc# for "regular" composition and Proc# for "piping". Last time there was no consensus about the syntax. Hopefully we can manage to solve this before 2.3 is released.
I'm teaching Haskell in a graduate class, so I'm quite familiar with function composition and use it a lot, but the original example isn't convincing at all. For me, in Ruby, something like `val.strip.capitalize` reads much, much better than some artificially forced function composition. If there were a method `String#prepend`, it would be even more natural: `val.strip.capitalize.prepend('Title: ')`.
If there are better examples that feel more natural in Ruby, please post them here.
There is `String#prepend`, but it mutates the receiver.
I don't believe you need a pure FP language to benefit from a feature like this. Many ETL projects like transproc ([https://github.com/solnic/transproc](https://github.com/solnic/transproc)) would probably find it useful too.
Transproc is what actually inspired me to submit a patch here: my hope is that having functional composition in the Ruby language itself will enable easier data pipelining using only Procs, Methods and other objects implementing call. The presence of `curry` ([http://ruby-doc.org/core-2.2.2/Proc.html#method-i-curry](http://ruby-doc.org/core-2.2.2/Proc.html#method-i-curry)) seems like a good precedent for adding such functional primitives to the core.
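For a flavour of the kind of data pipelining meant here, a small hypothetical sketch (invented step names, assuming the forward composition form discussed later in this thread, i.e. Proc#>>):

```ruby
# Each step is a plain callable that takes a row hash and returns a new one.
strip_blanks = ->(row) { row.reject { |_, v| v.nil? || v == '' } }
symbolize    = ->(row) { row.map { |k, v| [k.to_sym, v] }.to_h }
add_source   = ->(row) { row.merge(source: 'import') }

import_row = strip_blanks >> symbolize >> add_source

import_row.call('name' => 'Ada', 'email' => '')
#=> {:name=>"Ada", :source=>"import"}
```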
I support the proposed `Proc#*/Method#*` syntax and semantics.
The feature being added is function composition; not relation composition, not method chaining. Its target audience is most likely to read `f ∘ g` as "f after g", so that's how it should work. Perhaps some Ruby programmers will not use this feature directly (as with `Proc#curry`) because they neither program nor think in a functional style, but it should be designed to be useful and familiar to those who do. The proposed implementation achieves that.
The asterisk isn't ideal, but it's the best choice available.
I fully agree with you that a good example really helps in understanding the problem, which leads to a better solution.
The few times I've missed function composition in Ruby have always been when I wanted to replace the same pattern:
```ruby
res = step1(base)
res = step2(res)
res = step3(res)
```
I hate having to mutate the `res` variable, and I would hate even more creating different temp variables, especially when I do not know how many steps there will be. I would like to be able to combine the steps and then apply the combined step to `base`; see the sketch below.
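For instance, a minimal sketch of the combined form (step1, step2 and step3 are placeholders, and the forward composition operator discussed later in this thread is assumed):

```ruby
step1 = ->(x) { x + 1 }
step2 = ->(x) { x * 2 }
step3 = ->(x) { x - 3 }

pipeline = step1 >> step2 >> step3 # step1 first, then step2, then step3
base     = 10
res      = pipeline.call(base)     #=> 19, with no intermediate variables
```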
There are a lot of examples for this pattern so here is one: data retrieval with ActiveRecord (I assume most people know this library but this is only an example)
```ruby
messages = all_messages
messages = restrict_to_owner(messages, owner)
filters.each do |filter|
  messages = filter.apply(messages)
end
messages = messages.order(created_at: :asc)
messages = paginate(messages)
```
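Roughly, the same pipeline could be expressed with composition. This is only a sketch, assuming Proc#>> and Method#>> as discussed later in this thread and the same helper methods as above:

```ruby
apply_filters = filters
  .map { |filter| filter.method(:apply) }
  .reduce(:itself.to_proc) { |acc, f| acc >> f }

pipeline = ->(msgs) { restrict_to_owner(msgs, owner) } >>
           apply_filters >>
           ->(msgs) { msgs.order(created_at: :asc) } >>
           method(:paginate)

messages = pipeline.call(all_messages)
```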
#36 - 12/30/2015 11:15 AM - mudge (Paul Mucur)
- File deleted (0002-proc.c-Implement-Method-for-Method-composition.patch)
#37 - 12/30/2015 11:15 AM - mudge (Paul Mucur)
- File deleted (0001-proc.c-Implement-Proc-for-Proc-composition.patch)
#38 - 12/30/2015 11:15 AM - mudge (Paul Mucur)
- File deleted (0003-proc.c-Support-any-callable-when-composing-Procs.patch)
#39 - 12/30/2015 11:18 AM - mudge (Paul Mucur)
- File proc-compose.patch added
With the recent addition of Hash#to_proc and performance improvements to Procs in 2.3.0, I have rebased my patch (attached) to add composition between Procs, Methods and other objects responding to call with `*`, in the hope of reviving this proposal.
c.f. [https://github.com/ruby/ruby/pull/935](https://github.com/ruby/ruby/pull/935)
#40 - 12/30/2015 12:28 PM - mudge (Paul Mucur)
- File 0001-proc.c-Implement-Proc-for-Proc-composition.patch added
- File 0002-proc.c-Implement-Method-for-Method-composition.patch added
- File 0003-proc.c-Support-any-callable-when-composing-Procs.patch added
#41 - 12/30/2015 12:28 PM - mudge (Paul Mucur)
- File deleted (proc-compose.patch)
#42 - 10/09/2016 02:30 PM - why-capslock-though (Alexander Moore-Niemi)
I wrote a gem with a C extension of Proc#compose: [https://github.com/mooreniemi/proc_compose#usage](https://github.com/mooreniemi/proc_compose#usage)
What motivated me was `map f (map g xs) = map (f . g) xs`, and what I still don't understand (being a newbie to extending Ruby or understanding its internals) is that `.map(&some_proc).map(&some_other_proc)` still behaves better than `.map(&(some_proc * some_other_proc))` given my current implementation of compose. Although I think composition has a lot of uses, the admittedly small but free performance benefit I expected to gain was at the top of my list.
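For reference, the fusion law written in Ruby terms, assuming the Proc#* from the attached patch (where `f * g` means "f after g"):

```ruby
f  = proc { |x| x * 2 }
g  = proc { |x| x + 1 }
xs = [1, 2, 3]

xs.map(&g).map(&f) == xs.map(&(f * g)) #=> true, both give [4, 6, 8]
```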
I do think emphasis on composition suggests a somewhat different style of writing Ruby, but I think it can be a good one, actually.
Paul: what’s the performance of your compose? If I have time later I can use [https://github.com/mooreniemi/graph-function](https://github.com/mooreniemi/graph-function) to try and see.
#43 - 10/11/2016 08:18 PM - mudge (Paul Mucur)
Alexander Moore-Niemi wrote:
Paul: what’s the performance of your compose? If I have time later I can use [https://github.com/mooreniemi/graph-function](https://github.com/mooreniemi/graph-function) to try and see.
I ran the following benchmark with benchmark-ips:
```ruby
require 'benchmark/ips'
double = proc { |x| x * 2 }
quadruple = proc { |x| x * 4 }
octuple = double * quadruple
inline_octuple = proc { |x| x * 2 * 4 }
nested_octuple = proc { |x| quadruple.call(double.call(x)) }

numbers = [1, 2, 3, 4, 5]

Benchmark.ips do |x|
  x.report('composing procs') do
    numbers.map(&octuple)
  end

  x.report('chaining procs') do
    numbers.map(&double).map(&quadruple)
  end

  x.report('single proc') do
    numbers.map(&inline_octuple)
  end

  x.report('nested proc') do
    numbers.map(&nested_octuple)
  end

  x.compare!
end
```
I also see a performance drop with composition compared to chaining multiple maps:
Warming up --------------------------------------
composing procs 27.822k i/100ms
chaining procs 32.096k i/100ms
single proc 49.021k i/100ms
nested proc 27.337k i/100ms
Calculating -------------------------------------
composing procs 341.874k (± 0.5%) i/s - 1.725M in 5.045764s
chaining procs 389.031k (± 0.7%) i/s - 1.958M in 5.032912s
single proc 666.544k (± 0.6%) i/s - 3.333M in 5.001266s
nested proc 321.919k (± 0.8%) i/s - 1.613M in 5.010562s
Comparison:
single proc: 666543.8 i/s
chaining procs: 389031.4 i/s - 1.71x slower
composing procs: 341873.8 i/s - 1.95x slower
nested proc: 321919.1 i/s - 2.07x slower
It might be interesting to look at object allocations as we effectively create a nested Proc which might account for the slow down.
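A rough way to compare allocation counts (a sketch only, not a precise attribution; it assumes Ruby >= 2.6's built-in Proc#>> or the Proc#* from the attached patch):

```ruby
double    = proc { |x| x * 2 }
quadruple = proc { |x| x * 4 }
octuple   = double >> quadruple
numbers   = [1, 2, 3, 4, 5]

def allocations
  before = GC.stat(:total_allocated_objects)
  yield
  GC.stat(:total_allocated_objects) - before
end

puts allocations { 10_000.times { numbers.map(&octuple) } }
puts allocations { 10_000.times { numbers.map(&double).map(&quadruple) } }
```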
#44 - 10/12/2016 08:28 AM - mudge (Paul Mucur)
Yukihiro Matsumoto wrote:
Positive about adding function composition. But we need method name consensus before adding it?
Is #* OK for everyone?
Aside from implementation details, is the lack of consensus on the method name (and the resulting behaviour re left vs right operand) the main blocker here?
#45 - 10/12/2016 09:46 AM - duerst (Martin Dürst)
Paul Mucur wrote:
Yukihiro Matsumoto wrote:
Positive about adding function composition. But we need method name consensus before adding it?
Is #* OK for everyone?
Aside from implementation details, is the lack of consensus on the method name (and the resulting behaviour re left vs right operand) the main blocker here?
If Matz says so, then yes, this is the main blocker.
#46 - 01/22/2017 06:50 PM - thorstenhirsch (Thorsten Hirsch)
f.y.i.: Composing procs is way faster now. It beats all the other variants in Paul's benchmark on Ruby 2.3.3:
Warming up --------------------------------------
composing procs 135.483k i/100ms
chaining procs 55.595k i/100ms
single proc 84.448k i/100ms
nested proc 48.095k i/100ms
Calculating -------------------------------------
composing procs 2.363M (± 6.2%) i/s - 11.787M in 5.011859s
chaining procs 701.936k (± 3.4%) i/s - 3.558M in 5.074972s
single proc 1.207M (± 3.6%) i/s - 6.080M in 5.044891s
nested proc 579.005k (± 2.9%) i/s - 2.934M in 5.071310s
Comparison:
- composing procs: 2362658.0 i/s
- single proc: 1206768.2 i/s - 1.96x slower
- chaining procs: 701936.2 i/s - 3.37x slower
- nested proc: 579005.4 i/s - 4.08x slower
Tested on linux and macOS (same result).
#47 - 03/13/2017 08:13 AM - matz (Yukihiro Matsumoto)
I want to make sure that everyone agrees with "*" instead of the OP's "<<".
Besides that, I also wanted to mention Koichi's concern that function composition may be far slower than method chaining.
Matz.
#48 - 04/28/2017 07:47 PM - RichOrElse (Ritchie Buitre)
matz (Yukihiro Matsumoto) wrote:
I want to make sure that everyone agrees with "*" instead of the OP's "<<".
Besides that, I also wanted to mention Koichi's concern that function composition may be far slower than method chaining.
Matz.
+1 for #*
Initially I thought of the F# convention #<< and its counterpart #>> as intuitive. But after giving it some thought, and some practice, I prefer #* and #+.
class Proc
  def +(other) # forward compose operation
    other.to_proc * self
  end

  def *(other) # backward compose operation
    ->(arg) { self.call other.(arg) } # with only one argument, for performance reasons
  end
end

f = -> n { n * 2 }
g = -> n { n * 4 }
h = f * g # equivalent to (g + f)

puts h.(5) #=> 40
puts (h + :odd?).call(3) #=> false
Instead of composition, I see potential use for the shovel operators #<< and #>> for piping purposes, similar to Elixir's `|>`.
```ruby
class Object
  def >>(transform) # piping operation
    transform.(self)
  end
end

class Integer
  def >>(num_or_func)
    return self << -num_or_func unless num_or_func.respond_to? :call
    super
  end
end

class Proc
  def <<(input) # feed value
    call input
  end
end

add_header = -> val { "Title: #{val}" }
format_as_title = add_header + :capitalize + :strip

puts 'Title goes here.' >> format_as_title  #=> Title: title goes here.
puts format_as_title << 'Title goes there.' #=> Title: title goes there.
puts 100 >> format_as_title                 #=> Title: 100
puts 100 >> 2                               #=> 25
```
#49 - 05/27/2017 01:35 AM - shyouhei (Shyouhei Urabe)
- Related to Feature #13600: yield_self should be chainable/composable added
#50 - 08/31/2017 06:26 AM - yuroyoro (TOMOHITO Ozaki)
Most languages do not define function composition as a built-in operator, but provide it as a function or method such as compose.
A few languages define a function composition operator, as follows:
Haskell: `.`
F#: `<<` and `>>`
Groovy: `<<` and `>>`
I think Ruby should provide a compose method (and and_then as forward composition), and alias some operators (like #<< and #>>) to them.
In my opinion, +1 for << and >> instead of `*`.
Because no language defines function composition as `*` and `+`, but F# and Groovy define it as << and >>. It is intuitive.
By the way, it would be useful if Symbol#<< (or Symbol#+) were a shortcut for sym.to_proc.compose(other_proc).
arr.map(&:upcase.to_proc.compose(:to_s.to_proc))
arr.map(&:to_s >> :upcase)
It is more visually and intuitive.
My reference implemention of composition is following.
https://github.com/yuroyoro/ruby/pull/7
#51 - 01/29/2018 07:49 AM - zverok (Victor Shepelev)
Could this please be taken to the next Developers Meeting agenda?
The best possible name has been discussed for 6 years already, and the feature seems pretty nice to have. I believe that sometimes it is just good to make an "executive decision" about the name once and for all; at least that is better than postponing the feature for years.
(I'm still unhappy with the yield_self name, though :))
#52 - 04/20/2018 06:34 PM - baweaver (Brandon Weaver)
yuroyoro (TOMOHITO Ozaki) wrote:
Most languages do not define function composition as a built-in operator, but provide it as a function or method such as compose.
A few languages define a function composition operator, as follows:
Haskell: `.`
F#: `<<` and `>>`
Groovy: `<<` and `>>`
I think Ruby should provide a compose method (and and_then as forward composition), and alias some operators (like #<< and #>>) to them.
In my opinion, +1 for << and >> instead of `*`.
Because no language defines function composition as `*` and `+`, but F# and Groovy define it as << and >>. It is intuitive.
By the way, it would be useful if Symbol#<< (or Symbol#+) were a shortcut for sym.to_proc.compose(other_proc).
It is more visual and intuitive.
My reference implementation of composition follows:
https://github.com/yuroyoro/ruby/pull/7
I do like the idea of using the shovel operator, as it's already fairly common in the language and present in others. I'd be interested in potentially making a container variant though:
```ruby
arr.map(&:to_s >> :upcase)
# becomes
arr.map(&[:to_s, :upcase])
```
Though that would involve introducing the idea of Array#to_proc. I've used a similar technique in a few gems with #[](), so a vague implementation may look like:
```ruby
class Array
  def to_proc
    self[1..].reduce(self[0].to_proc, :compose)
  end
end
```
Wherein it'd be nice if #compose tried to coerce its argument:
```ruby
def compose(sym)
  fn = sym.is_a?(Proc) ? sym : sym.to_proc
  ...
end
```
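Purely as an illustration, the body might be completed along the lines of the pure-Ruby definitions earlier in this thread (this is not the gem's actual code):

```ruby
class Proc
  # Coerce the argument, then compose: f.compose(g).call(x) == f.call(g.call(x))
  def compose(other)
    fn = other.is_a?(Proc) ? other : other.to_proc
    proc { |*args, &blk| call(fn.call(*args, &blk)) }
  end
end
```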
I'm currently planning on releasing a gem this weekend that will do a similar thing; it could serve as inspiration for an implementation, but my C is nowhere near good enough to try this at the Ruby core level.
Aside: have we ever considered implicitly calling to_proc on anything passed to a method expecting a block? e.g. `arr.map(&:to_s)` becomes `arr.map(:to_s)`
#53 - 04/30/2018 06:51 PM - baweaver (Brandon Weaver)
I just realized that infix operators evaluate before to_proc, allowing something like this:
```ruby
[1,2,3].map(&:succ.to_proc >> :to_s.to_proc)
```
Codified it into a gem for kicks: https://github.com/baweeaver/mf
#54 - 05/17/2018 05:41 AM - nobu (Nobuyoshi Nakada)
- Description updated
#55 - 05/17/2018 05:56 AM - matz (Yukihiro Matsumoto)
- Status changed from Feedback to Open
Considering the combination of OOP and FP, it seems a good idea to add both forward and reverse composition of procs. So we will pick the Groovy way (adding << and >> methods to Proc).
We need more discussion on whether we should add composition methods to the Symbol class.
Matz.
#56 - 07/30/2018 08:13 AM - printercu (Max Melentiev)
matz (Yukihiro Matsumoto) wrote:
Considering the combination of OOP and FP, it seems a good idea to add both forward and reverse composition of procs. So we will pick the Groovy way (adding << and >> methods to Proc).
What do you think about selecting only a single operator? Here is a Groovy example from the provided link:
```groovy
final times2 = { it * 2 }
final plus1 = { it + 1 }
```
It looks like << is less intuitive and readable. I'm also really afraid of one day having to read something like `a >> b << c >> d`.
#57 - 08/08/2018 05:41 AM - ko1 (Koichi Sasada)
- Assignee changed from matz (Yukihiro Matsumoto) to nobu (Nobuyoshi Nakada)
#58 - 08/08/2018 06:37 PM - shan (Shannon Skipper)
matz (Yukihiro Matsumoto) wrote:
We need more discussion on whether we should add composition methods to the Symbol class.
Matz.
A little sugar on Symbol does look really nice:
https://gist.github.com/havenwood/d305b42f5b542a9de1eaa8e56ba6bd47#file-compose_procs-rb-L32-L45
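For anyone who wants to try the idea without the gist, a minimal sketch of that kind of sugar (hypothetical, and assuming the Proc#>> from this proposal is available):

```ruby
class Symbol
  def >>(other)
    to_proc >> other.to_proc
  end
end

%w[foo bar].map(&:capitalize >> :reverse) #=> ["ooF", "raB"]
```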
#59 - 08/10/2018 07:58 PM - shan (Shannon Skipper)
I used this proposal along with #14781 for a fun solution to a problem posed in the #ruby IRC channel:
https://gist.github.com/havenwood/b9d6555c128f563750c91bbee339a8922
I like these features a lot and love that you can just implement them yourself in the meantime. <3 Ruby!
#60 - 11/12/2018 12:43 PM - nobu (Nobuyoshi Nakada)
I've forgotten to post the patch to use << and >>.
#61 - 11/14/2018 05:33 PM - pabloh (Pablo Herrero)
nobu (Nobuyoshi Nakada) wrote:
I've forgotten to post the patch to use << and >>.
Is adding composition methods to the Symbol class still being considered?
#62 - 11/15/2018 02:22 AM - nobu (Nobuyoshi Nakada)
pabloh (Pablo Herrero) wrote:
Is adding composition methods to the Symbol class still being considered?
No, I don't think that we consider a Symbol object a method.
#63 - 11/22/2018 05:51 AM - nobu (Nobuyoshi Nakada)
- Status changed from Open to Closed
Applied in changeset trunk:65911.
proc.c: Implement Proc* for Proc composition
- proc.c (proc-compose): Implement Proc* for Proc composition, enabling composition of Procs and Methods. [Feature #6284]
- test/ruby/test_proc.rb: Add test cases for Proc composition.
From: Paul Mucur mudge@mudge.name
Files
---
0001-proc.c-Implement-Proc-for-Proc-composition.patch 3.65 KB 12/30/2015 mudge (Paul Mucur)
A Survey of Path-finding Algorithms
Employing Automatic Hierarchical Abstraction
Jonathan Vermette
vermett@uwindsor.ca
University of Windsor
The demand for fast, near-optimal search driven by the needs of modern computer games, which can feature hundreds of simultaneous requests, has spurred the development of a wide range of pathfinding techniques. One approach that is seeing increased use by industry programmers and researchers is the incorporation of automatically generated hierarchical abstractions into their pathfinding systems. The following is a survey of the research that has been conducted in this area, including papers that present new algorithms, novel hierarchical constructions, and analyses of the effectiveness of hierarchical abstraction to the pathfinding field.
Contents
1 Introduction
  1.1 Overview
2 Survey of Research
  2.1 Clique-Based Hierarchies
    2.1.1 Overview
    2.1.2 Hierarchical A* (HA*)
    2.1.3 Partial Refinement A* (PRA*)
    2.1.4 Triangulation Reduction A* (TRA*)
    2.1.5 Partial Refinement Learning Real-time Search (PR LRTS)
    2.1.6 Summary
  2.2 Sector-Based Hierarchies
    2.2.1 Overview
    2.2.2 Hierarchical Path-finding A* (HPA*)
    2.2.3 HPA* Enhancements
    2.2.4 Hierarchical Annotated A* (HAA*)
    2.2.5 Summary
  2.3 Contraction-Based Hierarchy
    2.3.1 Overview
    2.3.2 Contraction Hierarchies (CHs)
    2.3.3 Analysis of Contraction Hierarchies
    2.3.4 Summary
3 References
1. INTRODUCTION
1.1 Overview
Pathfinding is a well known and studied problem in the Artificial Intelligence community, being applicable to many fields. It can be found, for example, in robotics, logistics, or simulation systems that study real-world issues like crowd dynamics. One domain where pathfinding is especially prevalent is video games. Here, the need for highly efficient techniques is apparent, as modern games place high demands on the CPU and memory while also needing time for graphics and physics simulation. Pathfinding itself is typically only part of a larger overall game AI system, which may incorporate goal selection, group coordination, animation, etc. One researcher who collaborates with a major computer game developer cites their limit of only 1-3ms of an update frame towards pathfinding for all agents [Bulitko et al. 2007]. With problem complexity tied to search space size, and sizes ever growing, faster and more accurate pathfinding techniques are always in demand by game industry professionals.
For this reason, video games are finding greater prominence amongst researchers as a testbed. The majority of the papers examined in this survey focused on introducing new algorithms intended for the video game AI community, and those that incorporate empirical experiments do so using video game derived test problems.
The particular technique that is the object of this survey is the use of abstraction hierarchies in pathfinding algorithms. The concept of abstraction itself is natural, based on the way people reason about the world and how they move through it. [Botea et al. 2004] provide a motivating example of the general idea in their paper: consider a road trip across North America. If one had a complete map of the continent down to the level of individual streets and addresses for every town, planning the trip street by street is not the normal way of planning it out. Rather, one would consider only a means of leaving their city, followed by a high-level plan moving through states and provinces towards their destination. Here, the various streets in Windsor, Ontario or any other city are abstracted into a city. The various cities are abstracted into states and provinces. The states and provinces are abstracted into the United States and Canada, and the two countries are abstracted into North America. Abstraction hierarchies are a way of representing this human style of path-finding for a computer program.
A hierarchy then is nothing more than a series of successive abstractions. This is illustrated in Figure 1. A common notation is to reference the hierarchy as layers or
levels in ascending order, with the lowest, $L_0$, being the un-abstracted game space and subsequent layers numbered $L_1, L_2$ and so on, up to $L_l$.
Researchers have taken different approaches in how a hierarchy is used in their search algorithms. [Holte et al. 1996] runs A* at $L_0$ and uses its hierarchy to query and refine heuristic distance estimates. [Bulitko et al. 2007] conducts search at a high level to generate a constraint for the lowest level. The remaining algorithms in this survey however follow the approach outlined in the North America trip example. A complete abstract path is defined, which is then ‘filled in’ with more refined sections of path at lower abstraction levels, until a complete path is defined at the lowest level which the agent can follow.
The results presented in the papers examined in this survey show that hierarchical abstraction has led to many-fold reduction in running times for pathfinding problems. Claimed results have been as high as one hundred times faster than simple A* search [Sturtevant 2007].
2. SURVEY OF RESEARCH
This survey is broken down into sections thematically. Each defines a different style of constructing an abstraction hierarchy. Section 2.1 examines clique-based hierarchies, which are built around the particular topography of the search space. Section 2.2 examines work conducted in the area of sector-based hierarchies, which form a regular structure. Finally, Section 2.3 looks at the newest form of abstraction applied to game pathfinding, the Contraction Hierarchy, which differs significantly from the other two.
2.1 Clique-Based Hierarchies
2.1.1 Overview.

Fig. 2. An illustration of the clique-abstraction, (from [Bulitko et al. 2007], Fig. 9). Each level is a successive abstraction of the previous level.
In graph theory, a *clique* is understood to be a subset of an undirected graph where every pair of nodes in the sub-graph is joined by an edge. Though not always adhering to the formal definition, 'clique' is a useful shorthand for describing this style of abstraction. In each case, groups of nodes are abstracted based on their relationship to each other. Figure 2 illustrates this idea.
This technique originates with [Holte et al. 1996], who discussed grouping a node with its neighbors, starting with the node of maximal degree and working down. Later work focused on two-dimensional grid spaces representing a game 'map'. [Sturtevant and Buro 2005] describe building an abstraction around 4-cliques, and smaller groupings where nodes have lower degrees. [Bulitko et al. 2007] used this same implementation for its hierarchy construction.
[Demyen and Buro 2006] took this idea and applied it to triangulations of 2-d polygonal space, mapped to a graph based on triangle adjacency. The abstract nodes represent polygonal corridors which can be searched through.
2.1.2 Hierarchical A* (HA*).
Holte et al. were concerned with the use of search space abstractions to create heuristic estimates, but in a manner that was more efficient than the techniques known at the time.
The authors note that others have examined the use of abstraction techniques to generate admissible heuristics, mentioning [Gaschnig ; Guida and Somalvico 1978; Pearl 1984; Prieditis and Davis 1995] as examples of this approach. In general, these other techniques work by using the true cost of moving from start to goal in an abstracted search space as the heuristic estimate for the problem in the original search space.
However, they point out that using such a heuristic results in increased computational cost to find a solution. They claim that these existing approaches cannot ensure that the cost of generating the heuristic will not exceed the cost of using no heuristic at all. They cite the analysis performed by [Valtorta 1984] as evidence of this claim. Further, they claim that the only existing program that broke "Valtorta's Barrier" was Absolver II in [Prieditis and Davis 1995], and only for certain types of problems.
Holte et al. devised an algorithm to search for a solution in an abstraction hierarchy, calling this algorithm Hierarchical A*. To build an abstraction hierarchy, Holte et al. used the STAR graph abstraction technique from [Holte et al. 1996]. In the STAR technique, the node on the search graph with the highest degree is abstracted with its neighbours into a single abstract state, repeating until all nodes have been mapped to this abstract graph. A hierarchy is produced by repeating the process on the created abstract graph, producing a higher level abstraction, and repeating this process until it is collapsed to a single node.
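As a rough illustration of the STAR-style grouping (not the authors' code; the adjacency-hash representation and the tie-breaking are assumptions), one abstraction pass might look like this:

```ruby
# One STAR-style pass: repeatedly absorb the highest-degree remaining node
# together with its not-yet-abstracted neighbours into one abstract group.
def star_abstract(adjacency)
  remaining = adjacency.keys
  groups = []
  until remaining.empty?
    centre = remaining.max_by { |n| (adjacency[n] & remaining).size }
    group  = [centre] + (adjacency[centre] & remaining)
    groups << group
    remaining -= group
  end
  groups # each group becomes a single node at the next abstraction level
end

adjacency = { a: [:b, :c], b: [:a, :c], c: [:a, :b, :d], d: [:c] }
p star_abstract(adjacency) #=> [[:c, :a, :b, :d]]
```

Repeating the pass on the resulting abstract graph yields the hierarchy described above.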
They describe the operation of the algorithm as running much the same as regular A* search. When a state is expanded, the heuristic costs of its successors are determined by discovering the cost from that state to the goal in the next level of the abstraction hierarchy, which when determined is passed down to the lower abstraction level as a heuristic estimate. Knowing that an abstracted state at level $l$ represents many states in $l-1$, Hierarchical A* caches these costs to speed up the algorithm by preventing rework.
To test their new algorithm, the authors set up several series of experiments to compare their Hierarchical A* against what they call A* with no heuristic scoring \((h(s) = 0)\). They call this “blind search”. Both approaches were tested in different search spaces, with examples including the 5-puzzle and Hanoi-7.
Holte et al. claim to have expanded fewer search nodes than indicated by Valtorta's Barrier in the majority of problems they tested. In two particular search spaces, they claim Hierarchical A* was the more efficient algorithm for 95% of the problems they tried.
The authors state that they have shown that “A* search using heuristics created automatically by homomorphic abstractions . . . can outperform blind search”. They claim that this shows that hierarchical path-finding is worth further study.
2.1.3 Partial Refinement A* (PRA*).
Sturtevant and Buro examined the problem of real-time path-finding, which allows an interleaving of planning and execution to occur. Traditional path-finding algorithms like A* return a complete solution, leaving a search agent to wait until the algorithm is completely finished. Real-time path-finding allows the search agent to perform actions while search is being conducted by returning partial solutions to follow or otherwise incorporate actions into the solution. The authors are particularly interested in the domain of real-time strategy games, which require planning for many agents in a common space while subject to limited resources and the desire for a high quality result.
Sturtevant and Buro refer to [Korf 1990], which introduced the Learning Real-time A* (LRTA*) algorithm as one of the first attempts at developing a real-time path-finding algorithm. Hierarchical A*, which was introduced in [Holte et al. 1996] is not a real-time algorithm, but is mentioned as an algorithm that is similar in using an abstraction hierarchy to attempt to speed up the discovery of a solution in path-finding. Finally, [Botea et al. 2004] is cited as introducing another hierarchical approach, the HPA* algorithm.
The shortcoming noted by Sturtevant and Buro of most previous algorithms is that they are not strictly real-time algorithms. Classic search algorithms slow down as problems grow more difficult, breaking time constraints. They also cannot deal with changes such as minor detours. LRTA* is noted as satisfying real-time requirements, but can produce solutions of very poor quality.
To address these shortcomings, the authors introduce a path-finding algorithm that interleaves planning and execution by calculating partial solutions which together result in a complete, valid path.
First they describe a means of building an abstraction of the search environment. Diverging from the sector style of HPA*, Sturtevant and Buro instead abstract based on areas where search nodes form cliques of up to 4 nodes. Nodes with only a single edge are treated as part of the clique they share the edge with. This produces an abstract graph of the original, which is itself abstracted, repeating until a hierarchy of graphs is created with each level being the abstract graph of the level below, and the top level being a single node for the entire search space.
The Partial Refinement A* algorithm builds on a simple approach they call QuickPath. QuickPath works by walking up the abstraction hierarchy from the start and goal locations to determine the lowest abstract node that encompasses both. The path at an abstraction level is refined by looking at the edges one level below which join the given abstraction nodes. The authors claim this process is guaranteed to return a valid result.
The authors go on to describe several enhancements that distinguish PRA* from QuickPath, mostly by expanding on precisely how the traversal between two abstract nodes is refined at the level below in the hierarchy.
The authors conducted an experiment to measure PRA* against the contemporary HPA* algorithm. The cost of building the abstraction hierarchy was ignored. They created a problem set of 148480 paths across 116 maps taken from the Baldur's Gate and Warcraft 3 computer games [Corp et al. 1998; 2000; Entertainment 2002].
Sturtevant and Buro make two major claims with regards to the PRA* algorithm. First, they claim that it performs similarly to HPA* in that it outperforms the classic A* algorithm in running time while finding solutions with near-optimal path lengths. Specifically, they claim that in 95% of tested cases PRA* produced a solution within 5% of the optimal, similar to the results reported in [Botea et al. 2004]. Secondly, they claim that PRA* performs well in a real-time context where only a small percentage of running time is available for path planning while execution is being performed. Citing the real-time strategy problem domain, they claim from their experimental results that even when planning is restricted to 1% of the CPU budget, "long paths . . . less than 5% away from optimal in 98% of the cases".
The authors state PRA* is “…the first time that partial path-finding methods have been implemented and analysed”.
2.1.4 Triangulation Reduction A* (TRA*).
In this paper the authors address the problem of abstracting the traversable space of an environment into a set of triangles for efficient path-finding. Further, they are concerned about abstracting in a fashion that deals with complex obstacles, to reduce path suboptimality.
The authors refer to the work of [Holte et al. 1996], which is a search algorithm using layers of abstractions to produce heuristic values. They also note the similarity of their proposed algorithm to HPA* [Botea et al. 2004] and PRA* [Sturtevant and Buro 2005] in quickly defining an imprecise, high-level path and later refining the individual steps.
The only shortcoming the authors address is a problem common to hierarchical algorithms in general in that there is a trade-off between the time to find a path, and a reduction in quality. They do put forward a general claim that none of the previous solutions use a polygonal representation.
In this paper Demyen and Buro introduce two new path-finding algorithms designed to work specifically in environments composed of triangles: Triangulation A* (TA*), a triangulation path-finding algorithm, and Triangulation Reduction A* (TRA*), which finds paths in a triangulation abstraction. The authors construct a Delaunay triangulation, which forms their graph, stating that this guarantees an optimal path will not revisit any triangles.
Triangulation A* (TA*) is their proposed algorithm for path-finding across a series of connected triangles. They mention it is similar to the algorithm proposed by Kallmann.
They claim to differ from this technique by estimating the $g$-cost to traverse each triangle, whereas Kallmann's method measures centroid distances. The result is a "funnel" of triangles used to produce a path far enough away from triangle borders to prevent a collision by the agent following it.
This serves as the basis for the main algorithm they introduce, Triangulation Reduction A* (TRA*), which incorporates abstraction to further reduce the size of the search graph. In TRA*, each triangle is mapped to a node in an abstract graph, joined by edges where the triangles share edges in the triangulation. By examining the degrees of the nodes, a series of reductions is performed to collapse corridors of triangles to a single abstracted node. This is repeated until the abstraction forms a tree. TRA* walks this tree in a search query to produce a reduced triangulation for TA* to work on and produce a final low-level path.
The authors performed an experimental comparison of TA*, TRA*, and PRA*, electing to use the identical data set used in [Sturtevant and Buro 2005], testing for running time and path quality.
The authors claim to have outperformed PRA* with TA* alone, with 95% of paths being found at least twice as fast, attributing this to the ability of triangles to represent a map more sparsely than grids. Applying abstraction to run TRA* led to an even larger reduction in running time. 95% of paths fell within a few percentage points of the optimal as well. Demyen and Buro did note that TRA* falters when dealing with a search space of many small obstacles, which causes large growth in the size of the abstraction.
Demyen and Buro state that their new algorithm outperforms current algorithms, specifically mentioning PRA*, especially on larger maps, and shows the promise of triangulation-based path-finding over grid-based approaches in general.
2.1.5 Partial Refinement Learning Real-time Search (PR LRTS). In this paper, the authors look at the problem of real-time heuristic search as defined by Koenig in [Koenig 2001]. These are path-finding algorithms that interleave planning and execution. They state that such algorithms must be capable of performing a constant amount of planning actions in a constant time interval, while a search agent performs actions. Specifically, they were concerned with improving the trade-off between planning time and the rate of convergence to an optimal or near-optimal solution. They note that these measures are antagonistic; "reducing the amount of planning done before each action improves the agent’s response time, but leads to slower learning due to lower-quality actions taken by the agent”.
Bulitko et al. note that others in the field have introduced state abstractions to reduce the running time in exchange for a (usually slight) reduction in path quality. As examples, they cite Hierarchical $A^*$ [Holte et al. 1996] and HPA* [Botea et al. 2004] as earlier algorithms which do this.
The authors state that to date there has not been any work showing that the trade-off between path quality and running time is being made in an optimal fashion.
The primary contribution of this paper is an algorithm they call Path Refinement Learning Real-time Search (PR-LRTS).
For their abstraction hierarchy, the authors use a clique hierarchy. Fully connected components are abstracted into a single node, with single-edged nodes considered to be part of a clique. Abstracting all such groups forms an abstraction layer, which can be repeated $l$ times. The authors state they are using the technique as done previously by [Sturtevant and Buro 2005].
To conduct search, the authors employ LRTA* [Korf 1990] at level $l$. LRTA* performs a fixed-depth search and determines a single step. The authors say this 'step' represents a group of nodes at the base level of search, which they use as a search corridor. Within this corridor, they perform another search to establish a path to follow. The authors claim that because there is a constant upper bound on the number of nodes that form a corridor, they can assert a real-time claim, as there is therefore an upper bound on the amount of search being conducted between moves.
The authors note throughout the paper that they are assuming the freespace assumption, which means the agent initially assumes a completely open map. As it senses its local environment, the map is updated with obstacle information. In light of this, the authors state that they implemented Local Repair A* [Stout 1996] as opposed to classic A* for the lowest-level search.
The freespace assumption also means the abstraction hierarchy is dynamically updated as search progresses. The authors state that when an obstacle is sensed in the path, movement halts and re-planning begins. They note that such graph updates involve removing states and edges, not inserting them, so heuristics remain admissible.
The authors conducted a set of different experiments with different objectives:
—The authors analysed their algorithm against others by an examination of dominating factors. They define an algorithm as dominant when its average performance for some measure is better than some other algorithm. Their algorithm was compared against A* and LRTA*.
—They also examined the effects of using an abstraction hierarchy on individual performance measures. The authors chose three diverse settings for their algorithm and compared the results against each other to investigate the effect of the settings on performance.
—The authors analysed how their algorithm’s performance scales with increasing problem size, with problem size expressed as cost of the optimal path.
The test bed for all of these experiments was 3000 search problems on 6 maps taken from popular computer games [Corp et al. 1998; Entertainment 2002]. The problems were uniformly distributed in optimal cost from 50 to 100.
The authors claim that for any single measure of performance in isolation, A* or LRTA* appears to be the best choice (their algorithm being the exception when looking at convergence planning). But when comparing antagonistic statistics plotted on a graph, their algorithm is dominant. From this they claim that their algorithm is the most efficient at balancing these antagonistic measures. They also claim to have found that as the number of abstraction levels in the hierarchy is increased, suboptimality increases while convergence measures are lowered. Finally, they claim that the trend with increasing problem size is an increase in suboptimality.
In addition, the authors presented a proof that their algorithm is complete (that is, that it will always find a solution if one exists), and more importantly that planning is constant-bounded.
Bulitko et al. assert that PR LRTS was, at that time, the first real-time algorithm to use any kind of automatic abstraction mechanism. Prior algorithms, they claim, are not real-time because they must plan a complete path (even if only at an abstract level), and thus are not constant bounded. Other real-time algorithms make no use of abstraction and are dominated by those that use an abstraction hierarchy.
2.1.6 Summary.
Table I provides a quick breakdown of the major contributions using clique-based hierarchical abstractions.
| Year | Title | Author(s) | Contribution |
|------|-------|-----------|--------------|
| 2005 | Partial Pathfinding Using Map Abstraction and Refinement | Nathan Sturtevant and Michael Buro | Introduced the PRA* algorithm; clique-based abstraction basis for subsequent algorithms |
| 2006 | Efficient Triangulation-Based Pathfinding | Douglas Demyen and Michael Buro | Introduced the TRA* algorithm; abstractions represent 'corridors' of polygon space |
| 2007 | Graph Abstraction in Real-time Heuristic Search | Vadim Bulitko, Nathan Sturtevant, Jieshan Lu and Timothy Yau | Analysis of the effectiveness of hierarchical algorithms; introduces PR LRTS, claimed to be the first hierarchical real-time pathfinding algorithm |
2.2 Sector-Based Hierarchies
2.2.1 Overview.
In contrast to clique-based hierarchies, another approach is to define a regular partitioning of the map and group all nodes in a partition into a single abstract state. This was introduced by [Botea et al. 2004]. Figure 3 illustrates this.
[Harabor and Botea 2008] applied sector-abstraction to their algorithm as well. This style lends itself to pre-computation of many smaller solutions. The sectors have well-defined entry and exit points.
![Fig. 3. An illustration of the sector-abstraction, (from [Botea et al. 2004])](image)
2.2.2 Hierarchical Path-finding A* (HPA*).
Botea et al. were concerned with the need for highly efficient path-finding algorithms used in computer games, which is a demanding domain.
Rabin [Rabin 2000a] described a two-level hierarchy, of which the authors state there is only a “high-level presentation of the approach”. They also describe the visibility-graph method of [Rabin 2000b], which they note is especially suited to interior game maps, whose geometry favours large convex obstacles, but less suited to outdoor environments. Navigation meshes are described as a way of reducing the unblocked portions of a 2D environment to “a minimal set of convex polygons”.
The authors mention the method in [Tozour 2002] as a fast way to produce a navigation mesh. Also described are some hierarchical approaches applied by the robotics community. Mentioned are the use of quadtrees in [Samet 1988] to reduce a map to unblocked square cells. The authors claim that agents follow suboptimal paths by always moving to the middle of cells. Referred to as an improvement on this are framed quadtrees from [Chen et al. 1995]. The authors describe this approach as a way to improve solution quality while consuming much more memory.
Hierarchical A* from [Holte et al. 1996] is described by the authors as also using a hierarchical representation of a search space, but it is concerned instead with using the hierarchy to generate heuristic functions.
The authors note that while visibility graphs can produce good quality paths, the technique produces complex graphs in game maps involving many small obstacles or concave shapes, reducing its usefulness. A forest scene is given as an example where graph complexity would quickly grow. Quadtrees, they note, do not produce optimal paths because search agents are directed to seek out the centre of a cell as way-points along a path. Framed quadtrees address this specific problem, but at the cost of using much more memory to produce the framing cells.
The authors describe their technique as a process that occurs in three steps when a search is performed, which they term an online search. The first step is to perform a search from the beginning location to the border of the location’s neighbourhood, followed by a search across the abstract graph of all neighbourhoods to the neighbourhood containing the goal location using the A* approach of [Stout 1996]. Finally, local search is again performed inside the goal neighbourhood.
To build these neighbourhoods, the authors describe breaking a map up into rectangular areas of equal size called clusters. Transition cells are border locations through which adjacent clusters can be traversed. Inside a cluster, the cost of the optimal path between two transition points is stored; if no such path exists without searching into another cluster, the cost is not defined. The transition cells and their associated edges form an abstract graph of the game world. The authors say that this process can be repeated on the abstract graph to produce a further level.
Botea et al. describe optional enhancements to this approach. First, refinement can be performed by running a set of small searches between each pair of transition cells inside a cluster to find an optimal path; they suggest that these paths can be cached where appropriate. Their second suggestion is the smoothing of paths by replacing sections with straight lines where possible.
To test their approach, the authors claim to have run 100 searches on a set of 120 different maps taken from the Baldur’s Gate series of computer games [Corp et al. 1998; 2000], with map sizes varying from 50 × 50 to 320 × 320 cells. Two- and three-level hierarchies were created with a variety of cluster sizes.
In their analysis, the authors report that A* returned a solution sooner than their HPA* implementation for problems with a small optimal solution. The authors attribute this to “the overhead of HPA*…larger than the potential savings that the algorithm could achieve”. They also report that A* outperforms on “straight line” problems, since “using the Euclidean distance as heuristic provides perfect information, and A* expands no nodes other than those that belong to the solution”. However, the authors report that HPA* outperforms low-level A* once problems are large enough that the savings exceed the overhead of their abstraction approach. The authors also discuss the additional memory required to support the abstraction hierarchy.
The authors state that they have presented a hierarchical path-finding algorithm that is domain independent and works for different kinds of map topologies. They further claim that it is suitable for large scale problems and is capable of handling dynamic changes to a search environment, and claim it does so while beating the
classic A* algorithm in running time and producing solutions that are near-optimal.
2.2.3 HPA* Enhancements.
Jansen and Buro took the Hierarchical Pathfinding A* (HPA*) algorithm introduced in [Botea et al. 2004] and addressed shortcomings of this particular algorithm, proposing remedies to produce an updated version.
The authors note that others have examined the use of abstraction for speeding up path-finding. Recent work includes PRA* [Sturtevant and Buro 2005] and TRA* [Demyen and Buro 2006], both of which were designed to deal with computer game maps. The authors however have focused on the earlier HPA* algorithm from [Botea et al. 2004].
Jansen and Buro identify two shortcomings of HPA*. They note that HPA* usually does not produce an optimal path at the ground level, so the original implementation applies a smoothing technique to straighten, and thereby shorten, the path where possible. They describe the original implementation as a series of ray casts along each node, adjusting the path where suitable intersections are found, which works but is computationally expensive. They also claim that the paths found between each pair of entrance nodes for a given sector could have an improved worst-case running time.
Though other hierarchical algorithms were cited, this paper focuses on the shortcomings of the HPA* algorithm. Any shortcomings of similar algorithms are not discussed.
The authors have proposed a set of three improvements to the HPA* algorithm. First, they propose a new means of path smoothing to address the shortcoming described earlier: they restrict the amount of ray-casting performed by limiting each ray cast to a bounding box of some size centred on the node the rays are cast from. The authors claim this results in a minor reduction in path quality but a significant reduction in running time.
Secondly, they note that HPA* uses A* to determine the costs between the $E$ entrance nodes in a given sector of size $L \times L$. Jansen and Buro propose instead using Dijkstra's single-source shortest path algorithm as a better alternative.
Lastly, they propose that in dynamic domains, updates to the abstraction hierarchy should use a lazy computation scheme, as a given sector may change several times before it is needed in a search query. In this way the amortised cost of recomputing the hierarchy should be lower.
For a sector of size $L \times L$, the authors note that the worst-case running time for $A^*$ to build a path between two sector entrances is $O(L^2)$, compounded by having to search over all pairs of entrances. Because one pass of Dijkstra's algorithm finds the shortest paths from a given entrance to all other entrances, they reason that it has a lower worst-case complexity than repeated $A^*$ searches.
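As a rough illustration of this argument (our summary, not figures from either paper), counting node expansions as above and writing $E$ for the number of entrances in a sector, all-pairs A* requires on the order of $E^2$ searches while one Dijkstra pass per entrance suffices:

\[
\text{all-pairs A*: } O(E^{2} L^{2}) \qquad \text{versus} \qquad \text{per-entrance Dijkstra: } O(E \cdot L^{2}),
\]

so the number of intra-sector searches drops from quadratic to linear in $E$.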
In support of these proposed enhancements the authors claim to have performed an experiment comparing HPA* with and without these enhancements to show their effectiveness. The test set comprised 116 maps from popular computer games [Corp et al. 1998; 2000; Entertainment 2002] and 80 artificially generated maps.
The authors claim to have found the following results from their experiments:
- The use of small bounding boxes to restrict the time spent ray casting led to a large decrease in running time with a minor reduction in smoothing quality.
- They claim that the use of Dijkstra’s algorithm over A* can be up to twice as fast.
- Lazy computation of edge weights led to a much faster initial build of the abstraction hierarchy.
The authors state that they have shown that their enhancements to the HPA* algorithm are promising. They also suggest that their use of lazy computation may be applicable to other hierarchical algorithms to deal with “many moving objects, changing topologies, and incremental terrain discovery”.
2.2.4 Hierarchical Annotated A* (HAA*).
Harabor and Botea begin this paper with the claim that agents in modern computer games vary in size and movement capability, but that current path-finding algorithms do not account for this, working instead as though all agents were of homogeneous size and capability. They claim their new algorithm addresses this problem using an annotation hierarchy.
The authors refer to the work of [Botea et al. 2004], [Sturtevant and Buro 2005] and [Bulitko et al. 2007] as hierarchical path-finding methods relevant to their work. Harabor and Botea also cite the Brushfire algorithm from [Latombe 1991] as using a similar technique to denote obstacle proximity. The Corridor Map Method [Geraerts and Overmars 2007] and the Triangulation A* and Triangulation Reduction A* methods [Demyen and Buro 2006] are identified by the authors as other path-finding techniques that are capable of planning for multiple-sized agents since they incorporate obstacle clearance planning.
The authors assert that the majority of existing path-finding algorithms assume agents are of a uniform size or assume that they are all equally capable of traversing the same types of terrain. They state that this is a limitation of the kinds of problems they are capable of solving and could fail in a domain with agents of different sizes and/or capabilities.
Harabor and Botea also introduce a clearance value metric, annotating each tile with an upper bound on the size of agent that can legally occupy it. These values are calculated for every tile/capability pair.
These annotations are incorporated into A* search as additional parameters, producing a variant called Annotated A* (AA*). AA* evaluates search nodes as blocked if their annotation does not include the capability or size of the agent. They note that these annotated gridmaps allow a reduction to a simplified canonical problem, being a grid of blocked and unblocked cells and an atomic agent i.e. a homogeneous representation.
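To make the clearance annotation concrete, the following is a minimal sketch in Scheme (ours, not necessarily the computation Harabor and Botea use) of clearance values for a single capability on a grid of blocked and unblocked cells. Here the clearance of a tile is taken to be the side length of the largest unobstructed square whose top-left corner is that tile, and `blocked?` is an assumed predicate.

```
(define compute-clearance
  (lambda (w h blocked?)
    ;; Sketch, not the authors' code. Clearance values are stored row-major
    ;; (index i = x + y*w); each tile's clearance is one more than the minimum
    ;; clearance of its right, lower, and lower-right neighbours, so a single
    ;; backwards sweep over the grid suffices.
    (let ([clearance (make-vector (* w h) 0)])
      (let loop ([i (- (* w h) 1)])
        (if (< i 0)
            clearance
            (let ([x (remainder i w)] [y (quotient i w)])
              (vector-set! clearance i
                (cond [(blocked? x y) 0]
                      [(or (= x (- w 1)) (= y (- h 1))) 1]
                      [else (+ 1 (min (vector-ref clearance (+ i 1))
                                      (vector-ref clearance (+ i w))
                                      (vector-ref clearance (+ i w 1))))]))
              (loop (- i 1))))))))
```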
To improve efficiency the authors extended AA* with a hierarchical grid representation. They build upon the technique from [Botea et al. 2004], which constructs square clusters with entrances defined between two maximally spaced points along the border. They extend this to a set of entrances that accommodate different composite capabilities. Intra-sector edges for the abstract layer are constructed by running AA* over all capability/sector pairs.
To test the effectiveness of AA* and HAA*, the authors used the same set of 120 game maps used in [Botea et al. 2004], along with modified versions of each, for a total of 720 different maps. Two different agent sizes were used, occupying $1 \times 1$ and $2 \times 2$ tiles. The derivative maps incorporate tiles traversable by only some agents, which they refer to as soft obstacles. 100 problems were created for each map for a total of 144,000 problems. The measures of concern were abstract-graph size, path quality, and search effort.
Harabor and Botea report that in all instances of their experiments the map abstraction uses less than half the memory of the original search graph, with one instance having only 2.0% of the nodes and 0.9% of the edges of the original. Larger cluster sizes in the abstraction generally produced smaller abstract graphs.
Path quality was measured by comparing the lengths of the AA* and HAA* paths produced for a given problem. They claim that the more complex the map from soft obstacles, the lower the error introduced by abstraction. They also claim a trend of error increasing with agent size. Harabor and Botea suggest that larger agents are forced to a single location to transition between two given abstraction clusters, producing suboptimal paths.
For search effort (the number of node expansions to find a solution), the authors claim to have found that there is a trade-off point favouring HAA* once the problems exceed a certain length, depending on other factors like the size of the clusters.
Harabor and Botea state that their new algorithm shows that agent heterogeneity can be accounted for without sacrificing path quality. They claim their analysis shows that HAA* does this while still outperforming A*.
2.2.5 Summary.
Table II provides a quick breakdown of the major contributions using sector-based hierarchical abstractions.
Table II. Summary of Research on Sector-Based Hierarchical Algorithms
<table>
<thead>
<tr>
<th>Year</th>
<th>Title</th>
<th>Author(s)</th>
<th>Contribution</th>
</tr>
</thead>
<tbody>
<tr>
<td>2007</td>
<td>HPA* Enhancements</td>
<td>M. Renee Jansen and Michael Buro</td>
<td>Described several changes to HPA* to reduce running time for a slight quality trade-off</td>
</tr>
<tr>
<td>2008</td>
<td>Hierarchical Path Planning for Multi-Size Agents in Heterogeneous Environments</td>
<td>Daniel Harabor and Adi Botea</td>
<td>Introduced HAA*. Claims to be first attempt to explicitly account for multiple-factor search agents in a hierarchical path-finding algorithm.</td>
</tr>
</tbody>
</table>
2.3 Contraction-Based Hierarchy
2.3.1 Overview. A more recent approach is radically different from the previous ones. [Geisberger et al. 2008] describe a hierarchy that is better described as a long chain of transformations: each level of the hierarchy abstracts away a single node, replacing it with edges. Figure 4 illustrates this.
This is a relatively new approach to pathfinding in computer games, with only a single paper found introducing the techniques to the game AI community at the time this survey was compiled.
2.3.2 Contraction Hierarchies (CHs).
In this paper, Geisberger et al. looked at the problem of planning an optimal route on a road network.
This paper does not refer to the other papers examined in this survey, but does refer to work in the area of road networks. The authors refer to their work as an extreme case of highway-node routing [Schultes and Sanders 2007; Schultes 2008]. Geisberger et al. state their original motivation was to eliminate the need for complicated Highway Hierarchies (HHs) [Sanders and Schultes 2005; 2006].
Geisberger et al. did not reference any particular shortcomings of any of the other papers examined in this survey. They make the claim that HHs rely on a subroutine called Edge Reduction that even with improvements [Goldberg et al. 2007; Bauer and Delling 2008] was an expensive process, which they claim is unnecessary given the results of their new approach.
Geisberger et al. contribute a new path-finding technique with a data structure they call a Contraction Hierarchy, which can then be queried to find a route between two different locations. The authors provide a description of constructing such a hierarchy and the query method.
To construct a contraction hierarchy, the nodes of the target graph are ordered according to a given importance function to define a queue. They state that the importance of a node to the contraction can change as the graph is processed, so a lazy update scheme is used. Contracting a node involves replacing it with ‘shortcut’ edges between its neighbours that preserve the cost of traversing the node. Each such contraction is a level in the contraction hierarchy.
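For illustration, the following is a simplified sketch in Scheme (ours, not taken from Geisberger et al.) of contracting a single node, with the graph represented as an association list mapping a node to a list of (neighbour . weight) pairs; `add-edge!` is an assumed helper that inserts a weighted edge. A real contraction first runs a local ‘witness’ search and adds a shortcut only when no shorter path bypassing the node exists.

```
(define contract-node
  (lambda (graph v)
    (let ([neighbours (cdr (assq v graph))])
      (for-each
        (lambda (in)                           ; a (node . weight) edge into v
          (for-each
            (lambda (out)                      ; a (node . weight) edge out of v
              (if (eq? (car in) (car out))
                  'skip                        ; no self-shortcuts
                  (add-edge! graph (car in) (car out)
                             (+ (cdr in) (cdr out)))))
            neighbours))
        neighbours))))
```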
Querying the contraction hierarchy for a path involves a bi-directional Dijkstra search that terminates when the ends meet. Shortcuts are unpacked in a recursive fashion.
The authors present a lemma that proves the bi-directional search produces a shortest path between two locations.
An empirical experiment was conducted to evaluate CHs against what they describe as the fastest current variant of Highway Node Routing (HNR) [Schultes and Sanders 2007]. For their road network, the authors tested against a model of Western Europe comprised of over 18 million nodes and 42 million edges. Edge weights were derived from estimated travel times based on real road conditions. The authors compared statistics like average query time, memory consumption, and pre-processing time. A variety of different heuristics to determine node ordering were also considered.
The authors claim to have found that CHs have greatly improved query times over HNR across all the node-ordering heuristics they tried for constructing the hierarchy. For example, they claim that one particular set of ordering parameters results in query times four times faster than HNR. The results they present show that initial construction of the hierarchy can take longer (dominated by the time to order the graph), but this is a preprocessing step performed only once.
The authors state that their new algorithm has been shown to outperform the current leader in road network routing. They further claim that the simplicity of their approach suggests that further improvements are likely.
2.3.3 Analysis of Contraction Hierarchies.
Sturtevant and Geisberger presented a post-mortem on the abstraction technique employed during the development of a recent computer game called Dragon Age: Origins (DA:O). They note that no analysis was performed at the time to measure the effectiveness of the two-level abstraction technique applied in the game.
The authors note that popular planning techniques in games are often based on abstractions of traversable terrain such as navmeshes [Tozour 2002] or waypoint graphs [Lidén and Valve-Software 2002], which may be created by hand, or abstractions created automatically [Botea et al. 2004].
While the authors do not address the shortcomings of a particular algorithm, they note two general shortcomings of abstract search spaces. First, abstractions still grow with the size of a problem, so given a large enough search space the abstracted space may itself be so large that path-finding still takes an unacceptable amount of time. This can be mitigated by using a coarser (therefore smaller) abstraction, they claim, but this leads to the second issue: coarser abstractions lower the quality of any paths produced, as low-level features are lost.
As part of a product launch post-mortem, the authors contribute a comparative analysis of the two-level abstraction hierarchy that was implemented in the DA:O game. They compare this implementation against alternative approaches: developing a better search heuristic, and employing a newer abstraction called a contraction hierarchy (CH).
Sturtevant and Geisberger describe the abstraction in DA:O as a $16 \times 16$ sector abstraction of a grid-based map. These sectors are then subdivided into contiguous regions with each represented by a single node. This is the same technique described in [Sturtevant and Jansen 2007]. To adequately handle larger maps or interior maps, another layer of abstraction was applied to produce a two-layer abstraction hierarchy.
The authors describe the use of improved heuristics as an alternative means of improving search results. They examined the use of a differential heuristic called ALT [Goldberg and Harrelson 2005], which they briefly describe as deriving distances using the triangle inequality and reference points called landmarks.
The authors then provide a description of a contraction hierarchy as a contrast to their approach, and propose several means of reducing CH storage overhead. They did this by leveraging a data structure designed for mobile systems [Sanders et al. 2008], which they claim compressed CH storage by 70% at the cost of roughly doubling the query time.
Sturtevant and Geisberger conducted an experiment to compare the performance of the three techniques. They describe their testbed as 10,000 randomly selected problems on 120 maps from the Baldur’s Gate game [Corp et al. 1998]. The measured statistics were memory usage, planning cost and path refinement cost.
From a baseline average of 28 kilobytes of storage for the maps, the authors claim to have found that using a two-level sector abstraction averaged from 30 to 35k. The CH averaged from 25k to 111k based on the heuristic used. Using a differential heuristic required on average 55k of storage.
For planning cost the authors examined the number of nodes expanded and the query time in $\mu$s. They claim that all three techniques outperformed a basic A* search, as expected. Sector abstraction and CH reported similar results and outperformed the differential heuristic. For example, they claim differential heuristics took $209\mu$s on average for the longest 10% of problems, compared to $64.3\mu$s and $68.6\mu$s for the best implementations of sector abstraction and CH respectively.
The authors conclude that, with regard to the abstractions developed for DA:O, a larger sector size for the higher level of abstraction would have resulted in improved performance had it been implemented.
The authors say that the relative strengths and weaknesses of the two approaches make it difficult to state that one is superior to the other. Abstraction hierarchies are easy to understand and implement, are ‘recursively similar’, and are able to handle dynamic changes, but admit suboptimal paths. Contraction hierarchies offer high performance for difficult problems; notably, long paths are actually faster to compute in a CH than shorter ones. They recommend that anyone building a high-quality motion planner familiarise themselves with both techniques.
2.3.4 Summary. Table III provides a quick breakdown of the major contributions using contraction-based hierarchical abstractions.
Table III. Summary of Research on Contraction-Based Hierarchical Algorithms
<table>
<thead>
<tr>
<th>Year</th>
<th>Title</th>
<th>Author(s)</th>
<th>Contribution</th>
</tr>
</thead>
<tbody>
<tr>
<td>2008</td>
<td>Contraction Hierarchies: Faster and Simpler Hierarchical Routing in Road Networks</td>
<td>Robert Geisberger and Peter Sanders and Dominik Schultes and Daniel Delling</td>
<td>Introduced Contraction Hierarchy (CH) paradigm based on research in road networks.</td>
</tr>
<tr>
<td>2010</td>
<td>A Comparison of High-Level Approaches for Speeding Up Pathfinding</td>
<td>Nathan Sturtevant and Robert Geisberger</td>
<td>Conducted comparison between sector and contraction based hierarchies, and offered improvements for CH for computer games.</td>
</tr>
</tbody>
</table>
3. REFERENCES
Stout, B. October/November 1996. Smart moves: Intelligent pathfinding. *Game Developer Magazine*.
Abstracting Timed Preemption With Engines
by
Christopher T. Haynes
and
Daniel P. Friedman
Department of Computer Science
Indiana University
Bloomington, IN 47405
TECHNICAL REPORT NO. 178
Abstracting Timed Preemption With Engines
By
Christopher T. Haynes and Daniel P. Friedman
Revised: November, 1986
Research reported herein was supported in part by the National Science Foundation under grants MCS 83-03325, MCS 83-04567, and MCS 85-01277.
ABSTRACTING TIMED PREEMPTION WITH ENGINES†
CHRISTOPHER T. HAYNES and DANIEL P. FRIEDMAN
Computer Science Department, Indiana University, Lindley Hall 101, Bloomington, IN 47405, U.S.A.
(Received 18 July 1986; revision received 19 November 1986)
Abstract—The need for a programming language abstraction for timed preemption is argued, and several possibilities for such an abstraction are presented. One, called engines, is adopted. Engines are an abstraction of bounded computation, not a process abstraction in the usual sense. However, in conjunction with first class continuations, engines allow a language to be extended with time-sharing implementations for a variety of process abstraction facilities. We present a direct implementation of hiaton streams. Engine nesting refers to the initiation of an engine computation by an already running engine. We consider the need for engine nesting and show how it may be accomplished in a manner that charges a parent engine for the computation of its offspring. We conclude by discussing the importance of simple and general abstractions such as engines.
Keywords: Engines; First class objects; Preemption; Continuations; Hiatons
1. INTRODUCTION
In this paper we introduce an engine facility that abstracts the notion of timed preemption. In conjunction with the ability to maintain multiple control environments, engines allow a language to be extended with a variety of process abstraction facilities. Engines are represented as procedural objects that embody some computation. In this respect they resemble thunks (procedures of no arguments), which are sometimes called futures, for engines embody a computation that may be performed at some future time. However, futures are invoked with no limit on the time that may be required to complete their computation, whereas engines are run with a specified computation time limit after which control is returned to the invoker. In this sense engines may be thought of as bounded futures.
By designing a base language with a few general abstraction mechanisms such as engines and continuations (which abstract the control environment), a powerful basis for building programming environments is provided. In this paper we are not introducing yet another process abstraction; rather, we are proposing an abstraction that allows implementation of arbitrary process abstractions through multiprogramming. This assures language extensibility and suitability to a wide range of applications. Concurrency issues, such as process synchronization primitives, are not central to our concerns in this paper. Though asynchronous preemption of a process implemented by multiprogramming with engines may introduce synchronization pathologies similar to those encountered in multiprocessing, they may be remedied with traditional concurrent programming techniques.
Coroutines are a mechanism for maintaining multiple control contexts without multiprocessing, but they are fundamentally different from engines. Control is passed from one coroutine to another synchronously, i.e. under explicit control of the program, by means of operations such as resume and detach. An engine computation, on the other hand, may relinquish control either synchronously, by deliberately returning, or asynchronously by a timed preemption that involves no explicit action by the computation.
Most computing environments provide a mechanism for timed preemption, such as a real-time clock capable of generating interrupts. In conjunction with a means of saving control state, such as continuations, these mechanisms may be used to implement engines. However, such mechanisms are frequently unavailable to the applications programmer, and when available they are usually imported from the operating system. As such, they tend to be inconvenient to use and always
†This material is based on work supported by the National Science Foundation under grant numbers MCS 83-04567, MCS 83-03325 and MCS 85-01277.
implementation dependent. By abstracting timed preemption and making it a standard language feature, it is possible to give concise and portable expression to a wider range of problems.
Since the introduction of engines several years ago [1], this facility has been adopted by several Scheme systems [2-4]. Engines are particularly useful in a language, such as Scheme, that provides first class procedures and control objects.
In the next section we provide an overview of Scheme. We then define the engine mechanism and include some simple examples of its use. Next we illustrate a more elaborate use of engines by implementing a simple time-sharing process scheduler. Engines are then used to implement hiatons and amb operations. This raises the issue of nested engines, for which an implementation is given. We conclude with some remarks on formalization of engines and a discussion of the importance of simple and general abstractions, such as engines.
2. AN OVERVIEW OF SCHEME
Scheme is a dialect of Lisp that is applicative order, lexically scoped, and properly tail-recursive [5, 6]. Most importantly, Scheme treats procedures and continuations as first class objects.
See Fig. 1 for the syntax of a Scheme dialect sufficient for the purposes of this paper. The superscript * denotes zero or more, and + denotes one or more occurrences of the preceding form. Square brackets are interchangeable with parentheses, and are used in the indicated contexts for readability. Constants, such as numbers, are self-evaluating. quote expressions return the indicated literal object, and ' <object> is equivalent to (quote <object>). begin expressions evaluate their subexpressions in order and return the value of the last. Expression lists in lambda and let are implicit begins. lambda expressions evaluate to first-class procedural objects that lexically bind their arguments when invoked. let makes lexical bindings, destructuring the values if necessary. rec evaluates its expression in an environment that binds its identifier to the value of the expression itself. (rec is similar to the label form of Lisp.) if evaluates its second expression if the first is true, and the third otherwise. case evaluates the tag expression, and then returns the value of the first expression with a corresponding symbol that matches the tag. define assigns to a global identifier. set! modifies an existing lexical identifier. An application evaluates its expressions (in an unspecified order) and applies the procedural value of the first expression to the values of the remaining expressions.
We require a few primitive procedures: =, +, and true? are the usual arithmetic operations. cons is the traditional Lisp binary construction operation, with associated selectors car and cdr, and mutators set-car! (rplaca) and set-cdr! (rplacd). Lists are constructed of cons cells. list constructs lists, append! concatenates them, copy recursively copies list structures, and last-pair returns the last cons cell of a list. map is the usual list mapping procedure. eq?, pair? and null? are, respectively, the equality, cons cell and empty list predicates.
As any expression is evaluated, the current context of evaluation is continually maintained by the evaluation mechanism. The context of evaluation of each subexpression controls how evaluation will continue when the subexpression’s value has been obtained. Hence control contexts
```
<expression> ::=
    <identifier>
    <constant>
    (quote <object>)
    (begin <expression>*)
    (lambda (<identifier>*) <expression>*)
    (let ([<id-pattern> <value>]*) <expression>*)
    (rec <identifier> <expression>)
    (if <expression> <expression> <expression>)
    (case <tag> [<symbol> <expression>]*)
    (define <identifier> <expression>)
    (set! <identifier> <expression>)
    <application>

<value>, <tag>, <procedure> ::= <expression>
<application> ::= (<procedure> <expression>*)
<id-pattern> ::= <identifier> | (<id-pattern> . <id-pattern>) | (<id-pattern>*)
```
Fig. 1. Syntax of a Scheme subset.
are called continuations. Continuations may be represented as procedural objects of one argument. When invoked with a value, the continuation proceeds with the computation as if the given value was the value of its subexpression.
Continuations are generally inaccessible to the programmer. However, in Scheme it is possible to obtain the continuation of any expression using the procedure call-with-current-continuation, abbreviated call/cc. call/cc is passed a procedure that it then calls with the current continuation (the continuation of the call/cc application). This continuation represents the remainder of the computation from the call/cc application point. At any future time this continuation may be invoked with any value, with the effect that this value is taken as the value of the call/cc application [10, 11]. (The continuation of a continuation application is discarded unless it has been saved with another call/cc.)
For example, assume the expression (+ 1 (* 2 3)) was being evaluated with continuation K (the value 7 is ultimately to be passed to K). Then the evaluation of the subexpression (* 2 3) has a continuation, k, that may be represented as (lambda (x) (K (+ 1 x))), so that (k 6) = (K 7). Using call/cc we can obtain k as an object of computation and invoke it with some other value, say 2. For example, the expression
```
(+ 1
   (call/cc
     (lambda (k)
       (* (k 2) 3))))
```
evaluates to 3, and the multiplication is never performed.
For this simple example, k need not be a first class object. Since it was not used outside of the dynamic context that it represents, in this case the continuation invocation could have been performed by simply popping the control stack until the addition application frame was reached. Control mechanisms, such as the catch tags provided in many Lisp systems, could have been used instead. However, in this paper we make extensive use of continuations to record the control context prior to context switches. Control is then returned to a context by invoking its continuation. Such invocation is performed from within another context, and hence we make full use of the first-class nature of continuations. Catch tags and related escape procedure mechanisms, which are not first-class, could not be used for this purpose.
Though control information may still be stack allocated much of the time, the use of call/cc requires that control information be heap allocated. This reflects the need for multiple control contexts in any multiprogramming environment.
3. DEFINITION OF ENGINES
Metaphorically, an engine is run by giving it a quantity of fuel. If the engine completes its computation before running out of fuel, it returns the result of its computation and the quantity of remaining fuel. If it runs out of fuel, a new engine is returned that, when run, continues the computation.
More formally, engine fuel is measured in ticks, an unspecified unit of computation. Though in some implementations a tick may represent (as the name suggests) a fixed amount of time, this is not required by the engine abstraction. Ticks measure computation, not time.‡ An engine is a procedural object of three arguments. The procedure make-engine takes a thunk (a procedure of no arguments) and returns a new engine that, when applied to a positive integer n, a two-argument success procedure and a one-argument fail procedure, proceeds to invoke the thunk for n ticks. If the thunk invokes the procedure engine-return with some value after m ticks, for some m ≤ n, then the success procedure is applied to this value and the number \( t = n - m \) of ticks remaining.
‡Using this primitive we can define catch, a version of Landin's J operator [7-9]: (catch id exp) = (call/cc (lambda (id) exp)).
‡Thus an engine tick is similar to a computation, which has been defined as "a mythical subatomic particle that bears the unit quantity of computation" [12].
If the thunk does not invoke engine-return in \( n \) ticks, then the fail procedure is applied to a new engine that, when invoked, continues with the thunk's computation. Thus the types of engines are
\[
\begin{array}{ll}
D & \text{denotable values} \\
ET = \text{positive integers} & \text{engine ticks} \\
Succ = D \times ET \rightarrow D & \text{success procedures} \\
Fail = Eng \rightarrow D & \text{failure procedures} \\
Eng = ET \times Succ \times Fail \rightarrow D & \text{engines}
\end{array}
\]
Though we speak of an engine computation returning a value, it is an error for the thunk invoked by an engine to return a value directly (to the continuation of its invocation)—it must invoke engine-return for this purpose. When a success or fail procedure is invoked upon the termination of an engine, any value returned by the success or fail procedure is returned as the value of the engine invocation.
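As a concrete illustration of this protocol (a sketch of ours, not part of the original presentation), consider an engine whose thunk loops forever:

```
;; The thunk never calls engine-return, so the 50 ticks are always exhausted
;; and the fail procedure is applied to a fresh engine embodying the rest of
;; the (infinite) computation.
(define spin
  (make-engine (rec loop (lambda () (loop)))))

(spin 50
      (lambda (value ticks-left) (list value ticks-left))  ; never reached
      (lambda (new-engine) new-engine))                    ; value of the invocation
```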
The type structure chosen for engines is rather arbitrary. For example, another alternative would be to avoid the use of success and failure procedures by returning a value that could be either an engine, indicating failure, or a \((\text{value, ticks-remaining})\) pair, indicating success. We prefer the form presented here, because it avoids such disjoint union types and the associated need to program a test for success or failure following almost every engine invocation. (This form is analogous to handling disjoint unions with a union-case procedure [13].)
Engines, like all objects in Scheme, are first class: they may be passed to procedural objects, be returned by procedures, be stored in data structures, and have indefinite extent. Hence engines, and the environment and control information that they contain references to, may be reclaimed by a garbage collector when they are no longer accessible.
A tick might correspond to the execution of one Virtual Scheme Machine instruction, as in Scheme 84 [3] or to a number of milli-seconds of processor time, as in PC Scheme [4]. In general, the amount of computation associated with a tick is not defined, but must satisfy the following "real-time" constraints: (1) a larger tick count is associated with a larger expected amount of computation (in the statistical sense) and (2) unbounded real time is associated with an unbounded number of ticks (any looping computation must consume ticks). Thus, engine ticks might be metered by a real-time clock, with an unpredictable amount of time spent handling interrupts. A compiled language without a real-time clock could still implement engines by decrementing and testing a tick count at least once with each recursive call or iteration. For Scheme, which has no goto or other primitive iteration mechanism, it suffices to associate a tick with each user defined procedure invocation, as in some versions of Chez Scheme [2].
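A sketch (ours, not from the paper) of the counter-based metering just described, as it might appear inside such an implementation; *fuel* and preempt! are assumed internals, holding the running engine's remaining ticks and returning control to the engine's invoker, respectively:

```
(define fuel-tick!
  (lambda ()
    (set! *fuel* (- *fuel* 1))     ; executed once per user procedure invocation
    (if (= *fuel* 0)
        (preempt!)                 ; assumed: captures the continuation and
        'continue)))               ; returns control via the fail procedure
```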
We say an engine is running if neither its success nor fail procedure has been invoked since the last invocation of the engine. The invocation of an engine by an already running engine, which we refer to as nesting of engines, is disallowed by the basic engine mechanism. That is, at most one engine may be running at a given time. This simplifies the implementation of engines, and results in no loss of generality. We shall see in Section 7 that there are at least two distinct approaches of engine nesting, both of which may be implemented using unnested engines. We prefer not to commit the standard engine mechanisms to either of these nesting alternatives. Furthermore, our experience has been that when it is believed that nested engines are needed, closer examination of the problem usually reveals that nesting is not only unnecessary, but leads to unnecessarily complicated programs. Applications with genuine need for engine nesting will be discussed in Sections 6 and 7.
In the absence of traditional operations that change state, such as assignment and I/O, engines do not have state! Invoking the same engine twice runs the engine's computation from the same point. This differs from the usual interpretation of processes. To record the progress of an engine, a new engine is created. It would be possible to define an otherwise engine-like mechanism in which the original engine was modified upon exhaustion of the tick count, rather than returning a new engine. However, we believe that the side-effect-free approach we have taken is superior; it makes engines more suitable than traditional processes for applicative styles of programming in which side effects are disallowed or restricted.
†Reference [2] also includes an implementation of engines using a timer, with attention to the complexities encountered when engines coexist with other real-time facilities.
Though engines are "procedural", they may be non-deterministic: the result of invoking the same
engine twice may be different due to random differences in the rate at which ticks are consumed
by its computation.
4. SIMPLE EXAMPLES
(1) The engine facility could be simplified a little further by defining steppers as procedures of
two arguments, which perform just one tick of computation when invoked. It would then be
superfluous to pass the number of ticks remaining when a stepper completes (for it would always
be zero), so both the success and fail procedures would take one argument. As a simple example
of the engine mechanism proposed here, we define a procedure that takes an engine and returns
a stepper.
```
(define engine-to-stepper
  (lambda (eng)
    (lambda (succeed fail)
      (eng 1
           (lambda (val ticks) (succeed val))
           (lambda (new-eng)
             (fail (engine-to-stepper new-eng)))))))
```
It is also possible to implement our engine mechanism using steppers. While steppers may be
more aesthetically pleasing, we have chosen to provide multi-step engines to avoid the obvious
inefficiency.
(2) It is sometimes useful to run engines with an unlimited number of ticks. We abstract this
behavior with a procedure complete, which invokes its succeed procedure with the value of the
engine's computation and the number of ticks required for it to complete. Of course, unlike engine
invocation, complete is not guaranteed to terminate. To implement complete, we repeatedly
invoke a stepper to coax the engine computation along, until a value for the engine's expression
is obtained.
```
(define complete
  (lambda (eng succeed)
    ((rec loop
       (lambda (stepper count)
         (stepper
           (lambda (val) (succeed val count))
           (lambda (new-eng) (loop new-eng (+ 1 count))))))
     (engine-to-stepper eng)
     1)))                ; the first stepper invocation counts as one tick
```
A good exercise is to write complete using regular engines, invoked with some arbitrary number
of ticks, instead of steppers.
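One possible solution to this exercise (a sketch of ours, not the paper's): grant the engine an arbitrary block of ticks per round and accumulate the ticks actually consumed.

```
(define complete
  (lambda (eng succeed)
    ((rec loop
       (lambda (eng used)
         (eng 100                                     ; an arbitrary block of ticks
              (lambda (val ticks-left)
                (succeed val (+ used (- 100 ticks-left))))
              (lambda (new-eng)
                (loop new-eng (+ used 100))))))
     eng
     0)))
```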
A procedure that just returns the number of ticks required to complete a computation may now
be defined by
```
(define ticks
  (lambda (thunk)
    (complete
      (make-engine thunk)
      (lambda (value tick-count) tick-count))))
```
This might be used to compare the relative speeds of several algorithms, or to monitor the response
time of code segments in a real-time system.
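For instance (a hypothetical use; quick-sort, merge-sort and data are not defined here), the thunks must finish by calling engine-return, as required above:

```
(list (ticks (lambda () (engine-return (quick-sort data))))
      (ticks (lambda () (engine-return (merge-sort data)))))
```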
5. AN ENGINE PROCESS SCHEDULER
As an extended example of engine programming techniques, we use engines to implement a simple time-sharing operating system kernel. These techniques have broad applicability and may be used to implement a variety of process abstractions.
The basic technique is simple. The kernel dispatches a user process by running a corresponding engine with a tick count corresponding to its time slice. When the process's time is exhausted, the engine returns control to the kernel, which then dispatches the next process. We represent processes directly as engines. A ring of these processes is maintained, with the engines stored in ring entries that correspond to the PCBs (process control blocks) of traditional operating systems. The pcb-process and pcb-process! procedures extract processes from and store processes in these queue entries. The rotate! procedure rotates the ring and returns a reference to the next ring entry. The basic form of the kernel is:
```
((rec loop
   (lambda (pcb)
     (pcb-process! pcb
       ((pcb-process pcb)
        time-slice-length
        (lambda (val ticks-remaining) ???)
        (lambda (new-eng) new-eng)))
     (loop (rotate! ring))))
 (rotate! ring))
```
In practical systems it is also necessary to provide a trap mechanism that allows the user process to return control synchronously (without preemption) to the operating system with a service request. Thus we provide a procedure trap that may be invoked by a user process with a tag, which indicates the type of operating system service required and perhaps additional information specific to the service request. Invoking trap causes control to be returned to the kernel, passing the tag and other information. The kernel must then be able to resume the engine's computation.
To enable the kernel to return control to the engine at the point of the trap procedure call, the trap procedure first obtains (with call/cc) the continuation k of its invocation.† It then invokes engine-return, passing it k and the list containing the trap type and argument with which trap was invoked. We have:
```
(define trap
  (lambda (trap-type arg)
    (call/cc
      (lambda (k)
        (engine-return
          (list trap-type k arg))))))
```
The success procedure of the kernel's engine invocation receives control after a trap, with the list (trap-type k arg) as the success value. See Fig. 2 for a kernel with a simple trap handler that provides some standard process primitives. The handler dispatches on the type of the trap, obtaining some answer that is to be returned to the user. This return is accomplished by the expression (make-engine (lambda () (k ans))), which creates a new engine that invokes the user's continuation with the answer to be returned. In this scheduler the ticks-remaining is ignored, so a process loses the rest of its time-slice when it traps. As an exercise, the scheduler may be modified to charge for traps in a more consistent fashion.
†This technique of recording process state with continuations is also useful in the context of multiprocessing and interrupts [14].
In case the type of the trap is awaken or block, the standard ring operations are performed. fork creates a new process that evaluates the argument thunk. (This thunk should never return.) The atomic trap simply invokes its argument as part of the trap operation; this is used to execute code uninterruptibly.
```
(define kernel
  (lambda (ring)
    ((rec loop
       (lambda (pcb)
         (pcb-process! pcb
           ((pcb-process pcb)
            time-slice-length
            (lambda (trap-value ticks-remaining)
              (let ([(trap-type k arg) trap-value])
                (let ([ans (trap-handler trap-type arg ring)])
                  (make-engine (lambda () (k ans))))))
            (lambda (new-eng) new-eng)))
         (loop (rotate! ring))))
     (rotate! ring))))

(define trap-handler
  (lambda (trap-type arg ring)
    (case trap-type
      [(awaken) (insert! ring arg)]
      [(block) (delete! ring arg)]
      [(fork) (insert! ring (make-entry (make-engine arg)))]
      [(atomic) (arg)])))
```
Fig. 2. Operating System kernel.
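For example, a user process might use the atomic trap to update a shared counter without risk of preemption between the read and the write (a hypothetical use; counter is an assumed shared variable):

```
(trap 'atomic
      (lambda ()
        (set! counter (+ counter 1))))
```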
It is possible through extensive use of procedural abstraction to abstract the trap handling procedure from the kernel and simultaneously maintain a security constraint that only the kernel be able to create and invoke engines. Furthermore, a hierarchy of trap handlers may be used, so that the user can only perform higher level trap procedures, such as semaphore operations and parbegin [1]. A version of the kernel presented here, extended to include semaphores and parbegin, has been used by our operating systems and advanced programming languages classes for several years.
6. HIATONS AND AMB
Another use of engines is to implement an asynchronous merge of two streams. The head of a stream (which may be represented by the car of a cons cell) is immediately available. An attempt to access the tail of the stream results in some computation or input operation being performed; only on completion of this operation is the tail of the stream returned. (Typically the cdr of a cons cell representing a stream refers to a thunk that is invoked when the stream tail is accessed.)
The elements of two streams may be synchronously merged to form a third stream by simply alternating from one to the other; but then the speed that the output stream is generated is limited by the slower of the two input streams. Thus if one of the input streams becomes undefined (at some point the computation of the next element does not terminate), then the merge output becomes undefined. An asynchronous merge of streams avoids these problems: it forms the output stream by selecting elements from the input streams as they become available. In particular, an asynchronous merge output becomes undefined only if both its inputs become undefined.
It is straightforward to implement an asynchronous merge directly using engines, but here we take an indirect approach—first implementing hiaton-streams, an abstraction, proposed recently [15], of the hiatuses, or delays, implicit in the production of stream values. A hiaton-stream is simply a stream that may contain, in addition to its normal values, distinguished tokens, or hiatons, indicating the lapse of some time or computation in the generation of the stream.
Engines may be used to implement hiaton-streams, as in the following procedure that takes a stream and returns a hiaton-stream.
(define stream → hiaton-stream
(lambda (stream)
(let (((value.thunk) stream))
(cons value
((rec loop
(lambda (eng)
(lambda ()
(eng number-of-ticks-per-hiaton
(lambda (stream ticks)
(stream → hiaton-stream stream))
(lambda (new-eng)
(cons 'H (loop new-eng)))))
(make-engine thunk)))))
The thunk created by evaluating the (lambda () ...) expression is invoked to obtain the next element of the hiaton-stream. This runs an engine that invokes the cdr of the stream. If the next stream element is produced in less than a hiaton’s worth of time, the engine invocation’s success procedure simply passes the stream element to stream->hiaton-stream, which makes the car of the stream immediately available and builds a hiaton generator for the cdr of the stream. If the next stream element is not produced in the given number of ticks, the fail procedure returns a stream whose car is the symbol H (a hiaton token) and whose cdr is a hiaton generator that, when invoked, coaxes the newly generated engine for the next stream element, or hiaton.
To implement asynchronous merge, we begin by converting the streams to hiaton-streams. A synchronous merge may then be used to combine these hiaton-streams into a single hiaton-stream, and finally the hiatons may be culled from this stream, to yield the merge output stream.
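One possible realisation of this recipe (a sketch of ours, not from the paper, building on stream->hiaton-stream above) is a synchronous alternating merge of two hiaton-streams, a procedure that culls hiatons, and the asynchronous merge obtained by composing them:

```
;; Alternate between the two hiaton-streams: each tail access coaxes only the
;; stream whose turn it is, for at most a hiaton's worth of ticks.
(define merge-hiaton-streams
  (lambda (hs1 hs2)
    (cons (car hs1)
          (lambda () (merge-hiaton-streams hs2 ((cdr hs1)))))))

;; Drop hiaton tokens, coaxing the merged stream until a real value appears.
(define cull-hiatons
  (lambda (hs)
    (if (eq? (car hs) 'H)
        (cull-hiatons ((cdr hs)))
        (cons (car hs)
              (lambda () (cull-hiatons ((cdr hs))))))))

(define async-merge
  (lambda (s1 s2)
    (cull-hiatons
      (merge-hiaton-streams (stream->hiaton-stream s1)
                            (stream->hiaton-stream s2)))))
```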
Another approach to effecting nondeterminism is amb [16]. Operationally, amb starts two computations, represented by its arguments, and returns the result of the one that finishes first. The amb arguments could be represented as streams, the second element of which is the value associated with the argument. (The first element of the stream is immaterial for our purposes; for it is immediately available and thus cannot be used to express a pending computation.) Amb may then be implemented by simply applying the asynchronous merge described above to the argument streams. The first two elements of the resulting stream are the two immaterial values, and the third element is the amb result.
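A corresponding sketch of amb (again ours, assuming the async-merge sketch above and the stream representation just described, in which each argument's second element is its value):

```
;; The merged stream's first two elements are the immaterial heads of the two
;; argument streams; the third element is the value of whichever computation
;; finished first.
(define amb-streams
  (lambda (s1 s2)
    (let ([merged (async-merge s1 s2)])
      (let ([rest ((cdr merged))])       ; past the first immaterial head
        (let ([rest2 ((cdr rest))])      ; past the second immaterial head
          (car rest2))))))
```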
One feature of this amb implementation must be noted. Amb calls may be nested to simulate an amb of more than two computations, or because one of the computations passed to amb happens to use amb at some point. In the implementation suggested above, this results in a hiaton-stream being used by another hiaton-stream, which in turn results in an engine being invoked by a computation that is already running as an engine. Thus our implementations of amb and hiaton-streams require nested engines (though it is possible to implement nested amb in other ways without nesting engines).
7. IMPLEMENTING NESTED ENGINES
In this section we show how to eliminate the restriction that engines cannot be nested, thereby allowing engine invocation when an engine is already running. This allows any number of engines to be running at the same time. For the remainder of this section, we refer to an engine that can be nested as a nester to distinguish it from an engine that cannot be nested.
If e is the nester invoked most recently at the time another nester, e’, is invoked, we say that e’ is the child of e. All the nesters running at the time e’ is invoked are said to be the ancestors of e’. Should each tick of a nester be ‘charged’ only to the nester itself, or to the nester and all of its ancestors? We term these alternatives simple and fair nesting, respectively.
Simple nesting is straightforward to implement given unnested engines (using some of the techniques employed in the more involved fair implementation that follows). This may be adequate for some applications of nesters, but not all. For example, in the hiaton stream example a hiaton
should represent a fixed number of ticks of computation expended in evaluating an expression (which is achieved by using a nester to evaluate the entire expression). It should not matter whether or not the computation invokes other nesters in the course of its evaluation. However, with simple nesting it would be possible for a computation to cheat by using other nesters, run as its children, to perform much of its work. In this case the nester mechanism is failing to abstract the use of time in the intended way. We expect the nesting mechanism to be 'fair' in the sense that every tick of computation used by an engine is also charged to its parent, and by extension to all of its ancestors. Also, if nesting is 'abstract', a computation should be able to parcel out the ticks it receives among its children without being aware of whether or not it is running with one or more ancestor nesters.
To see how fair nesting may be achieved, assume a nester \( e_0 \) is given \( t_0 \) ticks while nesters \( e_1, \ldots, e_n \) are running with \( t_1, \ldots, t_n \) ticks remaining, respectively, where \( 0 \leq i < j \leq n \) implies \( e_j \) is an ancestor of \( e_i \). The fairness requirement implies that \( e_0 \)'s computation can proceed for at most \( t = \min(t_0, t_1, \ldots, t_n) \) ticks before a preemption. (A preemption may occur in less than \( t \) ticks, for example if \( e_0 \) invokes another nester with \( t_i \) ticks when it has \( t_j \) ticks left and \( t_j > t_i \).) If \( t = t_k < t_0 \), then the preemption of \( e_0 \) after \( t \) ticks results in the failure procedure \( f_k \) of \( e_k \) being invoked with a new nester \( e \). (The failure procedure of \( e_0 \) should not be invoked at this time, since \( t_0 \) ticks have not yet been expended on \( e_0 \)'s computation.) When \( f_k \) is invoked, the running of \( e_0, \ldots, e_{k-1} \) is suspended and their state encapsulated in \( e \). When \( e \) is invoked it should resume running \( e_0, \ldots, e_{k-1} \) with \( t_0 - t, t_1 - t, \ldots, t_{k-1} - t \) ticks remaining, respectively.
Providing fair nesting of engines is analogous to recursively virtualizing an operating system. In both cases resources at each node in the spawning tree are divided among offspring in such a way that the offspring are unaware of the division.
Applications for fair nesting include artificial intelligence problems in which engines are used to control the search strategy. When a choice point is reached, a process may be created to explore each of the alternatives. Nesters may be used to implement these processes using time-sharing. If each of the alternatives is equally likely, each of the processes should receive equal time. If some of the processes reach additional choice points and spawn additional processes, these children should share the computation resources of their parent. Thus fair, not simple, nesting is required.
A fair nester implementation must record, for each currently running nester, the success and fail procedures associated with its invocation, its parent, and the number of ticks it has remaining. This information is maintained in a record, also referred to as a nester. Thus the system must effectively maintain a list of currently running nesters, ordered from youngest (most recent child) to oldest. If each child completes its computation (indicated by an engine-return) or expires (runs out of ticks) before any of its ancestors expire, then the nester list grows and shrinks in a stack-like manner. The youngest and oldest currently running nesters are the stack top and stack bottom, respectively.
The interest in this implementation is provided by the possibility that the first currently running nester to expire may not be the youngest. The first nester to expire among those currently running is the one with the minimum number of ticks remaining. This nester may be determined by examining the nester stack at the time each new nester is invoked. Nesting is simulated by running each new nester as an (unnested) engine for this minimum number of ticks, with appropriately constructed success and fail procedures.
For example, Fig. 3(a) pictures the nester stack just after nester D has been invoked with 30 ticks by its parent nester C, with 20 ticks remaining. Nester C's parent and grandparent are nesters B and A, with 10 and 40 ticks, respectively, remaining at the time of D's invocation. Nester A was invoked with no other nesters running, so it has no parent (it is the bottom of the nester stack). Nester B has the least number of ticks remaining, so it expires first—10 ticks after D's invocation.
When nester B expires, control returns to its parent, nester A, by passing the fail procedure of nester B a newly formed nester. The nester stack is now broken after nester B, as pictured in Fig. 3(b). The bottom segment, containing only nester A, becomes the new system nester stack. The top segment is recorded in the new nester passed to the fail procedure. The nester record B contains no useful information, but is filled with information again whenever the new nester is invoked.
Suppose that 5 ticks later nester A invokes a new nester with 40 ticks. The resulting nester stack is pictured in Fig. 3(c). Further suppose that after another 4 ticks the nester created at the time nester B expired is invoked with 20 ticks. The nester stack segment pictured on the top of (b) is
now appended to the system nester stack to obtain the stack pictured in (d). The minimum number of ticks in this stack is 10, belonging to nester C. Thus after another 10 ticks the stack is again broken, this time between nesters B and C, with the result pictured in (e).
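The tick arithmetic behind this sequence is worth spelling out. After the first 10 ticks, when nester B expires,
\[
D: 30 - 10 = 20, \qquad C: 20 - 10 = 10, \qquad A: 40 - 10 = 30
\]
ticks remain. When the saved segment is re-invoked 9 ticks later with 20 ticks for B's record, the appended stack of (d) holds D with 20, C with 10, B with 20, the newest nester with \( 40 - 4 = 36 \), and A with \( 30 - 5 - 4 = 21 \), so the minimum is indeed C's 10 ticks.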
The append operation that forms stack (d) is of central importance. It has the effect of resuming the nesters in the appended stack segment with the number of ticks they had left at the time their ancestor (the first nester B) ran out of ticks. The append operation must copy the appended stack frames belonging to the resumed nester because this nester, like an engine, may be resumed more than once.
We now define a procedure that makes nesters using make-engine:
(define make-nester
  (lambda (thunk)
    (nestermaker (make-engine thunk) (list any))))
where nestermaker, defined in Fig. 4, embodies the behavior just illustrated, and any is an arbitrary value. nestermaker takes an engine and a nester stack that records the nesting context. nestermaker returns a nester which, like all engines, is a procedure that takes a number of ticks, a success procedure, and a fail procedure.
Nester records are represented by a data structure of the form
((ticks success fail) . parent)
where ticks is the number of ticks remaining for the nester, success and fail are the success and fail procedures of the nester's invocation, and parent points to the invoking nester's record. Each nester record may also be viewed as a nester stack, represented as a list linked by the parent pointers.
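For readability, accessors over this layout might be written as follows; these are assumed helpers for exposition only, since Fig. 4 destructures the records directly.

(define nester-ticks (lambda (r) (caar r)))
(define nester-succeed (lambda (r) (cadar r)))
(define nester-fail (lambda (r) (caddar r)))
(define nester-parent (lambda (r) (cdr r)))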
When a nester is invoked, it first obtains the continuation of its invocation, which is bound to k. The success and fail continuations of the nester are now constructed by composing k with the success and fail procedures with which the nester was invoked. These are stored, along with the number of ticks, in the last nester of the stack. If a nester is already running, as indicated by a global flag, then an engine-return is performed. If no nester is running, then the running flag is set and the run procedure is called to invoke the engine. run calls find-minpoint to locate the nester record with the minimum number of ticks remaining. (If more than one record has the same number, any of them could be returned; but there is a small efficiency advantage to returning the
(define nestermaker
  (lambda (eng stk)
    (lambda (ticks succeed fail)
      (call/cc
        (lambda (k)
          ;; fill in this nester's record (the last one in stk) with its ticks
          ;; and with success/fail continuations composed with k
          (set-car! (last-pair stk)
            (list ticks
                  (lambda (value ticks) (k (succeed value ticks)))
                  (lambda (new-eng) (k (fail new-eng)))))
          (if running
              (engine-return (list 'return eng stk))
              (begin (set! running true)
                     (run eng stk))))))))

(define run
  (lambda (eng stk)
    (let ((minpoint (find-minpoint stk)))
      ;; destructure the minpoint record ((ticks succeed fail) . parent)
      (let ((((ticks succeed fail) . parent) minpoint))
        (eng ticks
          (lambda (value ticks-remaining)
            (decrement! stk (- ticks ticks-remaining))
            (if (if (pair? value) (eq? (car value) 'return) false)
                ;; a nested invocation: resume the returned stack segment
                (let (((rtn-eng rtn-stk) (cdr value)))
                  (run rtn-eng (append! (copy rtn-stk) stk)))
                ;; normal termination of the top nester
                (let ((((ticks succeed fail) . parent) stk))
                  (return-from-nester parent
                    (lambda () (succeed value ticks))))))
          (lambda (new-eng)
            (decrement! stk ticks)
            (set-cdr! minpoint '())   ; break the stack at the minpoint
            (return-from-nester parent
              (lambda () (fail (nestermaker new-eng stk))))))))))

(define return-from-nester
  (lambda (stk thunk)
    (if (null? stk)
        (begin (set! running false)
               (thunk))
        (run (make-engine thunk) stk))))

Fig. 4. Nested engine implementation.
one closest to the bottom of the stack.) The engine passed to run is now invoked with the number of ticks of the minpoint record.
If an engine-return is performed before the ticks are exhausted, the success procedure of the engine invocation is invoked, which first calls decrement! to update the tick counts in the nester stack. If the engine-return resulted from the nestermaker operation discussed above, this was noted by a 'return tag, which is accompanied by a stack segment and engine. run is then invoked with this engine and a stack formed by appending the returned stack segment to the current stack. On the other hand, if the engine-return indicated the normal termination of a nester, return-from-nester is invoked with a thunk that invokes the success procedure of the top nester with the number of ticks remaining for it and the value passed to the engine-return. return-from-nester checks if the stack is empty (i.e., the returning nester is the last one), in which case the running flag is cleared, and the thunk is invoked with no engine running. Otherwise, run is invoked with a new engine that invokes the thunk. The stack is popped when run is called to discard the nester that just returned.
If the engine invoked by run expires instead of returning, all the stack tick counts are decremented. Then the stack is broken at the minpoint by installing the empty stack as the parent of the minpoint (using set-cdr!). Finally, return-from-nester is invoked with the bottom part of the broken stack and a thunk. When invoked, this thunk calls the fail continuation with a new nester created by nestermaker from the new engine and the top part of the broken stack.
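The procedures find-minpoint, decrement! and copy are used in Fig. 4 but not defined there. Minimal sketches consistent with the record layout above (sketches only; the definitions are not given in the figure):

; Return the suffix of the stack whose first record has the fewest ticks
; remaining, preferring the record closest to the bottom on ties.
(define find-minpoint
  (lambda (stk)
    (let loop ((rest (cdr stk)) (best stk))
      (cond ((null? rest) best)
            ((<= (caar rest) (caar best)) (loop (cdr rest) rest))
            (else (loop (cdr rest) best))))))

; Subtract n ticks from every (ticks succeed fail) entry in the stack.
(define decrement!
  (lambda (stk n)
    (for-each (lambda (info) (set-car! info (- (car info) n))) stk)))

; Copy the spine and entries of a stack segment, so a saved segment can be
; appended (and hence resumed) more than once without sharing mutable state.
(define copy
  (lambda (stk)
    (map (lambda (info) (list (car info) (cadr info) (caddr info))) stk)))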
At the expense of clarity, the efficiency of this implementation can be improved in several ways.
For example, the decrement calls could be avoided (at least until integer overflow becomes a problem) by keeping a count to be subtracted from the nester record tick values before using them.
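A minimal sketch of that idea, with an assumed global offset (it ignores the re-baselining needed when a saved stack segment, carrying its own history, is appended):

(define tick-offset 0)

(define charge!
  (lambda (n) (set! tick-offset (+ tick-offset n))))

(define effective-ticks
  (lambda (info) (- (car info) tick-offset)))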
8. CONCLUSION
We have considered several abstractions for timed preemption, including steppers, engines and nesters. We have shown these to be of equivalent expressive power, but have chosen engines for pragmatic reasons.
To illustrate the use of engines, we have implemented a sample operating system scheduler, hiaton-streams and amb. The last two examples demonstrated the need for nesting of engines. A nesting implementation was presented that charged an engine for the computation ticks used by its offspring.
It is natural to ask what tools might be developed for reasoning about engines. Aside from the real-time character of engines, there would still seem to be difficulties. Until recently there was no satisfactory theory for reasoning about continuations, and continuations are necessary in order to record the context of a preempted engine. However, an algebraic extension of Plotkin’s λv-calculus [17], termed the λC-calculus, has been developed that allows us to reason about continuations.
Starting with a tree rewriting system, a reduction system is derived that in turn yields a calculus which is Church–Rosser and has a standard reduction function [18, 19]. These tree rewriting techniques have also been used to develop a related calculus which incorporates assignment statements [20].
It seems possible to associate engine ticks with reductions in a similar reduction system that models engines. However, such an engine semantics would not be a calculus. Since the number of reductions required to reduce an expression to normal form is dependent on the order of reduction, the Church–Rosser property (among others) would not hold. Nonetheless, a reduction semantics might yield useful proof rules; we leave this to future work.
Any engine specification that precisely determines the computation quantity associated with a tick would not be entirely satisfactory. This includes reduction systems such as those discussed above and interpreters which associate ticks with meta-recursion, as in Ref. [21]. We have been careful to leave the exact nature of a tick unspecified. We require only a ‘liveness’ property that prohibits any engine from running forever, and a ‘fairness’ property that associates more ticks with more computation, on the average. Any reasonable metric of computation may be used, including real-time, formal reduction, function invocation and virtual instruction execution. Furthermore, ticks may not always represent the same quantity of computation, even when repeatedly evaluating the same expression. This generality gives implementors of the engine mechanism freedom to make efficient choices, which may be heavily constrained by the computation environment. It also discourages programmers from writing code that is timing dependent. However, this implies that engines are non-deterministic. It appears that a fully satisfactory formal semantics of engines must await the resolution of some difficult problems in the semantics of non-determinism.
In spite of the semantic difficulties posed by engines (many of which are shared with other non-deterministic facilities), we believe there are practical advantages to abstracting mechanisms for timed preemption in any multiprogramming environment. Many process abstraction facilities have been proposed, with a wide variety of design goals. A language designer who chooses one of these facilities, or opts to invent yet another, risks rapid design obsolescence. The resulting language would also be ill-suited to a variety of applications that do not fall within the language’s design goals. Attempts to avoid the latter problem by introducing intricate facilities result in a complex design that is optimally suited to few applications. Unfortunately, any multiprocessing implementation must be designed to support one or more specific process abstractions. However, when multiprocessing is used, there is an alternative to committing a language design to a specific process abstraction facility: an abstraction of timed preemption, such as engines, may be used instead to provide time-sharing implementations of process abstractions.
In a more general context, independent of the notion of process, we feel that languages should provide a means for bounding computation. Some form of timed preemption is implicit in any mechanism for asynchronously imposing computation bounds. Engines abstract this mechanism for bounding computation.
Acknowledgements — We gratefully acknowledge Eugene Kohlbecker’s help in the development of an earlier engine operating system, and his suggestion of the term engine. We thank Bruce Duba, Kent Dybvig, Edward Robertson, Mitchell Wand and anonymous referees for their comments on earlier drafts of this paper.
REFERENCES
About the Author—CHRISTOPHER T. HAYNES received the Ph.D. degree from the University of Iowa in 1982. His interests include control abstraction, data typing, programming environments and logic programming.
About the Author—DANIEL P. FRIEDMAN received the Ph.D. degree from the University of Texas at Austin in 1973. His field is Programming Languages.
Concurrent use of two programming tools for heterogeneous supercomputers
Javier G. Vasquez
New Jersey Institute of Technology
Recommended Citation
https://digitalcommons.njit.edu/theses/1211
ABSTRACT
CONCURRENT USE OF TWO PROGRAMMING TOOLS FOR HETEROGENEOUS SUPERCOMPUTERS
by
Javier G. Vasquez
In this thesis, a demonstration of the heterogeneous use of two programming paradigms for heterogeneous computing called Cluster-M and HAsC is presented. Both paradigms can efficiently support heterogeneous networks by preserving a level of abstraction which does not include any architecture mapping details. Furthermore, they are both machine independent and hence are scalable. Unlike almost all existing heterogeneous orchestration tools, which are MIMD based, HAsC is based on the fundamental concepts of SIMD associative computing. HAsC models a heterogeneous network as a coarse grained associative computer and is designed to optimize the execution of problems with large ratios of computations to instructions. Ease of programming and execution speed, not the utilization of idle resources, are the primary goals of HAsC. On the other hand, Cluster-M is a generic technique that can be applied to both coarse grained as well as fine grained networks. Cluster-M provides an environment for porting various tasks onto the machines in a heterogeneous suite such that resource utilization is maximized and the overall execution time is minimized. An illustration of how these two paradigms can be used together to provide an efficient medium for heterogeneous programming is included. Finally, their scalability is discussed.
CONCURRENT USE OF TWO PROGRAMMING TOOLS FOR HETEROGENEOUS SUPERCOMPUTERS
by
Javier G. Vasquez
A Thesis
Submitted to the Faculty of
New Jersey Institute of Technology
in Partial Fulfillment of the Requirements for the Degree of
Master of Science in Computer Science
Department of Computer and Information Science
January 1994
CONCURRENT USE OF TWO PROGRAMMING TOOLS FOR HETEROGENEOUS SUPERCOMPUTERS
Javier G. Vasquez
BIOGRAPHICAL SKETCH
Author: Javier G. Vasquez
Degree: Master of Science in Computer Science
Date: January 1994
Undergraduate and Graduate Education:
• Master of Science in Computer Science,
New Jersey Institute of Technology, Newark, NJ, 1994
• Bachelor of Science in Computer Science
Jersey City State College, Jersey City, New Jersey, 1989
Major: Computer Science
This thesis is dedicated to my parents and my family
ACKNOWLEDGMENT
The author wishes to express his sincere gratitude to his supervisor, Dr. Mary M. Eshaghian for her guidance, friendship, and moral support throughout this research.
Special thanks to Dr. Daniel Chao and Dr. David Wang for serving as members of the committee. The author is grateful to the Department of Computer and Information Science for partially funding this research.
The author appreciates the timely help and suggestions from the project group team members Phil Chen, Ajitha Gadangi and Ying-Chieh Jay Wu.
The author is very much grateful to Lisa A. Ryan and her family for their moral support throughout the graduate studies time. The author also wishes to thank Annette Damiano for her professional comments in finalizing this thesis.
TABLE OF CONTENTS

1 INTRODUCTION AND BACKGROUND
2 CLUSTER-M MODEL
  2.1 Cluster-M Specifications
  2.2 Cluster-M Representations
  2.3 Mapping Specifications to Representations
    2.3.1 A mapping methodology
    2.3.2 An Example
3 HETEROGENEOUS ASSOCIATIVE COMPUTING
  3.1 Instruction Execution
  3.2 HAsC Administration
  3.3 HAsC Instruction Set
  3.4 Associative Instruction Levels
4 CONCURRENT USE OF CLUSTER-M AND HASC
  4.1 Switching between Cluster-M and HAsC
  4.2 Cluster-M aided HAsC
5 SCALABILITY
  5.1 Homogeneous Case
  5.2 Heterogeneous Case
    5.2.1 Fundamental Theorem of Scalability
  5.3 Scalability of HAsC and Cluster-M
6 CONCLUSION AND FURTHER RESEARCH
REFERENCES
LIST OF FIGURES

Figures 2.1–2.7, 3.1–3.3, 4.1, 4.2 and 5.1 (captions are given with the figures in the text).
CHAPTER 1
INTRODUCTION AND BACKGROUND
Heterogeneous Computing (HC) [16, 14] provides an environment where a parallel application is executed utilizing a number of autonomous computers communicating over an intelligent network, and offering more than one type of parallelism. This approach aims at providing high performance by executing portions of code on suitable machines offering similar types of parallelism. The hardware and software requirements of HC can be classified into three layers: network layer, communication layer, and intelligent layer [20]. The network layer deals with the physical aspects of interconnecting the autonomous high performance machines in the system. This includes low level network protocols and machine interfaces. The communication layer provides a uniform system-wide communication mechanism operating above native operating systems to facilitate the exchange of information between different machines. The intelligent layer provides system-wide tools that insure proper and efficient execution of tasks using the heterogeneous suite of computers. The services provided by this layer include language support, task decomposition, mapping and scheduling.
A number of existing parallel programming tools developed for homogeneous systems may be used in the intelligent layer, but may not be suitable for heterogeneous systems. These tools can be classified into two categories: machine specific and machine independent. Machine specific tools such as Linda [5] and Poker [21] are only suitable for the corresponding architectures they are designed for, and therefore not generic enough to support heterogeneous networks. For example, Linda [5] is a parallel programming tool developed for shared memory architectures. The tuple space defined in Linda is a logically shared data structuring memory
mechanism. Tuple space holds two kinds of tuples: process tuples which are under active evaluation, and data tuples which are passive. Process tuples execute simultaneously, and exchange data by generating, reading, and consuming data tuples. Once a program is written based on Linda, each step must get implemented using the underlying architecture. However, it is difficult to implement Linda on architectures not supporting shared memory structure.
Machine independent programming tools can be further categorized into two groups, with respect to how the mapping of the problem tasks is done onto the target architectures. The first group uses a library of pre-defined routines for mapping [1, 23]. This may not be suitable for HC systems due to the limitation on the number of mapping techniques stored and available in the library. In the second group, the mapping is determined online based on a graph matching technique. The mapping problem here is the same as the classic one defined and studied by several researchers over the years [22, 3, 17, 4, 8, 18]. The input to the mapping problem is two graphs. The first graph is called the problem graph, which is similar to the data flow representation of the execution process, where each node is a computation task and edges represent dependency and flow of data. The second graph is called the system graph, which is a representation of the underlying architecture. The mapping problem is defined as the matching of these two graphs such that the overall execution time is minimized. This problem has been proven to be computationally equivalent to the graph isomorphism problem and hence is an NP-complete optimization problem [3]. Tools that use this approach are not time-efficient enough to be used in heterogeneous computing.
To reduce the complexity of the mapping problem, a number of approaches such as graph contraction and clustering have been studied [7, 2, 15, 24, 25, 18]. However, in all these graph matching based techniques, the entire problem graph is still considered against the entire system graph, which results in a huge embedded time complexity. In this thesis, we propose to use the Cluster-M programming paradigm for heterogeneous computing. Cluster-M, introduced recently in [9], has a mapping module which does multi-level clustering on the problem graph as well as the system graph. Also presented in this thesis is the HAsC programming paradigm [19], which models a heterogeneous network as a coarse grained associative computer and is designed to optimize the execution of problems with large ratios of computations to instructions. Ease of programming and execution speed, not the utilization of idle resources, are the primary goals of HAsC. On the other hand, Cluster-M is a generic technique that can be applied to both coarse grained as well as fine grained networks. Cluster-M provides an environment for porting various tasks onto the machines in a heterogeneous suite such that resource utilization is maximized and the overall execution time is minimized. We illustrate how these two paradigms can be used together to provide an efficient medium for heterogeneous programming.
The rest of the thesis is organized as follows. In chapter 2, the Cluster-M components and mapping methodology are presented. HAsC is presented in chapter 3. The concurrent use of HAsC and Cluster-M is introduced in chapter 4. The definitions of scalability for hardware, tasks, and software are presented in chapter 5. Chapter 6 concludes the thesis.
CHAPTER 2
CLUSTER-M MODEL
Cluster-M is a novel parallel programming model which facilitates the efficient design of highly portable software. Cluster-M has three main components: Cluster-M Specifications, Cluster-M Representations and the Cluster-M mapping module [9, 11, 10]. Cluster-M Specifications are machine independent algorithms represented in a multi-layered problem graph, such that each layer represents concurrent computations. A Cluster-M Representation, on the other hand, represents a multi-layered partitioning of a system graph corresponding to the topology of the underlying architecture or heterogeneous network. The mapping module then generates an efficient mapping of the Specification graph onto the Representation graph. Using Cluster-M, portable and scalable software can be developed.
2.1 Cluster-M Specifications
A Cluster-M Specification of a problem is a high level machine-independent program that specifies the computation and communication requirements of a solution to a given problem. A Cluster-M Specification can be translated into a graph consisting of multiple levels of clustering. In each level, there is a number of clusters representing concurrent computations. Clusters are merged when there is a need for communication among concurrent tasks. For example, if all $n$ elements of an array are to be squared, each element is placed in a cluster, then the Cluster-M specification would state:
For all $n$ clusters, square the contents.
Figure 2.1 Cluster-M Specification graph of a unary operation on an array of size n.
Note, that since no communication is necessary, there is only one level in the Cluster-M Specification graph as shown in Figure 2.1. The mapping of this Specification to any architecture having n processors would be identical.
The basic operations on the clusters and their contained elements are performed by a set of constructs which form an integral part of the Cluster-M model. For a complete listing and description of these constructs which are essential for writing Cluster-M Specifications, refer to [11, 10]. All these constructs have been implemented in PCN [10, 12]. Below we show an example for computing the associative binary operation $\ast$ of $N$ elements of vector $A$, using the constructs implemented in PCN. The resulting Cluster-M specification will be as follows, where $CMAKE$, $CMERGE$ and $CBI$ are Cluster-M specification constructs. The Cluster-M Specification graph of this example is shown in Figure 2.2.
Figure 2.2 Cluster-M Specification of associative binary operation.
\begin{verbatim}
ASSOC_BIN(op, N, A, Z) /* op: operation, Z: return value */
int N, A[ ];
{ ; lvl = 0,
make_tuple(N, cluster),
{ ; i over 0 .. N-1 ::
{ ; CMAKE(lvl, [A[i]], c),
cluster[i] = c
}
}
}
Binary_Op(cluster, N, op, Z)
Binary_Op(X, N, op, B)
int N, n;
{ ? N > 1 -> { ; n := N / 2,
make_tuple(n, Y),
{ ; i over 0 .. n-1 ::
{ ; BLMERGE(op, X[2 * i], X[2 * i + 1], Z),
Y[i] = Z
}
}
}
}
Binary_Op(Y, n, op, B)
default -> B = X
BLMERGE(op, X1, X2, M)
int e;
{ ; CBI(op, X1, 1, X2, 1, e),
CMERGE(X1, X2, [e], M)
}
}
\end{verbatim}
The above constructs have been implemented using PCN (Program Composition Notation). PCN is a system for developing and executing parallel programs. It comprises a high-level programming language with C-like syntax, tools for developing and debugging programs in this language, and interfaces to Fortran and C allowing the reuse of existing code in multilingual parallel programs. Programs developed using PCN are portable across many different workstations, networks, and parallel computers. The code portability aspect of PCN makes it suitable as an implementation medium for Cluster-M.
2.2 Cluster-M Representations
For every architecture, at least one corresponding Cluster-M Representation graph can be constructed. A Cluster-M Representation of an architecture is a multi-level nested clustering of processors. To construct a Cluster-M Representation, initially every processor forms a cluster; then clusters which are completely connected are merged to form a new cluster. This is continued until no more merging is possible. In other words, at level $LVL$ of clustering, there are multiple clusters such that each cluster contains a collection of clusters from level $LVL - 1$ which form a clique. The highest level consists of only one cluster, if there exists a connecting sequence of communication channels between any two processors of the system. A Cluster-M Representation is said to be complete if it contains all the communication channels and all the processors of the underlying architecture. For example, the Cluster-M Representation of the $n$-cube architecture is as follows: At the lowest level 1, every processor belongs to a cluster which contains just itself. At level 2, every two processors (clusters) which are connected are merged into the same cluster. At level 3, clusters of the previous level which are connected belong to the same cluster, and so on until level $n + 1$. The complete Cluster-M Representation of a 3-cube, a completely
connected system of size 8, and of a system with arbitrary interconnections are shown in Figures 2.3, 2.4 and 2.5, respectively.
An algorithm for generating a Cluster-M Representation for any given architecture has been presented and implemented in [10]. The algorithm has a running complexity of $O(N^3)$ where $N$ is the number of processors.
2.3 Mapping Specifications to Representations
The most challenging task in the Cluster-M model is the mapping of the Specifications onto the fixed Cluster-M Representations of various architectures. Although in some cases this may appear simple, the mapping of certain Specifications may be non-trivial. For example, consider the associative binary operation example of the last section. We assume that it will take one time unit for a single communication along a link. Its mapping onto a 3-cube is shown in Figure 2.6; it is straightforward and can be done in 3 steps.
Figure 2.4 Cluster-M Representation of a completely connected system of size 8.
Figure 2.5 Cluster-M Representation of an arbitrarily connected system of size 8.
On the other hand, mapping the same Specification onto a ring of size 8 will lead to a greater time complexity, since there are not enough communication channels available to support the communication requests specified in the Cluster-M Specification. Similarly, there is going to be a slowdown if there are not enough processors available in the Representation as specified in the Specification. For example, the same problem described above will take at least twice as much time if it is to be mapped on a Cluster-M Representation having half the number of processors. A mismatch in the number and structure of clustering in the Cluster-M Specification versus the Cluster-M Representation may lead to a significant slowdown in performance. In the following section we present an efficient methodology for mapping an arbitrary Specification to an arbitrary Representation.
2.3.1 A mapping methodology
The Cluster-M paradigm simplifies the mapping process by formulating the problem in the form of a Cluster-M problem Specification (a layered problem graph) emphasizing its computation and communication requirements independently from the target architecture. Similarly, the Cluster-M Representation of the system emphasizes the topology of the target multi-processor system (a layered system graph). Once both the Cluster-M problem Specification and the system Representation are obtained, the mapping process proceeds as follows: Start from the root of the Cluster-M Specification. At level \( i \), there is a number of clusters. Each cluster has a size \( K \), defined by the cumulative sum of the number of computations involved in all its nested subclusters. On the other hand, in the Cluster-M Representation, there is a collection of subclusters as part of a Cluster-M Representation of a single connected system. We next look for a number of clusters in the Representation to match the number of clusters at the \( i \)th level of the Specification. Furthermore, we select the clusters such that the sizes of the corresponding pairs match. The details of this algorithm are beyond the scope of this thesis; for more information, see [6]. As part of the proposed algorithm, several graph theoretic techniques have been used. In the next section, we give an example to illustrate the functionality of the mapping module.
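For illustration only (the actual algorithm is given in [6], and the thesis's own code examples use PCN), a Scheme sketch of the closest-size pairing step might read as follows.
\begin{verbatim}
; Pair each Specification cluster size with the unused Representation
; cluster size closest to it (a greedy sketch, not the algorithm of [6]).
(define remove-first
  (lambda (x lst)
    (cond ((null? lst) '())
          ((equal? x (car lst)) (cdr lst))
          (else (cons (car lst) (remove-first x (cdr lst)))))))

(define closest-size-match
  (lambda (spec-sizes rep-sizes)
    (if (or (null? spec-sizes) (null? rep-sizes))
        '()
        (let* ((s (car spec-sizes))
               (best (let loop ((rest (cdr rep-sizes)) (b (car rep-sizes)))
                       (cond ((null? rest) b)
                             ((< (abs (- (car rest) s)) (abs (- b s)))
                              (loop (cdr rest) (car rest)))
                             (else (loop (cdr rest) b))))))
          (cons (cons s best)
                (closest-size-match (cdr spec-sizes)
                                    (remove-first best rep-sizes)))))))

; (closest-size-match '(5 4) '(5 3))  =>  ((5 . 5) (4 . 3)),
; the same pairing used in the example of Figure 2.7 below.
\end{verbatim}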
2.3.2 An Example
In this section, we present a complete example to illustrate the Cluster-M mapping methodology presented above.
Figure 2.7 shows the mapping from a Cluster-M Specification to a Representation. First, two clusters at the top level of the Specification are mapped onto two clusters of the Representation. The Specification cluster of size 5 is mapped onto the Representation cluster of the same size; however, the Specification cluster of size 4 has to be mapped onto the Representation cluster of size 3, since this is the closest matching of sizes. Then the same procedure is applied for the clusters at the lower level of the Specification. As shown in step 2 in Figure 2.7, Specification cluster \( a \) is mapped onto Representation cluster \( H \), which is a single processor. In step 3, Specification clusters \( b, e, f, g, h \) and \( i \) are mapped onto corresponding processors. Finally, in step 4, Specification clusters \( c \) and \( d \) are both mapped onto processor \( F \).
Figure 2.7 An example of the mapping algorithm.
CHAPTER 3
HETEROGENEOUS ASSOCIATIVE COMPUTING
Heterogeneous Associative Computing (HAsC) models a heterogeneous network as a coarse grained associative computer. It assumes that the network is organized into a relatively small number of very powerful nodes. Basically, each node is a supercomputer architecture (vector, SIMD, MIMD, etc.). Thus each node of the network provides a unique computational capability. There may be more than one node of a specific type in the case that special properties are present. For example, one SIMD node may be specialized for associative processing, while a second SIMD node may contain a very powerful internal network configuration.
Figure 3.1 illustrates the logical similarity of an associative machine and a heterogeneous network. In particular, a disk-computer node on a network can be compared to an associative memory-PE cell. That is, effectively, the node's computer is dedicated to processing the data on the node's disk(s). The disk-to-machine data transfer rate is much more efficient than the node-to-node transfer rate, just as the memory-to-PE transfers are much faster than PE-to-PE transfers. Note that the SIMD and network diagrams are quite different from the shared memory MIMD models. The shared memory configurations emphasize the concept that all data is equally accessible from all processors. This is not the case in a heterogeneous network.
HAsC is "layered" in that any node in the HAsC network may again be another network. Thus a HAsC node may be a HAsC cell containing more than one computer, or may be a port to another level of computing in the HAsC network. For example, most nodes may contain general purpose computers in addition to a supercomputer, to function as the node's port to the rest of the HAsC network.
and for file management and other support roles. Figure 3.2 shows a typical HAsC network organization. Such a port, or transponder node, will accept a high level command and “translate” it into the command(s) appropriate for the subnetwork.
Some of the properties of the associative computing paradigm which make it well suited for heterogeneous computing are: i) efficient programming and execution with large data sets and small programs, ii) optimal data placement, iii) scalability, iv) cellular memory allocation, and v) search-process-retrieve synchronism [19].
3.1 Instruction Execution
In conventional machines, instructions are delivered to a CPU and they are executed without question. In HAsC, instructions are broadcast to all of the cells listening to a channel, but each individual cell must determine whether to execute the instruction. This determination is performed as follows: Upon receipt of an instruction, a node “unifies” it with its local instruction set and data files.
The unification process is borrowed from Artificial Intelligence. Several languages such as Prolog and STRAND [13] incorporate the process. HAsC is different in that it uses unification only at the top level. Thus there is only one unification operation per data file, as opposed to one per record or field. This difference is critical in a heterogeneous network where communication of individual data items would be prohibitively expensive.
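A toy Scheme sketch of this top-level test follows; the instruction fields and local tables are assumptions made for illustration, since the thesis does not specify a concrete representation.
\begin{verbatim}
; An instruction is assumed to name the data file and the operation it
; refers to; a node executes it only if both are known locally.
(define unifies?
  (lambda (instr local-files local-ops)
    (and (member (car instr) local-files)    ; data file named by the instruction
         (member (cadr instr) local-ops))))  ; operation named by the instruction

; Example: (unifies? '(payroll sum) '(payroll inventory) '(sum count))
; evaluates to a true value, so the instruction would be initiated.
\end{verbatim}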
If there is a match, the appropriate instruction is initiated. The “instruction” may in turn issue more instructions. Thus control is distributed throughout HAsC. That is, a “program” starts by issuing a command from a control node. If a receiving node receives a command that is in effect a subroutine call, it may become a transponder control node. It may first perform some local computations and then start issuing (broadcasting) commands of its own. If the node happens to be a port node, the commands are issued to its subnet as well as to its own network. Thus it is possible for multiple instruction streams to be broadcast simultaneously at several different logical network levels in a HAsC network.
In general, HAsC assumes that data is resident in a cell. As a result, data movement is minimal. However, it is common for one cell to compute a value and broadcast it to other cells. Thus, in general, there is a need to synchronize the arrival of commands and data. There are basically two cases which are handled automatically by the HAsC administrator as a part of the search-process-retrieve protocol.
The normal case is for data to be resident at a cell when the HAsC command arrives. Instruction unification and execution proceeds as described above. HAsC allows data transfers, but protocol insists that the data transfer be complete before any associated commands are broadcast.
The second case involves command parameters. When a command arrives and is unified with resident data at a node, but parameter data is missing, the unified command is stored in a table to wait for the parameter in a synchronism process called a data rendezvous. When parameter data arrives, the rendezvous table is searched for a match. If found, the associated command is executed.
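A toy sketch of such a rendezvous table (representation assumed; the thesis leaves the mechanism to the HAsC administrator):
\begin{verbatim}
(define rendezvous-table '())

; Record a unified command that is waiting for a parameter of the given name.
(define wait-for-parameter!
  (lambda (name command)             ; command: a one-argument procedure
    (set! rendezvous-table (cons (cons name command) rendezvous-table))))

; When a parameter arrives, fire and discard any command waiting for it.
(define parameter-arrived!
  (lambda (name value)
    (let ((entry (assoc name rendezvous-table)))
      (if entry
          (begin
            (set! rendezvous-table
                  (let strip ((t rendezvous-table))
                    (cond ((null? t) '())
                          ((eq? (car t) entry) (cdr t))
                          (else (cons (car t) (strip (cdr t)))))))
            ((cdr entry) value))
          'no-rendezvous))))
\end{verbatim}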
3.2 HAsC Administration
HAsC uses network administrators and execution engines to effect the paradigm. Each HAsC network level has a system administrator and each node in a network has its own local administrator. The local administrator monitors network traffic capturing incoming instructions and checking for illegal commands. It is also responsible for maintaining the local HAsC instruction set.
The administrator receives all incoming HAsC instructions from the local network. It then verifies if each instruction is a legal HAsC instruction. If it is, the administrator puts it in the Execution Engine queue. Otherwise, it attempts to identify the source and makes a report to the system administrator. Repeat offenses cause escalating diagnostic actions as determined by the network administrator.
If a Meta HAsC instruction such as (un)install, (un)extend, or (un)augment is received, it is processed immediately. The Meta instructions create, modify, and delete HAsC instructions from the local HAsC instruction set, respectively. The administrator contains logic which prevents it from installing duplicate HAsC instructions. Meta instructions can also modify local data structure definitions.
Since the instruction set can be dynamically expanded by the users, it is possible for two users to install the same instructions. The node administrator distinguishes between the two instructions by a user id and program id which are broadcast with every HAsC instruction.
Instructions can be added at several different logical levels: i) system, ii) project, iii) user. Typical system level instructions would be data move and formatting commands. Project commands would be project oriented. For example, a numerical analysis project would have matrix multiply and vector-matrix multiply instructions, while a logic programming project might have specialized logic instructions, such as unification. At the user level, one user might specify a SAXPY operation while another might want a dot product. Scalable libraries may exist at any level, but most commonly at the project level.
Each node/cell has an execution engine which controls instruction execution at that node. The execution engine selects the next instruction, makes the bindings specified by instruction unification and causes the instruction to be executed. The execution engine performs the following tasks:
- Get Next Unified Instruction
- Establish Environment
- Save Local Variables
- Bind Unified Variables
- Execute Unified Instruction
- Restore Environment
Instruction execution may take two basic forms. First, the instruction may be a HAsC program which is executed in the transponder mode. Second, the instruction may be a library call written in FORTRAN, C, LISP, etc. In this case, the environment established for the call produces the proper interface for the appropriate language.
3.3 HAsC Instruction Set
This section defines the nature of the operations, the instruction format and the instruction synchronization classes of the HAsC instruction set.
HAsC is dynamic. As such, it must allow for a dynamic instruction set and data structure modifications. Thus the HAsC install meta instruction consists of an associative pattern and a body of code. When it is broadcast to the system, all nodes which successfully unify with the instruction gather the body of code and install it on the local node. The extend instruction consists of a pattern and a data definition. Responding nodes add the data definition to the local associations. Extend may add a named row or column to an existing association. Augment can be used to add an entire new association.
The patterns in these instructions contain administrative data, such as job id, project id, etc. If the node is not participating in the project or job, then it does not unify, and the instruction is not installed or the data definition not extended. Uninstall, unextend and unaugment perform the inverse operations.
Basic to the HAsC philosophy is the concept that data when initially loaded into the system is sent to the appropriate node and never moved. While this would be ideal, there will always be a need to move data from one node to another. Accordingly there are a number of HAsC move commands. Move commands can be divided into intra-association and inter-association instructions. Intra-association instructions are very much like expressions in conventional languages and are not discussed here because of lack of space. Inter-association instructions include file I/O as a special case. Inter-association moves must have node identifiers and for I/O, a disk or other peripheral is a legal node.
3.4 Associative Instruction Levels
This section describes a hierarchy of instructions from the highest, most global (least responsive) to the most local (most responsive). HAsC will perform most efficiently if the programs are written using top level commands. The lower the level of command, the more inter-node communication is required. Five levels of instruction coupling are required to implement all of the HAsC statements.
The communication and synchronization are built into the HAsC instruction. There is no need for the programmer to be aware of the degree of instruction communication. The five levels of instructions are presented here to more clearly delineate the relationship between associative and heterogeneous computing.
The highest level of instruction synchronization is pure associative data parallelism and involves the use of the local channel registers only - i.e. there is no global coupling. There are two types of top level instructions: i) ones which execute based on the channel register content only, such as logical and arithmetic expressions and ii) ones which set the channel register. Data parallel logical expressions (associative searchers) can be used to set the channel registers and are "automatically" incorporated into many HAsC statements. Thus a data parallel WHERE consists of only an associative search, followed by a sequence of data parallel expressions. It is a top level instruction. Top level instructions execute in real time and require no global response or communication. Most computation is done at the top level.
Figure 3.3 gives some examples of instruction synchronization. In Figure 3.3, $ is the parallel marker and is read as a plural. That is, A$ is read as As. Result$ is a data parallel pronoun referring to the last performed data parallel computation. "It" is a reduction pronoun referring to the last performed reduction. The top level synchronization shows the programming style for algebraic expressions supported by HAsC.
The second level of instruction coupling requires only global synchronism. Prime examples are the data transfer and I/O commands. I/O is always local to the virtual PE, but in general the virtual PE’s may be quite different physically and therefore I/O times may vary dramatically requiring synchronization before the next HAsC command is issued. Again, the programmer need not be aware of the synchronization requirements of this class of instructions. The synchronization is automatic. The programmer only recognizes the need for I/O or data movement.
The third level of complexity consists of simple responder commands. These commands require the ORing of the responder results of all PEs (i.e., an OR reduction). On a SIMD machine this is a single instruction. In HAsC, it is the simplest form of a HAsC reduction communication. The instructions at this level, such as ANY, are used to check for error conditions, or to determine whether special case computing needs to be done.
The fourth level is random selection. The HAsC commands in Figure 3.3 at this level consist of an associative search, followed by the selection of a responder by the "first reduction" operation. The data object of the selected responder is broadcast to the entire HAsC network for further processing.
The fifth level is iteration. The only use for iteration at the top level of HAsC is for user interaction. For example, a typical program might be one which allows the user to interactively specify kernels to be convolved with an image and to review the results, as shown in Figure 3.3.
HAsC is a programming paradigm designed to facilitate the utilization of heterogeneous networks. The parallel associative programming techniques are well suited for this purpose.
CHAPTER 4
CONCURRENT USE OF CLUSTER-M AND HASC
As shown in the previous sections, HAsC is most suitable for coarse grain heterogeneous parallel computing. Its aim is to ease programming and increase execution speed, while not taking resource utilization into account. Cluster-M, on the other hand, provides both coarse grain and fine grain mapping in a clustered fashion. It aims at maximizing both execution speed and resource utilization. Therefore, both paradigms can be used concurrently to achieve a better overall performance. In the following, we show two possible concurrent uses of these two paradigms.
4.1 Switching between Cluster-M and HAsC
Before we run an application task on a HAsC system, we first generate the Cluster-M Specification of that task, which is a multi-level clustering preserving information about computation and communication at each step. Since all the clusters of the same level represent concurrent computations at a certain step, this set of clusters can be sent to the HAsC control unit and then be broadcast to the HAsC nodes (Figure 4.1). Each node then decides which clusters, out of all the clusters received, are most suitable to itself, according to the type of parallelism labeled on each cluster. After all the nodes finish computation of the corresponding clusters, the results are sent back to the control unit. Then the clusters of the next level are fetched to the control unit to start the next step of computations. Therefore, there is a switching between Cluster-M and HAsC at each clustering level of the Cluster-M Specification.
4.2 Cluster-M aided HAsC
Cluster-M mapping can be applied to HAsC in two ways. First, Cluster-M mapping can be used to decide where the data is to be mapped before HAsC computation begins, so that the overall execution time is minimized. Secondly, Cluster-M mapping can be used to decide the fine grain mapping of HAsC nodes, as shown in Figure 4.2.
CHAPTER 5
SCALABILITY
One of the basic issues related to and addressed in both HAsC and Cluster-M, as well as many HPC (High Performance Computing) and MPP (Massively Parallel Processing) schemes, is that of scalability. Scalability is often understood differently by different authors. For our purposes we will consider scalability to refer to hardware, tasks, and software in roughly analogous fashion. In addition scalability may refer to both homogeneous or heterogeneous architectures.
5.1 Homogeneous Case
The homogeneous case refers to multiple machines of the same basic architectural type, typically various-sized versions of the same vendor product. For example, an eight-processor CRAY is a hardware example of a "scaled-up" version of a two-processor CRAY.
**Definition 1** We define the hardware scalability function, $\chi(a,b)$, between two homogeneous architectures, $a$ (the larger) and $b$ (the smaller), to be the rational-valued function giving the size multiple of $a$ over $b$. In the example above, the eight-processor Cray has a $\chi = 4$ over the two-processor.
Task scalability is more complex. What is typically implied is the ability to take a task (algorithm plus data) executing on a small machine and execute the "same" task on a "scaled"-up machine, using the additional resources of the larger machine, with performance reasonably close to $\chi$. One ambiguity in this concept is what we mean by the "same" task. If it means only the same algorithm, but with possibly different, i.e., larger data, then tasks often "scale", particularly if the
scaling factor of the data size is equal to \( \chi \). However, if we follow the definition of task given above, with fixed data and algorithm, then tasks often do not scale, even on scaled-up homogeneous hardware. To give a simple example, suppose we are computing a pixel-based imagery problem on a SIMD machine in which both the number of pixels and the number of processors is 1K. If we scaled up to a 16K-processor machine (\( \chi = 16 \)), this task would typically not scale, i.e., it would not be able to exploit the additional 15K processors, and we would get no increased performance. However, if our original task had started with a 16K-pixel problem, we would typically be able to scale in performance on the 16K machine over the 1K machine.
**Definition 2** We define task scalability, between two homogeneous architectures, \( a \) (the larger) and \( b \) (the smaller), to be the potential to exploit the inherent hardware scalability between them on some task of a size that fills \( a \).
Software scalability refers to the ability to exploit task and hardware scalability, with little or no changes, other than parameters.
**Definition 3** We define the software scalability function, \( \sigma(a, b) \), between two homogeneous architectures, \( a \) (the larger) and \( b \) (the smaller), to be the real-valued function giving the increase in performance of \( a \) over \( b \). Typically we expect some increase in performance, but we do not generally (at least in the homogeneous case) expect "super-linear" performance, i.e.,
\[ 1 \leq \sigma(a, b) \leq \chi(a, b). \]
In most cases we expect \( \sigma \) to be a simple multiple of \( \chi \), i.e.,
\[ \sigma(a, b) = \lambda \times \chi(a, b), \] where \( 1/\chi(a, b) \leq \lambda \leq 1.0 \). If \( \lambda \) is close to 1.0, i.e., \( \lambda = 1 - \varepsilon \), we usually feel we have scaled up well.
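As a purely illustrative (made-up) instance of this relation: if architecture \( a \) has four times the hardware of \( b \) but delivers only a 3.2-fold speedup on a task that fills it, then

\[ \chi(a,b) = 4, \qquad \sigma(a,b) = 3.2 \;\Rightarrow\; \lambda = \frac{\sigma(a,b)}{\chi(a,b)} = \frac{3.2}{4} = 0.8, \]

i.e., \( \varepsilon = 0.2 \), which we would still regard as scaling up reasonably well.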
Many examples exist of scaling up in this homogeneous sense, though, since it depends on a problem data size large enough to "fill" the large machine, it sometimes depends on an unrealistically large data size. In particular, it appears to us that some of the most recent HPC machines are "scalable" only in the sense that they could run matrix or other similar scientific problems of a size that no one is yet ready to do.
5.2 Heterogeneous Case
The heterogeneous case is clearly more complicated, though it is also the case in which we can aspire to the ultimate in heterogeneous computing potential, i.e., to achieve $\sigma$'s significantly greater than $\chi$; this is what we mean by super-linear performance. In the heterogeneous case, there may be no commonality between two different architectures, so that the only way to talk about "scaling" is based on the performance potential. This means we will use two different scalability standards, namely peak MFLOPS (in either 64- or 32-bit mode) or GBS ("gibbs"), billions of bits per second (processed). Using this, we can extend the $\chi$ function to the heterogeneous case. For example, if we had a large vector machine, $a$, capable of processing 8.7 billion bits per second or 8.7 GBS, and a small SIMD machine, $b$, of 1.3 GBS, then $\chi(a, b) = 8.7/1.3 = 6.69$. Having extended the hardware concept of scalability to heterogeneous cases, the task and software scalability definitions follow immediately.
5.2.1 Fundamental Theorem of Scalability
To understand this theorem, we need to look at Figure 5.1.
We consider there to be at least four levels at which a task is defined. One is the overall functional level, here considered to be the problem "Find a datum". Below this is the approach. By "approach" we mean something at a higher level than algorithm; perhaps meta-algorithm would be another term. In any case, for this problem, there is a radical difference between the approach for a SIMD machine used associatively (see [19]) and that for non-SIMD machines. In the former case, we can use a simple associative search, which is $O(1)$; in the latter case we would typically use a sort, then
search operation, i.e., the asymptotic performance is bounded by $\Omega(\log n)$. For the associative search on a suitable SIMD machine, there is really only one instruction “find datum”, so that there is no room for differing algorithmic or code variations. However in the case of non-SIMD, non-associative sort and search, there are many variations possible. For example, depending on data, parameters, architecture, etc., we could use a number of different search techniques, and similarly we could use a number of different coding schemes for each algorithm.
Figure 5.1 Hierarchical breakdown of a task
In this context, most researchers, when describing "scalability", certainly do not mean that the specific code is heterogeneously scalable, and generally do not mean that the algorithm is heterogeneously scalable. For example, a matrix times vector operation might best be done with a SAXPY-style algorithm on one machine and an SDOT on another. At the same time, the term "scalability" almost never applies to the functional level, since this is far too general to have any real meaning (in the usual context of scalability). **What is almost always intended is that the term "scalability" apply to the approach level.** However, the example above shows that this is inadequate to support efficient MPP/HPC performance. That is, a "scalable" approach to finding data would almost certainly be based on the non-SIMD, non-associative approach of "sort, then search". This might get maximal performance on non-SIMD machines, and might also work on SIMD, but certainly not optimally! That is, the scalable approach is $\Omega(\log n)$, whereas the non-scalable SIMD version is $O(1)$. This example illustrates two things:
a. A case where the non-scalable (at the approach level) SIMD implementation is inherently more effective than the scalable approach implemented on the same machine.
b. In this case, suppose the non-SIMD machine has a hardware scalability factor of $\kappa$ over the SIMD, i.e., $\chi_{\text{non-SIMD,SIMD}} = \kappa$. However, if $n$ (the data size) is large enough, i.e., $n \geq 2^{\kappa}$, then the SIMD machine has task scalability OVER the non-SIMD one, i.e., $\sigma_{\text{SIMD,non-SIMD}} \approx \log n/\kappa \geq 1$. That is, we have hardware scalability one way, and task/software scalability the other! In other words, the scalable approach is inherently ineffective in this case. Thus we get:
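A back-of-the-envelope reading of item (b), normalizing time so that the associative search costs one SIMD step and each non-SIMD step runs $\kappa$ times faster, gives

\[ t_{\text{SIMD}} = O(1), \qquad t_{\text{non-SIMD}} \approx \frac{\log_2 n}{\kappa} \quad\Longrightarrow\quad \sigma_{\text{SIMD,non-SIMD}} \approx \frac{\log_2 n}{\kappa} \geq 1 \iff n \geq 2^{\kappa}, \]

which is the crossover point at which the "slower" associative machine overtakes the hardware-scalable one on this task.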
**Theorem 1** Issues of hardware, algorithmic, and software scalability are inherently insufficient to exploit the potential of HPC in heterogeneous parallel environments.
5.3 Scalability of HAsC and Cluster-M
Both programming paradigms presented in this paper are machine independent, as explained in detail above, and are therefore scalable. In HAsC, a program is broadcast to the entire network, and the individual nodes determine locally which instructions to execute. The global broadcasting approach means that there is no need to know how nodes are interconnected in the network, or how data is distributed across the nodes. This allows data files to be analyzed dynamically at run time as they enter the HAsC system and to be directed to the node(s) (i.e., computers) best suited to process them. Broadcasting allows scalability. That is, the hardware can be expanded or
modified, and the problem can be changed, without having to reprogram or recompile the basic HAsC program. New nodes consisting of new machines with installed HAsC software can be added to the network at any time and at any location. HAsC is not dependent on any physical machine or network configuration, because the instruction broadcast, cell memory organization and associative searching allow the removal of any reference to data set size and type from the program. The basic component of a HAsC command is to "process all data which matches the following specifications." Changes in file sizes and data types are handled automatically at the node level. Similarly, Cluster-M is also scalable. When a new machine is added to the heterogeneous network, a new Cluster-M representation of the new suite can be generated, and a Cluster-M Specification can be efficiently executed without any change. Also, an appropriate new mapping function can be computed to map the Cluster-M Specification to the new Cluster-M representation.
CHAPTER 6
CONCLUSION AND FURTHER RESEARCH
In this thesis, two programming paradigms for heterogeneous computing, called Cluster-M and HAsC, have been presented. HAsC models a heterogeneous network as a coarse grained associative computer. In HAsC, a program is broadcast to the entire network, and the individual nodes determine which instructions to execute. Broadcasting allows scalability. Cluster-M also allows scalability, since programs written using Cluster-M are machine independent and can be efficiently mapped and ported among different systems. Both mechanisms were discussed in detail, and their scalability and merits for heterogeneous computing were studied. Concurrent use of HAsC and Cluster-M was also presented. The Cluster-M paradigm can be used to compensate for the shortcomings of HAsC, while HAsC can be used when the associative computing features are more desirable.
REFERENCES
Learning Reverse Engineering
Petter Djupfeldt and Lucas Taubert
Bachelor of Science Thesis
Stockholm, Sweden 2012
Learning Reverse Engineering
PETTER DJUPFELDT
and LUCAS TAUBERT
DD143X, Bachelor's Thesis in Computer Science (15 ECTS credits)
Degree Progr. in Computer Science and Engineering 300 credits
Royal Institute of Technology year 2012
Supervisor at CSC was Alexander Baltatzis
Examiner was Mårten Björkman
URL: www.csc.kth.se/utbildning/kandidatexjobb/datateknik/2012/
djupfeldt_petter_OCH_taubert_lucas_K12018.pdf
Kungliga tekniska högskolan
Skolan för datavetenskap och kommunikation
KTH CSC
100 44 Stockholm
URL: www.kth.se/csc
Abstract
Reverse engineering is the process of translating compiled programs to source code, and analyzing the resulting code. It is useful, since without knowing the inside of a program it is very difficult to build onto it, create software that can interact well with it, or simply create a similar program of your own.
The challenge of reverse engineering lies within the fact that a lot of information contained within the source code of a program is destroyed in the compilation process, and re-obtaining it is done through different kinds of analyses, of which some are discussed within.
To delve into this subject, we tried to reverse a few applications and games with different approaches, tools and methods, to find out how the most information could be acquired.
We found a set of methods that were optimal to us, and allowed us to modify a computer game both in runtime, and to edit the compiled bytecode to completely change the game’s behaviour. The methods require some knowledge about software engineering, but they provide a good framework for a beginner to start reversing on their own.
Referat
Learning reverse engineering
The challenge of reverse engineering is that much of the information contained in a program's source code is lost when it is compiled, and that recovering it requires different kinds of analysis, some of which are discussed in this report.
To learn about this subject we set out to reverse a few applications and games. We used different angles of attack, tools and methods to find out how we could extract as much information as possible.
Statement of collaboration
Report writing and literature study
We contributed equally to writing the report. Most of it is written by us in collaboration, and we cannot really attribute any single part to one person. The literature study was also done by both of us in collaboration where both of us read most of the material, but L. Taubert had more focus on the ownedcore forums, while P. Djupfeldt focused on the paper about Deobfuscation.
Block Attacks
Analysis
The analysis of Block Attacks was primarily done by L. Taubert. He did all the analysis with Cheat Engine as well as the debugging with OllyDbg. P. Djupfeldt was responsible for the parts where we used TSearch.
Coding
Gaden and the hacked executable file were written by L. Taubert.
Spelunky
The analysis of Spelunky was primarily done by P. Djupfeldt, in both Cheat Engine and TSearch. L. Taubert did some minor research with Cheat Engine.
Minesweeper
The Minesweeper analysis was done by P. Djupfeldt.
Contents
1 Introduction
  1.1 Purpose
  1.2 Problem statement
2 Background
  2.1 Terms
  2.2 Literature study
    2.2.1 x86 Disassembly
    2.2.2 Reversing: Secrets of Reverse Engineering
    2.2.3 Deobfuscation - Reverse Engineering Obfuscated Code
    2.2.4 Ownedcore Memory Editing Forums
  2.3 Tools
    2.3.1 OllyDbg 2.01 alpha
    2.3.2 HxD 1.7.7.0
    2.3.3 TSearch 1.6b
    2.3.4 Cheat Engine 6.1
    2.3.5 BlackMagic and C#
  2.4 Problems
    2.4.1 Obfuscated code
    2.4.2 Optimized code
    2.4.3 Pointers
  2.5 Solution approaches
    2.5.1 Already known data
    2.5.2 Debugging
    2.5.3 Scanning
3 Method
  3.1 Our approach
  3.2 Practical results
    3.2.1 Spelunky
    3.2.2 Minesweeper for Windows 7
Chapter 1
Introduction
Reverse engineering is the art of translating compiled software to source code. Often though, you only search for specific key parts of the software, to gain some knowledge of either how to modify it, or in order to learn more about the software.
Our goal with this project is to learn how to perform some basic reverse engineering. We also want to evaluate different methods and tools for reverse engineering, and see what you can do with the knowledge of a reversed program.
Our primary focus will be on reversing games. We will try to gain as much information as possible about the construction of the games by using different tools and methods, and see how much we can manipulate the games' behaviour with that newfound knowledge.
1.1 Purpose
One of the prime examples of where reverse engineering is used is cheating in games. By first analyzing how a game works, and then using methods to modify this code at runtime or even permanently modify the program, you can gain significant advantages within the game.
Outside the domain of computer games, you can also gain insight in how to create your own software, if you for example find an obscure program which does interesting things, but not enough for your goals. With knowledge of reverse engineering you could then analyze it, and use its methods to create your own solution with pieces of code or ideas from that software.
It is also possible to use reverse engineering to achieve interoperability. If there is no public API to some software, but you still want to use it, it is possible to reverse the software to create some kind of outside interface to it. A famous case of this was in 1990 where a game developer named Accolade reversed Sega’s Genesis platform to create games for it without having to negotiate with Sega about the proper licences. [3, p. 18][5]
The list of possibilities goes on; we recommend continued reading in Reversing - Secrets of Reverse Engineering for the interested. Eilam provides a really good briefing on the subject in the first chapter of the book.
1.2 Problem statement
The main problem is that translating from machine-readable byte code to human-readable source code is not as simple as the other way around. The high-level code contains a lot more information, which is lost in the translation, for instance names of classes, variables, functions, et cetera. The structure is lost as well, along with all the comments and documentation. Code gets squeezed together, and if the coder is really trying to make the final compiled code unreadable, there can be clusters of pointers and memory allocations that switch locations at different launches of the software.
On the other hand, since a computer will have to be able to read the program, it logically has to be possible for a human to read it as well. It can be hard, but it is always possible.
Because a compiled program is so large, and contains so much information, it is impossible to just manually look through the code if you actually want to gather information. What you have to do instead is use different tools to understand the code. We will discuss some of them, and how we used them, in this paper.
Chapter 2
Background
Since reverse engineering is a quickly evolving subject, where new methods arise quickly, we have split our research into two parts. First the basic research, which includes reading up on how programs are stored in a computer, how compilers work, and how the assembly language works. This is our primary literature study. The second part is experimenting, where our primary focus lies, that will be described in later chapters.
To give the reader a good overview of our starting point, we will also include information about some terms that will be used throughout this paper, and a reference to the tools we will use.
2.1 Terms
Reversing: Performing reverse engineering on software.
Memory editing: Changing the behaviour of running software by editing the allocated memory for that program during runtime.
[Assembly/DLL] Injection: Changing or evaluating the behaviour of running software by programming your own code which you insert into the running program, forcing it to run that code.
Static program analysis: Analysing a program without executing it; can be reading source code, byte code, etc.
Dynamic program analysis: Analysing a program during runtime, e.g. with a debugger.
2.2 Literature study
2.2.1 x86 Disassembly
This book is an online wikibook (open source book), which goes in depth on disassembling programs written for the x86 architecture. The level of expertise expected of the reader is decent knowledge of assembly language, and some understanding of how computers and operating systems work.
It goes through different compilers and assembly languages, discusses tools for disassembly and decompiling, and covers several problems you can run into while decompiling or disassembling programs, often together with how to solve them.
This is a wikibook, which means that anyone can modify it. On the other hand, anyone can also correct errors contained within, so in most cases the reliability can be trusted. If we found anything that did not make sense, we looked to another source for confirmation.
2.2.2 Reversing: Secrets of Reverse Engineering
Reversing is a book containing a lot of information about reversing. The author brings up why and when reverse engineering is useful and what can be achieved with it, and explains different methods of reverse engineering on a highly technical level.
The book is written by a professional software engineer and reverser, and the target audience is a person with extensive knowledge of software development, and skills in lower level languages, and program structures.
It contained very good information, but it was also aimed at people with knowledge sometimes quite far outside our own expertise, which made some of the information difficult to absorb. The primary information we gathered from this book was the more introductory information of how reversing is used.
2.2.3 Deobfuscation - Reverse Engineering Obfuscated Code
This is a scientific paper about how to deal with obfuscation techniques. It describes a few techniques for obfuscating code and how to deal with them as a reverser, and evaluates the results of these deobfuscation techniques. It concludes that obfuscation is possible to bypass.
As it is a scientific paper on a narrow subject within reverse engineering, it is very formally written and requires at least some understanding of the subject beforehand. A grasp of formal logic is also needed to absorb the entirety of the paper, but not necessary to understand the basic ideas.
2.2.4 Ownedcore Memory Editing Forums
This is a section of a large cheating forum for computer games, primarily massively multiplayer online games like World of Warcraft. In this section, the reversing part of game cheating is explained, discussed and researched in depth. You can find texts written by complete beginners asking for help, but also texts by very skilled programmers who explain methods and contribute to discussions with their knowledge.
Here we found more substantial methods of what you can do with knowledge from reversing. The focus within this forum lies more on the part after you have reversed a piece of software to gain information about it, and on what you can do with that knowledge.
2.3 Tools
2.3.1 OllyDbg 2.01 alpha
OllyDbg is a debugger. This software grants the possibility to debug compiled programs, with all that comes with it. Setting breakpoints, monitoring values, and everything else that you can expect from a debugger is possible with this.
Editing the program memory to change program functionality is also possible with this tool, if you only want your changes to apply within the scope of one runtime.[6]
2.3.2 HxD 1.7.7.0
HxD is a freeware hex editor. The only end we use this program for is editing compiled files. It opens an .exe file, to show its bytecode in hexadecimal form, all of which is open for editing.
When a certain piece of code that should be changed is isolated within a program, this software is excellent for the purpose of just entering the file and replacing that code, with its good navigational controls and search methods.[7]
2.3.3 TSearch 1.6b
TSearch is a freeware tool for scanning and editing memory addresses, performing code injections and debugging. It also contains functionality for generating trainers, and some other functions such as a built-in calculator and hexadecimal to decimal converter.
TSearch's primary use is cheating in games by editing them during runtime or generating trainers for them, but it can open and edit any program currently running in memory, not just games.[8]
2.3.4 Cheat Engine 6.1
Cheat Engine is an open source tool designed for modifying single-player games during runtime, to modify the difficulty by changing some key parameters. It has matured a lot since its release in 2000 though, and can now be used for a lot of advanced reversing, not only in games.
It comes with a scanning feature for finding variables or code, a debugger, a disassembler, and additional useful tools for reversing programs. It also contains a tool that is especially interesting for cheating: a trainer generator. Based on the memory values that you have saved within Cheat Engine after scanning, you can automatically create a trainer, which modifies these values when you press buttons or hotkeys.[10]
2.3.5 BlackMagic and C#
BlackMagic is a library written in C# which uses Windows API calls to gain access to running processes under Windows. With this library, reading and writing to memory allocated to specified processes becomes very straightforward.
BlackMagic also contains other tools, such as injecting assembly code into a running process, and other functions.[9]
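To illustrate the kind of access BlackMagic wraps, here is our own minimal sketch using the raw Windows API rather than BlackMagic's actual interface; the process name and address in the usage comment are placeholders.

```csharp
using System;
using System.Diagnostics;
using System.Runtime.InteropServices;

static class MemoryReader
{
    const int PROCESS_VM_READ = 0x0010;

    [DllImport("kernel32.dll")]
    static extern IntPtr OpenProcess(int dwDesiredAccess, bool bInheritHandle, int dwProcessId);

    [DllImport("kernel32.dll", SetLastError = true)]
    static extern bool ReadProcessMemory(IntPtr hProcess, IntPtr lpBaseAddress,
                                         byte[] lpBuffer, IntPtr nSize, out IntPtr lpNumberOfBytesRead);

    // Reads a 32-bit integer from the given address of the first process with that name.
    public static int ReadInt(string processName, IntPtr address)
    {
        Process proc = Process.GetProcessesByName(processName)[0];
        IntPtr handle = OpenProcess(PROCESS_VM_READ, false, proc.Id);

        var buffer = new byte[4];
        ReadProcessMemory(handle, address, buffer, (IntPtr)buffer.Length, out _);
        return BitConverter.ToInt32(buffer, 0);
    }
}

// Usage (placeholder process name and address, e.g. one found earlier by scanning):
// int score = MemoryReader.ReadInt("blockattack", new IntPtr(0x00A3F2C4));
```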
2.4 Problems
2.4.1 Obfuscated code
A measure a developer might take to protect their code is obfuscating it. Code obfuscation is an umbrella term for a number of techniques for making code harder to reverse. These include changing the structure of the program, its logic, data and layout, without changing its functionality. This can be done either manually, by renaming functions and variables or making confusing function calls, or automatically, by using an obfuscation tool.
Obfuscated code is a problem for a reverser because it is harder for a human to read. Changing the layout of the code is no problem for a computer; it can still read it perfectly. But for a human, not having a clear, structured path to follow turns confusing very quickly. Likewise, adding pointless algorithms will make it much harder for a human to understand what the program does. Adding these pointless functions of course comes with a cost: the program might require more processing power and become slower. [3, p. 327]
The methods above are used for obfuscating the code during runtime, but might not be as big a problem during a static program analysis. A method for making static program analysis harder is introducing execution paths that are not authentic. While they will never be executed during runtime, they will introduce false information into the result of a static analysis. [2]
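As a small, invented illustration of the kind of transformation described above (not taken from any real obfuscator), the following shows a score update padded with an opaque predicate and a dead branch; the behaviour is unchanged, but both the runtime trace and a static disassembly gain noise.

```csharp
static class ObfuscationExample
{
    // Plain version: what the programmer meant.
    static int AddScore(int score, int points) => score + points;

    // "Obfuscated" version: same result, padded with an opaque predicate
    // ((long)x * x >= 0 is always true) and a decoy branch that never runs.
    static int AddScoreObfuscated(int score, int points)
    {
        int x = score ^ 0x5A;            // meaningless intermediate value
        if ((long)x * x >= 0)            // opaque predicate: always taken
            return score + points;       // the real work
        return DecoyPath(score);         // dead code that still shows up in analysis
    }

    static int DecoyPath(int s) => s * 31 + 7;   // never executed
}
```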
2.4.2 Optimized code
When a program is written in a high-level language, like C++, the programmer often goes through a lot of effort to structure and organize the program code. Code is put into classes and structs, with thought-out variable names and extensive documentation. Function names are selected with great care, so that you know
2.5. SOLUTION APPROACHES
exactly what the different functions actually do, even if you did not write them yourself.
When this is compiled, most of it goes away. All the variable names, class names and comments disappear completely. But it does not end there. Most high-level compilers do not stop at just translating code to its low-level counterpart; they also optimize the code to increase the speed at runtime. They might change methods using tail recursion to iteration, reform boolean expressions within if-clauses, and generally just change snippets of code to mathematically equivalent pieces that run faster on a machine but are a lot harder for a human to read.
So when you compile and decompile a program, you do not only lose all the structural conveniences, you also receive a result that is structured differently from the original. [1, p. 63]
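A tiny, hypothetical example of one optimization mentioned above: a tail-recursive sum and the loop a compiler may turn it into, which is the form the reverser ends up reading.

```csharp
static class TailCallExample
{
    // What the programmer wrote.
    static long SumTailRecursive(int n, long acc = 0) =>
        n == 0 ? acc : SumTailRecursive(n - 1, acc + n);

    // What the optimized (and later disassembled) version effectively looks like.
    static long SumIterative(int n)
    {
        long acc = 0;
        while (n != 0) { acc += n; n--; }
        return acc;
    }
}
```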
2.4.3 Pointers
Pointers are variables which do not explicitly store a value, but instead an address in memory. They can become a problem for reversers when they are used extensively, and in chains (a pointer pointing to a pointer, pointing to a pointer, etc.). The reason is that a value can reside at a location in memory, but when this value is edited, it is instantly overwritten (or even ignored), since the real information was stored behind a pointer at a completely different place.
It also poses a problem when you want to use the memory addresses within a trainer, since the base pointer could change its address, which results in the interesting data being stored at different places in memory at different launches, even relative to the base address of the program.
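The pointer-chain situation can be sketched as follows; the read functions abstract over process-memory reads like those in the earlier sketch, and the offsets in the usage comment are invented for illustration.

```csharp
using System;

static class PointerChains
{
    // readPointer / readInt stand in for actual process-memory reads
    // (e.g. the ReadProcessMemory sketch shown earlier).
    public static int Follow(IntPtr moduleBase, int[] offsets,
                             Func<IntPtr, IntPtr> readPointer,
                             Func<IntPtr, int> readInt)
    {
        IntPtr address = moduleBase;
        for (int i = 0; i < offsets.Length - 1; i++)
            address = readPointer(address + offsets[i]);      // dereference one level
        return readInt(address + offsets[offsets.Length - 1]); // final offset holds the value
    }
}

// e.g. health = PointerChains.Follow(gameBase, new[] { 0x0017C4A0, 0x34, 0x1C }, readPtr, readInt);
```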
2.5 Solution approaches
A lot of these approaches are not found in our literature, and hence no sources are provided. Instead, we have found these solutions by experimenting: in many cases with guidance from our literature, in others just by using the functions provided by the tools, and in all cases by using our knowledge about software engineering and problem solving, and our common sense.
2.5.1 Already known data
When you want to reverse a well-known program or a program which is built upon a well-known platform, you rarely have to do all the work alone. Even if the structure of programs like, for instance, popular games and operating systems can be very complex, there are often resources available by people who have found bits and pieces of the code, and made it publicly available. This can include the structure of object management, key memory locations within the program, or bits of the engine that can be used and modified.
A good example of this is within the ownedcore forums, where you can find extensive amounts of information about most popular games, especially World of Warcraft. With all the information available there, it takes a programmer only a few hours to set up a running trainer for that game, which can perform tasks, or display normally hidden data, within the game automatically.
2.5.2 Debugging
A debugger is a very useful, perhaps even mandatory, tool for performing dynamic program analysis. Debugging is done by loading the program you wish to reverse into the debugger. Using the debugger you then set a breakpoint instruction in the code of the program. When it executes this instruction it is paused and control is handed to the debugger. You can now use the debugger to go through the program step by step, instruction by instruction. A good debugger will not only show you the instructions, but also the relevant registers and program stack, and what they contain at that point of execution.
All this information is very valuable for understanding exactly what a program does. By observing what happens both in the program and in memory when a certain instruction is called with a certain value, you get an idea of what that segment of code is for. With enough time this will let you piece together the entirety of the program.
While it is possible to glean some of the same information from just reading disassembled code as from debugging, it would be a very arduous task. A debugger helps the reverser keep track of exactly what is happening, with no risk of jumping to the wrong instruction. A good debugger will also give the reverser access to information not normally available, such as relevant system statistics and data, and data only available at runtime. [3, p. 116]
2.5.3 Scanning
Another useful method for finding interesting data within a program is scanning. Instead of stepping through the code, scanning is used for finding specific pieces and values of the code. If you know something about how data or code will behave, you can scan an open process for that behaviour. We will provide some examples of how scanning with Cheat Engine and TSearch works.
Finding a specific value that changes
This is the simplest scan you can do. You know exactly what value you are looking for. To perform this scan, you simply put in the value and data type, and scan the process for that value. Since an open process contains a lot of data, it is probable that this value is found at several addresses. You then trigger a change of this value (for example type a new row in a document handler, or trigger something that yields score in a game) and scan for the new value within the previous results. This process is repeated until the value is found.
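A minimal sketch of this narrowing loop, assuming the process memory has been captured into a byte-array snapshot; a real scanner like Cheat Engine reads the live process instead, and the names and usage values here are ours.

```csharp
using System;
using System.Collections.Generic;

static class ValueScanner
{
    // Returns the offsets in the snapshot where the 32-bit value is found,
    // optionally restricted to a previous list of candidate offsets.
    public static List<int> ScanForInt(byte[] snapshot, int value, IEnumerable<int>? candidates = null)
    {
        var hits = new List<int>();
        IEnumerable<int> addresses = candidates ?? AllOffsets(snapshot.Length);
        foreach (int addr in addresses)
            if (BitConverter.ToInt32(snapshot, addr) == value)
                hits.Add(addr);
        return hits;
    }

    static IEnumerable<int> AllOffsets(int length)
    {
        for (int i = 0; i <= length - 4; i++) yield return i;   // every 4-byte-aligned-or-not position
    }
}

// Usage: scan for 100, let the score change to 150 in the game, take a new
// snapshot, and narrow the previous hits until one address remains.
// var hits = ValueScanner.ScanForInt(snapshot1, 100);
// hits = ValueScanner.ScanForInt(snapshot2, 150, hits);
```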
Other types of value scans
If you do not know the value, you will have to use other kinds of scans. For example, to find your location within a 3D game world, you may know when you move to different places, but you have no way of knowing your exact position.
For this, we scan for an unknown initial value, which we then filter with a series of "value changed", "value increased", "value decreased" and "value unchanged" scans. We can for example repeatedly scan for unchanged values while we stand still, to remove a lot of values which naturally change (enemies moving, time ticking, and so on). We can move forward and search for an increase, move backwards and search for a decrease, and so on. This process is more time consuming, and often harder, but with some patience it can yield very interesting results.
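The same idea as a sketch for the unknown-initial-value case: two snapshots of the same candidate addresses are compared and only those matching the chosen relation are kept. Again this is our own simplified model, not Cheat Engine's implementation.

```csharp
using System;
using System.Collections.Generic;

enum Filter { Unchanged, Changed, Increased, Decreased }

static class UnknownValueScan
{
    public static List<int> Narrow(byte[] before, byte[] after, IEnumerable<int> candidates, Filter filter)
    {
        var kept = new List<int>();
        foreach (int addr in candidates)
        {
            int a = BitConverter.ToInt32(before, addr);
            int b = BitConverter.ToInt32(after, addr);
            bool keep = filter switch
            {
                Filter.Unchanged => b == a,
                Filter.Changed   => b != a,
                Filter.Increased => b > a,
                Filter.Decreased => b < a,
                _ => false
            };
            if (keep) kept.Add(addr);
        }
        return kept;
    }
}
```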
Code scans
Sometimes you are more interested in some piece of code than in a stored value. In Cheat Engine, you can search for this too. Two scans perfect for this are "find what accesses this address" and "find what writes to this address". Their functionality is exactly what it sounds like: they lock onto a memory location and find the pieces of code which access or write to that address.
In TSearch, this is called "AutoHack". You enable the debugger and select a memory address you wish to AutoHack. The debugger then checks which instructions access that address and displays them in the AutoHack window.
This code can then be debugged, replaced, changed, analyzed or used for any other ends.
Chapter 3
Method
Here we will begin to cover the second part of our research, the experimenting. We will go through our experiences, to give a hint of what worked out well for us, and what did not work out so well at all.
3.1 Our approach
When we started out with this project, neither of us knew much about reverse engineering. Both of us could use high-level programming languages - such as Java and C++ - and we had basic knowledge of lower-level languages - such as assembly and C.
The first method we used was debugging some simple programs with OllyDbg. We attempted to follow the code, and look at the program’s output while stepping through the debugger. This was not very viable though, and only really worked if you had a clue of where to begin, or if the program was very simple, such as something just printing "Hello World!".
After that we attempted to find values with Cheat Engine and TSearch instead. We experimented with these tools to utilize their functionality, to make some sense of the programs we were trying to reverse. This yielded a lot more results than the debugging, and made the debugging so much more relevant with more knowledge about the program. We will explain our results in detail below.
3.2 Practical results
3.2.1 Spelunky
Spelunky is a randomized adventure game written in GML (Game Maker Language). It is a 2D game with a player-controlled character running around in dungeons, collecting treasures and defeating enemies.
Scanning
By using Cheat Engine, we found the locations of various variables, for example health, number of bombs, current level, et cetera. Of note was that all these values were stored as 8-byte doubles, even though many of them could only contain values 1-99.
After discovering the x and y coordinates of the player we also found, by experimenting and looking at the assembly code, that the instructions for storing these coordinates appear to always be located at the same place in memory. However, after further digging, we realized that these instructions were just the end of a long chain of events. What actually happened was that these lines of code were called every time some data should be written; in other words, they were used for pretty much everything.
Runtime memory editing
During an attempt to hack the game to give you infinite health, by replacing the value at the address where health is stored with a NOP instruction, something interesting was discovered. Understandably, replacing a value with null made the game crash. The interesting part is that the crash report showed each line where an error occurred in plain source code. This could help with understanding just how the game works. That it shows plain source code might be specific to GML, but it shows that something as simple as crashing a program can reveal information about how it works.
Debugging
When we attempted to use TSearch’s debugger to look at the disassembled code and find the instructions for writing to the address where health is stored, Spelunky crashed. This was a recurring problem for TSearch with Spelunky, and prevented any deeper study of memory addresses using this tool, as we would need the debugger to investigate any pointers to the address.
Difficulties
At different runs of the program, the variables we had discovered appeared at different locations in the memory. Our first thought was that they were simply offsets, and our next approach was to see them as offsets from the base address of the game. This, however, did not prove successful either. Not only did they appear at different memory locations, they also appeared at different offsets seen from the base address of the game.
Solutions
The way variables appeared at different locations in memory might hint to them being stored in a similar way as more advanced games, like World of Warcraft (WoW). As found in numerous threads in the ownedcore forum, WoW uses an
object manager to store data about objects (for instance the player). This object manager appears at different locations in the memory space at different launches, but there are pointers at a fixed offset that points towards this manager object. We attempted to find such a pointer, but with no success.
We then decided to abandon the research of this game, since it was compiled in a way that made advanced reversing very difficult, and beyond our scope.
3.2.2 Minesweeper for Windows 7
Minesweeper is a staple game for Windows. The goal of the game is to find all the mines without clicking on any of them. You discover mines by clicking on tiles. Tiles either hide mines, numbers or empty space. If you click on a mine, you lose. The numbers tell you how many mines are adjacent to that tile. To help you, you can tag a tile with either a flag or a question mark.
Scanning
By using Cheat Engine, we discovered the value for deciding how many mines should appear on a custom field. We also discovered that each tile is an object, and an un-clicked tile has one of several states: blank underneath, a number underneath, un-flagged, flagged or question marked.
TSearch was unable to detect Minesweeper as a running process, and thus unable to open it.
Runtime memory editing
The game normally limits how many mines you can place on the field, but by directly editing the memory address for the number of mines we discovered while scanning, we managed to fill the entire field with mines. This revealed something interesting about this version of Minesweeper: you cannot lose on the first click. No matter where you clicked, you won immediately, as that tile was always safe and every other tile was a mine.
We also edited the state of one of the tiles using the data we found while scanning. The states are all decided by the values 0-11 at the memory address; 0 being blank, 1-8 being a number, 9 unflagged, 10 flagged and 11 question marked. We were unable to find the value that decided if there was a mine hidden underneath, though. This might be connected to the fact that mine locations are not decided until after the first click, as shown above.
It was, however, possible to turn a mine into something else. By playing the game normally, you can become certain that a tile must hide a mine. Once this has been ascertained, it is possible to change the state of the tile, and there would be no mine. Once a tile had been revealed though, it was no longer possible to change its value with this method.
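For reference, the tile-state encoding we observed can be written out as an enum; only the numeric values come from the experiments above, and the names are our own.

```csharp
// Observed per-tile state values in Minesweeper for Windows 7 (names are ours).
enum TileState
{
    Blank = 0,
    Number1 = 1, Number2, Number3, Number4, Number5, Number6, Number7, Number8,
    Unflagged = 9,
    Flagged = 10,
    QuestionMarked = 11
}
```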
Debugging
We did not attempt any in-depth debugging of Minesweeper.
Difficulties
The greatest difficulty with Minesweeper was that it has very little visible data, which makes it a bit difficult to find an entry point for scanning.
Solutions
We had to start from what little information was available, such as the number of mines. To get any more interesting data, careful use of the scanning tools was required. We scanned for a likely range of values, and then performed an action to influence the values we were looking for, and repeated this method until we found something interesting.
3.2.3 Block Attacks - Rise of the Blocks
Block Attacks is a puzzle game, quite similar to Tetris. The player sees a 6x14 matrix of different colored blocks, and the objective is to match three or more of them in lines or columns by swapping blocks, two at a time. You control a "crosshair" with the keyboard, which aims at two horizontally adjacent blocks, and by clicking the "fire" button, those two blocks swap places.
You receive score when you remove blocks and when a new row of blocks is pushed up. The speed also increases as you progress.
Scanning
We started out by finding the score value. Since the game displays the value, and it changes in a controllable manner, it was very easy to just search for this value using the methods of Cheat Engine.
To see how memory allocation worked, we restarted the game after finding this value, and attempted to access the same memory location. It resulted in the score value again, and through this we learned that the game uses static memory allocation within its assigned virtual space.
This was very good news, since it meant that we could use the same address values each time we ran the program. All the values we found had this kind of storage, which would allow us to modify pretty much the whole program without using any advanced scanning methods during runtime.
Using this knowledge, we performed more advanced scans to find even more interesting values. One of the key pieces of memory was where the game field itself is stored. Since the game is relatively simple, and the idea makes sense, we assumed that the blocks would be stored in a matrix of integers, where the integer was 0 for an empty field and another number for each of the colors.
Figure 3.1. The main playing window of Block Attacks. This scene shows how four green blocks soon will be removed because they have been lined up.
Based on this assumption we first scanned for zeros, then we narrowed down the results by performing multiple scans for unchanged values while the game field had not pushed up a new row, since the game field should look identical until it did. When the game field had been pushed up, we scanned for a changed value, and then started the process over. This scan resulted in some strange addresses, but actually gave results. We received three addresses, which corresponded to three randomly located blocks on the field.
We also used TSearch in an attempt to replicate the results of the scan for block addresses we did with Cheat Engine. Once we had found one block address, it was easy to find the addresses for the first and last block in the matrix: by first making a list of all possible block states and their values we searched for the values of the first blocks in two adjacent columns. By finding these, we could calculate how far apart these addresses were in memory and thus how many rows each column contains. By applying some simple math we quickly found the start and end points of the matrix.
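The address arithmetic behind this is simple; here is a hedged sketch of it, where the addresses in the usage comment are placeholders, but the six columns, 32-bit blocks and the 30-row result match what we found.

```csharp
static class BlockMatrix
{
    const int BytesPerBlock = 4;   // blocks are stored as 32-bit values
    const int Columns = 6;

    // Given the addresses of the first block in two adjacent columns, derive the
    // column stride, the number of rows per column, and the matrix bounds.
    public static (long start, long endExclusive, int rowsPerColumn) Bounds(long firstBlockCol0, long firstBlockCol1)
    {
        long columnStride = firstBlockCol1 - firstBlockCol0;        // bytes between column starts
        int rowsPerColumn = (int)(columnStride / BytesPerBlock);    // turned out to be 30, not 14
        long start = firstBlockCol0;
        long endExclusive = start + Columns * columnStride;         // one past the last block
        return (start, endExclusive, rowsPerColumn);
    }
}

// e.g. var (start, end, rows) = BlockMatrix.Bounds(0x004A1000, 0x004A1078);  // placeholder addresses
```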
During a first, failed, attempt to find the block addresses with TSearch we discovered something interesting. By chance we found the memory address for the game's crosshair y-position. From this and the way other data has been stored in the game memory we deduced that the x-position should be located at a nearby memory address. We assumed that it would be stored at the address just before the y-position, since this is how x- and y-positions are usually defined. This assumption turned out to be correct, and this knowledge let us gain full control of the crosshair.
Runtime memory editing
By changing the block values found during scanning, we discovered that zero actually was one of the colors, and that an empty field was represented by the maximum value of a 32-bit unsigned integer, equalling -1 when read as signed. Some tweaking of these values revealed the location of the first block, and how exactly they were stored.
The blocks were stored in a matrix, where the first value was the bottom left block. The next value was the block on top of it, so the columns were stored together. Our first attempts at modifying these values assumed that they used a matrix of the same size as the playing field: 6x14. We then proceeded to clear this matrix within Gaden, but received some strange results. Some further investigation revealed that the matrix was actually 30 blocks high, even though it never filled up that much by natural means.
We used TSearch to locate the value for chains, the number of successively deleted lines with one move, in the game. Finding this value proved easy, but we were unable to do anything with it directly, as it is constantly rewritten while the game plays. In an attempt to change this we replaced a segment of code with no-operations. This led to something odd occurring in the game: the blocks stopped rising and the score counter started going up at a startling rate. Undoing the code replacement did nothing. The game was still playable, though it would run out of blocks as no new ones would appear on the field. We tried to understand why this happened, but because of limitations in TSearch we were unable to fully study the entire code segment.
Debugging
When the scanning process was complete, a lot of memory locations had been acquired. To proceed with the reversing we then used the debugging tools in both Cheat Engine and OllyDbg.
From our scanning process, we knew the location of the piece of code that changes the type of block that appears at the bottom row, and we used this memory location as a starting point in our debugging. By stepping through the code from this point, we found the piece of code that changes the type of block that will appear the next turn. This piece of code and its location were noted, to be used in hex modifications.
Hex editing
With the code for replacing the bottom blocks isolated, we modified it. Using the hex editor HxD we located this bytecode within the executable file and replaced it with bytecode representing assembly NOP operations (operations that do nothing at all). This modified executable was then saved as a different file. When launched and played, this version of the game would randomize colors for each column, but never change these colors. This resulted in the game just going on forever; the rows could not build up, since they removed themselves every three pushes.
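The same patch can be scripted instead of being done by hand in HxD; a sketch of that idea follows, where the pattern and file names are placeholders and 0x90 is the single-byte x86 NOP.

```csharp
using System.IO;

static class Patcher
{
    // Copies the executable, finds the first occurrence of a known byte pattern
    // (the instruction sequence isolated during debugging), and overwrites it with NOPs.
    public static void NopOutPattern(string inputExe, string outputExe, byte[] pattern)
    {
        byte[] image = File.ReadAllBytes(inputExe);

        for (int i = 0; i <= image.Length - pattern.Length; i++)
        {
            bool match = true;
            for (int j = 0; j < pattern.Length; j++)
                if (image[i + j] != pattern[j]) { match = false; break; }

            if (match)
            {
                for (int j = 0; j < pattern.Length; j++)
                    image[i + j] = 0x90;   // overwrite with NOPs
                break;                      // patch only the first occurrence
            }
        }
        File.WriteAllBytes(outputExe, image);
    }
}

// e.g. Patcher.NopOutPattern("blockattack.exe", "blockattack_hacked.exe", somePatternBytes);
```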
Gaden
With all this information about the structure and storage within the game, we could proceed with creating a trainer of our own for this particular game. We named this trainer software "Gaden", an acronym for "Game Administration Engine". We wrote it in C# with references to BlackMagic. Using everything we had learned about the game, we created an admin-like panel for it. Here we could change the score, manually swap any two pieces we wanted to, and use a "panic button", which fills up the whole game field with blocks of the same color, which then get removed by the game as matches, leaving an empty field.
The second and more advanced part of Gaden consists of an Artificial Intelligence (AI), which simulates actual gameplay. Even though we have access to the entire playing field, we decided to create a program which actually follows the rules and plays the game. To accomplish this, the engine scans the playing field for three blocks of the same color which are located in adjacent rows. It then simply goes through the two lower rows, aligning these blocks to the column of the topmost block. This simple strategy is repeated until there are no more triples to be found, and then Gaden switches over to randomly swapping pieces to create new opportunities.
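A simplified sketch of this strategy, reconstructed by us rather than taken from Gaden's source: the field is modelled as a rows-by-columns grid of color ids (-1 for empty), and a move means swapping a block with its right-hand neighbour.

```csharp
using System.Collections.Generic;

static class GadenAi
{
    // Returns swaps (row, column), meaning "swap (row, column) with (row, column + 1)",
    // or null if no triple of matching colors in adjacent rows was found.
    public static List<(int row, int col)>? PlanMoves(int[,] field)
    {
        int rows = field.GetLength(0), cols = field.GetLength(1);
        for (int r = 2; r < rows; r++)
            for (int c = 0; c < cols; c++)
            {
                int color = field[r, c];
                if (color < 0) continue;
                int c1 = FindInRow(field, r - 1, color);
                int c2 = FindInRow(field, r - 2, color);
                if (c1 < 0 || c2 < 0) continue;

                var moves = new List<(int, int)>();
                AddShifts(moves, r - 1, c1, c);   // walk the middle block towards column c
                AddShifts(moves, r - 2, c2, c);   // walk the lower block towards column c
                return moves;
            }
        return null;   // caller falls back to random swaps to create new opportunities
    }

    static int FindInRow(int[,] field, int row, int color)
    {
        for (int c = 0; c < field.GetLength(1); c++)
            if (field[row, c] == color) return c;
        return -1;
    }

    // Plans the horizontal swaps that move a block from column 'from' to column 'to'.
    static void AddShifts(List<(int, int)> moves, int row, int from, int to)
    {
        for (int c = from; c < to; c++) moves.Add((row, c));       // shift right
        for (int c = from - 1; c >= to; c--) moves.Add((row, c));  // shift left
    }
}
```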
This method proved to be quite efficient, and with a delay between each move long enough for it to still seem human, it could solve puzzles at very high speeds. And when the delay was set to zero, Gaden could, with decent luck, solve puzzles at any speed.
There are plenty of optimizations available for this AI, but the goal was not to create a perfect AI for this game. We were more interested in demonstrating what can be done when you have information about the structure and internal workings of a program.
Difficulties
We had very few difficulties with Block Attacks, since it was such a simple program with very little, if anything, done to obfuscate its code.
Figure 3.2. The GUI of our trainer when it is attached to the Block Attacks process. The buttons will cause Gaden to interact with the game in different manners.
Solutions
With a lack of difficulties, we simply applied what we had learned during the previous test on other games, and yielded satisfactory results.
Chapter 4
Discussion
4.1 Tool evaluation
4.1.1 Cheat Engine and TSearch
Cheat Engine
In addition to performing its expected duties with grace, Cheat Engine surprised in a positive manner by having very advanced features for reversing, coming with a debugger equipped with automatic assembly injection, and other very useful reversing tools.
The graphical user interface (GUI) is also well matured, and all buttons can be hovered for an explanation of what they do.
TSearch
One of the first things one notices when running TSearch is that it is not immediately apparent what every button does. Some are obvious, such as the big Open Process button, but for others it is near impossible to tell what they do just from looking at them. All buttons in the main window give a short description on mouse-over, but none of the buttons in the AutoHack window, where one looks at the disassembled code and performs code injections, do. These buttons required looking up in the TSearch manual to understand.
A limitation of TSearch is that it, despite what the manual says, cannot open every running process. What decides whether a process can be opened in TSearch is uncertain, as it could not open any of the actively running programs we tried, and only a few passively running processes.
Conclusion
In conclusion, it is readily apparent that Cheat Engine is a better tool than TSearch. There was nothing TSearch could do that Cheat Engine could not do just as well, or better, except having a built-in calculator and hexadecimal to decimal converter.
These features are, however, not essential, as the standard Windows calculator can perform the same functions.
4.1.2 OllyDbg 2.01 alpha
OllyDbg is a very competent debugger. We did not use it much, mostly because debugging comes into play once you already have significant knowledge about the software and want to go more in-depth. For our purposes, when we needed a debugger, we found no flaws: the user interface was excellent, loading times were relatively fast, and breakpoint management was smooth.
4.1.3 HxD 1.7.7.0
We only used this tool for replacing code in the program file, and for that purpose it performed great. It opened, modified, and saved the program instantly (as opposed to OllyDbg), since no debugger has to be attached, and navigation was excellent.
4.1.4 C# with BlackMagic
BlackMagic was a great library for managing running processes. Compiling the provided source code was quick, and linking to the resulting DLLs was easy. We did not delve into advanced coding, and any high-level programming language would probably have sufficed. However, since we worked in Windows, C# gave relatively straightforward access to the necessary API calls, which made it a good choice and removed a lot of configuration time from our project.
4.2 Method evaluation
4.2.1 Static analysis
We used very little static analysis during this project, partly because it is a daunting task to go through raw decompiled code, but also because the method did not lend itself very well to what we were trying to do, i.e. editing games at run time. It is likely more useful for fully reversing a program, as that requires a deeper understanding of the program structure than we needed.
4.2.2 Dynamic analysis
Scanning
Scanning worked very well for us, and is the method we used the most throughout the project. We used it extensively in conjunction with debugging: we would scan for a value we were interested in and then use a debugging tool to see how the program changed that value. Experimenting with values and memory addresses found through scanning often gave us a better understanding of the program we were working on. An example is when we discovered the true size of the block matrix in Block Attacks. We had first assumed that the matrix was 6x14 blocks, the same size as the visible playing field, but by scanning and changing values we discovered that it was in fact 6x30 blocks.
We used scanning as an entry-point method. Even with no prior knowledge about a program, scanning based on qualified guesses provided very usable results, which we noted and built further research on.
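To make the workflow concrete, the sketch below shows, in the same C# style as our appendix code, the scan-and-rescan loop described above: read every candidate address in a range, keep the addresses whose value matches the current score, let the value change in the game, and rescan only the survivors. The ValueScanner class, the fixed address range and the four-byte stride are illustrative assumptions of ours; the ReadUInt call mirrors the BlackMagic call used in the appendix, and a real scanner such as Cheat Engine walks all committed memory regions rather than a fixed range.
using System.Collections.Generic;
using Magic; // BlackMagic, as referenced by the appendix code
class ValueScanner
{
    private readonly BlackMagic bm;
    public ValueScanner(BlackMagic bm) { this.bm = bm; }
    // First pass: collect every 4-byte aligned address in [start, end) that holds 'value'.
    public List<uint> InitialScan(uint start, uint end, uint value)
    {
        var hits = new List<uint>();
        for (uint addr = start; addr < end; addr += 4)
        {
            if (bm.ReadUInt(addr) == value)
                hits.Add(addr);
        }
        return hits;
    }
    // Later passes: keep only the candidates that now hold the new value.
    public List<uint> Rescan(List<uint> candidates, uint newValue)
    {
        var survivors = new List<uint>();
        foreach (uint addr in candidates)
        {
            if (bm.ReadUInt(addr) == newValue)
                survivors.Add(addr);
        }
        return survivors;
    }
}
Repeating the rescan each time the target value changes in the game quickly narrows thousands of candidate addresses down to a handful.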
Debugging
Debugging is very useful for finding details about how code executes. With knowledge about the program’s structure, and some detailed information about where information is stored, breakpoints can be set in the program, and code analyzed.
Debugging worked out great when we had information to base the process on, and often yielded very interesting results. For example, through debugging we learned how variables are accessed and modified. We could also find memory locations we needed by starting the debug process at a place we had already found by scanning and following its trace.
We also attempted debugging as an entry point for reversing, which did not work out well. Programs are simply too large; debugging without any information about where to start becomes a tedious and futile process.
4.2.3 Hex modifications
The hex modifications we performed were very simple. They were the last step in our chain, and when we got to it we already knew what code we had, and what code we needed. We had already analyzed the variables, debugged the program, and tried replacing some assembly lines during runtime. Only the final modifications were left to do here.
A pitfall is attempting to perform hex modification too early. Modifying even one assembly instruction into something invalid might completely break the program, so you must possess the exact knowledge required before attempting this step; it is not a method of analysis.
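As an illustration of what such a modification amounts to, the sketch below overwrites a run of instruction bytes in the program file with NOPs (0x90 on x86), a common way to disable an instruction. The file name and offset are hypothetical placeholders; since the write is blind, the offset and length must already be known exactly, which is why hex modification comes last in our chain.
using System.IO;
class HexPatcher
{
    // Overwrite 'length' bytes at 'fileOffset' with NOP instructions (0x90 on x86).
    public static void NopOut(string path, long fileOffset, int length)
    {
        using (var fs = new FileStream(path, FileMode.Open, FileAccess.ReadWrite))
        {
            fs.Seek(fileOffset, SeekOrigin.Begin);
            for (int i = 0; i < length; i++)
                fs.WriteByte(0x90);
        }
    }
}
// Hypothetical usage: HexPatcher.NopOut("block_attack.exe", 0x1A2B, 6);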
Chapter 5
Conclusions
5.1 Tool evaluation summary
The art of reverse engineering is a vast subject, and we have only touched the surface of it in this project. There are many tools available and we have tested and evaluated only a few of them, most notably Cheat Engine and TSearch.
At first glance the two programs appeared to be equally good, with only aesthetic differences and some very minor functional differences, TSearch having some tools for performing calculations in decimal and hexadecimal. But once we pushed their limits a bit more we noticed some serious flaws in TSearch, and became more and more positively disposed towards Cheat Engine.
OllyDbg, HxD and BlackMagic all worked as expected, and we think they are all excellent tools in the repertoire of a beginning reverser.
5.2 Method evaluation summary
The method that yielded best results for us was using a chain of tools in order, proceeding to the next step after sufficient knowledge had been acquired. The chain which worked out best for us is explained below.
The first step was always scanning. This was done to isolate pieces of data and/or code within a running program, which could then be analyzed further. If the scan was thorough, it yielded knowledge about the program's internal structure, some of its code, and its methods of storage.
The second step was debugging the running program. With the knowledge from scanning, breakpoints could be set, and code further analyzed. Particularly interesting pieces of code were read through, and knowledge of details not revealed in the scanning process could be obtained.
The third step was modification. This could be done in two ways: either modifying the program itself with a hex editor, or creating an external program that attaches to the process in order to modify and/or read the program's data at run time.
5.3 Final conclusion
Reverse engineering is a very deep and interesting field, and we have learned a lot about the subject throughout the course of this project. We are, however, still no experts, as the subject is far too complex to fully absorb during the relatively short time we had for this project.
There are several applications for the art of reversing, many of them in the field of computer security. By using reversing techniques you can find out how a program works on a more fundamental level than by just using it and looking at what is presented to you during normal use. This can of course reveal holes in a program's security, which a reverser can exploit for a number of purposes: from cheating in games, as we have done in our experiments and shown in this report, to attacking businesses that rely on secure software. By using reversing techniques to detect these security holes, a company will be able to deliver a safer product.
Appendix
AI code
using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
using System.Threading;
using Magic;
namespace BlockAtHACKs
{
class AI
{
private uint[,] boardMatrix;
private int xSize, ySize;
private BlackMagic BMage;
public static int SLEEPTIME = 100;
private static int SLEEP_BETWEEN_CLEAR = 1000;
public void init(int xsize, int ysize, BlackMagic BM)
{
BMage = BM;
xSize = xsize;
ySize = ysize;
boardMatrix = new uint[xSize, ySize];
UpdateBoard();
}
public void Scramble(int times)
{
SLEEPTIME = SLEEPTIME / 4;
Random rng = new Random();
for (int i = 0; i < times; i++)
{
MoveCursorSlowly((uint)rng.Next(12), (uint)rng.Next(5));
SwapAtCursor();
}
SLEEPTIME = SLEEPTIME * 4;
}
public bool ClearRow()
{
uint targetx = 11;
uint targety = 0;
bool found = false;
while (targetx >= 2 && !found)
{
targety = 0;
while (targety <= 5 && !found)
{
if (IsTopOfTriple(targetx, targety))
{
found = true;
System.Console.WriteLine("Found "+BMage.ReadUInt(GetMatrixMemPos(targetx,targety))
+ " at x: " + targetx + ", y: " + targety);
}
targety++;
}
targetx--;
}
targety--;
targetx++;
System.Console.WriteLine("Target X: " + targetx + ", Target Y: " + targety);
if (!found)
return false;
// x, y = FindMatching() >= 2
// color = Read(x,y)
uint color = BMage.ReadUInt(GetMatrixMemPos(targetx,targety));
// location1 = FindLocation(x-1,y,color)
uint firsty = FindBlockOnRow(targetx-1, color);
// location2 = FindLocation(x-2,y,color)
uint secondy = FindBlockOnRow(targetx-2, color);
System.Console.WriteLine("First Y: " + firsty + ", Second Y : " + secondy + ", Color: " + color);
MoveCursorSlowly(targetx-1, targety);
System.Console.WriteLine("Movement complete to place of origin");
MoveCursorDir('d');
Thread.Sleep(SLEEPTIME);
MoveBlockBetween(firsty, targety);
System.Console.WriteLine("Movement complete from first row ".");
MoveCursorDir('d');
Thread.Sleep(SLEEPTIME);
MoveBlockBetween(secondy, targety);
System.Console.WriteLine("Movement complete from second row ".");
Thread.Sleep(SLEEP_BETWEEN_CLEAR);
return true;
}
public bool IsTopOfTriple(uint x, uint y)
{
if (NumberMatchingBelow(x, y) >= 2)
return true;
return false;
}
public void UpdateBoard()
{
//for (uint x = 0; x < xSize; x++)
//{
// for (uint y = 0; y < ySize; y++)
// {
// boardMatrix[x, y] = BMage.ReadUInt((int)GetMatrixMemPos(x, y));
// }
//}
}
public void MoveCursorDir(char dir)
{
uint curx = BMage.ReadUInt((int)Form1.Globals.XHAIR_X);
uint cury = BMage.ReadUInt((int)Form1.Globals.XHAIR_Y);
switch (dir)
{
case 'u':
MoveCursor(curx + 1, cury);
break;
case 'd':
MoveCursor(curx - 1, cury);
break;
case 'l':
MoveCursor(curx, cury - 1);
break;
case 'r':
MoveCursor(curx, cury + 1);
break;
}
}
private uint FindBlockOnRow(uint row, uint block)
{
for (uint i = 0; i <= 6; i++)
{
if (BMage.ReadUInt(GetMatrixMemPos(row, i)) == block)
return i;
}
return 100;
}
public void MoveBlockBetween(uint from, uint to)
{
// from 6 to 5
if (from > to) // We're going to move the block left. Align
MoveCursorSlowly(0, from - 1, false);
else // Moving right, align on the spot.
MoveCursorSlowly(0, from, false);
while (to > from) // Move the block some steps right
{
from+=1;
SwapAtCursor();
MoveCursorDir('r');
Thread.Sleep(SLEEPTIME);
}
while (to < from) // Move the block some steps left
{
from-=1;
SwapAtCursor();
MoveCursorDir('l');
Thread.Sleep(SLEEPTIME);
}
}
public void MoveCursorSlowly(uint x, uint y, bool movex = true)
{
if (x > 12)
x = 12;
if (y > 4)
y = 4;
uint curx = BMage.ReadUInt((uint)Form1.Globals.XHAIR_X);
uint cury = BMage.ReadUInt((uint)Form1.Globals.XHAIR_Y);
while (curx != x && movex)
{
if (curx > x)
{
MoveCursorDir('d');
curx--;
} else if (curx < x)
{
MoveCursorDir('u');
curx++;
}
Thread.Sleep(SLEEPTIME);
}
while (cury != y)
{
if (cury > y)
{
MoveCursorDir('l');
cury--;
} else if (cury < y)
{
MoveCursorDir('r');
cury++;
}
Thread.Sleep(SLEEPTIME);
}
}
public void SwapAtCursor()
{
uint x = BMage.ReadUInt((uint)Form1.Globals.XHAIR_X);
uint y = BMage.ReadUInt((uint)Form1.Globals.XHAIR_Y);
x++;
uint oldleft = BMage.ReadUInt(GetMatrixMemPos(x, y));
uint oldright = BMage.ReadUInt(GetMatrixMemPos(x, y + 1));
if ((oldleft > 5 && oldleft < 4000000000) || (oldright > 5 && oldright < 4000000000))
return;
BMage.WriteUInt(GetMatrixMemPos(x, y), oldright);
BMage.WriteUInt(GetMatrixMemPos(x, y + 1), oldleft);
}
public void MoveSelectedToPos(uint x, uint y, uint block)
{
// Finding closest block
uint pos = 0;
for (uint col = 0; col <= 6; col++)
{
if (BMage.ReadUInt(GetMatrixMemPos(col, y)) == block)
{
pos = col;
break;
}
}
}
public int NumberMatchingBelow(uint x, uint y)
{
int matching = 0;
uint block = BMage.ReadUInt(GetMatrixMemPos(x, y));
if (block > 100)
return 0;
for (uint row = x - 1; row >= 1 && row < 100; row--)
{
if (NumberInRow(row, block) > 0)
matching++;
else
return matching;
}
return matching;
}
private int NumberInRow(uint x, uint block)
{
int matching = 0;
for (uint y = 0; y <= 6; y++)
{
matching += (BMage.ReadUInt(GetMatrixMemPos(x, y)) == block) ? 1 : 0;
}
return matching;
}
public void MoveCursor(uint x, uint y)
{
if (x < 0 || x > 12 || y < 0 || y > 4)
return;
BMage.WriteUInt((uint)Form1.Globals.XHAIR_X, x);
BMage.WriteUInt((uint)Form1.Globals.XHAIR_Y, y);
}
public void Swap(uint x, uint y)
{
if (x < 0 || x > 12 || y < 0 || y > 4)
return;
MoveCursor(x, y);
uint leftPos = GetMatrixMemPos(x + 1, y);
uint rightPos = GetMatrixMemPos(x + 1, y + 1);
uint oldLeft = BMage.ReadUInt(leftPos);
uint oldRight = BMage.ReadUInt(rightPos);
BMage.WriteUInt(leftPos, oldRight);
BMage.WriteUInt(rightPos, oldLeft);
}
public uint GetMatrixMemPos(uint x, uint y)
{
return (uint)(Form1.Globals.BOARD_PTR) + x * 4 + y * 4 * 30;
}
}
}
Gaden code
using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using System.Threading;
using Magic;
namespace BlockAtHACKs
{
public partial class Form1 : Form
{
/* Form methods */
public Form1()
{
InitializeComponent();
}
private void LoadButton_Click(object sender, EventArgs e)
{
if (IsLoaded)
{
return;
}
Initialize();
}
private void PanicButton_Click(object sender, EventArgs e)
{
ClearField();
}
private void SolveButton_Click(object sender, EventArgs e)
{
AInt.ClearRow();
}
private void SolveAllButton_Click(object sender, EventArgs e)
{
while (AInt.ClearRow()) ;
}
private void SolveForeverButton_Click(object sender, EventArgs e)
{
while (true)
{
if (!AInt.ClearRow())
AInt.Scramble(1);
}
}
private void SwapButton_Click(object sender, EventArgs e)
{
//RandomSwap();
try
{
AInt.Swap((uint)Convert.ToInt32(textBox1.Text), (uint)Convert.ToInt32(textBox2.Text));
}
catch (System.FormatException exc)
{
RandomSwap();
}
}
private void ReadButton_Click(object sender, EventArgs e)
{
textBox3.Text = Environment.NewLine + AInt.NumberMatchingBelow(Convert.ToUInt32(textBox1.Text), Convert.ToUInt32(textBox2.Text));
//SetDebug(debug);
}
private void ActionButton_Click(object sender, EventArgs e)
{
int newSpeed = Convert.ToInt32(textBox1.Text);
AI.SLEEPTIME = newSpeed;
}
public static string debug;
private BlackMagic BMage;
private AI AInt;
private bool IsLoaded;
private int[,] boardMatrix;
public enum Globals
{
SCORE = 0x0028A538,
SPEED = 0x0028A4F4,
BOARD_PTR = 0x0028A1A8,
XHAIR_X = 0x0028A520,
XHAIR_Y = 0x0028A51C
};
public void Initialize()
{
BMage = new BlackMagic();
if(!BMage.OpenProcessAndThread(SProcess.
GetProcessFromProcessName("block_attack")))
{
Console.WriteLine("Yes you are fucked!");
IsLoaded = false;
return;
}
IsLoaded = true;
AInt = new AI();
AInt.init(30, 6, BMage);
Console.WriteLine("Attached to le Blocks!");
Form1.ActiveForm.Text = "Gaden - Attached at " + BMage.
MainModule.BaseAddress;
}
private void RandomSwap()
{
Random random = new Random();
uint x = (uint)random.Next(13);
uint y = (uint)random.Next(5);
AInt.Swap(x, y);
}
private void ClearField()
{
int offset = 0x0;
for (int i = 0; i < 30; i++)
{
for (int j = 0; j < 6; j++)
{
BMage.WriteInt((uint)(Globals.BOARD_PTR + offset), 0);
offset += 0x4;
}
}
}
public void SetDebug(string text)
{
Text_Debug.Text = text;
}
}
}
The Mystery Machine: End-to-end Performance Analysis of Large-scale Internet Services
Michael Chow, University of Michigan; David Meisner, Facebook, Inc.; Jason Flinn, University of Michigan; Daniel Peek, Facebook, Inc.; Thomas F. Wenisch, University of Michigan
https://www.usenix.org/conference/osdi14/technical-sessions/presentation/chow
The Mystery Machine: End-to-end performance analysis of large-scale Internet services
Michael Chow∗, David Meisner†, Jason Flinn∗, Daniel Peek†, Thomas F. Wenisch∗
University of Michigan∗ Facebook, Inc.†
Abstract
Current debugging and optimization methods scale poorly to deal with the complexity of modern Internet services, in which a single request triggers parallel execution of numerous heterogeneous software components over a distributed set of computers. The Achilles’ heel of current methods is the need for a complete and accurate model of the system under observation: producing such a model is challenging because it requires either assimilating the collective knowledge of hundreds of programmers responsible for the individual components or restricting the ways in which components interact.
Fortunately, the scale of modern Internet services offers a compensating benefit: the sheer volume of requests serviced means that, even at low sampling rates, one can gather a tremendous amount of empirical performance observations and apply “big data” techniques to analyze those observations. In this paper, we show how one can automatically construct a model of request execution from pre-existing component logs by generating a large number of potential hypotheses about program behavior and rejecting hypotheses contradicted by the empirical observations. We also show how one can validate potential performance improvements without costly implementation effort by leveraging the variation in component behavior that arises naturally over large numbers of requests to measure the impact of optimizing individual components or changing scheduling behavior.
We validate our methodology by analyzing performance traces of over 1.3 million requests to Facebook servers. We present a detailed study of the factors that affect the end-to-end latency of such requests. We also use our methodology to suggest and validate a scheduling optimization for improving Facebook request latency.
1 Introduction
There is a rich history of systems that understand, optimize, and troubleshoot software performance, both in practice and in the research literature. Yet, most of these prior systems deal poorly with the complexities that arise from modern Internet service infrastructure. Complexity comes partially from scale: a single Web request may trigger the execution of hundreds of executable components running in parallel on many different computers. Complexity also arises from heterogeneity; executable components are often written in different languages, communicate through a wide variety of channels, and run in execution environments that range from third-party browsers to open-source middleware to in-house, custom platforms.
In this paper, we develop performance analysis tools for measuring and uncovering performance insights about complex, heterogeneous distributed systems. We apply these tools to the Facebook Web pipeline. Specifically, we measure end-to-end performance from the point when a user initiates a page load in a client Web browser, through server-side processing, network transmission, and JavaScript execution, to the point when the client Web browser finishes rendering the page.
Fundamentally, analyzing the performance of concurrent systems requires a model of application behavior that includes the causal relationships between components; e.g., happens-before ordering and mutual exclusion. While the techniques for performing such analysis (e.g., critical path analysis) are well-understood, prior systems make assumptions about the ease of generating the causal model that simply do not hold in many large-scale, heterogeneous distributed systems such as the one we study in this paper.
Many prior systems assume that one can generate such a model by comprehensively instrumenting all middleware for communication, scheduling, and/or synchronization to record component interactions [1, 3, 13, 18, 22, 24, 28]. This is a reasonable assumption if the software architecture is homogeneous; for instance, Dapper [28] instruments a small set of middleware components that are widely used within Google.
However, many systems are like the Facebook systems we study; they grow organically over time in a culture that favors innovation over standardization (e.g., “move fast and break things” is a well-known Facebook slogan). There is broad diversity in programming languages, communication middleware, execution environments, and scheduling mechanisms. Adding instrumentation retroactively to such an infrastructure is a Herculean task. Further, the end-to-end pipeline includes client software such as Web browsers, and adding detailed instrumentation to all such software is not feasible.
Other prior systems rely on a user-supplied schema that expresses the causal model of application behavior [6, 31]. This approach runs afoul of the scale of modern Internet services. To obtain a detailed model of end-to-end request processing, one must assemble the collective knowledge of hundreds of programmers responsible for the individual components that are involved in request processing. Further, any such model soon grows stale due to the constant evolution of the system under observation, and so constant updating is required.
Consequently, we develop a technique that generates a causal model of system behavior without the need to add substantial new instrumentation or manually generate a schema of application behavior. Instead, we generate the model via large-scale reasoning over individual software component logs. Our key observation is that the sheer volume of requests handled by modern services allows us to gather observations of the order in which messages are logged over a tremendous number of requests. We can then hypothesize and confirm relationships among those messages. We demonstrate the efficacy of this technique with an implementation that analyzes over 1.3 million Facebook requests to generate a comprehensive model of end-to-end request processing.
Logging is an almost-universally deployed tool for analysis of production software. Indeed, although there was no comprehensive tracing infrastructure at Facebook prior to our work, almost all software components had some individual tracing mechanism. By relying on only a minimum common content for component log messages (a request identifier, a host identifier, a host-local timestamp, and a unique event label), we unified the output from diverse component logs into a unified tracing system called UberTrace.
UberTrace’s objective is to monitor end-to-end request latency, which we define to be the time that elapses from the moment the user initiates a Facebook Web request to the moment when the resulting page finishes rendering. UberTrace monitors a diverse set of activities that occur on the client, in the network and proxy layers, and on servers in Facebook data centers. These activities exhibit a high degree of concurrency.
To understand concurrent component interactions, we construct a causality model from a large corpus of UberTrace traces. We generate a cross-product of possible hypotheses for relationships among the individual component events according to standard patterns (currently, happens-before, mutual exclusive, and first-in-first-out relationships). We assume that a relationship holds until we observe an explicit contradiction. Our results show that this process requires traces of hundreds of thousands of requests to converge on a model. However, for a service such as Facebook, it is trivial to gather traces at this scale even at extremely low sampling frequencies. Further, the analysis scales well and runs as a parallel Hadoop job.
Thus, our analysis framework, The Mystery Machine derives its causal model solely from empirical observations that utilize only the existing heterogeneous component logs. The Mystery Machine uses this model to perform standard analyses, such as identifying critical paths, slack analysis, and outlier detection.
In this paper, we also present a detailed case study of performance optimization based on results from The Mystery Machine. First, we note that whereas the average request workload shows a balance between client, server, and network time on the critical path, there is wide variance in this balance across individual requests. In particular, we demonstrate that Facebook servers have considerable slack when processing some requests, but they have almost no slack for other requests. This observation suggests that end-to-end latency would be improved by having servers produce elements of the response as they are needed, rather than trying to produce all elements as fast as possible. We conjecture that this just-in-time approach to response generation will improve the end-to-end latency of requests while not substantially degrading the latency of requests that currently have considerable slack.
Implementing such an optimization is a formidable task, requiring substantial programming effort. To help justify this cost by partially validating our conjecture, we use The Mystery Machine to perform a “what-if” analysis. We use the inherent variation in server processing time that arises naturally over a large number of requests to show that increasing server latency has little effect on end-to-end latency when slack is high. Yet, increasing server latency has an almost linear effect on end-to-end latency when slack is low. Further, we show that slack can be predicted with reasonable accuracy. Thus, the case study demonstrates two separate benefits of The Mystery Machine: (1) it can identify opportunities for performance improvement, and (2) it can provide preliminary evidence about the efficacy of hypothesized improvements prior to costly implementation.
2 Background
In the early days of the Web, a request could often be modeled as a single logical thread of control in which a client executed an RPC to a single Web server. Those halcyon days are over.
At Facebook, the end-to-end path from button click to final render spans a diverse set of systems. Many components of the request are under Facebook’s control, but several components are not (e.g., the external network and the client’s Web browser). Yet, users care little about who is responsible for each component; they simply desire that their content loads with acceptable delay.
A request begins on a client with a user action to retrieve some piece of content (e.g., a news feed). After
DNS resolution, the request is routed to an Edge Load Balancer (ELB) [16]. ELBs are geo-distributed so as to allow TCP sessions to be established closer to the user and avoid excessive latency during TCP handshake and SSL termination. ELBs also provide a point of indirection for better load balancing, acting as a proxy between the user and data center.
Once a request is routed to a particular data center, a Software Load Balancer routes it to one of many possible Web servers, each of which runs the HipHop Virtual Machine runtime [35]. Request execution on the Web server triggers many RPCs to caching layers that include Memcache [20] and TAO [7]. Requests also occasionally access databases.
RPC responses pass through the load-balancing layers on their way back to the client. On the client, the exact order and manner of rendering a Web page are dependent on the implementation details of the user’s browser. However, in general, there will be a Cascading Style Sheet (CSS) download stage and a Document Object Model rendering stage, followed by a JavaScript execution stage.
As with all modern Internet services, to achieve latency objectives, the handling of an individual request exhibits a high degree of concurrency. Tens to hundreds of individual components execute in parallel over a distributed set of computers, including both server and client machines. Such concurrency makes performance analysis and debugging complex. Fortunately, standard techniques such as critical path analysis and slack analysis can tame this complexity. However, all such analyses need a model of the causal dependencies in the system being analyzed. Our work fills this need.
3 ÜberTrace: End-to-end Request Tracing
As discussed in the prior section, request execution at Facebook involves many software components. Prior to our work, almost all of these components had logging mechanisms used for debugging and optimizing the individual components. In fact, our results show that individual components are almost always well-optimized when considered in isolation.
Yet, there existed no complete and detailed instrumentation for monitoring the end-to-end performance of Facebook requests. Such end-to-end monitoring is vital because individual components can be well-optimized in isolation yet still miss opportunities to improve performance when components interact. Indeed, the opportunities for performance improvement we identify all involve the interaction of multiple components.
Thus, the first step in our work was to unify the individual logging systems at Facebook into a single end-to-end performance tracing tool, dubbed ÜberTrace. Our basic approach is to define a minimal schema for the information contained in a log message, and then map existing log messages to that schema.
ÜberTrace requires that log messages contain at least:
1. A unique request identifier.
2. The executing computer (e.g., the client or a particular server)
3. A timestamp that uses the local clock of the executing computer
4. An event name (e.g., “start of DOM rendering”).
5. A task name, where a task is defined to be a distributed thread of control.
ÜberTrace requires that each <event, task> tuple is unique, which implies that there are no cycles that would cause a tuple to appear multiple times. Although this assumption is not valid for all execution environments, it holds at Facebook given how requests are processed. We believe that it is also a reasonable assumption for similar Internet service pipelines.
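As a rough sketch, the minimal schema can be captured in a record such as the one below. The field names are ours, not Facebook's, and ÜberTrace actually stores each message as a row in a relational database rather than as an in-memory object.
// One trace record: the five fields every component log must supply.
class TraceEvent
{
    public string RequestId;  // unique identifier of the sampled request
    public string Host;       // executing computer (the client or a particular server)
    public long Timestamp;    // local clock of the executing computer
    public string EventName;  // e.g., "start of DOM rendering"
    public string TaskName;   // a distributed thread of control
    // The <event, task> pair is assumed to be unique within a request.
    public string Key() { return TaskName + "/" + EventName; }
}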
Since all log timestamps are in relation to local clocks, ÜberTrace translates them to estimated global clock values by compensating for clock skew. ÜberTrace looks for the common RPC pattern of communication in which the thread of control in an individual task passes from one computer (called the client to simplify this explanation) to another, executes on the second computer (called the server), and returns to the client. ÜberTrace calculates the server execution time by subtracting the latest and earliest server timestamps (according to the server’s local clock) nested within the client RPC. It then calculates the client-observed execution time by subtracting the client timestamps that immediately succeed and precede the RPC. The difference between the client and server intervals is the estimated network round-trip time (RTT) between the client and server. By assuming that request and response delays are symmetric, ÜberTrace calculates clock skew such that, after clock-skew adjustment, the first server timestamp in the pattern is exactly 1/2 RTT after the previous client timestamp for the task.
The above methodology is subject to normal variation in network performance. In addition, the imprecision of using existing log messages rather than instrumenting communication points can add uncertainty. For instance, the first logged server message could occur only after substantial server execution has already completed, leading to an under-estimation of server processing time and an over-estimation of RTT. ÜberTrace compensates by calculating multiple estimates. Since there are many request and response messages during the processing of a higher-level request, it makes separate RTT and clock skew calculations for each pair in the cross-product of requests. It then uses the calculation that yields the lowest observed RTT.
Timecard [23] used a similar approach to reconcile timestamps and identified the need to account for the effects of TCP slow start. Our use of multiple RTT estimates accomplishes this. Some messages such as the initial request are a single packet and so are not affected by slow start. Other messages such as the later responses occur after slow start has terminated. Pairing two such messages will therefore yield a lower RTT estimate. Since we take the minimum of the observed RTTs and use its corresponding skew estimate, we get an estimate that is not perturbed by slow start.
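A sketch of this min-RTT skew estimation is shown below. It assumes the log timestamps have already been grouped into client send/receive pairs together with the server timestamps nested between them; the type and field names are illustrative only.
using System.Collections.Generic;
class RpcPair
{
    public long ClientSend, ClientRecv;   // client local clock
    public long ServerFirst, ServerLast;  // server local clock
}
class SkewEstimator
{
    // Returns the clock-skew estimate (server clock minus client clock)
    // taken from the request/response pair with the smallest observed RTT.
    public static long Estimate(IEnumerable<RpcPair> pairs)
    {
        long bestRtt = long.MaxValue, bestSkew = 0;
        foreach (var p in pairs)
        {
            long serverTime = p.ServerLast - p.ServerFirst;
            long clientTime = p.ClientRecv - p.ClientSend;
            long rtt = clientTime - serverTime;          // estimated network round trip
            if (rtt >= 0 && rtt < bestRtt)
            {
                bestRtt = rtt;
                // After adjustment, the first server timestamp sits 1/2 RTT after
                // the client timestamp that immediately precedes the RPC.
                bestSkew = p.ServerFirst - (p.ClientSend + rtt / 2);
            }
        }
        return bestSkew;
    }
}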
Due to performance considerations, Facebook logging systems use statistical sampling to monitor only a small percentage of requests. UberTrace must ensure that the individual logging systems choose the same set of requests to monitor; otherwise the probability of all logging systems independently choosing to monitor the same request would be vanishingly small, making it infeasible to build a detailed picture of end-to-end latency. Therefore, UberTrace propagates the decision about whether or not to monitor a request from the initial logging component that makes such a decision through all logging systems along the path of the request, ensuring that the request is completely logged. The decision to log a request is made when the request is received at the Facebook Web server; the decision is included as part of the per-request metadata that is read by all subsequent components. UberTrace uses a global identifier to collect the individual log messages, extracts the data items enumerated above, and stores each message as a record in a relational database.
We made minimal changes to existing logging systems in order to map existing log messages to the UberTrace schema. We modified log messages to use the same global identifier, and we made the event or task name more human-readable. We added no additional log messages. Because we reused existing component logging and required only a minimal schema, these logging changes required approximately one person-month of effort.
4 The Mystery Machine
The Mystery Machine uses the traces generated by UberTrace to create a causal model of how software components interact during the end-to-end processing of a Facebook request. It then uses the causal model to perform several types of distributed systems performance analysis: finding the critical path, quantifying slack for segments not on the critical path, and identifying segments that are correlated with performance anomalies. The Mystery Machine enables more targeted analysis by exporting its results through a relational database and graphical query tools.
4.1 Causal Relationships Model
To generate a causal model, The Mystery Machine first transforms each trace from a collection of logged events to a collection of segments, which we define to be the execution interval between two consecutive logged events for the same task. A segment is labeled by the tuple <task, start_event, end_event>, and the segment duration is the time interval between the two events.
Next, The Mystery Machine identifies causal relationships. Currently, it looks for three types of relationships:
1. Happens-before (→) We say that segment A happens-before segment B (A → B) if the start event timestamp for B is greater than or equal to the end event timestamp for A in all requests.
2. Mutual exclusion (∨) Segments A and B are mutually exclusive (A ∨ B) if their time intervals never overlap.
3. Pipeline (≫) Given two tasks, t1 and t2, there exists a data dependency between pairs of segments of the two tasks. Further, the segment that operates on data element d1 precedes the segment that operates on data element d2 in task t1 if and only if the segment that operates on d1 precedes the segment that operates on d2 in task t2 for all such pairs of segments. In other words, the segments preserve a FIFO ordering in how data is produced by the first task and consumed by the second task.
We summarize these relationships in Figure 1. For each relationship we provide a valid example and at least one counterexample that would contradict the hypothesis.

Figure 1: The three relationship types (happens-before, mutual exclusion, and pipeline), each shown with a valid example and a counterexample that would contradict the hypothesis.
We use techniques from the race detection literature to map these static relationships to dynamic happens-before relationships. Note that mutual exclusion is a static property; e.g., two components A and B that share a lock are mutually exclusive. Dynamically, for a particular request, this relationship becomes a happens-before relationship: either A → B or B → A, depending on the order of execution. Pipeline relationships are similar. Thus, for any given request, all of these static relationships can be expressed as dynamic causal relationships between pairs of segments.
4.2 Algorithm
The Mystery Machine uses iterative refinement to infer causal relationships. It first generates all possible hypotheses for causal relationships among segments. Then, it iterates through a corpus of traces and rejects a hypothesis if it finds a counterexample in any trace.
Step 1 of Figure 2 illustrates this process. We depict the set of hypotheses as a graph where nodes are segments ("S" nodes are server segments, "N" nodes are network segments and "C" nodes are client segments) and edges are hypothesized relationships. For the sake of simplicity, we restrict this example to consider only happens-before relationships; an arrow from A to B shows a hypothesized “A happens before B” relationship.
The “No Traces” column shows that all possible relationships are initially hypothesized; this is a large number because the possible relationships scale quadratically as the number of segments increases. Several hypotheses are eliminated by observed contradictions in the first request. For example, since S2 happens after S1, the hypothesized relationship, S2 → S1, is removed. Further traces must be processed to complete the model. For instance, the second request eliminates the hypothesized relationship, N1 → N2. Additional traces prune new hypotheses due to the natural perturbation in timing of segment processing; e.g., perhaps the second user had fewer friends, allowing the network segments to overlap due to shorter server processing time.
Figure 3: Hypothesis Refinement. This graph shows the growth of the number of hypothesized relationships as a function of requests analyzed. As more requests are analyzed, the rate at which new relationships are discovered and removed decreases and eventually reaches a steady state. The total number of relationships increases over time due to code changes and the addition of new features.
The Mystery Machine assumes that the natural variation in timing that arises over large numbers of traces is sufficient to expose counterexamples for incorrect relationships. Figure 3 provides evidence supporting this hypothesis from traces of over 1.3 million requests to the Facebook home page gathered over 30 days. As the number of traces analyzed increases, the observation of new counterexamples diminishes, leaving behind only true relationships. Note that the number of total relationships changes over time because developers are continually adding new segments to the pipeline.
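The refinement loop itself is compact. The sketch below covers only happens-before hypotheses and assumes each trace arrives as a list of segments with skew-adjusted timestamps; the production system runs the equivalent computation as a parallel Hadoop job, which this sketch does not attempt to reproduce.
using System.Collections.Generic;
class Segment
{
    public string Id;        // identifies <task, start_event, end_event>
    public long Start, End;  // skew-adjusted timestamps
}
class CausalModel
{
    // A hypothesis "A happens-before B" is assumed to hold until a counterexample is seen.
    private readonly HashSet<string> rejected = new HashSet<string>();
    public void Observe(List<Segment> trace)
    {
        foreach (var a in trace)
            foreach (var b in trace)
            {
                if (a.Id == b.Id) continue;
                // Counterexample: B started before A finished, so A -> B cannot hold.
                if (b.Start < a.End)
                    rejected.Add(a.Id + "->" + b.Id);
            }
    }
    public bool HappensBefore(string a, string b)
    {
        return !rejected.Contains(a + "->" + b);
    }
}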
4.3 Validation
To validate the causal model produced by the Mystery Machine, we confirmed several specific relationships identified by the Mystery Machine. Although we could not validate the entire model due to its size, we did substantial validation of two of the more intricate components: the interplay between JavaScript execution on the client and the dependencies involved in delivering data to the client. These components have 42 and 84 segments, respectively, as well as 2,583 and 10,458 identified causal relationships.
We confirmed these specific relationships by examining source code, inserting assertions to confirm model-derived hypotheses, and consulting relevant subsystem experts. For example, the system discovered the specific, pipelined schedule according to which page content is delivered to the client. Further, the model correctly reflects that JavaScript segments are mutually exclusive (a known property of the JavaScript execution engine) and identified ordering constraints arising from synchronization.
4.4 Analysis
Once The Mystery Machine has produced the causal model of segment relationships, it can perform several types of performance analysis.
4.4.1 Critical Path
Critical path analysis is a classic technique for understanding how individual components of a parallel execution impact end-to-end latency [22, 32]. The critical path is defined to be the set of segments for which a differential increase in segment execution time would result in the same differential increase in end-to-end latency.
The Mystery Machine calculates the critical path on a per-request basis. It represents all segments in a request as a directed acyclic graph in which the segments are vertices with weight equal to the segment duration. It adds an edge between all vertices for which the corresponding segments have a causal relationship. Then, it performs a transitive reduction in which all edges $A \rightarrow C$ are recursively removed if there exists a path consisting of $A \rightarrow B$ and $B \rightarrow C$ that links the two nodes.
Finally, The Mystery Machine performs a longest-path analysis to find the critical path from the first event in the request (the initiation of the request) to the last event (which is typically the termination of some JavaScript execution). The length of the critical path is the end-to-end latency of the entire request. If there are equal-length critical paths, the first discovered path is chosen.
We illustrate the critical path calculation for the two example requests in Step 2 of Figure 2. Each request has a different critical path even though the dependency graph is the same for both. The critical path of the first request is $\{S_1, S_2, N_2, C_2\}$. Because $S_2$ has a long duration, all dependencies for $N_2$ and $C_2$ have been met before they start, leaving them on the critical path. The critical path of the second request is $\{S_1, N_1, C_1, C_2\}$. In this case, $S_2$ and $N_2$ could have longer durations and not affect end-to-end latency because $C_2$ must wait for $C_1$ to finish.
Typically, we ask The Mystery Machine to calculate critical paths for large numbers of traces and aggregate the results. For instance, we might ask how often a given segment falls on the critical path or the average percentage of the critical path represented by each segment.
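A minimal sketch of the per-request longest-path step is given below. The segment, duration and edge representations are our own assumptions, and the transitive-reduction step is omitted because it does not change the longest path.
using System.Collections.Generic;
using System.Linq;
class CriticalPath
{
    // durations: segment -> duration; edges: segment -> causally dependent successors.
    // topoOrder must list every segment after all of its predecessors.
    public static List<string> Compute(
        Dictionary<string, long> durations,
        Dictionary<string, List<string>> edges,
        List<string> topoOrder)
    {
        var best = new Dictionary<string, long>();    // longest path ending at each segment
        var prev = new Dictionary<string, string>();  // back-pointers along that path
        foreach (var s in topoOrder) best[s] = durations[s];
        foreach (var s in topoOrder)
        {
            if (!edges.ContainsKey(s)) continue;
            foreach (var t in edges[s])
            {
                if (best[s] + durations[t] > best[t])
                {
                    best[t] = best[s] + durations[t];
                    prev[t] = s;
                }
            }
        }
        // Walk back from the segment with the largest accumulated duration.
        var path = new List<string>();
        string cur = best.OrderByDescending(kv => kv.Value).First().Key;
        while (cur != null)
        {
            path.Add(cur);
            string p;
            cur = prev.TryGetValue(cur, out p) ? p : null;
        }
        path.Reverse();
        return path;
    }
}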
4.4.2 Slack
Critical path analysis is useful for determining where to focus optimization effort; however, it does not provide any information about the importance of latency for segments off the critical path. The Mystery Machine provides this information via slack analysis.
We define slack to be the amount by which the duration of a segment may increase without increasing the end-to-end latency of the request, assuming that the duration of all other segments remains constant. By this definition, segments on the critical path have no slack because increasing their latency will increase the end-to-end latency of the request.
To calculate the slack for a given segment, $S$, The Mystery Machine calculates $CP_{\text{start}}$, the critical path length from the first event in the request to the start of $S$ and $CP_{\text{end}}$ the critical path length from the end of $S$ to the last event in the request. Given the critical path length for the entire request ($CP$) and the duration of segment $S$ ($D_S$), the slack for $S$ is $CP - CP_{\text{start}} - D_S - CP_{\text{end}}$. The Mystery Machine’s slack analysis calculates and reports this value for every segment. As with critical path results, slack results are typically aggregated over a large number of traces.
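Expressed as code, the per-segment computation is a one-liner once the three critical-path lengths are available; the helper below is an illustrative sketch rather than The Mystery Machine's actual interface.
class SlackAnalysis
{
    // cp:         critical path length of the entire request
    // cpToStart:  critical path length from the first event to the start of the segment
    // duration:   duration of the segment itself
    // cpFromEnd:  critical path length from the end of the segment to the last event
    public static long Slack(long cp, long cpToStart, long duration, long cpFromEnd)
    {
        return cp - cpToStart - duration - cpFromEnd;  // zero for segments on the critical path
    }
}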
4.4.3 Anomaly detection
One special form of aggregation supported by The Mystery Machine is anomaly analysis. To perform this analysis, it first classifies requests according to end-to-end latency to identify a set of outlier requests. Currently, outliers are defined to be requests that are in the top 5% of end-to-end latency. Then, it performs a separate aggregation of critical path or slack data for each set of requests identified by the classifiers. Finally, it performs a differential comparison to identify segments with proportionally greater representation in the outlier set of requests than in the non-outlier set. For instance, we have used this analysis to identify a set of segments that correlated with high latency requests. Inspection revealed that these segments were in fact debugging components that had been returned in response to some user requests.
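A sketch of the differential comparison is shown below: requests in the top 5% of end-to-end latency form the outlier set, and segments are ranked by how much more often they are critical for outliers than for the remaining requests. The data shapes are assumptions for illustration.
using System;
using System.Collections.Generic;
using System.Linq;
class AnomalyAnalysis
{
    // requests: (end-to-end latency, segments on that request's critical path)
    public static List<KeyValuePair<string, double>> DifferentialCriticality(
        List<Tuple<long, HashSet<string>>> requests)
    {
        // Latency cutoff marking the top 5% of requests.
        long cutoff = requests.Select(r => r.Item1)
                              .OrderByDescending(l => l)
                              .ElementAt(Math.Max(0, requests.Count / 20 - 1));
        var outliers = requests.Where(r => r.Item1 >= cutoff).ToList();
        var normal = requests.Where(r => r.Item1 < cutoff).ToList();
        var result = new List<KeyValuePair<string, double>>();
        foreach (string seg in requests.SelectMany(r => r.Item2).Distinct())
        {
            double inOutliers = outliers.Count(r => r.Item2.Contains(seg)) / (double)outliers.Count;
            double inNormal = normal.Count(r => r.Item2.Contains(seg)) / (double)Math.Max(1, normal.Count);
            result.Add(new KeyValuePair<string, double>(seg, inOutliers - inNormal));
        }
        // Segments over-represented on outlier critical paths come first.
        return result.OrderByDescending(kv => kv.Value).ToList();
    }
}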
4.5 Implementation
We designed The Mystery Machine to automatically and continuously analyze production traffic at scale over long time periods. It is implemented as a large-scale data processing pipeline, as depicted in Figure 4.
UberTrace continuously samples a small fraction of requests for end-to-end tracing. Trace data is collected by the Web servers handling these requests, which write them to Scribe, Facebook’s distributed logging service. The trace logs are stored in tables in a large-scale data warehousing infrastructure called Hive [30]. While Scribe and Hive are the in-house analysis tools used at Facebook, their use is not fundamental to our system.
The Mystery Machine runs periodic processing jobs that read trace data from Hive and calculate or refine the causal model based on those traces. The calculation of the causal model is compute-intensive because the number of possible hypotheses is quadratic with the number of segments and because model refinement requires traces of hundreds of thousands of requests. Therefore, our implementation parallelizes this step as a Hadoop job running on a compute cluster. Infrequently occurring testing and debugging segments are automatically removed from the model; these follow a well-defined naming convention that can be detected with a single regular expression. The initial calculation of the model analyzed traces of over 1.3 million requests collected over 30 days. On a Hadoop cluster, it took less than 2 hours to derive a model from these traces.
In practice, the model must be recomputed periodically in order to detect changes in relationships. Parallelizing the computation made it feasible to recompute the model every night as a regularly-scheduled batch job.
In addition to the three types of analysis described above, The Mystery Machine supports on-demand user queries by exporting results to Facebook’s in-house analytic tools, which can aggregate, pivot, and drill down into the results. We used these tools to categorize results by browser, connection speed, and other such dimensions; we share some of this data in Section 5.
4.6 Discussion
A key characteristic of The Mystery Machine is that it discovers dependencies automatically, which is critical because Facebook’s request processing is constantly evolving. As described previously, The Mystery Machine assumes a hypothesized relationship between two segments until it finds a counterexample. Over time, new segments are added as the site evolves and new features are added. The Mystery Machine automatically finds the dependencies introduced by the new segments by hypothesizing relationships for them and pruning those contradicted by subsequent traces.
Figure 5: Simply summing delay measured at each system component (“Summed Delay”) ignores overlap and underestimates the importance of server latency relative to the actual mean critical path (“Critical Path”).
Excluding new segments, the rate at which new relationships are added levels off. The rate at which relationships are removed due to counterexamples also levels off. Thus, the model converges on a set of true dependencies. The Mystery Machine relies on UberTrace for complete log messages. Log messages, however, may be missing for two reasons: the component does no logging at all for a segment of its execution or the component logs messages for some requests but not others. In the first case, The Mystery Machine cannot identify causal relationships involving the unlogged segment, but causal relationships among all other segments will be identified correctly. When a segment is missing, the model overestimates the concurrency in the system, which would affect the critical path/slack analysis if the true critical path includes the unlogged segment. In the second case, The Mystery Machine would require more traces in order to discover counterexamples. This is equivalent to changing the sampling frequency.
5 Results
We demonstrate the utility of The Mystery Machine with two case studies. First, we demonstrate its use for aggregate performance characterization. We study live traffic, stratifying the data to identify factors that influence which system components contribute to the critical path. We find that the critical path can shift between three major components (servers, network, and client) and that these shifts correlate with the client type and network connection quality.
This variation suggests one possible performance optimization for Facebook servers: provide differentiated service by prioritizing service for connections where the server has no slack while deprioritizing those where network and client latency will likely dominate. Our second case study demonstrates how the natural variance across a large trace set enables testing of such performance hypotheses without expensive modifications to the system under observation. Since an implementation that provided differential services would require large-scale effort to thread through hundreds of server components, we use our dataset to first determine whether such an optimization is likely to be successful. We find that slack, as detected by The Mystery Machine, indeed indicates that slower server processing time minimally impacts end-to-end latency. We also find that slack tends to remain stable for a particular user across multiple Facebook sessions, so the observed slack of past connections can be used to predict the slack of the current connection.
5.1 Characterizing End-to-End Performance
In our first case study, we characterize the end-to-end performance critical path of Web accesses to the home.php Facebook endpoint. The Mystery Machine analyzes traces of over 1.3 million Web accesses collected over 30 days in July and August 2013.
Importance of critical path analysis. Figure 5 shows mean time breakdowns over the entire trace dataset. The breakdown is shown in absolute time in the left graph, and as a percent of total time on the right. We assign segments to one of five categories: Server for segments on a Facebook Web server or any internal service accessed from the Web server over RPC, Network for segments in which data traverses the network, DOM for browser segments that parse the document object model, CSS for segments processing cascading style sheets, and JavaScript for JavaScript segments. Each graph includes two bars: one showing the stacked sum of total processing time in each component ignoring all concurrency (“Summed Delay”) and the other the critical path as identified by The Mystery Machine (“Critical Path”).
On average, network delays account for the largest fraction of the critical path, but client and server processing are both significant. JavaScript execution remains a major bottleneck in current Web browsers, particularly since the JavaScript execution model admits little concurrency. The comparison of the total delay and critical path bars reveals the importance of The Mystery Machine—by examining only the total latency breakdown (e.g., if an engineer were profiling only one system component), one might overestimate the importance of network latency and JavaScript processing on end-to-end performance. In fact, the server and other client processing segments are frequently critical, and the overall critical path is relatively balanced across server, client, and network.
High variance in the critical path. Although analyzing the average case is instructive, it grossly oversimplifies the performance picture for the home.php endpoint. There are massive sources of latency variance over the population of requests, including the performance of the client device, the size of the user’s friend list, the kind of network connection, server load, Memcache misses, etc. Figure 6 shows the cumulative distribution of the fraction of the critical path attributable to server, network, and client portions. The key revelation of these distributions is that the critical path shifts drastically across requests—any of the three components can dominate delay, accounting for more than half of the critical path in a non-negligible fraction of requests.
Variance is greatest in the contribution of the network to the critical path, as evidenced by the fact that its CDF has the least curvature. It is not surprising that network delays vary so greatly since the trace data set includes access to Facebook over all sorts of networks, from high-speed broadband to cellular networks and even some dial-up connections. Client processing always accounts for at least 20% of the critical path. After content delivery, there is a global barrier in the browser before the JavaScript engine begins running the executable components of the page; hence, JavaScript execution always contributes to the critical path. However, the client rarely accounts for more than 40% of the critical path. It is unusual for the server to account for less than 20% of the critical path because the initial request processing before the server begins to transmit any data is always critical.
Noticing this high variance in the critical path was very valuable to us because it triggered the idea of differentiated services that we explore in Section 5.2.
Stratification by connection type. We first consider stratifying by the type of network over which a user connects to Facebook’s system, as it is clear one would expect network latency to differ, for example, between cable modem and wireless connections. Facebook’s edge load balancing system tags each incoming request with a network type. These tags are derived from the network type recorded in the Autonomous System Number database for the Internet service provider responsible for the originating IP address. Figure 7 illustrates the critical path breakdown, in absolute time, for the four largest connection type categories. Each bar is annotated with the fraction of all requests that fall within that connection type (only a subset of connection types are shown, so the percentages do not sum to 100%).
Perhaps unsurprisingly, these coarse network type classifications correlate only loosely to the actual performance of the network connection. Mobile connections show a higher average network critical path than the other displayed connection types, but the data is otherwise inconclusive. We conclude that the network type reported by the ASN is not very helpful for making performance predictions.
Figure 7: Critical path breakdowns stratified by browser, platform, connection type, and computed bandwidth

**Stratification by client platform.** The client platform is included in the HTTP headers transmitted by the browser along with each request, and is therefore also available at the beginning of request processing. The client operating system is a hint to the kind of client device, which in turn may suggest relative client performance. Figure 7 shows a critical path breakdown for the five most common client platforms in our traces, again annotated with the fraction of requests represented by the bar. Note that we are considering only Web browser requests, so requests initiated by Facebook cell phone apps are not included. The most striking feature of the graph is that Mac OS X users (a small minority of Facebook connections at only 7.1%) tend to connect to Facebook from faster networks than Windows users. We also see that the bulk of connecting Windows users still run Windows 7, and many installations of Windows XP remain deployed. Client processing time has improved markedly over the various generations of Windows. Nevertheless, the breakdowns are all quite similar, and we again find insufficient predictive power for differentiating service time by platform.
**Stratification by browser.** The browser type is also indicated in the HTTP headers transmitted with a request. In Figure 7, we see critical paths for the four most popular browsers. Safari is an outlier, but this category is strongly correlated with the Mac OS X category. Chrome appears to offer slightly better JavaScript performance than the other browsers.
**Stratification by measured network bandwidth.**
All of the preceding stratifications only loosely correlate to performance: ASN is a poor indication of network connection quality, and browser and OS do not provide a reliable indication of client performance. We provide one more example stratification where we subdivide the population of requests into five categories based directly on the measured network bandwidth, which can be deduced from our traces using the network time and bytes transmitted. The categories are equally sized, each representing 20% of requests, and are sorted by increasing bandwidth (p80 is the quintile with the highest observed bandwidth). As one would expect, the network critical path is strongly correlated to measured network bandwidth. Higher bandwidth connections also tend to come from more capable clients; low-performance clients (e.g., smart phones) often connect over poor networks (3G and Edge networks).
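As a rough illustration of this stratification (the field names and the toy values are assumptions, not the production pipeline), one might compute per-request bandwidth from bytes transmitted and network time and then bucket requests into quintiles:

```python
import numpy as np

def bandwidth_quintiles(bytes_sent, network_seconds):
    """Split requests into five equally sized buckets by measured bandwidth."""
    bandwidth = np.asarray(bytes_sent, dtype=float) / np.asarray(network_seconds, dtype=float)
    edges = np.percentile(bandwidth, [20, 40, 60, 80])   # quintile boundaries
    buckets = np.searchsorted(edges, bandwidth)          # 0 = slowest, 4 = fastest (p80)
    return bandwidth, buckets

bw, buckets = bandwidth_quintiles([5e5, 2e6, 1e6, 8e6, 3e6], [2.0, 1.0, 0.5, 0.8, 0.3])
print(buckets)
```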
5.2 Differentiated Service using Slack
Our second case study uses *The Mystery Machine* to perform early exploration of a potential performance optimization—differentiated service—without undertaking the expense of implementing the optimization.
The characterization in the preceding section reveals that there is enormous variation in the relative importance of client, server, and network performance over the population of Facebook requests. For some requests, server segments form the bulk of the critical path. For these requests, any increase in server latency will result in a commensurate increase in end-to-end latency and a worse user experience. However, after the initial critical segment, many connections are limited by the speed at which data can be delivered over the network or rendered by the client. For these connections, server execution can be delayed to produce data as needed, rather than as soon as possible, without affecting the critical path or the end-to-end request latency.
We use The Mystery Machine to directly measure the slack in server processing time available in our trace dataset. For simplicity of explanation, we will use the generic term “slack” in this section to refer to the slack in server processing time only, excluding slack available in any other types of segments.
Figure 8 shows the cumulative distribution of slack for the last data item sent by the server to the client. The graph is annotated with a vertical line at 500 ms of slack. For the purposes of this analysis, we have selected 500 ms as a reasonable cut-off between connections for which service should be provided with best effort (< 500 ms slack), and connections for which service can be deprioritized (> 500 ms). However, in practice, the best cut-off will depend on the implementation mechanism used to deprioritize service. More than 60% of all connections exhibit more than 500 ms of slack, indicating substantial opportunity to defer server processing. We find that slack typically increases monotonically during server processing as data items are sent to the client during a request. Thus, we conclude that slack is best consumed equally as several segments execute, as opposed to consuming all slack at the start or end of processing.
**Validating Slack Estimates.** It is difficult to directly validate The Mystery Machine’s slack estimates, as we can only compute slack once a request has been fully processed. Hence, we cannot retrospectively delay server segments to consume the slack and confirm that the end-to-end latency is unchanged. Such an experiment is difficult even under highly controlled circumstances, since it would require precisely reproducing the conditions of a request over and over while selectively delaying only a few server segments.
Instead, we turn again to the vastness of our trace data set and the natural variance therein to confirm that slack estimates hold predictive power. Intuitively, small slack implies that server latency is strongly correlated to end-to-end latency; indeed, with a slack of zero we expect any increase in server latency to delay end-to-end latency by the same amount. Conversely, when slack is large, we expect little correlation between server latency and end-to-end latency; increases in server latency are largely hidden by other concurrent delays. We validate our notion of slack by directly measuring the correlation of server and end-to-end latency.
Figure 9 provides an intuitive view of the relationship for which we are testing. Each graph is a heat map of server generation time vs. end-to-end latency. The left graph includes only requests with the lowest measured slack, below 25 ms. There are slightly over 115,000 such requests in this data set. For these requests, we expect a strong correlation between server time and end-to-end time. We find that this subset of requests is tightly clustered just above the \( y = x \) (indicated by the line in the figure), indicating a strong correlation. The right figure includes roughly 100,000 requests with the greatest slack (above 2500 ms). For these, we expect no particular relationship between server time and end-to-end time (except that end-to-end time must be at least as large as slack, since this is an invariant of request processing).
As reported slack increases, the correlation between total server processing time and end-to-end latency weakens, since a growing fraction of server segments are non-critical. Indeed, we find the requests dispersed in a large cloud above $y = x$, with no correlation visually apparent.
We provide a more rigorous validation of the slack estimate in Figure 10. Here, we show the correlation coefficient between server time and end-to-end time for equally sized buckets of requests sorted by increasing slack. Each block in the graph corresponds to 5% of our sample, or roughly 57,000 requests (buckets are not equally spaced since the slack distribution is heavy-tailed). As expected, the correlation coefficient between server and end-to-end latency is quite high, nearly 0.8, when slack is low. It drops to 0.2 for the requests with the largest slack.
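A minimal sketch of this bucketed correlation analysis is shown below; the synthetic data and the bucket count are assumptions made only to keep the example self-contained.

```python
import numpy as np

def correlation_by_slack_bucket(slack, server, e2e, n_buckets=5):
    """Sort requests by slack, split into equally sized buckets, and report the
    Pearson correlation of server time vs. end-to-end time within each bucket."""
    order = np.argsort(slack)
    slack, server, e2e = (np.asarray(a)[order] for a in (slack, server, e2e))
    results = []
    for idx in np.array_split(np.arange(len(slack)), n_buckets):
        r = np.corrcoef(server[idx], e2e[idx])[0, 1]
        results.append((slack[idx].mean(), r))
    return results

# Synthetic requests: end-to-end time is roughly max(server, slack) plus noise,
# so the correlation should fall as slack grows, mirroring the trend in Figure 10.
rng = np.random.default_rng(0)
slack = rng.exponential(800, 10_000)
server = rng.gamma(2.0, 100, 10_000)
e2e = np.maximum(server, slack) + rng.gamma(2.0, 50, 10_000)
for mean_slack, r in correlation_by_slack_bucket(slack, server, e2e):
    print(f"mean slack {mean_slack:7.1f} ms -> correlation {r:.2f}")
```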
**Predicting Slack.** We have found that slack is predictive of the degree to which server latency impacts end-to-end latency. However, *The Mystery Machine* can discover slack only through a retrospective analysis. To be useful in a deployed system, we must predict the availability or lack of slack for a particular connection as server processing begins.
One mechanism to predict slack is to recall the slack a particular user experienced in a prior connection to Facebook. Previous slack was found to be more useful in predicting future slack than any other feature we studied. Most users connect to Facebook using the same device and over the same network connection repeatedly. Hence, their client and network performance are likely to remain stable over time. The user id is included as part of the request, and slack could be easily associated with the user id via a persistent cookie or by storing the most recent slack estimate in Memcache [20].
We test the hypothesis that slack remains stable over time by finding all instances within our trace dataset where we have multiple requests associated with the same user id. Since the request sampling rate is exceedingly low, and the active user population is so large, selecting the same user for tracing more than once is a relatively rare event. Nevertheless, again because of the massive volume of traces collected over the course of 30 days of sampling, we have traced more than 1000 repeat users. We test a simple classifier that predicts a user will experience a slack greater than 500 ms if the slack on their most recent preceding connection was also greater than 500 ms. Figure 11 illustrates the result. The graph shows a scatter plot of the first slack and second slack in each pair; the line at $y = x$ indicates slack was identical between the two connections. Our simple history-based classifier predicts the presence or absence of slack correctly 83% of the time. The shaded regions of the graph indicate cases where we have misclassified a connection. A type I error indicates a prediction that there is slack available for a connection when in fact server performance turns out to be critical—8% of requests fall in this category. Conversely, a type II error indicates a prediction that a connection will not have slack when in fact it does, and represents a missed opportunity to throttle service—9% of requests fall in this category.
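The history-based classifier itself is simple enough to sketch directly; the per-user pairs of (previous slack, current slack) and the toy values are assumed inputs.

```python
def evaluate_slack_predictor(pairs, threshold_ms=500):
    """Predict 'has slack' when the user's previous observed slack exceeded the
    threshold, and report accuracy plus type I / type II error rates."""
    correct = type1 = type2 = 0
    for previous, current in pairs:
        predicted = previous > threshold_ms
        actual = current > threshold_ms
        if predicted == actual:
            correct += 1
        elif predicted and not actual:
            type1 += 1   # predicted slack, but server time turned out to be critical
        else:
            type2 += 1   # missed opportunity to deprioritize the connection
    n = len(pairs)
    return correct / n, type1 / n, type2 / n

print(evaluate_slack_predictor([(900, 1200), (100, 80), (700, 300), (2000, 2500)]))
```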
Note that achieving these results does not require frequent sampling. The repeated accesses we study are often several weeks separated in time, and, of course, it is likely that there have been many intervening unsampled requests by the same user. Sampling each user once every few weeks would therefore be sufficient.
**Potential Impact.** We have shown that a potential performance optimization would be to offer differentiated service based on the predicted amount of slack available per connection. Deciding which connections to service is equivalent to real-time scheduling with deadlines.
By using predicted slack as a scheduling deadline, we can improve average response time in a manner similar to the earliest deadline first real-time scheduling algorithm. Connections with considerable slack can be given a lower priority without affecting end-to-end latency. However, connections with little slack should see an improvement in end-to-end latency because they are given scheduling priority. Therefore, average latency should improve. We have also shown that prior slack values are a good predictor of future slack. When new connections are received, historical values can be retrieved and used in scheduling decisions. Since calculating slack is much less complex than servicing the actual Facebook request, it should be feasible to recalculate the slack for each user approximately once per month.
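A toy sketch of such a scheduler is given below. It simply treats predicted slack as a deadline in an earliest-deadline-first queue; the class and its interface are illustrative assumptions rather than a description of Facebook's request scheduling.

```python
import heapq
import time

class SlackScheduler:
    """Earliest-deadline-first dispatch: requests with little predicted slack
    (tight deadlines) are served before requests with ample slack."""

    def __init__(self):
        self._heap = []
        self._seq = 0  # tie-breaker so heap comparisons never reach request ids

    def submit(self, request_id, predicted_slack_ms):
        deadline = time.monotonic() + predicted_slack_ms / 1000.0
        heapq.heappush(self._heap, (deadline, self._seq, request_id))
        self._seq += 1

    def next_request(self):
        return heapq.heappop(self._heap)[2] if self._heap else None

scheduler = SlackScheduler()
scheduler.submit("user-a", predicted_slack_ms=2500)  # ample slack, can wait
scheduler.submit("user-b", predicted_slack_ms=50)    # nearly critical, serve first
print(scheduler.next_request())  # -> user-b
```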
6 Related Work
Critical path analysis is an intuitive technique for understanding the performance of systems with concurrent activity. It has been applied in a wide variety of areas such as processor design [26], distributed systems [5], and Internet and mobile applications [22, 32].
Deriving the critical path requires knowing causal dependencies between components throughout the entire end-to-end system. A model of causal dependencies can be derived from comprehensively instrumenting all middleware for communication, scheduling, and/or synchronization to record component interactions [1, 3, 9, 13, 15, 18, 22, 24, 28]. In contrast to these prior systems, The Mystery Machine is targeted at environments where adding comprehensive new instrumentation to an existing system would be too time-consuming due to heterogeneity (e.g., at Facebook, there is a great number of scheduling, communication, and synchronization schemes used during end-to-end request processing) and deployment feasibility (e.g., it is not feasible to add new instrumentation to client machines or third-party Web browser code). Instead, The Mystery Machine extracts a causal model from already-existing log messages, relying only on a minimal schema for such messages.
Sherlock [4] also uses a “big data” approach to build a causal model. However, it relies on detailed packet traces, not log messages. Packet traces would not serve our purpose: it is infeasible to collect them on user clients, and they reveal nothing about the interaction of software components that run on the same computer (e.g., JavaScript), which is a major focus of our work. Observing a packet sent between A and B inherently implies some causal relationship, while The Mystery Machine must infer such relationships by observing if the order of log messages from A and B obey a hypothesized invariant. Hence, Sherlock’s algorithm is fundamentally different: it reasons based on temporal locality and infers probabilistic relationships; in contrast, The Mystery Machine uses only message order to derive invariants (though timings are used for critical path and slack analysis).
The lprof tool [36] also analyzes log messages to reconstruct the ordering of logged events in a request. It supplements logs with static analysis to discover dependencies between log points and uses those dependencies to differentiate events among requests. Since static analysis is difficult to scale to heterogeneous production environments, The Mystery Machine instead uses a small amount of manual modification to map events to traces and leverages a large sample size and natural variation in ordering to infer causal dependencies between events in a request.
In other domains, hypothesizing likely invariants and eliminating those contradicted by observations has proven to be a successful technique. For instance, likely invariants have been used for fault localization [25] and diagnosing software errors [12, 21]. The Mystery Machine applies this technique to a new domain.
Many other systems have looked at the notion of critical path in Web services. WebProphet [17] infers Web object dependencies by injecting delays into the loading of Web objects to deduce the true dependencies between Web objects. The Mystery Machine instead leverages a large sample size and the natural variation of timings to infer the causal dependencies between segments. WProf [32] modifies the browser to learn browser page load dependencies. It also injects delays and uses a series of test pages to learn the dependencies and applies a critical path analysis. The Mystery Machine looks at end-to-end latency from the server to the client. It automatically deduces a dependency model by analyzing a large set of requests. Google Pagespeed Insight [14] profiles a page load and reports its best estimate of the critical path from the client’s perspective. The Mystery Machine traces a Web request from the server through the client, enabling it to deduce the end-to-end critical path.
Chen et al. [11] analyzed end-to-end latency of a search service. They also analyzed variation along the server, network, and client components. The Mystery Machine analyzes end-to-end latency using critical path analysis, which allows for attributing latency to specific components and performing slack analysis.
Many other systems have looked at automatically discovering service dependencies in distributed systems by analyzing network traffic. Orion [10] passively observes network packets and relies on discovering service dependencies by correlating spikes in network delays. The Mystery Machine instead uses a minimal common tracing schema and finds counterexamples in observed log orderings to disprove hypothesized causal dependencies. WISE [29] answers “what-if” questions in CDN configuration. It uses machine learning techniques to derive important features that affect user response time and uses correlation to derive dependencies between these features. Butkiewicz et al. [8] measured which network and client features best predicted Web page load times across thousands of websites. They produced a predictive model from these features across a diverse set of Web pages. The Mystery Machine aims to characterize the end-to-end latency in a single complex Web service with a heterogeneous client base and server environment.
The technique of using logs for analysis has been applied to error diagnosis [2, 34, 33] and debugging performance issues [19, 27].
7 Conclusion
It is challenging to understand an end-to-end request in a highly-concurrent, large-scale distributed system. Analyzing performance requires a causal model of the system, which The Mystery Machine produces from observations of component logs. The Mystery Machine uses a large number of observed request traces in order to validate hypotheses about causal relationships.
Acknowledgements
We thank the anonymous reviewers and our shepherd, Willy Zwaenepoel, for comments that improved this paper. We also thank Claudiu Gheorghe, James Ide, and Okay Zed for their help and support in understanding the Facebook infrastructure. This research was partially supported by NSF awards CNS-1017148 and CNS-1421441. The views and conclusions contained in this document are those of the authors and should not be interpreted as representing NSF, Michigan, Facebook, or the U.S. government.
References
Towards Analyzing Contributions from Software Repositories to Optimize Issue Assignment
Vasileios Matsoukas, Themistoklis Diamantopoulos, Michail D. Papamichail and Andreas L. Symeonidis
Electrical and Computer Engineering Dept., Aristotle University of Thessaloniki
Thessaloniki, Greece
vmatsouk@ece.auth.gr, thdiaman@issel.ee.auth.gr, mpapamic@issel.ee.auth.gr, asymeon@eng.auth.gr
Abstract—Most software teams nowadays host their projects online and monitor software development in the form of issues/tasks. This process entails communicating through comments and reporting progress through commits and closing issues. In this context, assigning new issues, tasks or bugs to the most suitable contributor largely improves efficiency. Thus, several automated issue assignment approaches have been proposed, which however have major limitations. Most systems focus only on assigning bugs using textual data, are limited to projects explicitly using bug tracking systems, and may require manually tuning parameters per project. In this work, we build an automated issue assignment system for GitHub, taking into account the commits and issues of the repository under analysis. Our system aggregates feature probabilities using a neural network that adapts to each project, thus not requiring manual parameter tuning. Upon evaluating our methodology, we conclude that it can be efficient for automated issue assignment.
Index Terms—automated issue assignment, GitHub issues, issue triaging
I. INTRODUCTION
Software teams nowadays host their code in online repositories and collaborate using issue tracking systems in order to implement features, fix bugs, plan releases, etc. This new paradigm has brought forth an abundance of data that contain valuable information about software projects, including not only source code (commits), but also information concerning project planning and monitoring (issues/bugs), and developer productivity. We argue that these data can be mined for improving the software development process.
Apart from documenting and orchestrating the resolution of bugs, the issues tracked by online repositories may signify and prioritize feature requests, keep track of development tasks or even organize release cycles. When a new issue is created in such a system, a member of the team (also known as the triager) assigns it to a team member (or generally a contributor of the project). The process of optimally assigning tasks to team members is far from trivial, as it requires good knowledge of the project as well as the team, taking into account the past experience and the current status of each team member individually. As a result, task assignment may be time-consuming and even hard, considering that the triager may not have clear knowledge of every detail of the project, and, even when he/she does, he/she may still have to spend considerable time and effort to do the optimal assignment.
To solve this problem, several approaches for automating issue assignment have been proposed. Most of these approaches use data from bug tracking systems, such as Bugzilla1, and employ various techniques to recommend the assignment of new issues/bugs according to past assignment data [1]–[6]. Other systems focus on building contributor profiles based on existing contributions and subsequently using them to find the most suitable engineer for the new task at hand [7]–[10]. The data and the techniques used vary in each case; some are based on text data and machine learning models [1], [2], while others may also incorporate information from source code [7]–[9], or use more sophisticated algorithms including semi-supervised models [3] or even deep learning [5], [6].
Although the aforementioned solutions are effective under certain scenarios, they also have important limitations. First of all, these approaches focus mainly on bugs, therefore they have not been evaluated on the broader scope of task/feature assignments. Secondly, they are largely based on properly using a dedicated bug tracking system, and as a result they may not be applicable in projects where the software team does not follow a strict process. In the same context, these approaches are often applied on a per-project basis and thus they may require manual parameter tuning. Finally, certain contemporary approaches consider only information related to bug tracking, without taking into account the EngOps/DevOps aspects of software development (e.g. commits, mentions) that could be useful for task assignment.
In this paper, we build a task assignment recommender that confronts the above limitations. Our system employs a dataset extracted from GitHub2, including issues from a diverse set of repositories with varying number of contributors. Using GitHub issues favors the applicability of our system in cases where a more strict dedicated bug tracking system is not available. Apart from issues and issue comments, our system further takes into account the commits of each project and employs a model that aggregates all possible information to provide an assignment probability for each contributor of the project. Our model, which is based on a support vector machines classifier and a neural network, is fully adaptable to the project under analysis, producing efficient recommendations without manual parameter tuning.
The rest of this paper is organized as follows. Section II reviews the related work in the area of automated task
1https://www.bugzilla.org/
2https://github.com/
assignment and further probes on the limitations of existing approaches. Our methodology for building a new task assignment recommender is presented in Section III. Section IV evaluates our approach and illustrates its application on a software project. Any threats to the validity are discussed in Section V. Finally, Section VI concludes this work and provides interesting insight for future research.
II. RELATED WORK
As already noted, the continuously increasing need for building better software, while remaining on-schedule and on-budget, has driven the research community towards constructing methodologies able to optimize the software development process. In this context and given the constant turn towards collaborative software development, several research efforts are directed towards harnessing contributions’ information for optimizing the software development process. These research efforts target various different outcomes, such as determining the workload required to resolve bugs [11], identifying development characteristics for optimizing team building [13] or even performing more effective prioritization of requirements [14].
Among the aforementioned directions and given its vital importance towards effective project management, the challenge of determining the most appropriate developer to undertake a certain task (which is also widely known as “issue, task or bug triaging”) has drawn a lot of attention. To that end, the majority of the proposed methodologies employs data originating from bug tracking systems in order to train classification models capable of identifying the developer that best fits to a certain task based on historical data regarding previous assignments.
One of the first approaches towards automated bug triaging is that of Murphy and Cubranic [1], who employ data from bug reports that originate from the development of Eclipse project. According to their methodology, each bug report corresponds to a certain document containing a specific vocabulary and is assigned to a certain class of the problem, which refers to the corresponding developer. Using Naive Bayes for training their classifier based on the historical data, the achieved accuracy is around 30%.
Building on top of the aforementioned approach, Anvik et al. [2] identify the need to incorporate additional information (such as the current workload or the vacation schedule) into the bug assignment procedure in order to strengthen the effectiveness of the constructed models. Upon selecting Support Vector Machines for training their classifiers, they were able to improve the accuracy of the bug assignments. However, their approach does not result in hard assignments, but acts as a semi-automated assignment recommender providing a list with the top relevant developers where the triager is responsible to make the final assignments. In addition, given that information regarding labels and assignees is not always available for historical data, which results in significant information loss, Xuan et al. [3] employ semi-supervised learning using Expectation Maximization in order to incorporate into their dataset bug reports without labels/assignment information. Additional approaches that employ machine learning for automated bug assignments involve multi-label classification [4] or even deep learning using Recurrent and Convolutional Neural Networks [5], [6].
Other than focusing on machine learning, there are also approaches that perform assignments based on information regarding the individual skills and the expertise of developers, as identified by their contributions to the source code itself. Such approaches analyze code modifications and bug occurrences using natural language processing techniques in order to extract the expertise of the candidate developers as reflected in the dominant concepts of the code they modify and thus match them with the ones of the upcoming task [7]–[9]. Finally, there are also approaches that employ the DevOps principles and make use of the roles of individual developers as a contributing factor in order to assign to them the available tasks [10].
Another interesting direction in the area of automated bug triaging involves the analysis of the cases when a certain bug is re-assigned to a different developer due to the fact that it was not resolved in the first place, a procedure known as “bug tossing”. To that end, there are approaches that harness information originating from tossing graphs and employ Markov Chain Models in order to recommend the most suitable developer using goal-oriented paths [15]–[17].
The approaches discussed in the previous paragraphs are effective in certain use cases, however they also exhibit several inherent limitations. Most of the approaches analyzed depend on bug tracking systems and focus mainly on assigning bugs based on textual information from past assignments [1], [2]. As a result, their use on broader scenarios, including e.g. the use of different issue tracking systems or the existence of issues beyond the narrow scope of bugs, is limited. Furthermore, certain systems may require manual parameter tuning to be applied on different projects, further impairing their applicability, while others do not take into account the commits of the project [4]–[6]. Finally, topic modeling approaches, which focus mainly on the problem of developer profiling [7]–[9], and bug tossing approaches [15]–[17], though interesting, deviate from the scope of this work.
In this work we propose a system that is applicable on the broader task/issue assignment scenario. Our system extracts information from GitHub commits, issues, and issue comments, and processes it to generate a set of features that are aggregated and modeled to provide efficient task/issue assignments. As the data may be sparse (and there may also exist missing data), we demonstrate how useful recommendations can be provided even when a software team employs a less rigorous issue tracking system, like GitHub. Furthermore, our system is applicable on a large set of diverse projects, as it exhibits automated parameter tuning to adapt to the specifics of each software project.
III. METHODOLOGY
In this section we present our methodology for automated issue assignment.
A. Overview
The architecture of our system is shown in Figure 1. The main functionality is summarized in four steps, which involve data extraction, data preprocessing, feature extraction, and scoring/recommendation.
The data used for our analysis are extracted from a database of contribution data [18], which includes the individual contributions of more than 60,000 contributors involved in 3,000 repositories of GitHub. The data reside in a MongoDB instance and are organized in collections, which include information on issues, issue comments, commits, repository statistics, developer activity metrics, etc. Indicatively, the database contains more than 800,000 issues, 1,800,000 comments and 3,900,000 commits.
Our choice of this dataset is supported by the fact that it is diverse and covers a wide range of scenarios. As the dataset relies on GitHub, we focus on issues that are raised from contributors in software teams (i.e., used to track features or bugs) as well as external contributors (e.g., developers using an API). Furthermore, the repositories of the dataset rank among the most popular ones, as indicated by their star count, and therefore bear a high level of acceptance as well as respect the principles of modern software development. As they also vary in size and complexity, they cover a wide range of scenarios, allowing our approach to be rather domain-agnostic.
As a first step, we create a set of filtering rules according to which we determine the repositories and developers contributing to our analysis. In addition, we apply data labeling as well as text preprocessing techniques to remove unwanted textual content. Having constructed a clean and reliable dataset, we move on to build features in order to model the contributions of developers. The features employed are issue titles, bodies, labels as well as developers’ comments and commits. Finally, we combine those features using two models, a simple average model and a neural network model. Thus, our methodology can be applied on repositories to determine the suitability of each contributor in terms of undertaking a certain issue. The components outlined in Figure 1 are presented in detail in the following subsections.
B. Data Selection
The first step entails the selection of appropriate repositories in order to build the ground truth for the forthcoming analysis. The filters applied to create our dataset, as well as the labeling method, are discussed in this subsection.
At first, upon investigation, we chose to filter out repositories with fewer than 50 or more than 5000 issues. The lower threshold ensures that there are enough data for effectively applying data mining techniques, while the upper one is chosen to cut out immense repositories. Moreover, we dismiss repositories with only 1 contributor, since in this case issue assignment is trivial. We further refine our selection by demanding the number of issues to be at least 10 times larger than the number of contributors of the repository. This choice guarantees a representative number of issues to conduct our analysis. Finally, we require that developers have a minimum support of 15 previously closed issues, so that there is a sufficient background to model their contribution.
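The filtering rules can be expressed as a short selection function; the dictionary layout of a repository record is an assumption for illustration, and whether the 10-to-1 ratio is computed over all contributors or only those with sufficient support is not spelled out in the text.

```python
def select_repositories(repos, min_issues=50, max_issues=5000,
                        issues_per_contributor=10, min_support=15):
    """Keep repositories that satisfy the dataset construction rules."""
    selected = []
    for repo in repos:
        n_issues = repo["issues"]
        eligible = [c for c in repo["contributors"]
                    if c["closed_issues"] >= min_support]
        if not (min_issues <= n_issues <= max_issues):
            continue                                   # too small or too large
        if len(eligible) <= 1:
            continue                                   # assignment would be trivial
        if n_issues < issues_per_contributor * len(eligible):
            continue                                   # too few issues per contributor
        selected.append(repo)
    return selected
```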
Fig. 1. Overview of the System Architecture

The next step of dataset construction involves selecting the class attribute of an issue, which essentially means that we try to identify the developer who actually resolved it. Here, there are two fields that we can make use of, namely the ‘assignee’ and ‘closed_by’ fields. Consequently, we have to consider which field is superior and what happens in case of missing values. The answer to the first question lies on the working principles of GitHub. The ‘assignee’ field is set by the author of the issue, implying that he/she considers a contributor as the most suitable to undertake that issue. Therefore, this field can be considered a reliable source for labeling an issue. On the other side, the ‘closed_by’ field indicates which contributor has closed the issue. However, given the specifics of the development process followed in each project, the person who closed the issue is not necessarily the one who really worked for its resolution (i.e., the team leader may only close issues given that closing an issue refers to the acceptance of the respective functionality), thus we should be cautious when using it as ground truth for labeling the issue. The second challenge concerns missing values on those fields. It is possible that the author omits to set an ‘assignee’, or that an already closed issue lacks the name of the developer who closed it. What we propose as a labeling method is based on a three-tier decision mechanism (a minimal sketch in code follows the list below):
- If the ‘assignee’ field bears a valid username, then it is chosen as class attribute, as we deem such an explicit choice as highly reliable.
- If the former does not happen, then we examine the ‘closed_by’ field. We consider this field reliable, when the respective contributor does not focus on the operations part of DevOps [13]. Operations’ engineers might confirm the delivery of tasks, and thus close issues without really being involved in their resolution. To find out whether this may be the case, we check if the commits made by the contributor at hand surpass the issues closed by him/her. If the commits are more than the closed issues, then we deem the contributor as dev-oriented and thus use the ‘closed_by’ field as a class attribute. Otherwise, the label is considered invalid and the issue is discarded.
- If both fields are empty, then the issue is discarded from the analysis.
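A minimal sketch of this decision mechanism follows; the field names mirror GitHub's issue fields, while the commit and closed-issue counters are assumed to be precomputed per contributor.

```python
def label_issue(issue, commit_counts, closed_counts):
    """Return the contributor used as the class label, or None to discard."""
    if issue.get("assignee"):
        return issue["assignee"]                    # tier 1: explicit assignee
    closer = issue.get("closed_by")
    if closer:
        # Tier 2: trust 'closed_by' only for dev-oriented contributors,
        # i.e., those whose commits outnumber the issues they have closed.
        if commit_counts.get(closer, 0) > closed_counts.get(closer, 0):
            return closer
    return None                                     # tier 3: discard the issue
```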
C. Data Preprocessing
The main components of an issue are its title and body. Those components are the two main features used for assessing developers’ involvement. Before applying any text mining techniques, however, it is important to preprocess the data in order to filter out any noise and extract any useful information. To do so, we employ the NLTK [19] library and perform a series of preprocessing steps. At first, we remove any html tags and perform tokenization to remove any punctuation. After that, we discard numbers, single characters as well as stopwords found in the English stopwords list of NLTK. Finally, we convert all terms to lowercase and perform lemmatization. An example result of applying our text preprocessing mechanism is shown in Table I. It is evident that, after the preprocessing stage, the text content better emphasizes the issues’ key terms.
### Table I: Text Preprocessing Example

<table>
<tbody>
<tr>
<td>Input</td>
<td>Posted this <a href="https://github.com/jediwhale/fistsharp/issues/79">issue</a> on the fistsharp github site. If the output table of a fixture contains more columns than the input, Fitnesse throws an exception when the number of rows in the two tables is equal.</td>
</tr>
</tbody>
</table>
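A minimal NLTK-based sketch of this pipeline is shown below; the regular expression used for stripping HTML and the token filters are reasonable assumptions rather than the authors' exact code.

```python
import re

import nltk
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer

# One-time resources: nltk.download('punkt'), nltk.download('stopwords'),
# nltk.download('wordnet')
STOPWORDS = set(stopwords.words("english"))
LEMMATIZER = WordNetLemmatizer()

def preprocess(text):
    text = re.sub(r"<[^>]+>", " ", text)            # strip html tags
    tokens = nltk.word_tokenize(text)
    tokens = [t.lower() for t in tokens
              if t.isalpha()                        # drops punctuation and numbers
              and len(t) > 1                        # drops single characters
              and t.lower() not in STOPWORDS]       # drops English stopwords
    return [LEMMATIZER.lemmatize(t) for t in tokens]

print(preprocess("Posted this <a href='...'>issue</a> on the fistsharp github site."))
```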
D. Feature Extraction and Scoring
In order to measure a contributor’s suitability for undertaking an issue, we evaluate his/her contribution based on a set of extracted features. The purpose is to examine how closely is a contributor connected to a new issue from a semantic point of view. In our work, these features can be divided in two logical categories. The first refers to issue-oriented features and comprises components extracted from issue reports. This way we evaluate how a new issue relates to the issues that the contributor resolves. The issue-oriented features employed are the title, the body, and the labels of the issue. The second category focuses on contributor-oriented features that are derived from tracking developer activities. In this aspect, we attempt to discover the contributor’s interest to certain types of issues. The contributor-oriented features consist of the contributor’s comments and commits. The rest of this subsection describes how these features are extracted and used in ranking developers.
1) Issue Text: Issue text components, namely title and body, provide meaningful insight regarding the issues’ nature as well as to the technical field they belong to. For example, an issue about GUI, screens, aesthetics etc. is probably well-suited to a front-end developer, whereas an issue about databases, SQL, nodes would better match a back-end developer. The purpose behind using issue text features is to quantify the connection between the problem description and the contributors’ technical area.
The quantification requires that the text data are transformed to a representation that allows similarity comparisons. To do so, we employ a Vector Space Model, where each term is a dimension of the model and each document (title or body) is a vector of the model. We create two models, one for the issue titles and one for the issue bodies. The weight of each term in a vector is determined using term frequency-inverse document frequency (tf-idf). Thus, for our collection of documents $D$, the weight of a term $t$ in a document $d$ is computed as follows:
$$tfidf(t, d, D) = tf(t, d) \cdot idf(t, D)$$
where $tf(t, d)$ is the term frequency (tf) of term $t$ in document $d$, while $idf(t, D)$ is the inverse document frequency (idf) of term $t$ in the collection of documents $D$. The term frequency is computed as the square root of the number of times the term appears in the document, while the inverse document frequency is used to model how common are the terms in all the documents of the collection. The inverse document frequency of a term $t$ in a document collection $D$ is computed as follows:
$$idf(t, D) = 1 + \log \frac{1 + |D|}{1 + |D_t|}$$
where $|D_t|$ is the number of documents containing the term $t$. Using this factor, very common terms, which may not be useful for training our model, are penalized.
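The two formulas above translate directly into a small sketch (a plain-Python illustration; a library implementation such as scikit-learn's would weight terms slightly differently):

```python
import math
from collections import Counter

def tfidf_vectors(documents):
    """documents: list of token lists. tf is the square root of the raw count;
    idf(t, D) = 1 + log((1 + |D|) / (1 + |D_t|)), as in the text."""
    n_docs = len(documents)
    doc_freq = Counter()
    for doc in documents:
        doc_freq.update(set(doc))
    idf = {t: 1.0 + math.log((1 + n_docs) / (1 + df)) for t, df in doc_freq.items()}
    vectors = [{t: math.sqrt(c) * idf[t] for t, c in Counter(doc).items()}
               for doc in documents]
    return vectors, idf

vectors, idf = tfidf_vectors([["syncthing", "work", "sd", "card"],
                              ["syncthing", "binary", "crashed", "error", "code"]])
print(vectors[0])
```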
The vectorized representation of issue titles and bodies is used to train a classifier, which will then be able to categorize new issues to a contributor of the repository. For this
multiclass classification problem, we used Support Vector Machines (SVM), and specifically the ‘One-Vs-Rest’ approach. Concerning the parameters of the SVM, since repositories may have quite diverse data, a set of parameters that may perform well in one repository may underperform in another. As a result, the parameters are selected for each repository individually using a parameter grid. The grid adds flexibility as to the choice of the kernel, the choice of the regularization constant C, the gamma factor and the degree of the algorithm (not all parameters apply to all kernels). The grid values of the parameters are shown in Table II, where the value ‘scale’ for gamma is 1 divided by the product of the number of features with the number of data samples.
### Table II: SVM Parameter Grid
<table>
<thead>
<tr>
<th>Kernel</th>
<th>C</th>
<th>Gamma</th>
<th>Degree</th>
</tr>
</thead>
<tbody>
<tr>
<td>Linear</td>
<td>[0.1, 1, 10, 100]</td>
<td>–</td>
<td>–</td>
</tr>
<tr>
<td>Polynomial</td>
<td>[0.1, 1, 10, 100]</td>
<td>–</td>
<td>[2, 3, 4]</td>
</tr>
<tr>
<td>RBF</td>
<td>[0.1, 1, 10, 100]</td>
<td>['scale', 0.001, 0.0001]</td>
<td>-</td>
</tr>
</tbody>
</table>
The optimal configuration is chosen through grid search with 5-fold cross validation, optimizing the F-measure, computed for all issues. Upon training the SVM model, we compute the assignment probability of the issue under analysis to each contributor in the repository. This probability is calculated as the distance from the separating hyperplane and forms the contributor’s score in title and body features.
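Under the stated grid, the per-repository model selection can be sketched with scikit-learn as below. This is an approximation: the toy titles and labels are invented, the tf-idf weighting is scikit-learn's rather than the square-root variant above, and the two-fold split only stands in for the paper's 5-fold cross validation.

```python
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import GridSearchCV
from sklearn.pipeline import Pipeline
from sklearn.svm import SVC

titles = ["syncthing work sd card", "sd card permission denied",
          "use different default port syncthing gui", "gui theme color settings",
          "syncthing binary crashed error code", "crash with fatal error code"]
assignees = ["audris", "audris", "wweich", "wweich", "Zillode", "Zillode"]

pipeline = Pipeline([("tfidf", TfidfVectorizer()),
                     ("svm", SVC(decision_function_shape="ovr"))])
param_grid = [
    {"svm__kernel": ["linear"], "svm__C": [0.1, 1, 10, 100]},
    {"svm__kernel": ["poly"], "svm__C": [0.1, 1, 10, 100], "svm__degree": [2, 3, 4]},
    {"svm__kernel": ["rbf"], "svm__C": [0.1, 1, 10, 100],
     "svm__gamma": ["scale", 0.001, 0.0001]},
]
search = GridSearchCV(pipeline, param_grid, scoring="f1_weighted", cv=2)
search.fit(titles, assignees)

# Per-contributor scores for a new title: one-vs-rest distances from the hyperplane.
scores = search.decision_function(["syncthing crashed with error code"])[0]
classes = search.best_estimator_.named_steps["svm"].classes_
print(dict(zip(classes, scores)))
```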
2) Issue Labels: Attaching labels to issues implies the author’s intention to classify the issue in a specific category. This action can be mined to investigate whether there is a pattern connecting the labels with the issues that a developer resolves.
For each repository under analysis, we initially extract all its labels. For each label, we examine the training issues and keep track of the contributor and the number of issues closed with that label. We apply this procedure for every label, and create a global index for the repository. When a new issue arises, the contributor receives a score which is the relative frequency of his/her involvement in that label. In case an issue has multiple labels, we first sum the frequencies over all of the issue's labels and then compute the contributor's relative frequency over that total.
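A small sketch of this label index and scoring is given below; the handling of multi-label issues follows the description above, so the summing step is a best-effort reconstruction.

```python
from collections import Counter, defaultdict

def build_label_index(closed_issues):
    """closed_issues: iterable of (labels, resolver) pairs from past issues."""
    index = defaultdict(Counter)          # label -> Counter of resolver frequencies
    for labels, resolver in closed_issues:
        for label in labels:
            index[label][resolver] += 1
    return index

def label_score(index, issue_labels, contributor):
    """Sum per-label frequencies over the issue's labels, then return the
    contributor's relative frequency of that total."""
    totals = Counter()
    for label in issue_labels:
        totals.update(index.get(label, Counter()))
    total = sum(totals.values())
    return totals[contributor] / total if total else 0.0

index = build_label_index([({"bug"}, "Zillode"), ({"bug", "gui"}, "wweich"),
                           ({"gui"}, "wweich")])
print(label_score(index, {"bug", "gui"}, "wweich"))   # 3 / 4 = 0.75
```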
3) Issue Comments: GitHub allows users to initiate a discussion on an issue by adding comments to it. A contributor’s participation in the issue commenting thread typically implies his/her contribution to its resolution. It is also frequently the case that the resolver of the issue belongs to the commenters list. Given this observation, the contributors’ comments can be a useful feature for issue assignment. However, when a new issue arises, the only known fields are its title, body and labels. Consequently, we can only make use of contributors’ past commented issues and attempt to find out their connection with the new issue. Therefore, to evaluate the significance of issue comments in our methodology, we attempt to measure the connection between the past issue titles that developers comment on and the title of the new issue.

For each contributor, we retrieve the issues on which the contributor has commented in a 365-day time window since the appearance of the new issue. We impose this limit to mitigate the possibility of disorienting the model with developers that may have retired or changed specialization field. Then, we employ the tf-idf model built for titles in subsection III-D1 to calculate the cosine similarity of the titles of the commented issues with the title of the new issue. The cosine similarity is defined for two issue titles/documents $d_1$ and $d_2$ as:

$$\text{cossim}(d_1, d_2) = \frac{d_1 \cdot d_2}{|d_1| \cdot |d_2|} = \frac{\sum_{i=1}^{N} w_{t_i,d_1} \cdot w_{t_i,d_2}}{\sqrt{\sum_{i=1}^{N} w_{t_i,d_1}^2} \cdot \sqrt{\sum_{i=1}^{N} w_{t_i,d_2}^2}}$$

where $w_{t_i,d}$ is the tf-idf score of term $t_i$ in document $d$ (computed using (1)) and $N$ is the total number of terms. Finally, upon computing the similarities of the titles of all past commented issues with the title of the new issue, we compute their mean value. The mean is computed only over similarities at least 10% larger than the smallest observed value, so that it is not distorted by small or zero cosine similarity values. This mean value forms the contributor’s score for the comments feature.

An example is given in Table IV. Contributors with no commented issues in the relevant time window receive a zero score. Contributor ‘audris’ has commented on only 1 issue with a low but non-zero similarity score. ‘Zillode’ has commented on 3 issues, getting a higher mean similarity score, as the titles of the relevant commented issues closely resemble the title of the testing issue.
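A sketch of the comments score follows. The `vectorize` helper is assumed to return a tf-idf vector from the title model of subsection III-D1 (for instance the `tfidf_vectors` sketch above), and the 10% floor reconstructs the thresholding described in the text.

```python
import math
from datetime import timedelta

def cosine(v1, v2):
    dot = sum(v1[t] * v2[t] for t in set(v1) & set(v2))
    n1 = math.sqrt(sum(w * w for w in v1.values()))
    n2 = math.sqrt(sum(w * w for w in v2.values()))
    return dot / (n1 * n2) if n1 and n2 else 0.0

def comment_score(commented_issues, new_issue_vec, new_issue_date, vectorize):
    """commented_issues: (title, created_at) pairs the contributor commented on.
    Only issues from the last 365 days count; the score is the mean similarity
    over values at least 10% above the smallest observed similarity."""
    window_start = new_issue_date - timedelta(days=365)
    sims = [cosine(vectorize(title), new_issue_vec)
            for title, created_at in commented_issues
            if created_at >= window_start]
    if not sims:
        return 0.0                        # no comments in the window -> zero score
    floor = min(sims) * 1.1
    kept = [s for s in sims if s >= floor] or sims
    return sum(kept) / len(kept)
```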
4) Contributor Commits: Commits may prove quite useful for predicting the assignment of issues, since the aim of commits by contributors is issue resolution. Similarly to comments, the process for the commits takes into account the past commits of the contributor and compares them with new issues.
Thus, initially, for each contributor we retrieve the most recent commits. Once again, the retrieval concerns commits submitted in a 365-day window until the new issue, in order to capture the recent activity of contributors. After that, we compute the cosine similarity between the commit messages of those commits and the title of the new issue. We use a vector space model similar to that of subsection III-D1, and, instead of the average, we take into account the maximum value. We use the maximum since, when a contributor has even one commit message similar to the issue title, he/she has probably worked on a relevant part of the source code. So, the fact that the contributor may have worked also in other parts should not influence the assignment.
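The commits score differs only in taking the maximum rather than the mean; the sketch below reuses the `cosine` and `vectorize` helpers assumed in the comments sketch.

```python
from datetime import timedelta

def commit_score(commits, new_issue_vec, new_issue_date, vectorize):
    """commits: (message, committed_at) pairs by the contributor. The score is
    the highest cosine similarity between a commit message from the last
    365 days and the new issue's title."""
    window_start = new_issue_date - timedelta(days=365)
    sims = [cosine(vectorize(message), new_issue_vec)
            for message, committed_at in commits
            if committed_at >= window_start]
    return max(sims) if sims else 0.0     # no recent commits -> zero score
```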
An example is given in Table V, where an issue retrieved from the repository “syncthing_syncthing-android”, titled (after preprocessing) “restart config updated fix Zillode”, is assigned to contributor ‘Zillode’.
For each contributor, we extract their commits in the relevant time window. In Table V, for the contributors that have many commits, only the 3 most relevant are shown (in terms of maximum similarity score). As one may notice, ‘catfriend’ has no commits in the past time window, thus getting a zero score. Moreover, ‘audris’ and ‘wweich’ only have 1 and 2 commits respectively, which are weakly related with the issue in question. ‘Zillode’, on the other hand, has multiple commits, one of which with a commit message strongly related to the title of the issue in question, therefore getting the highest score in the commits section.
### Table IV: Example of Contributors with Issue Comments

<table>
<thead>
<tr>
<th>Contributor</th>
<th>Issue ID</th>
<th>Title</th>
<th>Score</th>
</tr>
</thead>
<tbody>
<tr>
<td>audris</td>
<td>87</td>
<td>syncthing work sd card</td>
<td>0.0913</td>
</tr>
<tr>
<td>catfriend</td>
<td>–</td>
<td>syncthing binary crashed error code</td>
<td>0.0000</td>
</tr>
<tr>
<td rowspan="3">Zillode</td>
<td>–</td>
<td>syncthing binary crashed error code</td>
<td>0.4709</td>
</tr>
<tr>
<td>164</td>
<td>use different default port syncthing gui</td>
<td>1.0000</td>
</tr>
<tr>
<td>288</td>
<td>syncthing binary crashed error code</td>
<td>0.3165</td>
</tr>
<tr>
<td rowspan="2">wweich</td>
<td>–</td>
<td>syncthing binary crashed error code</td>
<td>0.0000</td>
</tr>
<tr>
<td>–</td>
<td>syncthing work sd card</td>
<td>–</td>
</tr>
</tbody>
</table>
### Table V: Example of Contributors with Commits

<table>
<thead>
<tr>
<th>Contributor</th>
<th>Commit ID</th>
<th>Message</th>
<th>Score</th>
</tr>
</thead>
<tbody>
<tr>
<td>audris</td>
<td>103</td>
<td>Support new UR value (fixes #471)</td>
<td>0.0830</td>
</tr>
<tr>
<td>catfriend</td>
<td>210</td>
<td>Updated Phone, 7 inch tablet and Android TV screen shots (ref #422)</td>
<td>0.3135</td>
</tr>
<tr>
<td>wweich</td>
<td>206</td>
<td>Update phone screenshots + new 7 inch tablet screen shots (ref #422)</td>
<td>0.2168</td>
</tr>
</tbody>
</table>
E. Recommending Issue Assignments
As already mentioned, the objective of our system is to recommend issue assignments. The final assignment recommendation is based on all five features discussed in the previous section, namely those based on the title, body, labels, comments and commits. Taking into account all available data, we initially create an aggregation approach based on averaging all scores for every contributor. In this case, the recommended assignee is the one who has the highest score.
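The averaging baseline reduces to a few lines; the feature names match the five features above, while the toy scores are illustrative.

```python
def rank_by_average(feature_scores):
    """feature_scores: feature name -> {contributor: score}. Returns contributors
    ranked by the mean of their scores over all five features."""
    contributors = {c for scores in feature_scores.values() for c in scores}
    ranking = {c: sum(scores.get(c, 0.0) for scores in feature_scores.values())
                  / len(feature_scores)
               for c in contributors}
    return sorted(ranking.items(), key=lambda item: item[1], reverse=True)

print(rank_by_average({
    "title":    {"Zillode": 0.80, "audris": 0.20},
    "body":     {"Zillode": 0.60, "audris": 0.30},
    "labels":   {"Zillode": 0.50, "audris": 0.50},
    "comments": {"Zillode": 0.47, "audris": 0.09},
    "commits":  {"Zillode": 0.90, "audris": 0.08},
}))
```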
Up to this point, we have built a model that considers versatile sources of information to perform issue assignment. However, it is not guaranteed that averaging over all five features can lead to the best possible model. In fact, each team may embrace a different way of building software, thus leading to a unique feature combination/weighting. Therefore, we propose a scheme that can cope with the challenges of various repositories. In practice, we attempt to build a model capable of adjusting the importance of those features according to each repository. We refrain from rule-based techniques, as these may result in certain thresholds that are not always adaptable and thus do not conform to different repositories. Instead, we build an adaptable model that provides output that is specific to the characteristics of each repository.
We employ a multilayer perceptron with one hidden layer. Our neural network receives as input the contributor’s scores and calculates their influence on the output, which is a score in the range [0, 1]. Designing an efficient neural network is a challenging task. The main obstacle to overcome is the ability of the network to generalize efficiently not only in one, but in a number of repositories. To address this challenge, in our work, we propose a reconfigurable network architecture, where a set of the network parameters is dynamically adjusted to meet the needs of each repository. We try to keep the variable set as small as possible, to reduce computational complexity. Therefore the network consists of some constant and some variable/configurable parameters, which are shown in Tables VI and VII, respectively (parameters refer to the Keras API).
https://github.com/fchollet/keras
Upon experimentation, the constant parameters of Table VI have proven to have stable performance across repositories. In contrast, we detected that three parameters, namely the L2 regularization, the batch size, and the number of training epochs, significantly affect the network performance. Specifically, the L2 regularization imposes a penalty on the network weights, therefore playing an important role in the generalization ability. The batch size determines how many samples are received by the network before the update of its weights. We found that some repositories are favored by a slower rate, while others require a faster one. Last but not least, the number of training epochs is also a configurable parameter. However, this parameter is automatically tuned by an early stopping mechanism which monitors the validation loss function. If there is no decrease of at least 0.01 for two consecutive epochs, then the network terminates the training process. The early stopping technique helps avoid overfitting on the training data, while allowing an extra configurable parameter without computational load. All in all, the design choices of the configurable parameters are shown in Table VII. The optimal configuration is chosen through exhaustive grid search with a 5-fold cross-validation technique, which is commonly used for assessing bug triaging systems [9], [10]. In practice, the testing set is split into ordered folds of the same size, and the system makes predictions on the issues of the current fold, considering all the previous issues. In the next round, the current fold is incorporated in the training set, and we make predictions for the next fold, and so on. In this way, the system is able to continuously update its knowledge on recent issues. For the purposes of incremental learning, the issues are sorted in chronological order based on their ‘created-at’ attribute. In our work, we begin with 70% of the issues as the training set, while the test fold length is set to 5%. Consequently, the system goes through 6 rounds of training and, after each round, the training set is extended by the testing fold. Finally, we impose the restriction that the training set has at least one sample from each class in the initial round, so that the prediction is meaningful. If this requirement is not met, the system skips this training round and moves on to the next fold.
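The sketch below illustrates how such a reconfigurable perceptron and its tuning could look in Keras; the hidden-layer width, activation functions, optimizer, and the grid of L2/batch-size values are our own illustrative assumptions, while the early-stopping criterion (no validation-loss improvement of at least 0.01 for two consecutive epochs) follows the description above.

```python
# Hypothetical Keras sketch of the reconfigurable MLP: one hidden layer over the
# five feature scores, L2 regularization and batch size tuned by grid search,
# training epochs bounded by early stopping (min_delta=0.01, patience=2).
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers, regularizers

def build_model(l2_penalty):
    model = keras.Sequential([
        layers.Dense(16, activation="relu", input_shape=(5,),   # hidden width is an assumption
                     kernel_regularizer=regularizers.l2(l2_penalty)),
        layers.Dense(1, activation="sigmoid"),                   # score in [0, 1]
    ])
    model.compile(optimizer="adam", loss="binary_crossentropy")
    return model

early_stop = keras.callbacks.EarlyStopping(monitor="val_loss", min_delta=0.01, patience=2)

# Dummy data standing in for (contributor feature scores, assigned-or-not) pairs.
X = np.random.rand(200, 5)
y = np.random.randint(0, 2, size=200)

best = None
for l2_penalty in (1e-4, 1e-3, 1e-2):       # grid values are placeholders
    for batch_size in (8, 16, 32):
        model = build_model(l2_penalty)
        history = model.fit(X, y, validation_split=0.2, epochs=50,
                            batch_size=batch_size, callbacks=[early_stop], verbose=0)
        val_loss = min(history.history["val_loss"])
        if best is None or val_loss < best[0]:
            best = (val_loss, l2_penalty, batch_size)
print("selected configuration (val_loss, l2, batch_size):", best)
```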
As issue assignment is in fact a multiclass classification problem, the number of contributors in a repository is a crucial factor for our model’s performance. It is generally expected that larger teams will impose a higher level of difficulty on our problem compared to smaller ones. In Figure 2, we present the distribution of contributors in the analyzed repositories. As observed, the repositories with 2 to 5 contributors make up 53% of the total repositories. For 6 to 15 contributors, we also gather an acceptable number of repositories, although the samples are more limited, as it becomes harder for repositories with that many contributors to meet our filtering criteria.

As a result, we focus our evaluation on three axes. First, we study the accuracy of our methods for repositories with different numbers of contributors. Next, we evaluate our approach against the different baselines outlined above, using the main classification metrics (precision, recall, accuracy, and F-measure). Finally, we present an example analysis on a repository to better illustrate the effectiveness of our approach in a qualitative manner.
### A. Contributor Analysis
The first part of our evaluation focuses on the impact of the different number of contributors on the performance of our models. Specifically, Figure 3 captures the mean accuracy achieved by the various configurations, depending on the number of contributors in the repository. As one may notice, using only title and body features, we achieve relatively poor predictions in the range of 2 to 4 contributors. By gradually incorporating all the features, the model exhibits a boost in performance achieving accuracy values close to 0.8 and 0.7, for 2 and 3/4 contributors respectively. The application of the neural network, with the optimal feature scheme, further increases the effectiveness of the system. In the range from 5 to 10 developers, we observe that the combination of all features maintains a stable accuracy response close to 0.55, while the optimized model achieves slightly better performance.
Finally, in the range from 11 to 15 developers, the performance begins to show a reasonable descending trend, as the model has to predict many classes. Some spikes are observed in this zone, as a result of the few repository samples, which make this analysis sensitive to outliers. By the term “outlier”, we refer to repositories that exhibit significant differences in the way they utilize issue information compared to the expected behavior. For example, a repository where contributors use comments instead of assignees to perform assignments exhibits a distorted behavior, which is reflected in the analyzed features and is thus expected to achieve poor performance. Although using only the title and body leads to low accuracy scores, the full-feature and optimized models maintain a decent performance in that range. And of course, even with a baseline combination of features, the results surpass those of a random classifier for the corresponding number of developers. For instance, a random-based model would yield an assignment accuracy equal to 0.2 for a team of 5 contributors, whereas our model is on average almost three times more accurate.
### B. Classification Evaluation
In the second part of our evaluation, we assess the different configurations in terms of accuracy, precision, recall and F-measure. Figure 4 depicts the performance of each configuration on the aforementioned classification metrics, averaged over all repositories. At first glance, the results confirm that incorporating features from different sources leads to a more robust model, and that the optimal feature scheme further boosts the performance of our system.


In the context of the task assignment challenge, accuracy can be seen as the global percentage of successful assignments, and precision as the percentage of the suggested developers who actually worked on the issue. Specifically, these two metrics appear to follow the same trend, with our best configuration achieving a value of 0.6. However, the most remarkable aspect of our system is its performance regarding the recall metric. In practice, our system on average achieves recall values close to 0.8. Considering that recall is perceived as the percentage of developers who worked on the issue and were actually recommended, our system can become a valuable guide in recommending issue assignments. Last but not least, the F-measure, which is the harmonic mean of precision and recall, further confirms our assumptions about the effectiveness of the various configurations.
### C. Repository Analysis
To conclude our evaluation, we conduct a qualitative analysis to confirm that our system can prove useful in practice. For the purpose of this case study, we have chosen the repository ‘airlift/airlift’, as it exhibits typical project characteristics. The project was initiated in 2010, had more than 700 (both open and closed) issues by the time the database was updated, and has a core of 5 major contributors that satisfy our filtering criteria. In fact, the repository has a total of 37 contributors, but the contribution of the members other than those 5 is negligible, and is thus reasonably set aside by our model.
Fig. 5. Repository analysis, which includes (a) the number of issues assigned and predicted per contributor, (b) the confusion matrix of our model, and (c) the distribution of issues (assigned and predicted) on all contributors of the repository.
As already mentioned, our model tests on the latest 30% of total issues. This corresponds to approximately 210 test issues in our case, which eventually drop to 174 after eliminating open and untracked issues. The number of issues assigned and predicted per contributor is shown in Figure 5a. At first glance, we may note that our system manages to efficiently capture the workload capacity among the team members. Specifically, the system maintains the balance, assigning more issues to prominent members, while not overloading those with less regular activity. The equivalent confusion matrix for those assignments is given in Figure 5b. As shown in that matrix, our model correctly recommends most of the assignments, as the majority of observations fall on the main diagonal. There are also some misclassified issues, which however should not necessarily be treated as unreasonable assignments. In fact, although several issues that should be assigned to contributors 2, 4, or 5 are assigned to contributor 3 by our model, these issues could be resolved by either of these contributors in terms of knowledge and skills.
Figure 5c depicts the distribution of issues (assigned and predicted) to all contributors over the timeline of the project. It is evident that contributor 3 is the most involved contributor and undertakes the majority of issues. Clearly, our model detects the skills of this contributor and effectively shifts the balance towards him/her. At the same time, the model neither neglects nor overloads contributors 2, 4 and 5, who have less frequent, yet regular, contributions. Therefore, our model exhibits its capability to cope with imbalanced classes. This is further confirmed by inspecting the first third of the timeline, where the assignments are highly accurate, despite some misclassifications during the later progress of the project. As already argued, however, these are issues that could have equally been resolved by either of the developers. Lastly, contributor 1 is neither assigned nor recommended any issues throughout that period, which verifies that the model has acted correctly in that case as well.
On top of the above analysis, and in an effort to assess whether the assignment results are reasonable with respect to the expertise of each contributor, we analyze information from individual contributions in order to extract the primary directions of each member of the software team. In this context, we apply tf-idf in order to identify the primary directions of each contributor based on the issues he/she is involved in. This identification includes the creation of a corpus that contains all concepts included in the project as keywords. We perform the same analysis for each contributor using the title and the body of all issues he/she is involved in, and we calculate the frequencies of the co-occurring concepts. Table VIII presents the results for all five contributors of the project analyzed in this subsection, including the top 5 keywords for each contributor.
### Table VIII: Top 5 Dominant Keywords per Contributor
<table>
<thead>
<tr>
<th>Contributor</th>
<th>Dominant Keywords</th>
</tr>
</thead>
<tbody>
<tr>
<td>Contributor 1</td>
<td>—</td>
</tr>
<tr>
<td>Contributor 2</td>
<td>jetty, server, add, thread, request</td>
</tr>
<tr>
<td>Contributor 3</td>
<td>test, error, fix, update, java</td>
</tr>
<tr>
<td>Contributor 4</td>
<td>add, http, request, jvm, java</td>
</tr>
<tr>
<td>Contributor 5</td>
<td>log, http, support, client, add</td>
</tr>
</tbody>
</table>
Given the results of this table, contributors 2, 4, and 5 appear to primarily undertake the implementation of new features, as reflected by the dominant keywords of their contributions. Upon manually examining the commits of the respective repository, we found that the majority of their contributions involve additions of new source code rather than modifications and deletions (additions account for around 70% of their contributions). On the other hand, contributor 3 appears to be responsible for testing, as the majority of his/her contributions refer to fixing errors that occur upon testing (e.g. the titles of the issues assigned to this contributor include “fix launcher for Python < 2.7”, “Fix sporadic failure in testHttpRequestEvent”, etc.). As before, this is also reflected in the difference between the added and removed lines of code; for this contributor, the deleted lines of code are twice as many as the added ones (32,093 added versus 67,199 deleted at the time of writing). All in all, the aforementioned patterns are also reflected in the assignments recommended by our strategy and thus verify that our models are able to capture the contribution characteristics of the analyzed projects.
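As an aside, per-contributor keyword profiles of this kind could be approximated with a standard tf-idf pipeline such as the one sketched below; treating each contributor's concatenated issue titles and bodies as a single document, and the use of scikit-learn, are our assumptions rather than the authors' exact setup.

```python
# Hypothetical sketch: extract the top-5 tf-idf keywords per contributor from
# the titles/bodies of the issues each contributor is involved in.
from sklearn.feature_extraction.text import TfidfVectorizer

def dominant_keywords(issues_per_contributor, top_k=5):
    """issues_per_contributor maps a contributor to a list of issue texts
    (title + body); returns the top_k tf-idf terms per contributor."""
    contributors = list(issues_per_contributor)
    documents = [" ".join(issues_per_contributor[c]) for c in contributors]
    vectorizer = TfidfVectorizer(stop_words="english")
    tfidf = vectorizer.fit_transform(documents)
    terms = vectorizer.get_feature_names_out()
    result = {}
    for i, contributor in enumerate(contributors):
        row = tfidf.getrow(i).toarray().ravel()
        top = row.argsort()[::-1][:top_k]
        result[contributor] = [terms[j] for j in top if row[j] > 0]
    return result

example = {
    "contributor_2": ["jetty server thread pool", "add request logging to jetty server"],
    "contributor_3": ["fix sporadic failure in testHttpRequestEvent", "update java test error handling"],
}
print(dominant_keywords(example))
```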
V. Threats to Validity
In this section, we discuss the threats to the validity of our approach as well as the steps taken to mitigate them. First, our selection of repositories was based on their popularity, as defined by the number of GitHub stars and the number of contributors, which may not be representative of projects that follow specialized development principles. However, given that these repositories cover a diverse set of different scopes, we may safely assume that our methodology performs efficiently on the majority of software development projects, from a domain-agnostic point of view. Should one require the analysis of more domain-specific repositories, the training set should be modified appropriately to incorporate the custom features of such projects.
Concerning missing data, we may note that the quality of the provided information is a factor that affects the performance of our issue assignment recommender. Specifically, there are cases where the issue title and body fields are omitted, or the issues are left unlabeled or even mislabeled. Additionally, the number of commits and comments may be too small to make reliable assignments. However, this is a challenge faced by several approaches in this field, as issue management systems may sometimes include incomplete or missing information on projects. To mitigate this problem, our approach includes a preprocessing/filtering step in order to use high-quality data for the construction of the models.
Finally, comparison with existing approaches was not directly feasible, given that most approaches use different datasets (and not GitHub) and thus they are optimized under a different scope. Traditional issue management systems are typically used by software teams for specific purposes (e.g. to drive development or track bugs), while GitHub issues are somewhat generic, as they may be used not only within a software team for development, but also as a communication channel with users/developers external to the team. We plan, however, as future work to assess the applicability of our methodology on other issue management systems, and evaluate it with respect to the current state-of-the-practice.
VI. Conclusion
As more and more software teams employ issue tracking systems to keep track of the progress of their projects, automated issue assignment has grown to be an important research area for software development. In this work, we have designed a recommendation system that can effectively assign GitHub issues to different contributors in a repository. Our system takes into account repository data offered by GitHub, i.e. commits, issues, and issue comments, and builds a feature set that can be used to produce probabilities for different assignments of an issue. Another major point of our system is the fact that it employs an updatable model that can adapt its parameters to the specifics of different projects/repositories.
Our evaluation indicates that our system is indeed quite robust, producing better results than the baselines both in diverse repositories with different numbers of contributors and as a whole. Furthermore, upon illustrating the application of our system on a sample repository, we may conclude that it produces reasonable recommendations, and thus can be used effectively for automated issue assignment.
Future work lies in several directions. At first, we could extract more types of data, including e.g. the source code of the commits for each contributor, the workload of the team with respect to the issue, etc. Another idea would be to further investigate the histories of issues to identify patterns (e.g. multiple re-assignments) that affect the performance of our model and/or build a reliability index in order to produce more accurate assignments. Furthermore, we could further investigate our choice of models, especially by employing text embeddings to improve the text mining component of our methodology. An interesting research direction would also be to apply our methodology on other issue tracking systems, beyond GitHub, and assess its performance on these systems. Finally, an interesting idea for future research would be to conduct a survey in order to assess how our system can be used in different scenarios.
REFERENCES
Adaptive Push-Pull: Disseminating Dynamic Web Data
Pavan Deolasee Amol Katkar Ankur Panchbudhe Krithi Ramamritham Prashant Shenoy
Department of Computer Science and Engineering, Indian Institute of Technology Bombay
Mumbai, India 400076
{pavan,amolk,ankurp,krithi}@cse.iitb.ernet.in
Department of Computer Science
University of Massachusetts
Amherst, MA 01003
{krithi,shenoy}@cs.umass.edu
ABSTRACT
An important issue in the dissemination of time-varying web data such as sports scores and stock prices is the maintenance of temporal coherency. In the case of servers adhering to the HTTP protocol, clients need to frequently pull the data based on the dynamics of the data and a user’s coherency requirements. In contrast, servers that possess push capability maintain state information pertaining to clients and push only those changes that are of interest to a user. These two canonical techniques have complementary properties with respect to the level of temporal coherency maintained, communication overheads, state space overheads, and loss of coherency due to (server) failures. In this paper, we show how to combine push- and pull-based techniques to achieve the best features of both approaches. Our combined technique tailors the dissemination of data from servers to clients based on (i) the capabilities and load at servers and proxies, and (ii) clients’ coherency requirements. Our experimental results demonstrate that such adaptive data dissemination is essential to meet diverse temporal coherency requirements, to be resilient to failures, and for the efficient and scalable utilization of server and network resources.
Keywords
Dynamic Data, Temporal Coherency, Scalability, Resiliency, World Wide Web, Data Dissemination, Push, Pull
1. INTRODUCTION
Recent studies have shown that an increasing fraction of the data on the world wide web is time-varying (i.e., changes frequently). Examples of such data include sports information, news, and financial information such as stock prices. The coherency requirements associated with a data item depend on the nature of the item and user tolerances. To illustrate, a user may be willing to receive sports and news information that may be out-of-sync by a few minutes with respect to the server, but may desire to have stronger coherency requirements for data items such as stock prices. A user who is interested in changes of more than a dollar for a particular stock price need not be notified of smaller intermediate changes.
In the rest of this section, we (a) describe the problem of temporal coherency maintenance in detail, (b) show the need to go beyond the canonical Push- and Pull-based data dissemination, and (c) outline the key contributions of this paper, namely, the development and evaluation of adaptive protocols for disseminating dynamic i.e., time-varying data.
1.1 The Problem of Maintaining Temporal Coherency on the Web
Suppose users obtain their time-varying data from a proxy cache. To maintain coherency of the cached data, each cached item must be periodically refreshed with the copy at the server. We assume that a user specifies a temporal coherency requirement (tcr) for each cached item of interest. The value of tcr denotes the maximum permissible deviation of the cached value from the value at the server and thus constitutes the user-specified tolerance. Observe that tcr can be specified in units of time (i.e., the item should never be out-of-sync by more than 5 minutes) or value (e.g., the stock price should never be out-of-sync by more than a dollar).
In this paper, we only consider temporal coherency requirements specified in terms of the value of the object (maintaining temporal coherency specified in units of time is a simpler problem that requires less sophisticated techniques). As shown in Figure 1, let $S(t)$, $P(t)$ and $U(t)$ denote the value of the data item at the server, proxy cache and the user, respectively. Then, to maintain temporal coherency we should have $|U(t) - S(t)| \leq c$.

The fidelity of the data seen by users depends on the degree to which their coherency needs are met. We define the fidelity $f$ observed by a user to be the total length of time that the above inequality holds (normalized by the total length of the observations). In addition to specifying the coherency requirement $tcr$, users can also specify their fidelity requirement $f$ for each data item so that an algorithm that is capable of handling users’ fidelity and temporal coherency requirements (tcrs) can adapt to users’ needs.
In this paper we develop adaptive push- and pull-based data dissemination techniques that maintain user-specified coherency and fidelity requirements. We focus on the path between a server and a proxy, assuming that push is used by proxies to disseminate data to end-users. Since proxies act as immediate clients to servers, henceforth, we use the terms proxy and client interchangeably (unless specified otherwise, the latter term is distinct from the ultimate end-users of data).
1.2 The Need for Combining Push and Pull to Disseminate Dynamic Data
In the case of servers adhering to the HTTP protocol, proxies need to periodically pull the data based on the dynamics of the data and a user’s coherency requirements. In contrast, servers that possess push capability maintain state information pertaining to clients and push only those changes that are of interest to a user/proxy.
The first contribution of this paper is an extensive evaluation of the canonical push- and pull-based techniques using traces of real-world dynamic data. Our results, reported in Section 2.3 and summarized in Table 1, show that these two canonical techniques have complementary properties with respect to the level of temporal coherency maintained, the communication and state space overheads, and the resiliency to (server) failures. In particular, a push-based approach offers high fidelity but is less resilient to failures due to its stateful nature, while an approach that combines the two should not sacrifice the clients’ requirements and must adapt to the conditions prevailing in the network or at the server/proxy.
These properties indicate that a push-based approach is suitable when a client requires its coherency requirements to be satisfied with a high fidelity, or when the communication overheads are the bottleneck. A pull-based approach is better suited to less frequently changing data or for less stringent coherency requirements, and when resilience to failures is important.
The scalability properties of PoP and PaP are preferable to those of Pull or Push by themselves. With respect to resiliency, PaP offers graceful degradation upon loss of state at the server or when the server loses a push connection. This is because, with PaP, a client normally obtains data through pushes and pulls, and when pushes from the server stop, pulls come to its rescue. So PaP seamlessly recovers from such failures. Similarly, PoP is designed so that a client comes to know of state space losses or connection losses after a delay, at which point it needs to explicitly switch to pulling. Hence it too experiences graceful degradation, albeit after a delay. So, both PaP and PoP offer better failure handling properties than Push.
The behavior of PaP and PoP can be adjusted to suit the temporal coherency requirements imposed on data. In the case of PaP, this is done by adjusting its parameters which can be done even on short time scales; with PoP, switching from Push to Pull or vice versa for a particular connection is viable over large time scales and this will change the temporal coherency of the disseminated data.
The last row of Table 1 shows the behavior of a protocol PoPoPaP that chooses one of Push, Pull, or PaP, thereby getting the benefits of all three where it is most appropriate to deploy them. This allows it to behave the best along all dimensions: resiliency, temporal coherency, and scalability.
2. PUSH VS. PULL: ALGORITHMS AND THEIR PERFORMANCE
In this section, we present a comparison of push- and pull-based data dissemination and evaluate their tradeoffs. These techniques will form the basis for our combined push-pull algorithms.
### Table 1: Behavioral Characteristics of Data Dissemination Algorithms
<table>
<thead>
<tr>
<th>Algorithm</th>
<th>Resiliency</th>
<th>Temporal Coherency</th>
<th>Overheads (Scalability)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Push</td>
<td>Low/Medium</td>
<td>High</td>
<td>Low/Medium</td>
</tr>
<tr>
<td>Pull</td>
<td>High</td>
<td>Low (for small tcrs) / High (for large tcrs)</td>
<td>Low/Medium Low/Medium High</td>
</tr>
<tr>
<td>PaP</td>
<td>Graceful degradation</td>
<td>Adjustable (fine grain)</td>
<td>Low/Medium Low/Medium High</td>
</tr>
<tr>
<td>PoP</td>
<td>Delayed graceful degradation</td>
<td>Adjustable (coarse grain)</td>
<td>Low/Medium Low/Medium Low/Medium</td>
</tr>
<tr>
<td>PoPoPaP</td>
<td>Graceful degradation</td>
<td>Adjustable</td>
<td>Low/Medium Low/Medium Low/Medium</td>
</tr>
</tbody>
</table>
### 2.1 Pull
To achieve temporal coherency using a pull-based approach, a proxy can compute a *Time To Refresh (TTR)* attribute with each cached data item. The TTR denotes the next time at which the proxy should poll the server so as to refresh the data item if it has changed in the interim. A proxy can compute the TTR values based on the rate of change of the data and the user’s coherency requirements. Rapidly changing data items and/or stringent coherency requirements result in smaller TTRs, whereas infrequent changes or less stringent coherency requirement require less frequent polls to the server, and hence, a larger TTR.\(^1\) Observe that a proxy need not pull every single change to the data item, only those changes that are of interest to the user need to be pulled from the server (and the TTR is computed accordingly).
Clearly, the success of the pull-based technique hinges on the accurate estimation of the TTR value. Next, we summarize a set of techniques for computing the TTR value that have their origins in [21]. Given a user’s coherency requirement, these techniques allow a proxy to adaptively vary the TTR value based on the rate of change of the data item. The TTR decreases dynamically when a data item starts changing rapidly and increases when a hot data item becomes cold. To achieve this objective, the *Adaptive TTR* approach takes into account (a) static bounds so that TTR values are not set too high or too low, (b) the most rapid changes that have occurred so far and (c) the most recent changes to the polled data.
In what follows, we use \(D_0, D_1, \ldots, D_l\) to denote the values of a data item \(D\) at the server in chronological order. Thus, \(D_l\) is the latest value of data item \(D\). \(TTR_{\text{adaptive}}\) is computed as:
\[
\max\{TTR_{\text{min}}, \min(TTR_{\text{max}}, \alpha \times TTR_{\text{min}} + (1-\alpha) \times TTR_{\text{dyn}})\}
\]
where
- \(TTR_{\text{min}}, TTR_{\text{max}}\) denote the range within which TTR values are bound.
- \(TTR_{\text{min}}\) denotes the most conservative, i.e., smallest, TTR value used so far. If the next TTR is set to \(TTR_{\text{min}}\), temporal coherency will be maintained even if the maximum rate of change observed so far recurred. However, this TTR is pessimistic since it is based on worst case rate of change at the source. If this worst case rapid change occurs for only a small duration of time, then this approach is likely to waste a lot of bandwidth especially if the user can handle some loss of fidelity.
- \(TTR_{\text{dyn}}\) is a learning based TTR estimate founded on the assumption that the dynamics of the last few (two, in the case of the formula below) recent changes are likely to be reflective of changes in the near future.
\[
TTR_{\text{dyn}} = (w \times TTR_{\text{estimate}}) + ((1-w) \times TTR_{\text{latest}})
\]
where
\[
TTR_{\text{estimate}} = \frac{TTR_{\text{latest}}}{\left|D_{\text{latest}} - D_{\text{penultimate}}\right|} \times c
\]
If the recent rate of change persists, \(TTR_{\text{estimate}}\) will ensure that changes which are greater than or equal to \(c\) are not missed.
- weight \(w\) (0.5 ≤ \(w\) < 1, initially 0.5) is a measure of the relative preference given to recent and old changes, and is adjusted by the system so that we have the recency effect, i.e., more recent changes affect the new \(TTR\) more than the older changes.
- \(0 \leq \alpha \leq 1\) is a parameter of the algorithm and can be adjusted dynamically depending on the fidelity desired, with a higher fidelity demanding a higher value of \(\alpha\).
The adaptive TTR approach has been experimentally shown to have the best temporal coherency properties among several TTR assignments [21]. Consequently, we choose this technique as the basis for pull-based dissemination.
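A compact sketch of the adaptive TTR computation is given below. Since the formula above reuses the symbol \(TTR_{\text{min}}\) both for the lower bound and for the most conservative TTR used so far, the latter is renamed `ttr_conservative` here; the bookkeeping of the previous TTR and the last two polled values is assumed to be done by the caller.

```python
# Sketch of the Adaptive TTR computation; ttr_conservative stands for the most
# conservative (smallest) TTR used so far, to avoid clashing with the lower
# bound ttr_min of the formula.
def adaptive_ttr(ttr_min, ttr_max, ttr_conservative, ttr_latest,
                 d_latest, d_penultimate, c, alpha, w):
    change = abs(d_latest - d_penultimate)
    # Time expected until the value drifts by c, extrapolating the latest rate of change.
    ttr_estimate = (ttr_latest / change) * c if change > 0 else ttr_max
    # Blend the estimate with the latest TTR (recency effect controlled by w).
    ttr_dyn = w * ttr_estimate + (1 - w) * ttr_latest
    # Mix in the most conservative TTR seen so far and clamp to the static bounds.
    candidate = alpha * ttr_conservative + (1 - alpha) * ttr_dyn
    return max(ttr_min, min(ttr_max, candidate))

# Example: the value moved by $0.4 over the last 10 s poll interval, tcr c = $0.1.
print(adaptive_ttr(ttr_min=1, ttr_max=60, ttr_conservative=2, ttr_latest=10,
                   d_latest=25.4, d_penultimate=25.0, c=0.1, alpha=0.9, w=0.5))
```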
### 2.2 Push
In a push-based approach, the proxy registers with a server, identifying the data of interest and the associated tcr, i.e., the value \(c\). Whenever the value of the data changes, the server uses the tcr value \(c\) to determine if the new value should be pushed to the proxy; only those changes that are of interest to the user (based on the tcr) are actually pushed. Formally, if \(D_k\) was the last value that was pushed to the proxy, then a new value \(D_{k+1}\) is pushed if and only if \(|D_{k+1} - D_k| \geq c\). To achieve this objective, the server needs to maintain state information consisting of a list of proxies interested in each data item, the tcr of each proxy, and the last update sent to that proxy.
The key advantage of the push-based approach is that it can meet stringent coherency requirements—since the server is aware of every change, it can precisely determine which changes to push and when.
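The push decision can be summarized in a few lines, assuming per-client state that holds the tcr and the last pushed value (a sketch, not the paper's implementation):

```python
# Sketch of the per-client state and push decision of a push-based server.
class PushClientState:
    def __init__(self, tcr):
        self.tcr = tcr            # client's coherency requirement c
        self.last_pushed = None   # last value pushed to this client

    def on_new_value(self, value, push):
        # Push only changes of interest, i.e. a drift of at least c since the last push.
        if self.last_pushed is None or abs(value - self.last_pushed) >= self.tcr:
            push(value)
            self.last_pushed = value

state = PushClientState(tcr=0.5)
for value in (10.0, 10.2, 10.6, 11.3):
    state.on_new_value(value, push=lambda v: print("push", v))  # pushes 10.0, 10.6, 11.3
```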
### 2.3 Performance of Push vs. Pull
In what follows, we compare the push and pull approaches along several dimensions: maintenance of temporal coherency, communication overheads, computational overheads, space overheads, and resiliency.
2.3.1 Experimental Model
These algorithms were evaluated using a prototype server/proxy that employed trace replay. For Pull, we used a vanilla HTTP web server with our prototype proxy. For Push, we used a prototype server that uses unicast and connection-oriented sockets to push data to proxies. All experiments were done on a local intranet. We also ran carefully instrumented experiments on the internet and the trends observed were consistent with our results.
Note that it is possible to use multicast for push; however, we assumed that unicast communication is used to push data to each client (thus, results for push are conservative upper-bounds; the message overheads will be lower if multicast is used).
2.3.2 Traces Used
Quantitative performance characteristics are evaluated using real-world stock price streams as exemplars of dynamic data. The presented results are based on stock price traces (i.e., histories of stock prices) of a few companies obtained from http://finance.yahoo.com. The traces were collected at a rate of 2 or 3 stock quotes per second. Since stock prices only change once every few seconds, the traces can be considered to be “real-time” traces. For empirical and repetitive evaluations, we “cut out” the history for the time intervals listed in Table 2 and experimented with the different mechanisms by determining the stock prices they would have observed had the source been live. A trace that is 2 hours long has approximately 15,000 data values. All curves portray the averages of the plotted metric over all these traces. A few of the experiments were done with quotes obtained in real time, but the difference was found to be negligible when compared to the results with the traces.
The Pull approach was evaluated using the Adaptive TTR algorithm with an $\alpha$ value of 0.9, a $TTR_{min}$ of 1 second, and three $TTR_{max}$ values of 10, 30 and 60 seconds.
### Table 2: Traces Used for the Experiments
<table>
<thead>
<tr>
<th>Company</th>
<th>Date</th>
<th>Time</th>
</tr>
</thead>
<tbody>
<tr>
<td>Dell</td>
<td>Jun 1, 2000</td>
<td>21:56-22:53 IST</td>
</tr>
<tr>
<td>UTSI</td>
<td>Jun 1, 2000</td>
<td>22:41-23:15 IST</td>
</tr>
<tr>
<td>CBUK</td>
<td>Jun 2, 2000</td>
<td>18:31-21:37 IST</td>
</tr>
<tr>
<td>Intel</td>
<td>Jun 2, 2000</td>
<td>22:14-01:42 IST</td>
</tr>
<tr>
<td>Oracle</td>
<td>Jun 7, 2000</td>
<td>00:01-01:59 IST</td>
</tr>
<tr>
<td>Veritas</td>
<td>Jun 8, 2000</td>
<td>21:20-23:48 IST</td>
</tr>
<tr>
<td>Microsoft</td>
<td>Jun 8, 2000</td>
<td>21:02-23:48 IST</td>
</tr>
</tbody>
</table>
2.3.3 Maintenance of Temporal Coherency
Since a push-based server communicates every change of interest to a connected client, a client’s tcr is never violated as long as the server does not fail and is not so overloaded that the pushes are delayed. Thus, a push-based server is well suited to achieve a fidelity value of 1. On the other hand, in the case of a pull-based server, the frequency of the pulls (translated in our case to the assignment of TTR values) determines the degree to which client needs are met. We quantify the achievable fidelity of pull-based approaches in terms of the probability that the user’s tcr will be met. To do so, we measure the durations when $|U(t) - S(t)| > c$. Let $\delta_1, \delta_2, \ldots, \delta_n$ denote these durations when the user’s tcr is violated. Let $observation_{interval}$ denote the total time for which data was observed by a user. Then fidelity is
$$1 - \frac{\sum_{i=1}^{n} \delta_i}{observation_{interval}}$$
and is expressed as a percentage. This then indicates the percentage of time when a user’s desire to be within $c$ units of the source is met.
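For concreteness, this fidelity measure can be computed directly from the violation durations, as in the short sketch below (variable names are ours):

```python
# Fidelity = fraction of the observation interval during which |U(t) - S(t)| <= c,
# computed here from the durations of the violations.
def fidelity(violation_durations, observation_interval):
    return 1.0 - sum(violation_durations) / observation_interval

# Example: 12 s + 8 s of violations over a 1000 s observation window -> 0.98 (98%).
print(fidelity([12.0, 8.0], 1000.0))
```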
Figure 2 shows the fidelity for a pull-based algorithm that employs adaptive TTRs. Recall that the Push algorithm offers a fidelity of 100%. In contrast, the Figure shows that the pull algorithm has a fidelity of 70-80% for stringent coherency requirements and its fidelity improves as the coherency requirements become less stringent. (The curve marked PaP is for the PaP algorithm that combines Push and Pull and is described in Section 3.1.)
Figure 2: Fidelity for Varying Coherence Requirements
Figure 3: Overheads for Varying Coherence Requirements
2.3.4 Communication Overheads
In a push-based approach, the number of messages transferred over the network is equal to the number of times the user is informed of data changes so that the user specified temporal coherency is maintained. (In a network that supports multicasting, a single push message may be able to serve multiple clients.) A pull-based approach requires two messages—an HTTP IMS request, followed by a response—per poll. Moreover, in the pull approach, a proxy polls the server based on its estimate of how frequently the data is changing. If the data actually changes at a slower rate, then the proxy might poll more frequently than necessary. Hence a pull-based approach is liable to impose a larger load on the network. However, a push-based approach may push to clients who are no longer interested in a piece of information, thereby incurring unnecessary message overheads. We quantify communication
overheads in terms of the number of messages exchanged between server and proxy. Figure 3 shows the variation in the number of messages as the coherency requirement $c$ varies from \$0.05 to \$0.4. As seen in Figure 3, the Push approach incurs a small communication overhead because only values of interest to a client are transferred over the network. The Pull approach, on the other hand, imposes a significantly higher overhead.
2.3.5 Computational Overheads
Computational overheads for a pull-based server result from the need to deal with individual pull requests. After getting a pull request from the proxy, the server has to just look up the latest data value and respond. On the other hand, when the server has to push changes to the proxy, for each change that occurs, the server has to check if the tcr for any of the proxies has been violated. This computation is directly proportional to the rate of arrival of new data values and the number of unique temporal coherence requirements associated with that data value. Although this is a time varying quantity in the sense that the rate of arrival of data values as well as number of connections change with time, it is easy to see that push is computationally more demanding than pull. On the other hand, it is important to remember that servers respond to individual pull requests and so may incur queueing related overheads.
2.3.6 Space Overheads
A pull-based server is stateless. In contrast, a push-based server must maintain the tcr for each client and the latest pushed value, along with the state associated with an open connection. Since this state is maintained throughout the duration of client connectivity, the number of clients which the server can handle may be limited when the state space overhead becomes large (resulting in scalability problems). To achieve a reduction in the space needed, rather than maintaining the data and tcr needs of individual clients separately, the server combines all requests for a particular data item D that need a particular tcr; as soon as the change to D is greater than or equal to c, all the clients associated with D are notified. Let the above optimization process convert n connections into u unique (D, c) pairs. The state space needed is:
$$u \times (\text{bytes needed for a } (D, c) \text{ pair}) + n \times (\text{bytes needed for a connection state}) \quad (1)$$
Also, since $u \leq n$, this space is less than the space required if the above optimization were not applied (in which case $u$ in the first term of (1) would be replaced by $n$).
2.3.7 Resiliency
By virtue of being stateless, a pull-based server is resilient to failures. In contrast, a push-based server maintains crucial state information about the needs of its clients; this state is lost when the server fails. Consequently, the client’s coherency requirements will not be met until the proxy detects the failure and re-registers the tcr requirements with the server.
The above results are summarized in Table 1. In what follows, we present two approaches that strive to achieve the benefits of the two complementary approaches by adaptively combining Push and Pull.
3. PAP: DYNAMIC ALGORITHM WITH PUSH AND PULL CAPABILITIES
In this section, we present Push-and-Pull (PAP) — a new algorithm that simultaneously employs both push and pull to achieve the advantages of both approaches. The algorithm has tunable parameters that determine the degree to which push and pull are employed and allow the algorithm to span the entire range from a push approach to a pull approach. Our algorithm is motivated by the following observations.
The pull-based adaptive TTR algorithm described in Section 2.1 can react to variations in the rate of change of a data item. When a data item starts changing more rapidly, the algorithm uses smaller TTRs (resulting in more frequent polls). Similarly, when the changes are slow, TTR values tend to get larger. If the algorithm detects a violation of the coherency requirement (i.e., $|D_{latest} - D_{penultimate}| > c$), then it responds by using a smaller TTR for the next pull. A further violation will reduce the TTR even further. Thus, successive violations indicate that the data item is changing rapidly and the proxy gradually decreases the TTR until the TTR becomes sufficiently small to keep up with the rapid changes.
Experiments reported in [21] show that the algorithm gradually “learns” about such “clubbed” (i.e., successive) violations and reacts appropriately. So, what we need is a way to prevent even the small number of temporal coherency violations that occur due to the delay in this gradual learning process. Furthermore, if a rapid change occurs at the source and then the data goes back to its original value before the next pull, this “spike” will go undetected by a pull-based algorithm. The PaP approach described next helps the TTR algorithm to “catch” all the “clubbed” violations properly; moreover “spikes” also get detected. This is achieved by endowing push capabilities to servers and having the server push changes that a proxy is unable to detect. This increases the fidelity for clients at the cost of endowing push capability to servers. Note that, since proxies continue to have the ability to pull, the approach is more resilient to failures than a push approach (which loses all state information on a failure).
3.1 The PaP Algorithm
Suppose a client registers with a server and intimates its coherency requirement tcr. Assume that the client pulls data from the server using an algorithm, say A, to decide its TTR values (e.g., Adaptive TTR). After initial synchronization, the server also runs algorithm A. Under this scenario, the server is aware of when the client will be pulling next. With this, whenever the server sees that the client must be notified of a new data value, it pushes the data value to the proxy if and only if it determines that the client’s next poll will come too late to observe the change. The state maintained by this algorithm is a soft state in the sense that even if the push connection is lost or the client’s state is lost due to server failure, the client will continue to be served at least as well as under A. Thus, compared to a Push-based server, this strategy provides for graceful degradation.
In practice, we are likely to face problems of synchronization between server and client because of variable network delays. Also, the server will have the additional computational load imposed by the need to run the TTR algorithm for all the connections it has with its clients. The amount of additional state required to be maintained by the server cannot be ignored either. One could argue that we might as well resort to Push which will have the added advantage of reducing the number of messages on the network. However, we will have to be concerned with the effects of loss of state information or of connection loss on the maintenance of temporal coherency.
Fortunately, for the advantages of this technique to accrue, the server need not run the full-fledged TTR algorithm. A good approximation to computing the client’s next TTR will suffice. For example, the server can compute the difference between the times of the last two pulls ($diff$) and assume that the next pull will occur after a similar delay, at $t_{\text{predict}}$. Suppose $T(i)$ is the time of the most recent pull. The server computes $t_{\text{predict}}$, the next predicted pulling time, as follows:
- let \( \text{diff} = T(i) - T(i-1) \)
- server predicts the next client polling time as \( t_{\text{predict}} = T(i) + \text{diff} \).
If a new data value becomes available at the server before \( t_{\text{predict}} \) and it needs to be sent to the client to meet the client’s tcr, the server pushes the new data value to the client.
In practice, the server should allow the client to pull data if the changes of interest to the client occur close to the client’s expected pulling time. So, the server waits for a duration of \( \epsilon \), a small quantity close to \( TTR_{\text{min}} \), for the client to pull. If the client does not pull when the server expects it to, the server extends the push duration by adding \( (diff - \epsilon) \) to \( t_{\text{predict}} \). It is obvious that if \( \epsilon = 0 \), PaP reduces to the push approach; if \( \epsilon \) is large, then the approach works similarly to a pull approach. Thus, the value of \( \epsilon \) can be varied so that the number of pulls and pushes is balanced properly; \( \epsilon \) is hence one of the factors which decides the temporal coherency properties of the PaP algorithm as well as the number of messages sent over the network.
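A sketch of this server-side prediction logic is shown below; the data structures and the way pulls are reported to the server are illustrative assumptions.

```python
# Sketch of the PaP server-side logic: predict the client's next pull from the
# spacing of its last two pulls, and push only those changes of interest that
# arrive more than epsilon before the predicted pull.
class PaPServerState:
    def __init__(self, tcr, epsilon):
        self.tcr = tcr
        self.epsilon = epsilon
        self.pull_times = []     # times of the two most recent pulls
        self.last_sent = None    # last value the client received (via push or pull)

    def on_pull(self, now, current_value):
        self.pull_times = (self.pull_times + [now])[-2:]
        self.last_sent = current_value
        return current_value

    def predicted_next_pull(self):
        if len(self.pull_times) < 2:
            return None
        diff = self.pull_times[-1] - self.pull_times[-2]
        return self.pull_times[-1] + diff

    def on_new_value(self, now, value, push):
        t_predict = self.predicted_next_pull()
        of_interest = self.last_sent is None or abs(value - self.last_sent) >= self.tcr
        # Leave changes near the expected pull time to the client's own pull.
        if of_interest and (t_predict is None or t_predict - now > self.epsilon):
            push(value)
            self.last_sent = value
```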
### 3.2 Details of PaP
The arguments at the beginning of this section suggest that it is a good idea to let the proxy pull when it is polling frequently anyway and violations are occurring rapidly. Suppose, starting at \( t_i \), a series of rapid changes occurs to data \( D \). This can lead to a sequence of “clubbed” violations of the tcr unless steps are taken. The adaptive TTR algorithm triggers a decrease in the TTR value at the proxy. Let this TTR value be \( TTR_\epsilon \). The proxy polls next at \( t_{i+1} = t_i + TTR_\epsilon \). According to the PaP algorithm, the server pushes any data changes above the tcr during \( (t_i, t_{i+1}) \). Since a series of rapid changes occurs, the probability that some violation(s) may occur in \( (t_i, t_{i+1}) \) is very high and thus these changes will also be pushed by the server, further forcing a decrease in the TTR at the proxy and causing frequent polls from the proxy. Now, the TTR value at the proxy will tend towards \( TTR_{\text{min}} \) and \( diff \) will also approach zero, thus making the durations of possible pushes from the server close to zero. It is evident that if rapid changes occur, after a few pushes, the push interval will be zero, and the client will pull almost all the rapid changes thereafter. Thus the server has helped the proxy pull sooner than it would otherwise. This leads to better fidelity of data at the proxy than with a pull approach.
If an isolated rapid change (i.e., spike) occurs, then the server will push it to the proxy leading to a decrease in the TTR used next by the proxy. It will poll sooner but will not find any more violations and that in turn will lead to an increase in the TTR.
Thus, the proxy will tend to pull nearly all but the first few in a series of rapid changes helped by the initial pushes from the server, while all “spikes” will be pushed by the server to the proxy. The result is that all violations will be caught by the PaP algorithm in the ideal case (e.g., with the server running the adaptive TTR algorithm in parallel with the proxy). In case the server is estimating the proxy’s next TTR, the achieved temporal coherency can be made to be as close to the ideal, as exemplified by Pure Push, by proper choice of \( \epsilon \).
Overall, since the proxy uses the pushed (as well as pulled) information to determine TTR values, the adaptation of the TTRs would be much better than with a pull-based algorithm alone.
Although the amount of state maintained is nearly equal to push, the state is a soft state. This means that even if the state is lost due to some reason or the push connection with a proxy is lost, the performance will be at least as good as that of TTR algorithm running at the proxy as clients will keep pulling.
### 3.3 Performance Analysis of PaP
Figure 2 shows that, for the PaP algorithm, the fidelity offered is more than 98% for stringent tcrs and 100% for less stringent tcrs. From Figure 3, we see that, compared to Pull, the PaP algorithm has very little network overhead because of the push component. Its network overheads are, however, slightly higher than those of Push.
The value of \( TTR_{\text{max}} \) needs to be chosen to balance the number of pushes and pulls. Experimental results (not shown here) indicate, as one would expect, that when \( TTR_{\text{max}} \) is large the number of successful pushes is large, but as we decrease \( TTR_{\text{max}} \), the number of pushes decreases slowly until a point where pulls start dominating.

Figure 4 shows the variation in fidelity when \( \epsilon \) is varied. When \( \epsilon \) is zero, the algorithm reduces to push and hence fidelity is 100%. But as we start increasing the value of \( \epsilon \), the fidelity starts suffering. For values of \( \epsilon < 3 \), the fidelity is above 75%. And for \( \epsilon = TTR_{\text{min}} - 1 \), fidelity is approximately 99%. For values of \( \epsilon \) closer to \( TTR_{\text{max}} \) (in this case 60), fidelity is low as the pulls overtake pushes and the algorithm behaves like a TTR algorithm.
Figure 4 also shows the effect of changing \( TTR_{\text{max}} \) in conjunction with \( \epsilon \) on the fidelity offered by the algorithm. As \( TTR_{\text{max}} \) decreases, pulls increase. As pulls become more dominant, the server has less chance to push the data values, and a bigger \( \epsilon \) gives the server fewer opportunities to push. This explains the effect in Figure 4 for \( TTR_{\text{max}} = 5 \) or \( TTR_{\text{max}} = 10 \). As pulls increase and the server has less and less chance to push, fidelity suffers and decreases more rapidly than in the case of \( TTR_{\text{max}} = 60 \). It can also be observed that, as \( \epsilon \) takes values greater than \( TTR_{\text{max}} \), the fidelity offered becomes constant. This is because even if the server sets \( \epsilon \) greater than \( TTR_{\text{max}} \), the client will keep polling at least once every \( TTR_{\text{max}} \). In effect, setting \( \epsilon \) greater than \( TTR_{\text{max}} \) is equivalent to setting it to \( TTR_{\text{max}} \). This explains the crossover of curves in Figure 4.
As expected, as \( \epsilon \) is increased, the number of pulls becomes higher and higher. For \( \epsilon = 0 \) there are no pulls, and for \( \epsilon = TTR_{\text{max}} \) there are no pushes. Higher fidelity requires more pushes, and in the case where the number of pushes is equal to the number of pulls, fidelity is close to 50%. The more we increase the number of pulls (i.e., \( \epsilon \)), the lower the obtained fidelity.
### 3.4 Tuning of PaP: Making the Approach Adaptive and Intelligent
One of the primary goals of our work was to have an adaptive and intelligent approach which can be tuned according to the conditions prevailing at the server and in the network, as well as the data and temporal coherency needs of users. This tuning is achieved by adjusting parameters such as \( \epsilon \) and \( TTR_{max} \). For example, if the load on the server is high (due to more pushes), \( \epsilon \) can be set to a moderate/high value and/or \( TTR_{max} \) can be set to a low/moderate value so that the amount of pushes decreases and there are more pulls, thereby making the system resort to pulls.
4. POP: DYNAMICALLY CHOOSING BETWEEN PUSH OR PULL
PaP achieves its adaptiveness through the adjustment of parameters such as \( \epsilon \) and \( TTR_{max} \), and thereby obtains a range of behaviors with push and pull at the two extremes. We now describe a somewhat simpler approach wherein, based on the availability of the resources and the data and temporal coherency needs of users, a server chooses push or pull for a particular client. Consequently, we refer to our approach as Push-or-Pull (PoP).
4.1 The PoP Algorithm
PoP is based on the premise that at any given time a server can categorize its clients either as push clients or pull clients, and this categorization can change with system dynamics. This categorization is possible since the server knows parameters like the number of connections it can handle at a time and can determine the resources it has to devote to each mode (Push/Pull) of data dissemination so as to satisfy its current clients. The basic ideas behind this approach are:
- Allow failures at a server to be detected early so that, if possible, clients can switch to pulls, and thereby achieve graceful degradation under such failures. To achieve this, servers are designed to push data values to their push clients when one of two conditions is met: (1) the data value at the server differs from the previously forwarded value by $tcr$ or more, or (2) a certain period of time $TTR_{limit}$ has passed since the last change was forwarded to the clients. The first condition ensures that the client is never out of sync with the values at the server by an amount exceeding the $tcr$ of the client. The second condition assures the client, after the passage of every $TTR_{limit}$ interval, that (a) the server is still up and (b) the state of the client with the server is not lost. This makes the approach resilient. In case the state of the client is lost or the connection is closed because of network errors, the client notices the absence of pushes within $TTR_{limit}$ and can then resort to pulling. (A sketch of this push rule is given after Figure 5.)
---
**Figure 5: PoP: Choosing between Push and Pull**
4.2 Details of PoP
Whenever a client contacts a server for a data item, the client
also specifies its $tcr$ and fidelity requirements.
- Irrespective of the fidelity requirement, if the server has sufficient resources (such as a new monitoring thread, memory, etc.), the client is given a push connection.
- Otherwise, if the client can tolerate lower fidelity, then the server disseminates data to that client based on client pull requests.
- If the request desires 100% fidelity and the server does not have sufficient resources to satisfy it, then the server takes steps to convert some push clients to pull. If this conversion is not possible, then the new request is denied.
In the latter case, the push clients chosen are those who can withstand the resulting degraded fidelity, i.e., those who had originally demanded less than 100% fidelity but had been offered higher fidelity because resources were available then for push connections. Which client(s) to choose is decided based on additional considerations, including (a) the bandwidth available, (b) the rate of change of the data, and (c) the $tcr$. If the bandwidth available to a client is low, then forcing the client to pull will only worsen its situation, since pull requires more bandwidth than push. If the rate of change of the data value is low or the $tcr$ is high, then pull will suffice. Thus, from amongst the clients which had specified a low fidelity requirement, we choose proxies which have (a) specified a high value of $tcr$, or (b) a small volume of data served to them. If a suitable (set of) client(s) is found, the server sends an appropriate “connection dropped” intimation to the client so that it can start pulling.
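The decision procedure above can be summarized in a few lines of code. The following C fragment is a minimal, purely illustrative sketch; the type and function names (ClientProfile, pop_assign, demote_a_push_client) are assumptions introduced here and are not part of the system described in this paper.

```c
/* Hypothetical sketch of PoP's connection-assignment decision (Section 4.2).
 * All names and fields are illustrative, not taken from the paper. */
typedef struct {
    double fidelity_required;   /* fraction in [0,1]; 1.0 means 100% fidelity */
    double tcr;                 /* temporal coherency requirement             */
    double data_volume;         /* volume of data served to this client       */
} ClientProfile;

typedef enum { ASSIGN_PUSH, ASSIGN_PULL, ASSIGN_DENIED } Assignment;

/* Decide how a newly arriving client should be served.  The callback
 * demote_a_push_client() stands in for the paper's selection of a tolerant
 * push client (high tcr or small data volume) to convert to pull; it returns
 * non-zero if such a client was found and demoted. */
Assignment pop_assign(const ClientProfile *c,
                      int free_push_slots,
                      int (*demote_a_push_client)(void))
{
    if (free_push_slots > 0)            /* resources available: give push   */
        return ASSIGN_PUSH;

    if (c->fidelity_required < 1.0)     /* can tolerate lower fidelity      */
        return ASSIGN_PULL;

    /* 100% fidelity requested but no push resources: try to free a slot.  */
    if (demote_a_push_client())
        return ASSIGN_PUSH;

    return ASSIGN_DENIED;               /* cannot satisfy the request       */
}
```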
4.3 Performance of PoP
Using the same traces given in Table 2, we evaluated PoP. The experiments were performed by running the server on a load-free Pentium-II machine and simulating clients from four different load-free Pentium-II machines. There were 56 users on each client machine, accessing 3-4 data items. Keeping the server's communication resources constant, the ratio of push to pull connections was varied, and the effect on the average fidelity experienced by clients in pull mode, as well as across all the clients, was measured.
As expected, experimental results indicate that the communication overheads drop when the percentage of push sockets is increased. This is to be expected because push algorithms are optimal in terms of communication overheads. As we increase the percentage of push sockets, while the push clients may be able to experience 100% coherency, the percentage of pull requests that are denied due to queue overflow grows exponentially. These results indicate that a balance between pull and push connections must be struck if we want to achieve scalability while maintaining high coherency.

We measured the effect of increasing the percentage of push connections on fidelity. As the number of push connections increases, proxies which were serving the largest number of data items or data items with stringent temporal coherency requirements are moved from pull to push mode. The implemented system worked with a fixed number of clients/data items and so the results below do not reflect the effect of admission control (i.e. request denial based on server load and client profile, which includes its requirements, volume of data served to it and its network status) that is part of PoP. The results are plotted in Figure 6 for two cases of computational overheads per pull request: (1) no computational overheads, except for those connected with the use of the socket, and (2) a 10 msec computational overhead per pull request, in addition to the socket handling overheads.
When the computational overheads for a pull are negligible, average fidelity across all clients improves gradually as we increase the percentage of push clients. When a small computational overhead of 10 msec per pull is added, fidelity improves up to a point; but when the number of pull connections becomes small, some of the pull requests experience denial of service, thereby lowering the average fidelity across all clients. In fact, the overall fidelity drops by nearly 10%.
Recall that all push clients experience 100% fidelity. So, the above drop in fidelity is entirely due to the pull clients. This is clear when we study the variation of the average fidelity of pull clients. With zero computational overheads for pulls, as we increase the number of push clients, fidelity for pull clients improves from 82% to 84% before dropping to 83%. The improvement as well as the drop is more pronounced under 10 msec pull overheads. When a large percentage of the clients are in pull mode, the number of pull requests is very high. This increases the average response time for each client, which, in effect, decreases the fidelity for pull clients. This scalability problem is due to the computation load at the server when a large number of pull clients are present. As more and more clients switch to push mode, the number of pull requests drops, the response time of the server improves, and better fidelity results. The fidelity for pull clients peaks and then starts deteriorating. At this point the incoming requests cause overflows in the socket queues and the corresponding requests are denied. These denials again increase the effective response time of the client, and fidelity decreases. The last portion of the curve clearly brings out the scalability issue arising because of resource constraints.
These results clearly identify the need for the system to allocate push and pull connections intelligently. An appropriate allocation of push and pull connections to the registered clients will provide the temporal coherency and fidelity desired by them. In addition, when clients request access to the server and the requisite server resources are unavailable to meet the needs of the client, access must be denied. As Figure 5 indicates, this is precisely what PoP is designed to do.
4.4 Tuning of PoP
It is clear from the results plotted in Figure 6, that the way in which clients are categorized as push and pull clients affects the performance of PoP. So the system must dynamically and intelligently allocate the available resources amongst push and pull clients. For example, (a) when the system has to scale to accommodate more clients, it should preempt a few push clients (ideally, those which can tolerate some loss in fidelity) and give those resources to pull clients; (b) if the number of clients accessing the server is very small, a server can allocate maximum resources to push clients thus ensuring high fidelity. Thus, by varying the allocation of resources, a server can either ensure high fidelity or greater scalability.
5. BEYOND PaP AND PoP: PoPoPaP
PoPoPaP is a combination of the PoP and PaP approaches:
- The PaP alternative is added to a PoP server: to keep things simple, we can simply replace Push clients by PaP clients. (a) The average resiliency offered would be better than with PoP, (b) the degradation in service is also likely to be more graceful, and (c) the average coherency offered is likely to be higher than with PoP alone.
- Integration of PaP with PoP makes the approach more adaptive (by fine tuning using the parameters of PaP).
- The PoP algorithm at the server improves the average scalability offered by the system.
Together, these arguments motivate the properties of PoPoPaP mentioned in Table 1: by adaptively choosing pull or PaP for its clients, PoPoPaP can be designed to achieve the temporal coherency and scalability desired for a system [11].
6. RELATED WORK
Several research efforts have investigated the design of push-based and pull-based dissemination protocols from the server to the proxy, on the one hand, and the proxy to the client, on the
other. Push-based techniques that have been recently developed include broadcast disks [1], continuous media streaming [3], publish/subscribe applications [19, 4], web-based push caching [14], and speculative dissemination [5]. Research on pull-based techniques has spanned the areas of web proxy caching and collaborative applications [6, 7, 21]. Whereas each of these efforts has focused on a particular dissemination protocol, few have focused on supporting multiple dissemination protocols in a web environment.
Netscape has recently added push and pull capabilities to its Navigator browser specifically for dynamic documents [20]. Netscape Navigator 1.1 provides two new open, standards-based mechanisms for handling dynamic documents. The mechanisms are (a) Server push, where the server sends a chunk of data; the browser displays the data but leaves the connection open; whenever the server desires, it continues to send more data and the browser displays it, leaving the connection open; and (b) Client pull, where the server sends a chunk of data, including a directive (in the HTTP response or the document header) that says “reload this data in 5 seconds”, or “go load this other URL in 10 seconds”. After the specified amount of time has elapsed, the client does what it was told – either reloading the current data or getting new data. In server push, an HTTP connection is held open for an indefinite period of time (until the server knows it is done sending data to the client and sends a terminator, or until the client interrupts the connection). Server push is accomplished by using a variant of the MIME message format “multipart/mixed”, which lets a single message (or HTTP response) contain many data items. Client pull is accomplished by an HTTP response header (or equivalent HTML tag) that tells the client what to do after some specified time delay. The computation, state space, and bandwidth requirements in the case of Server push will be at least as large as those discussed in section 2.3. In addition, since we are using HTTP MIME messages, the message overhead will be higher (on average, MIME messages are bigger than raw data). For the same reason, it is not feasible to use this scheme for highly dynamic data, where the changes are small and occur very rapidly. Also, it would be very difficult to funnel multiple connections into one connection as envisaged in our model (see equation 1). This would clearly increase the space and computation requirements at the server. For the Client pull case, for reasons discussed in section 6.2, it is very difficult to use this approach for highly dynamic data. Still, these mechanisms may be useful for implementing the algorithms discussed in this paper as they are supported by standard browsers.
Turning to the caching of dynamic data, techniques discussed in [16] primarily use push-based invalidation and employ dependence graphs to track the dependence between cached objects to determine which invalidations to push to a proxy and when. In contrast, we have looked at the problem of disseminating individual time-varying objects from servers to proxies.
Several research groups and startup companies have designed adaptive techniques for web workloads [2, 6, 13]. Whereas these efforts focus on reacting to network loads and/or failures as well as dynamically routing requests to nearby proxies, our effort focuses on adapting the dissemination protocol to changing system conditions.
The design of coherency mechanisms for web workloads has also received significant attention recently. Proposed techniques include strong and weak consistency [17] and the leases approach [9, 22]. Our contribution in this area lies in the definition of temporal coherency in combination with the fidelity requirements of users.
Finally, work on scalable and available replicated servers [23] and distributed servers [8] are also related to our goals. Whereas [23] addresses the issue of adaptively varying the consistency requirement in replicated servers based on network load and application-specific requirements, we focus on adapting the dissemination protocol for time-varying data.
We end this section with a detailed comparison of two alternatives to our approaches: Leases [9, 12], a technique that also combines aspects from pull-based and push-based approaches, and Server-based prediction (instead of our client-based prediction) for setting Time-To-Live attributes for Web objects [10, 15, 17].
6.1 Comparison with Leases
In the leases approach, the server agrees to push updates to a proxy so long as the lease is valid; the proxy must pull changes once the lease expires (or renew the lease). Thus, the technique employs push followed by pull. In contrast, the PaP approach simultaneously combines both push and pull: most changes are pulled by the proxy, and changes undetected by the proxy are pushed to it. The leases approach has high fidelity so long as the lease is valid and then has the fidelity of pull until the lease is renewed. As shown earlier, by proper tuning, the fidelity of the PaP algorithm can approach that of push. The leases approach is more resilient to failures than a pure push approach (the duration of the lease bounds the duration for which the $tcr$ can be violated; the lease can be renewed thereafter). The PaP approach has even greater resiliency than leases, since proxies continue to pull even if the server stops pushing. Finally, we note that the leases approach can be combined with the PaP algorithm: the lease duration then indicates the duration for which the server agrees to push “missed” (i.e., undetected) changes.
6.2 Client Prediction vs. Server Prediction
PaP and PoP are based on using prediction capabilities at the clients/proxies. An alternative, of course, is to leave the prediction to the server. Such schemes are discussed in [10, 15, 17]. They use the if-modified-since field associated with the HTTP GET method (also known as a conditional GET), together with the TTL (time-to-live) fields used in many proxy caches. These schemes in general work as follows:
- The client does not use any TTR or prediction algorithm, but instead depends on some meta-information associated with the data to decide the time at which to refresh the data.
- Since the server has access to all the data, it can use a prediction algorithm to predict a time when the data is going to change by $tcr$. The server then attaches this time value to the outgoing data. The client will use this meta-information to decide when to poll next. There is no need for a push connection.
- Since the server has better access to the data than the client, server predictions will in general be more “accurate” than those of a TTR algorithm at the client.
Though the Server-Prediction approach looks like a better option than PaP, it runs into the following problems:
- the approach requires that previous history for the relevant data be maintained at the server. This will imply increased state information and computational needs at the server and will consequently adversely affect the scalability. Since in PoP (section 4) we reserve the pull method to serve clients when faced with problems of scalability, we prefer to make Pull relatively lightweight.
- the approach is more suitable for data that changes less frequently (e.g., say once every few hours). We are interested in
web data that is highly dynamic and inherently unpredictable (e.g., data that changes every few seconds/minutes, such as stock quotes). For dynamic data, the performance will be slightly better than Adaptive TTR, but at the cost of server resources and scalability.
- if the server prediction is wrong and a change of interest still occurs in the data, the server is helpless since it cannot push the change to the client. The change is lost. This will never happen in PaP.
In summary, so far dynamic data has been handled at the server end [16, 10]; our approaches are motivated by the goal of offloading this work to proxies.
7. CONCLUDING REMARKS
Since the frequency of changes of time-varying web data can itself vary over time (as hot objects become cold and vice versa), in this paper we argued that it is a priori difficult to determine whether a push- or pull-based approach should be employed for a particular data item. To address this limitation, we proposed two techniques that combine push- and pull-based approaches to adaptively determine which approach is best suited at a particular instant. Our first technique (PaP) is inherently pull-based and maintains soft state at the server. The proxy is primarily responsible for pulling those changes that are of interest; the server, by virtue of its soft state, may optionally push additional updates to the proxy, especially when there is a sudden surge in the rate of change that is yet to be detected by the proxy. Since the server maintains soft state, it is neither required to push such updates to the proxy, nor does it need to recover this state in case of a failure. Our second technique (PoP) allows a server to adaptively choose between a push- or pull-based approach on a per-connection basis (depending on the observed rate of change of the data item or the coherency requirements). We also showed how PoP can be extended to use PaP for some of its connections, leading to the algorithm PoPoPaP. Another contribution of our work is the design of algorithms that allow a proxy or a server to efficiently determine when to switch from a pull-based approach to push and vice versa. These decisions are made based on (i) a client’s temporal coherency requirements, (ii) characteristics of the data item, and (iii) capabilities of servers and proxies (e.g., a pure HTTP-based server precludes the use of push-based dissemination and necessitates the use of a pull-based approach by a proxy).
Our techniques have several characteristics that are desirable for time-varying data: they are user-cognizant (i.e., aware of user and application requirements), intelligent (i.e., have the ability to dynamically choose the most efficient set of mechanisms to service each application), and adaptive (i.e., have the ability to adapt a particular mechanism to changing network and workload characteristics). Our experimental results demonstrated that such tailored data dissemination is essential to meet diverse temporal coherency requirements, to be resilient to failures, and for the efficient and scalable utilization of server and network resources.
Currently we are extending the algorithms developed in this paper to design algorithms suitable for cooperative proxies and also for disseminating the results of continual queries [18] posed over dynamic data.
8. REFERENCES
|
{"Source-Url": "https://none.cs.umass.edu/papers/ps/WWW01.pdf", "len_cl100k_base": 12523, "olmocr-version": "0.1.53", "pdf-total-pages": 10, "total-fallback-pages": 0, "total-input-tokens": 35033, "total-output-tokens": 14263, "length": "2e13", "weborganizer": {"__label__adult": 0.0003275871276855469, "__label__art_design": 0.0004427433013916016, "__label__crime_law": 0.0003936290740966797, "__label__education_jobs": 0.0019483566284179688, "__label__entertainment": 0.00022161006927490232, "__label__fashion_beauty": 0.00019431114196777344, "__label__finance_business": 0.0012054443359375, "__label__food_dining": 0.00045180320739746094, "__label__games": 0.0006971359252929688, "__label__hardware": 0.0019369125366210935, "__label__health": 0.0008807182312011719, "__label__history": 0.0005583763122558594, "__label__home_hobbies": 0.00011456012725830078, "__label__industrial": 0.0005464553833007812, "__label__literature": 0.0006093978881835938, "__label__politics": 0.0003581047058105469, "__label__religion": 0.0004622936248779297, "__label__science_tech": 0.40380859375, "__label__social_life": 0.00012600421905517578, "__label__software": 0.06488037109375, "__label__software_dev": 0.5185546875, "__label__sports_fitness": 0.0002448558807373047, "__label__transportation": 0.0006337165832519531, "__label__travel": 0.00029397010803222656}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 59043, 0.02916]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 59043, 0.34608]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 59043, 0.91262]], "google_gemma-3-12b-it_contains_pii": [[0, 4614, false], [4614, 8209, null], [8209, 13905, null], [13905, 19124, null], [19124, 26533, null], [26533, 33830, null], [33830, 38095, null], [38095, 44288, null], [44288, 51626, null], [51626, 59043, null]], "google_gemma-3-12b-it_is_public_document": [[0, 4614, true], [4614, 8209, null], [8209, 13905, null], [13905, 19124, null], [19124, 26533, null], [26533, 33830, null], [33830, 38095, null], [38095, 44288, null], [44288, 51626, null], [51626, 59043, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 59043, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 59043, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 59043, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 59043, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 59043, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 59043, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 59043, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 59043, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 59043, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 59043, null]], "pdf_page_numbers": [[0, 4614, 1], [4614, 8209, 2], [8209, 13905, 3], [13905, 19124, 4], [19124, 26533, 5], [26533, 33830, 6], [33830, 38095, 7], [38095, 44288, 8], [44288, 51626, 9], [51626, 59043, 10]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 59043, 0.08456]]}
|
olmocr_science_pdfs
|
2024-12-08
|
2024-12-08
|
a5cff09484b1ec2507e4561b087f7c4786a543f3
|
THINGS TO LOOK FOR…
- C Arithmetic and logical operators.
- Flow of control constructs supported by the C language.
- An understanding of branching type constructs.
- An understanding of the structure and control of looping type constructs.
- Proper use of the break and continue statements.
- An understanding of the differences between entrance and exit condition loops.
- Good design and coding styles.
2.0 INTRODUCTION
In this tutorial we will open with a review of the C language arithmetic and logical operators and then follow with a brief study of the constructs by which we can alter and control the flow through a program. Such constructs include relational expressions as well as those that control branching, jumping, and looping within an application. As the different topics are presented, we will suggest techniques and methodologies that can help one to learn and practice good coding style and to design and develop more robust programs.
In an embedded application, the C operators permit us to perform various kinds of computations on data that we collect either directly or through measurements of outside world signals and events. Relational expressions support the ability to test or compare any returned values then to make decisions based upon the results of such comparisons. The branching, jumping, and looping constructs enable the designer to control how, when, and how often sets of instructions are executed based upon some specified condition(s). Branching constructs permit the selection of several different paths of execution. Jumps permit the program flow to change from executing one set of code to a completely different set; loops permit the application to perform the same set of instructions repeatedly.
2.1 C Operators
The variables in a programming language allow us to express different kinds of data and information about or used by an embedded application. It is the relationships supported by that language between and among that data and information that enable us to develop and build powerful applications using the language. The power of a language derives from the operators that it provides in support of those relationships. The C language supports a rich set of capabilities through the various kinds of operators listed in Figure 2.0.
- Arithmetic Operators
- Logical Operators
- Bitwise Operators

Figure 2.0 General Categories of C Operators
2.1.1 Introducing the C Language Operators
Most of the C language operators perform the operations that we might expect. Our objective will be to examine how they apply in embedded applications, look at some techniques whereby one can improve their performance in such a context, and identify potential problem areas.
2.1.2 The C Arithmetic Operators
C uses the four familiar arithmetic operators to perform addition, subtraction, division, and multiplication operations on variables. The operators, which are the same for both integral and floating point types, are also those we expect to find on a typical calculator. These are given in Figure 2.1 and they work as we would expect.
2.1.2.1 Operator Precedence and Associativity
Let’s take a look at what appears to be a simple arithmetic problem. If we’re not careful, however, this problem can give us some rather interesting and probably undesired results.
Precedence
Let’s start by declaring some variables
```c
int a = 20;
int b = 25;
int c = 0;
```
We then write
```c
c = a * 2/b + 15;
```
The variable `c` is...
- `a` times 2 ... divided by `b` ... plus 15
…which is really 40 divided by 40.
One might expect the value of the variable \( c \) to be 1. However, if these four lines of code are included in a program which is then compiled and run, the result will print as the number 16. What happened? Why did that happen? The answer to these questions lies in a concept called operator precedence or simply precedence. The ANSI/ISO C language standard specifies both C Operator precedence and associativity. These are given in Table 2.0.
**Table 2.0**
### C Operator Precedence and Associativity
<table>
<thead>
<tr>
<th>Precedence</th>
<th>Operator</th>
<th>Description</th>
<th>Associativity</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>() [] . -> ++ --</td>
<td>Parentheses (grouping) Subscript Member selection direct Member selection indirect Unary postfix increment / postfix decrement</td>
<td>left</td>
</tr>
<tr>
<td>2</td>
<td>++ -- + - ! ~ * & sizeof</td>
<td>Unary prefix increment / prefix decrement Unary plus / minus Unary logical negation Unary bitwise complement Dereference Address of Return size in bytes</td>
<td>right</td>
</tr>
<tr>
<td>3</td>
<td>(type)</td>
<td>Unary cast (change to type)</td>
<td>left</td>
</tr>
<tr>
<td>4</td>
<td>* / %</td>
<td>Multiplication / division / mod</td>
<td>left</td>
</tr>
<tr>
<td>5</td>
<td>+ -</td>
<td>Addition / subtraction</td>
<td>left</td>
</tr>
<tr>
<td>6</td>
<td><< >></td>
<td>Bitwise shift left / shift right</td>
<td>left</td>
</tr>
<tr>
<td>7</td>
<td>< <= > >=</td>
<td>Relational less than / less than or equal to Relational greater than / greater than or equal to</td>
<td>left</td>
</tr>
<tr>
<td>8</td>
<td>== !=</td>
<td>Relational is equal to / not equal to</td>
<td>left</td>
</tr>
<tr>
<td>9</td>
<td>&</td>
<td>Bitwise AND</td>
<td>left</td>
</tr>
<tr>
<td>10</td>
<td>^</td>
<td>Bitwise Exclusive OR</td>
<td>left</td>
</tr>
<tr>
<td>11</td>
<td>|</td>
<td>Bitwise inclusive OR</td>
<td>left</td>
</tr>
<tr>
<td>12</td>
<td>&&</td>
<td>Logical AND</td>
<td>left</td>
</tr>
<tr>
<td>13</td>
<td>||</td>
<td>Logical OR</td>
<td>left</td>
</tr>
<tr>
<td>14</td>
<td>? :</td>
<td>Ternary conditional</td>
<td>right</td>
</tr>
<tr>
<td>15</td>
<td>= += -= *= /= %= &= ^= |= <<= >>=</td>
<td>Assignment Shortcut addition / subtraction Shortcut multiplication / division Shortcut mod / bitwise AND Shortcut bitwise exclusive / inclusive OR Shortcut bitwise shift left / right assignment</td>
<td>right</td>
</tr>
<tr>
<td>16</td>
<td>,</td>
<td>Comma</td>
<td>left</td>
</tr>
</tbody>
</table>
Precedence specifies the order in which operators are evaluated and associativity establishes whether we work with the operator to the left or to the right in a chained operation.
Let’s revisit the earlier arithmetic expression. From the table, we see that the division operator appears above (has higher precedence than) the addition operator. This means that the compiler will perform the division before the addition. Multiplication and division share the same precedence level; because their associativity is left to right, the multiplication (the leftmost of the two operators) is performed before the division. We will examine associativity in greater detail momentarily.
In the light of precedence,
1. \(a\) is multiplied by 2 first, based upon associativity. This yields 20 times 2, which is 40.
2. Next, 40 is divided by \(b\) because division comes before addition. This is 40 divided by 25, which is 1.6; since both operands are integers, the result is truncated to 1 (see Integer Division below).
3. Finally, 15 is added to 1 to give 16.
If it’s necessary for the evaluation to proceed in a different order, one can always override the precedence by using parentheses. To force \(b + 15\) to be done before the division, enclose those terms in parentheses. We now have,
`c = a * 2/(b + 15);`
Using parentheses to enclose an operation changes the evaluation order because the grouping or parentheses operator has higher precedence than any arithmetic operator, which means that any expression inside the parentheses must be evaluated first. Parentheses can be nested to any depth to achieve whatever evaluation order is needed or desired.
The expression is now evaluated as follows,
1. 15 is added to \(b\) because the parentheses must be evaluated first.
This is 25 plus 15 which is 40.
2. Next \(a\) is multiplied by 2 because multiplication precedes division.
This is 20 times 2 or 40.
3. Finally 40 is divided by 40 to give 1.
Associativity
A reasonable question to ask at this point is, if precedence determines which operator is applied first, what happens when all of the operators in a line of code have the same precedence?
Let’s look at the following equation
\[ d = a + b - c; \]
Does the compiler evaluate \( a + b \) or \( b - c \) first? The order is determined by operator associativity. If we look up addition and subtraction in the table above, we will see the associativity is left. This means the operator on the left is evaluated first. In the above line, \( a + b \) will be evaluated first then \( c \) will be subtracted. This is important to remember, because it can affect our mathematical calculations and can produce bugs that are very difficult to find.
**Coding Style**
It is good practice to always use parentheses to ensure a preferred evaluation order.
**Integer Division**
In the arithmetic example above, there was one step in which 40 is divided by 25. The expected answer is 1.6. Since integers do not have fractional parts, the 0.6 is simply truncated. In C, 40 divided by 25 returns 1 because there is only one 25 in 40 with a remainder of 15. Because the two operands are integers, the compiler expresses the result as an integer and 0.6 can’t be expressed as an integer.
If the application needs non integer numbers, certainly one solution is to use floating-point variables. However, in embedded applications, one would rather not bring in the floating point math package unless it’s absolutely necessary. For simple calculations such as this one, it’s much more efficient to take a page from the early computer designer’s book and use scaling.
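As an illustration of such scaling (this small program is added here as a sketch and is not part of the original tutorial text), the result of 40 divided by 25 can be kept as an integer number of tenths so that no floating-point arithmetic is needed:

```c
#include <stdio.h>

int main(void)
{
    int numerator   = 40;
    int denominator = 25;

    /* Scale by 10 before dividing so the result is expressed in tenths.
       (40 * 10) / 25 = 16 tenths, i.e. 1.6, using only integer math.  */
    int tenths = (numerator * 10) / denominator;

    printf("Result is %d.%d\n", tenths / 10, tenths % 10);   // prints "Result is 1.6"
    return 0;
}
```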
**The Modulus Operator %**
Recall from the earlier discussion of integer types that integer division gives a whole number part and a remainder (which could be 0). Such an operation is important enough in mathematics that it’s been given a special name – modulus – and the operator symbol %. The operator is also called the remainder operator, modulus operator, or mod operator. The next example illustrates how the mod operator works.
**Example 2.0**
Let’s apply the modulus operator to the integers in following code fragment,
```c
int aNum = 15;

printf("aNum %% 3 is %d \n", aNum % 3);    // prints 0
// ...3 divides 15 five times with 0 remainder
printf("aNum %% 5 is %d \n", aNum % 5);    // prints 0
// ...5 divides 15 three times with 0 remainder
printf("aNum %% 10 is %d \n", aNum % 10);  // prints 5
// ...10 divides 15 one time with 5 remainder
printf("aNum %% 2 is %d \n", aNum % 2);    // prints 1
// ...2 divides 15 seven times with 1 remainder
```
**Example 2.1**

The modulus operator can be used as part of an algorithm to extract the hundreds, tens, and units digits from an integer.

```c
int myInteger = 987;                         // an integer value
int hundreds = myInteger/100;                // gives 9 hundreds
int tens = (myInteger - hundreds*100) / 10;  // gives 8 tens
int units = myInteger%10;                    // gives 7 units
```
### 2.1.3 Logical Operators and Logical Expressions
Logical operators provide another means by which one can specify relationships between and among variables. Such operators permit one to express in software the same logical relationships that are implemented in hardware using combinational logic gates.
In C, there are three common *logical* operators: **AND**, **OR**, and **NOT**.
> **Caution**: One must take care to distinguish the *logical* operators from the *bitwise* operators of similar name.
The logical operators work on expressions while the bitwise operators (discussed in the text) apply the same kind of operations to individual bits.
Like the hardware analog, the AND relationship evaluates to *true* when *all conjuncts* evaluate to *not false*. This is a subtle, but important point here. The C language does not have the notion of true and false in a Boolean sense. In the language, such variables are integral values rather than *booleans* as in C++. To that end, in C, *false* is 0 and *true* is not *false* – be careful not to read true as 1.
The OR relationship evaluates to *true* when *any disjunct is not false* – be careful not to read this as 1. The NOT relationship evaluates to *true* when its operand is *false* and vice versa.
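To make the point concrete, the short fragment below (added here for illustration, in the same style as the figures in this tutorial) shows that any non-zero value is treated as true, not just the value 1:

```c
int x = -7;

if (x)          // -7 is not 0, so this branch is taken: true is "not false"
    printf("x is true (non-zero)\n");

if (x == 1)     // but x is not equal to 1, so this branch is NOT taken
    printf("x is 1\n");
```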
**The Logical AND Operator &&**
Let’s consider the code fragment in Figure 2.2,
```c
int a = 5;
int b = 5;
if ( 5 == a)
{
if (5 == b)
{
printf( "a AND b are both 5\n\n");
}
}
```
The two *if* statements can be rewritten using a logical *AND* expression, as is illustrated in Figure 2.3. If we do so, we have,
```
if (( 5 == a) && (5 == b))
{
printf( "a AND b are both 5\n");
}
```
*Figure 2.3 Using the Logical AND Operator*
Expressing the relation as was done above has provided more readable code and more closely reflects the intent of the author.
**Caution:** It’s important to understand how such an expression is evaluated. The left hand operand is evaluated first. If it evaluates to false, that is ‘0’, the AND operation cannot be true; therefore, there is no reason to evaluate any of the remaining operands. This is different from the way a hardware AND gate works.
An unintended side effect is that if the value of any of the remaining operands is determined by evaluating a function or other code fragment and other program variables depend upon that evaluation occurring, there is a problem. None of the dependent variables will be updated.
**Coding Style**
Notice that the comparison in the *if* construct above tests *(5==a)* rather than *(a==5)*. We make the comparison with the constant as the left hand operand so as not to inadvertently assign the number 5 to the variable a by unintentionally writing a = 5. As written, if we drop one of the ‘==’ symbols, the compiler will complain that 5 = a is an illegal assignment.
Always use your tools to help you.
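The small fragment below (added here for illustration) shows both sides of this coin: the silent bug when the variable is on the left, and the intended comparison written constant-first:

```c
int a = 7;

/* Dropping one '=' here compiles silently: the condition assigns 5 to a
   and then tests the assigned value, so the branch is always taken.     */
if (a = 5)
    printf("this prints every time, whatever a held before\n");

/* Written constant-first, the same typo -- if (5 = a) -- is rejected by
   the compiler, because 5 cannot appear on the left of an assignment.   */
if (5 == a)
    printf("this is the intended comparison\n");
```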
Consider the code fragment in Figure 2.4.
```
int a = 5;
int b = 6;
int c = 7;
.....
if (( 5 == a) && (5 == b) && (5 == f(c)))
{
printf("a AND b AND f(c) are 5 \n");
}
```
*Figure 2.4 Working with the Logical AND Operator*
Because \((5 == b)\) evaluates false, the expression \((5 == f(c))\) is not evaluated. Depending upon the value of ‘b’ at the time the AND is evaluated, processing may be different from that which is expected. In the above case, the function \(f(c)\) is never evaluated.
### Coding Style
Never depend upon side effects – the code that produces them may never be executed.
---
The Logical OR Operator \(||\)
Now let’s look at the logical OR operator. We’ll begin with the code fragment in Figure 2.5.
```c
int a = 5;
int b = 5;
if (5 == a)
{
printf("a OR b is 5\n");
}
else
{
if (5 == b)
{
printf("a OR b is 5\n");
}
}
```
Figure 2.5
Testing the Disjunction of Several Statements
As we did with the AND, the fragment can be rewritten using the logical OR operator as we see in Figure 2.6.
```c
if ((5 == a) || (5 == b))
{
printf("a OR b is 5\n");
}
```
Figure 2.6
Working with the Logical OR Operator
As demonstrated earlier, rewriting gives more readable code. Again note that the order of evaluation of the expressions is left to right. Here, if \((5 == a)\) is not false, evaluation will stop. There is no need to continue evaluating. As a result, the same cautions apply: don’t depend upon side effects.
To see this, let’s expand the OR expression in the code fragment in Figure 2.7.
```c
int a = 4;
int b = 5;
int c = 6;
if ((5 == a) || (5 == b) || (5 == f(c)))
{
printf("a OR b OR f(c) is 5\n");
}
```
Figure 2.7
Working with the Logical OR Operator
Here (5 == a) is false. Evaluation continues because only one expression must be true. Next, (5 == b) evaluates true. At this point, further processing stops. The function f(c) is never evaluated.
The Logical NOT Operator !
The character ! is the logical not or negation operator; the symbol is often read as ‘bang’. The code fragment in Figure 2.8 illustrates how the operator is used when testing if a condition is not satisfied.
```c
int a = 5;
int b = 5;
if (!(5 == a))
{
printf("a OR b is NOT 5\n");
}
else
{
if (!(5 == b))
{
printf("a OR b is NOT 5\n");
}
}
```
Figure 2.8
Testing the Disjunction of Several Negated Statements
Using the same if statements from the earlier OR example, we change what is displayed when either expression is false rather than true. When an expression evaluates to true, the NOT operator inverts the sense to false. Common coding styles for the NOT are,
```c
if (0 != x) // reads if x not equal to zero
if (!(0 == x)) // reads if (it is not true that x is equal to zero )
```
2.2 Getting Started
As a program is executing, the flow of control through the set of instructions can take a variety of paths, repeating some, skipping others, or just moving ahead. We now examine each of these to understand how they work and how they can be effectively used in embedded applications.
2.3 Sequential Flow of Control
When a set of firmware executes instructions sequentially, each instruction is selected, evaluated, and performed one following another, in sequence – as the name suggests. Each instruction is completed before the machine proceeds to the next. Such a flow is illustrated graphically in Figure 2.9.
An instruction may be *simple* such as the following,
```
expression;
a = b;
a = sqrt (c);
```
or it may be *compound* meaning that it comprises a set (or block) of simple expressions, enclosed in `{}`. In contrast to most other statements in C, such a block of instructions is not terminated with a ‘;’. The curly brackets specify that the enclosed group of expressions is to be treated as a set or block. The language specifies that a compound expression can be used anywhere that a simple expression is used. *Flow of Control* proceeds in order, executing one statement after another, from top to bottom as we see in Figure 2.9.
2.4 The break and continue Statements
The *break* and *continue* statements are used to alter the flow of control inside of loops; the *break* performs the same function inside of switch statements.
**break**
1. Causes the execution of the smallest enclosing *while*, *do*, *for*, or *switch* block to be terminated.
2. Execution resumes at the point immediately beyond terminated block - the block is exited.
**continue**
1. Causes the current iteration of the smallest enclosing *while*, *do*, or *for* loop to be terminated.
2. Execution resumes at the point immediately at the end of the loop body for that iteration - the loop is not exited.
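The fragment below is added here as a concrete illustration (it is not part of the original text and uses the while loop construct covered later in this tutorial):

```c
int i = 0;

while (i < 10)
{
    i++;

    if (i == 3)
        continue;       // skip the rest of this iteration; the loop goes on

    if (i == 6)
        break;          // leave the loop entirely

    printf("%d ", i);   // prints 1 2 4 5
}

printf("\ndone, i = %d\n", i);   // prints "done, i = 6"
```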
2.5 Conditional - Branch
With branching ability, the sequential flow construct is extended to permit the selection of alternate paths of execution (at some point in the flow) based upon the value of a specified variable or expression. Following the branch, flow resumes sequential execution on the new path as shown in Figure 2.10.
ANSI/ISO C supports two forms of branch construct: the familiar if-else and a multi-way switch. In either case, the flow of control can be represented schematically as in the graphic.
2.5.1 If-Else Construct
The if-else construct enables the selection from several alternative execution paths through the program based upon the value of a specified variable or upon a value returned from a function evaluation or I/O port access. The structure is expandable so as to permit more complex structures to be built from simple components. The basic flow of control follows the template given in Figure 2.11. Starting with such a structure as a base, an embedded application can be designed to support a rich and sophisticated set of complex decision capabilities.
2.5.1.1 The Simple Branch
Conditioned on the results of the evaluation of the control expression, the basic form of the branch construct permits the selection of an alternate path of execution at the decision point. If the control expression evaluates to zero, the body of the if block is skipped; otherwise, the statements contained in the body are evaluated before proceeding on the original path.
Coding Style
If the control expression evaluates to non zero, only the first line of code following the if statement is evaluated. If it’s necessary that a block of statements be evaluated, they must be enclosed in curly brackets. So grouped, they are treated as a single statement.
It is good practice to always enclose the body of the if construct in a set of brackets even if it comprises only a single line. Such a practice will help to prevent difficult to find bugs.
2.5.1.2 The Two-Way Branch and More
With the simple branch shown in Figure 2.12a, we are given the choice of executing a designated block of code prior to continuing on the original path. We can extend that construct by more tightly controlling what happens should the if portion fail, that is, should the control expression evaluate to 0.

At first such a construct doesn't seem to offer anything beyond the basic if construct, however, let's look at the code fragments in Figure 2.12b. Do they do the same thing?
```
case 1:

if (x > 0)
    printf("Big\n");
printf("Small\n");

case 2:

if (x > 0)
    printf("Big\n");
else
    printf("Small\n");
```
What if x evaluates to 3, what prints?
- In case 1, the printing of Small is independent of the value of x – both Big and Small print.
- In case 2, only Big or Small prints based upon the value of x. If x has the value 3, only Big will print.
One can extend the basic if-else construct as follows. First, write statement_1 under the else portion as we see in the first code fragment in Figure 2.13; that is, express statement_1 as an if-else construct itself. Following the substitution, the control flow appears as we see in the lower right hand code fragment. The construct can be similarly extended to implement an n-way branch. Care must be taken with such constructs as the next program demonstrates.
    if (expression_0)
        statement_0;
    else
        statement_1;

Let statement_1 =

    if (expression_a)
        statement_a
    else
        statement_b

Figure 2.13
The Basic and Extended If-Else Constructs
Example 2.0
Let’s analyze the following bit of code in Figure 2.14.
```
/*
 * Conditional Statements - A mismatched else statement.
 */
#include <stdio.h>

void main (void)
{
    // declare some working variables
    unsigned int aValue = 0;
    unsigned int min = 3;
    unsigned int max = 10;

    // get some data
    printf( "Enter number: ");
    aValue = getchar();

    // convert from ASCII to decimal
    aValue -= '0';

    // Test for in range
    if ((aValue >= min) && (aValue <= max))
        // Test for even number
        if ((aValue%2) == 0)
            printf("aValue is even\n");
    // Out of range error
    else
        printf( "Number out of range\n" );
}
```
Figure 2.14
Working with the Basic If-Else Construct
Does this code fragment in Figure 2.15 print what is expected if the number 5 is entered for the variable aValue?
(Figure 2.15: code fragment)
The indentation suggests that the else goes with the outer if. However, the compiler associates the else with the inner if.
If the designer does not specify otherwise using curly brackets, the compiler always associates an else with the most recent if. Such a mismatch can be very difficult to find in a complex program.
Re-stressing, it’s always best to use {} to specify the actual intention of the design.
Observe that when one implements a multiway decision block using a cascade of if-else statements, it’s not necessary that the control expression be the same for each decision. Each if and each else could be evaluating a different expression.
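For example, the fragment below (added here for illustration; the variables and limits are made up) tests a different expression at each level of the cascade:

```c
int temperature = 72;
int pressure    = 30;
int sensor_ok   = 1;

/* Each level of the cascade tests a different expression. */
if (temperature > 100)
    printf("over-temperature\n");
else if (pressure > 50)
    printf("over-pressure\n");
else if (!sensor_ok)
    printf("sensor fault\n");
else
    printf("normal operation\n");   // this is what prints for the values above
```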
Although syntactically correct, the compound if-else construct can require a more complex set of instructions at the assembly language level, which can then lead to larger, more complicated, and slower machine code. The switch construct, which we will study next, presents a more efficient alternative. For an embedded application, this can be important when we are trying to optimize memory size or execution speed.
### 2.6 The Switch Statement
When a multiway branch is based upon different values for the same control expression, the switch statement is a better choice than the if-else. The switch statement is similar to the case statement in Pascal or ADA.
The syntax for the switch is given as shown in Figure 2.16. The control expression must be an integral type. Within the body of the switch, each case label must also be an integral type; it must be a constant expression such as 1, 2, 3 or an expression that evaluates to a constant. The default label is the keyword default. Within each case, statement can be either simple or compound.
The execution of the switch proceeds as follows,
1. The control expression is evaluated.
2. If the value of the control expression is equal to one of the case labels, control is transferred to the point indicated by the label and execution continues from that point.
3. If the value is not equal to any label and the default label is present, control is transferred to the default.
4. If the value is not equal to any label and the default label is not present, control is transferred to the statement after the switch block.
Caution: If the value of the control expression is equal to one of the case labels, control is transferred to the point indicated by the label, and execution continues sequentially from that point through the remainder of the switch body.
If only the code associated with a single case is intended to be executed, a break statement must be inserted at the point where sequential execution is to be terminated.
The code fragment in Figure 2.17 illustrates the effect of the *break* statement. It is also important to remember that the break only redirects the flow of control to the right hand side of the closing curly bracket of the switch body. It does not alter the flow out of a context that may be enclosing the switch, such as a loop or a function.
Let’s now look at an example of using the switch statement. In the program in Figure 2.18, the user is prompted to enter a specific value. Flow of control branches to a different execution path based upon the value that was entered.
If the value 3 is entered, control is transferred to the case 3 label and execution proceeds from that point. First, 3 stars will be printed followed by a newline, then the `break` causes the execution of the switch statement to be terminated. Control is transferred to the point just beyond terminated statement; in this case, the closing curly bracket. If the value 5 is entered, no stars are printed and the default case will be executed and the word `done` will be printed.
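Since only the behaviour of the Figure 2.18 program is described above, the following is a minimal sketch of a program with that behaviour; the actual code in the figure may differ in detail:

```c
#include <stdio.h>

int main(void)
{
    int value = 0;

    printf("Enter a value: ");
    value = getchar() - '0';        // convert the ASCII digit to an integer

    switch (value)
    {
        case 1:
            printf("*\n");
            break;

        case 2:
            printf("**\n");
            break;

        case 3:
            printf("***\n");        // three stars followed by a newline
            break;                  // terminate the switch here

        default:
            printf("done\n");       // e.g., entering 5 lands here
            break;
    }

    return 0;
}
```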
2.7 Loops and Iterations
The graphic in Figure 2.19 illustrates the flow of control in the loop construct. When we design a loop, the intention is that one statement or block of statements is executed repeatedly based upon some specified criteria. As is seen, one can make the decision to terminate the loop either before or after executing the block of statements. Thus, based upon the design of the loop, the number of iterations may be none, one, many, or unlimited.
The C language defines two forms of the loop construct. The first type is called an entry-condition loop and the second is called an exit-condition loop. When either type executes, the *control expression* is tested on every *iteration* (or *cycle*) of the loop. In support of these constructs, the C language includes 3 types of loop: *do*, *while*, and *for*.
### 2.7.1 The Entry-Condition Loop
An entry-condition loop tests the condition upon entering the loop. To execute the code inside the loop, the condition must be *true*. Recall that in C *true is not false* and *false is zero*. Thus, to execute the code inside the loop, the condition must not be zero. If the condition is not true, the loop is skipped. With such loops, there is the possibility that the code inside the loop will never be executed. Entry-condition loops use the C keywords *while* and *for*. Such loops are usually referred to as *while loops* or *for loops*.
To set up an entry-condition loop one must,
- Set a control expression
- Test the expression to see if the loop should execute
- Re-test the expression to see if the loop should execute again
#### 2.7.1.1 The while Loop
The simplest example of an entry-condition loop construct is the *while* loop shown in Figure 2.20. The *loop body* is delimited by the pair of curly brackets following the *while* keyword. Such a construct means that a loop can contain multiple statements thereby permitting one to build loops of arbitrary complexity. Observe that if *control expression* is initially false, then *statement* is never executed.
**Syntax**
```
while (control expression)
{
statements
}
```
*Figure 2.20 The while Loop*
Execution proceeds as follows.
1. The control expression is evaluated.
2. If the expression is not zero, the statement(s) are evaluated.
3. The process is repeated.
4. Execution is complete when the expression evaluates to zero.
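A short fragment (added here; Figure 2.20 shows only the syntax) that follows these steps:

```c
int count = 3;

while (count > 0)               // tested before every iteration
{
    printf("count = %d\n", count);
    count--;                    // drive the control expression toward zero
}
// prints count = 3, 2, 1; then the test fails and execution continues below
```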
One can effect an extraordinary exit from the while loop by executing a *break* statement or bypass portions of the body of the construct by executing a *continue* statement, as shown in Figure 2.21.
```
while (control expression)
{
statement
continue or break
::
::
statement
}
```
Figure 2.21
Managing Control Flow in the while Construct
**Coding Style**
Control can be transferred out of a while loop using a *break* or *return* statement. Portions of the while body can be bypassed using a *continue* statement. This practice is strongly discouraged. We never want an asynchronous entry or exit from a block of code. We also want to ensure that we have only one entry and exit point to a block of code.
### 2.7.1.2 The for Loop
The for loop is also an example of an entry-condition loop and is considerably more general than the while loop. Its syntax is given in Figure 2.22.
```
Syntax
for (exp0; exp1; exp2)
{
statements
}
```
Figure 2.22
The for Loop
The control expressions $exp_i$ perform the following.
- $exp_0$ is a comma separated list of initialization expressions.
The list of expressions is part of the for loop.
The list ends with a semi-colon.
Initialization occurs once when the loop is entered. It is never repeated.
- $exp_1$ is the loop control expression.
It is followed by a semi-colon.
If the control expression does not evaluate to zero, execution of the loop continues.
If it evaluates to zero, execution terminates.
The control expression is tested on every iteration through the loop.
- $exp_2$ is used to update the loop control expression
It will execute on each iteration of the loop.
The control expressions $exp_i$ are all optional. However, the semicolon separators must still be present in the opening statement.
We can write
```
for (; ; )
```
but not
```
for ( )
```
We can use such a construct to create infinite loops, which we’ll study shortly. These are essential in the design of embedded applications. Unlike a desktop application, an embedded application is intended to run until stopped rather than to terminate after a single execution.
The execution of the for loop proceeds as follows.
1. Evaluate $exp_0$ if present.
2. Evaluate $exp_1$ if present:
   if zero, terminate;
   if not zero or not present, continue execution.
3. Execute the body of the for statement.
4. Evaluate $exp_2$ if present, then repeat from step 2.
As we noted for the while loop, control can be transferred out using a `break` or `return` statement as illustrated in Figure 2.23. Once again, such a practice is strongly discouraged.
Let’s look at an example of a simple for loop.
**Example 2.2**
```c
/*
* A simple for loop to print some numbers
*/
#include <stdio.h>
void main(void)
{
int i = 0; // declare and initialize a loop variable
for (i=0; i < 5; i++)
{
printf("The loop index is: %i\n",i);
}
return;
}
```
**Figure 2.24**
Working with the for loop Construct
### 2.7.3 Infinite Loops
In an embedded application, a program will often be structured as a sequence of initializing statements followed by an infinite loop running the intended tasks of the system. Such a loop can be constructed using either a `for` loop with no control expressions or a `while` loop with a control expression that always evaluates to true. These are written as is shown in Figure 2.25.
```
while (1)
{
statement
}
```
Figure 2.25
Creating an Infinite Loop
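The equivalent form using a `for` loop with all three control expressions omitted is shown below for completeness (Figure 2.25 gives only the `while` form):

```c
for (;;)        // no initialization, no test, no update: loops forever
{
    // statement(s): the system's task loop goes here
}
```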
So constructed, a loop can be used to control the execution of a series of tasks as the following code module illustrates.
**Example 2.3**
In Figure 2.26, we have a very simple task loop. The loop comprises four tasks,
- Prompt for input
- An input operation
- A computation operation
- An output operation
Each task gets as much time as it needs and always completes execution before relinquishing control. The system runs forever. When analyzing the code, several questions should come to mind.
- Why is the second getchar() operation performed?
- What is the following line doing?
```
result += (value - '0');
```
When one enters data to a C program from the standard input or `standard in` (generally the keyboard) each character is stored in a temporary buffer until the user enters a new line by pressing the `Enter key` on the keyboard. The `getchar()` function only reads one character at a time, thus, the newline character is left in the input buffer. That is what is being read and discarded during the second input operation.
When data is entered into a program from the keyboard, each character is encoded in ASCII. If it is necessary to perform traditional arithmetic operations on the data, each character must first be converted to a decimal integer.
If one were to look at the ASCII table, one would find that the characters 0..9 are represented by the hex numbers 30..39 (or decimal numbers 48..57). Specifically, the character ‘0’ has the hex value 30 (or decimal 48). Thus, if the character ‘7’ (hex 37) is entered, subtracting ‘0’ (30) from ‘7’ (37) gives us the integer value 7. This technique only works on the ASCII characters 0..9.
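A small fragment (added here for illustration, not taken from the tutorial) showing the conversion in both directions:

```c
char ch = '7';               // ASCII 0x37 (decimal 55)

int digit = ch - '0';        // 0x37 - 0x30 = 7, the numeric value
char back = digit + '0';     // 7 + 0x30 = 0x37, the character '7' again

printf("digit = %d, character = %c\n", digit, back);   // digit = 7, character = 7
```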
2.7.2 The Exit-Condition Loops
An exit-condition loop tests the condition at the end of the loop, after the code inside the loop has already executed. Such loops guarantee that the code inside the loop will execute at least once whether or not the control expression is true. Exit-condition loops are delimited by the C keyword pair `do` and `while`. Such loops are commonly referred to as `do-while` loops or simply `do` loops.
2.7.2.1 The `do` - `while` Loop
The `do-while` loops, illustrated in Figure 2.27, are similar to basic while loops. Execution proceeds analogously.
```
/*
* Flow of Control - A simple task loop.
*/
#include <stdio.h>
void main(void)
{
// declare some working variables
char value;
int result = 2;
// build an infinite loop
while (1)
{
// Prompt for input
printf("Enter a character: ");
// Read the keyboard – an input operation
value = getchar();
// Do some calculations and display result
getchar();
result += (value - '0');
printf("The calculations show: %i\n", result);
}
}
```
The execution of the do-while loop proceeds as follows.
1. Execute each statement in the loop body.
2. Evaluate the control expression.
3. If the control expression is non-zero (true), repeat the process from step 1.
4. When the control expression evaluates to zero (false), execution of the loop is complete.
Once again, as seen in Figure 2.28, control can be transferred out of the loop using a `break` or `return` statement... and once again, such a practice is strongly discouraged. Observe that with the `do-while` construct, *statement* is always executed at least once.
**Figure 2.28**
Managing Control Flow in the do-while loop Construct
**Example 2.4**
The code module in Figure 2.29 will execute five iterations of the loop and will print the message and the loop index on each iteration.
```c
/*
 * A simple do-while loop to print some numbers
 */
#include <stdio.h>

void main(void)
{
    int i = 0;

    do
    {
        printf("The loop index is: %d\n", i);
        i++;
    } while (i < 5);

    return;
}
```
**Figure 2.29**
Working with the do-while loop Construct
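To see how this differs from an entry-condition loop, consider the following small sketch: the body executes once even though the control expression is false from the very first test.
```c
#include <stdio.h>

int main(void)
{
    int i = 10;

    do
    {
        printf("The loop index is: %d\n", i);   // printed exactly once
        i++;
    } while (i < 5);                            // false on the first evaluation

    return 0;
}
```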
### 2.8 Summary
In this tutorial we opened with a review of the C language arithmetic and logical operators, then followed with a brief study of the constructs by which we can alter and control the flow through a program. We learned that such constructs include relational expressions as well as those that control branching, jumping, and looping within an application. We also covered a variety of techniques and methodologies that can help one learn and practice good coding style and design and develop more robust programs.
A Unified Scheduler for Recursive and Task Dataflow Parallelism
Published in:
2011 International Conference on Parallel Architectures and Compilation Techniques (PACT 2011)
Document Version:
Author final version (often known as postprint)
A Unified Scheduler for Recursive and Task Dataflow Parallelism
Hans Vandierendonck
Dept. of Electronics and Information Systems, Ghent University, Ghent, Belgium, Email: hvdieren@elis.ugent.be
George Tzenakis and Dimitrios S. Nikolopoulos
Foundation for Research and Technology – Hellas (FORTH), Heraklion, Crete, Greece, Email: {tzenakis,dsn}@ics.forth.gr
Abstract—Task dataflow languages simplify the specification of parallel programs by dynamically detecting and enforcing dependencies between tasks. These languages are, however, often restricted to a single level of parallelism. This language design is reflected in the runtime system, where a master thread explicitly generates a task graph and worker threads execute ready tasks and wake up their dependents. Such an approach is incompatible with state-of-the-art schedulers such as the Cilk scheduler, which minimize the creation of idle tasks (work-first principle) and place all task creation and scheduling off the critical path. This paper proposes an extension to the Cilk scheduler in order to reconcile task dependencies with the work-first principle. We discuss the impact of task dependencies on the properties of the Cilk scheduler. Furthermore, we propose a low-overhead ticket-based technique for dependency tracking and enforcement at the object level. Our scheduler also supports renaming of objects in order to increase task-level parallelism. Renaming is implemented using versioned objects, a new type of hyperobject. Experimental evaluation shows that the unified scheduler is as efficient as the Cilk scheduler when tasks have no dependencies. Moreover, the unified scheduler is more efficient than SMPSS, a particular implementation of a task dataflow language.
I. INTRODUCTION
Task dataflow parallel programming languages facilitate the construction of parallel programs with high performance by leveraging a runtime scheduler that is aware of dependencies between tasks. In a task dataflow language, each argument of a task is labeled with an access mode, e.g. input, output or the combined input/output mode. These labels summarize the memory side-effects of the task on these arguments as read-only, non-exposed read and write, or read/write. Hereby, the scheduler can dynamically track dependencies between tasks, but it can also change the execution order of tasks, while still respecting the dependencies. This is very similar to how an out-of-order processor dynamically changes the execution order of assembly instructions.
Task dataflow languages have multiple benefits: they increase parallelism by tracking task dependencies dynamically [1], [2], they create additional parallelism by renaming memory objects [3] and they remove the sensitivity of algorithmic variations on processor architecture [4]. Task dataflow languages are currently investigated mostly in the context of high-performance computing [1]–[3], [5], but the ideas have also been applied to make parallel Java programs deterministic by automatically inferring memory footprints of tasks and tracking the dependencies [6]. Applications that benefit from task dataflow include irregular parallel algorithms such as Cholesky decomposition [1], [3], algorithms with many cross-iteration dependencies such as the Smith-Waterman algorithm [7] and h264 video decoding [8].
Task dataflow languages are often restricted to a single level of parallelism: a single master thread spawns tasks but the tasks themselves cannot launch new tasks. As such, these task dataflow languages are incompatible with recursive fork/join languages such as Cilk [9], [10]. However, for many algorithms it is well known how to extract all parallelism efficiently and in these cases dependency tracking is pure overhead.
In this paper, we present a unified language and scheduler that simultaneously allows algorithms expressed in the task-dependency and fork/join styles. Hereby, the programmer can freely select the most appropriate programming style for each algorithm in an application. Furthermore, this programming model allows the construction of arbitrary parallel pipelines, a construct that appears in emerging workloads [11] and is only partially supported by Cilk.
The contributions of this paper are the following:
- We develop a scheduler that unifies work-first scheduling with dependency-aware scheduling. The unified scheduler necessarily violates some of the provably-good properties of the Cilk scheduler, however, the scheduler retains the good behavior of the Cilk scheduler when tasks are specified without dependencies, or when such tasks execute serially.
- We present versioned objects, a new type of hyperobject that facilitates tracking task dependencies on the object level. Versioned objects encapsulate the meta-data that is necessary to track dependencies, as well as seamlessly rename objects to increase task parallelism. While other approaches limit versioning to a single level of data (e.g. arrays without pointers), our approach allows versioning of arbitrary data structures.
- We present a new and efficient mechanism for tracking dependencies on objects. Our method uses tickets (similarly to ticket-based locks [12]) to enforce the program order of the C/C++ elision of the program.
- We demonstrate through experimental evaluation that our unified scheduler is as efficient as the Cilk++ scheduler when task dependencies are absent. Moreover, we demonstrate that our scheduler is more efficient than SMPSS [3], a proven task graph scheduler. Our scheduler supports much finer-grain tasks than SMPSS, which allows more task-level parallelism for the same problem size.
```cpp
typedef float (*block_t)[16];                  // 16x16 tile
typedef versioned<block_t>      vers_block_t;
typedef indep<float[16][16]>    in_block_t;
typedef inoutdep<float[16][16]> inout_block_t;

void mul_add(in_block_t A, in_block_t B, inout_block_t C) {
    a = (block_t)A;   // Recover pointers
    b = (block_t)B;   // to the raw data
    c = (block_t)C;   // from the versioned objects
    // ... serial implementation on a 16x16 tile ...
}

void matmul(vers_block_t A, vers_block_t B, vers_block_t C, unsigned n) {
    for (unsigned i=0; i < n; ++i) {
        for (unsigned j=0; j < n; ++j) {
            for (unsigned k=0; k < n; ++k) {
                spawn mul_add( (in_block_t)A[i*n+j],
                               (in_block_t)B[j*n+k],
                               (inout_block_t)C[i*n+k] );
            }
        }
    }
    sync;
}
```
Fig. 1. Square matrix multiplication expressed in a language supporting runtime tracking and enforcement of task dependencies.
The remainder of this paper is organized as follows. Section II gives an overview of our programming model. Section III discusses how we track dependencies on memory objects and how we create new versions of these objects. Next, Section IV discusses our extension to a work-first scheduler. Section V experimentally validates that our scheduler is competitive with state-of-the-art work-first and dependency-aware schedulers. Finally, Section VI discusses related work and Section VII concludes this paper.
### II. Programming Model
Figure 1 illustrates programming in our language. It is assumed that the language contains parallelism control statements as in Cilk: **spawn** expresses that a procedure call may proceed in parallel with the caller and **sync** expresses that the execution of a procedure should stall until all spawned procedures have finished. We extend this however with the notion of dependencies between tasks.
Dependencies are tracked at the object level. An object must be declared as a **versioned** object in order to enable dependency tracking. Versioned objects support automatic tracking of dependencies as well as creating new versions of the object in order to increase task-level parallelism (a.k.a. renaming [3]).
Dependency tracking is enabled on tasks that take particular types as arguments: the **indep**, **outdep** and **inoutdep** types. These types are little more than a wrapper around a versioned object that extends its type with the memory access mode of the task: **input**, **output** or **input/output** (in/out). The language allows only versioned objects to be passed to such arguments.
When spawning a task, the scheduler analyzes the signature of the spawned procedure for arguments with a memory access mode. If none of the arguments describe a memory access mode, then the spawn statement is an **unconditional spawn** and it has the same semantics as a Cilk spawn. Otherwise, the spawn statement is a **conditional spawn**. The memory accesses of the task are tracked and, depending on runtime conditions, the task either executes immediately or it is queued up in a set of pending tasks.
The **sync** statement in our language has the same semantics as the Cilk sync statement: it postpones the execution of a procedure until all child tasks have finished execution. Some languages provide a **conditional sync** that postpones the execution of a procedure until all tasks operating on a particular object are finished (e.g. the **waiton** clause in SMPSS [3]). We have not yet defined the semantics of a conditional sync in our language because we have no use for it in our benchmarks. Such an extension would be quite straightforward.
We consider only situations where dependencies are tracked between the children of a single parent procedure. Each dynamic procedure instance may have a task graph that restricts the execution order of its children. This restriction allows us to prove that all parallel executions compute the same value as the sequential elision of the program [13]. Note that the sequential elision of the program always respects the dependencies in the program: by deducing dependencies from input/output properties, there can never be backward dependencies in the sequential elision. Furthermore, by having multiple independent task graphs in a program, we can mitigate the performance impact of building the task graph in serial fashion.
Our model allows arbitrarily mixing fork/join style and task graph execution. The only problematic issue to allow this is that we must take care when nesting task graphs, in particular when passing versioned objects across multiple dependent spawns. To make this work correctly, we must use distinct metadata for every dependent spawn to track its dependencies separately. This is detailed in Section III-F.
We assume that there is an implicit **sync** statement at the end of every procedure. It should be clear to the reader that the busy-leaves property of the Cilk scheduler is violated when tasks execute out-of-order. The busy-leaves property states that any created stack frame that has no left sibling is currently operated on by a worker [14]. This property lies at the basis of the provable time and space bounds of the Cilk scheduler. We argue however that dependency-aware tracking necessarily violates the busy-leaves property. Moreover, our scheduler retains all the good properties of Cilk in the absence of conditional spawns.
1 Note that Cilk only guarantees the existence of a sequential elision. By allowing locks and data races, different parallel executions may compute different outcomes.
III. OBJECT VERSIONING AND DEPENDENCY TRACKING
In general, "hyperobjects are a linguistic mechanism that allow different branches of a multi-threaded program to maintain co-ordinated local views of the same nonlocal variable" [15]. For instance, for a summing reducer hyperobject, a view is simply a distinct allocated instance of the summing variable. Threads that execute in parallel are assigned a distinct summing variable to operate on, such that races will not occur. When the threads join, then the private summing variables are reduced into a single variable. In this example, the reduction function is simply addition.
In this work, we define versioned objects, a new type of hyperobject that hides dependency tracking and renaming of objects from the programmer. A versioned object combines two pieces of information: the object metadata that tracks the status of the object (tasks reading, writing, etc.) and a pointer to dynamically allocated memory that holds an instance of the object.
A. Automatic Renaming
The semantics of a hyperobject are defined by the actions that the runtime system takes on fork and join points. For the reducer, holder and splitter hyperobjects defined in [15], it is typical that the parent procedure and the child procedure are assigned a distinct view. These views are reduced into a single view when the procedures join.
The nature of versioned objects is however quite different. A version can be valid across all newly spawned child procedures and their children recursively, even if they have not been spawned yet. Alternatively, the version has been superseded by a new version and should not be used any more by newly spawned children. As such, when a new version of an object is created at a spawn statement, both parent and child will reference this new version. This is necessary such that a new version created on spawn of a task with an output dependency on an object will be visible to a later spawned task with an input dependency on the same object. The child continues to use the new version, while the parent may replace it at a later spawn statement.
After creating a new version, the old version will get out of use gradually and will be cleaned up automatically by the runtime system when the last thread that references the version terminates.
We create new versions only for arguments accessed with the output memory access mode. Moreover, there must exist tasks in the system that read or write the associated object, but that have not yet finished execution. In this case, renaming is clearly advantageous.
B. Dependency Tracking with a Task Graph
In general, tracking dependencies between tasks requires the storage of the full task graph. This is undesirable for two reasons. First, the task graph has many nodes (one for each task) and it must support an arbitrary number of incoming and outgoing edges (dependencies) in each task. This implies that quite expensive data structures must be used that require multiple memory allocations per node. The edges in the task graph have a double function: (i) to determine readiness of tasks (absence of incoming edges) and (ii) to wake-up dependent tasks (by traversing all outgoing edges). Second, updating the task graph is expensive in terms of locking because every task involved in the update must be locked in order to correctly orchestrate between multiple threads that update the task graph. Furthermore, tasks that have already moved to per-worker ready queues may have to be locked, temporarily inhibiting their execution.
In this work, we present an alternative organization of the task graph. Instead of explicitly storing all edges, we use a ticketing system to correctly sequence tasks that operate on the same objects. The ticketing system bears some similarity to ticket locks [12] however, we use only the sequencing properties of the mechanism, which is known as fairness for locks.
Our organization of the task graph simplifies the major operations on the task graph such as task enqueueing, task dequeuing, task readiness checks, checking conditions for renaming, etc. It also allows ready tasks to be woken up with little overhead; retrieving a ready task, however, is slightly more complicated in this system. Our experimental evaluation shows that we can hide the latency of retrieving a ready task off the critical path.
C. Ticket Locks
A ticket lock consists of two counters: a global counter and a next counter. The system works similarly to how tickets work at a butcher's store: new clients take the next ticket one-by-one, each of them incrementing the next counter. The global counter is advertised and shows the ticket of the client that is currently served. When serving a client is finished, the global counter is incremented by one to indicate whose turn is next. The tickets place all clients in a virtual queue where the order of the clients in the queue is defined by the numbers on their tickets.
Ticket locks are implemented using atomic increments of the next counter. When multiple threads are competing to acquire a lock, the hardware will sequence the atomic increments of all threads in a particular order. It is guaranteed by the ticket lock that threads are served in this order. This fairness property is one reason why they are used in the Linux kernel [16]. It is this property that we are interested in.
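As a concrete illustration of the mechanism, a minimal ticket lock can be sketched as follows. The type and function names here are ours, chosen for illustration, and memory-ordering refinements are omitted.
```c
#include <stdatomic.h>

typedef struct {
    atomic_uint next;      /* ticket handed to the next arriving client */
    atomic_uint global;    /* ticket of the client currently being served */
} ticket_lock_t;

void ticket_lock_acquire(ticket_lock_t *lk)
{
    /* Take a ticket; the atomic increment orders all competing threads. */
    unsigned my_ticket = atomic_fetch_add(&lk->next, 1);

    /* Wait until the advertised global counter reaches our ticket. */
    while (atomic_load(&lk->global) != my_ticket)
        ;   /* spin */
}

void ticket_lock_release(ticket_lock_t *lk)
{
    /* Serving this client is finished; advance to the next ticket. */
    atomic_fetch_add(&lk->global, 1);
}
```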
D. Tickets for Task Parallelism
Because ticket locks strictly order all tasks, we present two extensions that allow parallelism to be extracted from a task graph. First, we allow multiple tasks to wait on the same ticket, allowing them to execute in parallel. Second, we use two sets of ticket counters to separately track readers and writers of the object.\(^2\) Task parallelism is exposed by synchronizing on one or two tickets, depending on how a task accesses the associated object.
\(^2\)There is one set of reader and writer counters per object. Furthermore, every new version of an object gets a new set of reader and writer counters.
Figure 2 summarizes the operations on the tickets. The \( \mathcal{R} \) tickets track readers while the \( \mathcal{W} \) tickets track writers. Each ticketing system has two counters as before: a next counter and a global counter.
When enqueueing a task, the task is registered in the reader set, the writer set or both, depending on whether the task accesses the object in \textit{input}, \textit{output} or \textit{in/out} mode. Furthermore, the next reader and/or writer tickets are copied to bookmark the order of the task in the sequence of all readers or all writers.
For instance, tasks with an input dependency are strictly ordered in the readers set as the next reader counter is incremented in the enqueue operation and the global reader counter is incremented in the dequeue operation. However, such tasks are not dependent on other tasks in the readers set. They can start execution as soon as all prior writers have finished. Thus, they register in the reader set but wait on a ticket from the writers set. Hereto, the next writer ticket is copied (not incremented) and the task will be ready to execute when the global writer counter equals the task's writer ticket.
In a similar vein, tasks with an output dependency are strictly ordered in the writers set. They are always ready to execute due to renaming. Tasks with an in/out dependency are strictly ordered with respect to both readers and writers. Note that we chose not to rename objects in case of an in/out dependency because the benefits in terms of task parallelism are not clear. It is however straightforward to extend the system described here.
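The bookkeeping described above might be sketched roughly as follows. This is our own reading of the scheme rather than the paper's implementation: the names are illustrative, and renaming, atomicity of the counter updates, and per-version counter allocation are all omitted.
```c
typedef struct {
    unsigned r_next, r_global;   /* reader ticket counters */
    unsigned w_next, w_global;   /* writer ticket counters */
} obj_tickets_t;

typedef struct {
    unsigned r_ticket, w_ticket; /* tickets this task waits on */
    int wait_r, wait_w;          /* whether the task waits on readers / writers */
} task_tickets_t;

/* Register a task on one object according to its access mode. */
void ticket_enqueue(obj_tickets_t *o, task_tickets_t *t, int is_in, int is_out)
{
    t->wait_r = t->wait_w = 0;

    if (is_in && is_out) {        /* in/out: ordered after all prior readers and writers */
        t->r_ticket = o->r_next;  t->wait_r = 1;
        t->w_ticket = o->w_next;  t->wait_w = 1;
        o->r_next++;              /* later tasks are ordered after this one */
        o->w_next++;
    } else if (is_in) {           /* input: ordered among readers, waits on prior writers */
        t->w_ticket = o->w_next;  /* copy, do not increment */
        t->wait_w = 1;
        o->r_next++;
    } else {                      /* output: ordered among writers; renaming makes it
                                     ready immediately in the common case */
        o->w_next++;
    }
}

/* Release the task's position when it finishes execution. */
void ticket_dequeue(obj_tickets_t *o, int is_in, int is_out)
{
    if (is_in)  o->r_global++;
    if (is_out) o->w_global++;
}

/* A task is ready once the global counters have caught up with its tickets. */
int ticket_ready(const obj_tickets_t *o, const task_tickets_t *t)
{
    return (!t->wait_w || o->w_global == t->w_ticket)
        && (!t->wait_r || o->r_global == t->r_ticket);
}
```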
E. Example
Figure 3 shows the operation of the tickets for enqueuing a sequence of tasks with input, output and in/out dependencies. The tickets for the readers and writers conceptually order all readers and writers, respectively, in queues. We draw these queues to help understand the mechanism, but note that these queues are not stored in the program. Solid edges from a position in a queue to a task show that the task holds the corresponding ticket. Dashed edges show the tickets that a task is waiting on, i.e. the global counter must reach the ticket value pointed to.
Task \( T_0 \) has an output dependency. Because the next reader counter equals the global reader counter (there are no pending tasks that access the object), the object is not renamed. \( T_0 \) is inserted at the head of the writers list. It is ready to execute. For the sake of the argument, we assume that it remains executing while additional tasks are spawned.
Task \( T_1 \) has an input dependency on the object. Thus, it is inserted in the readers set (solid edge) and it copies the next ticket from the writers set (dashed edge). This ticket will equal the global writer ticket when \( T_0 \) finishes execution. As such, the tickets reflect that \( T_1 \) is dependent on \( T_0 \).
Similarly, task \( T_2 \) has an input dependency and the same actions are taken. Because both \( T_1 \) and \( T_2 \) wait only on \( T_0 \) to finish execution, they may execute simultaneously.
The next task, \( T_3 \), has an in/out dependency. It is inserted in both the readers and writers set (solid edges) and it grabs the next ticket from both sets (dashed edges). This is because task \( T_3 \) has to wait on all prior writers (to satisfy read-after-write dependencies) and it has to wait on all prior readers (to satisfy write-after-read dependencies). It may appear that \( T_3 \) is not waiting explicitly on \( T_1 \). Note however that the global reader counter can be increased by two positions only if both \( T_1 \) and \( T_2 \) execute.
Task \( T_4 \) has again an input dependency. It is inserted in the readers set and it copies the next ticket from the writers set. This ticket indicates that it must wait on \( T_3 \) to finish execution.
If a task \( T_5 \) with an output dependency arrives now, then a new version of the object written to will be created to increase...
IV. A Unified Work-First/Task Graph Scheduler
One of the often recurring programming idioms in the context of Cilk programs is to solve a problem by recursively splitting it in smaller sub-problems, resulting in a procedure call tree, typically built from recursive procedure calls. Eventually, the sub-problems are small enough to be treated as indivisible units of work, called the leaf tasks. The parallelization strategy applied by Cilk is to split the call graph as few times as possible. Consequently, the call tree is split at the top, as many times as is necessary to split off a piece of work for every thread. Splitting off a piece of work is effected by work stealing.
Cilk's strategy has the practical consequence that most of the procedure spawns are executed serially. This property follows from the work-first principle: it is generally better to execute a spawned procedure immediately than it is to create a task descriptor, enqueue it, dequeue it and execute it. (The work-first principle is an improvement of Cilk-5 [10] over Cilk-3 [9].)
A. Cilk Runtime Data Structures
The Cilk runtime maintains several data structures to control the execution of a Cilk program: the (extended) spawn deque, stack frames and full frames (Figure 5). We refer the reader to [15] for a thorough discussion of the internals of the Cilk-5/Cilk++ scheduler. Our discussion deviates slightly on elements that are particular to our implementation.
Stack frames, as in sequential C/C++ programs, store variables local to a procedure invocation as well as temporaries and control information, e.g. to link the procedure back to the caller. On top of this information, Cilk stack frames store a small number of control variables.
Some frames are accessible by multiple worker threads. As such, they contain additional fields to control multi-threaded actions. Full frames contain all the information of a stack frame, as well as a lock, a continuation to proceed the execution of a frame by a different worker, a join counter and a list of child frames, which is implemented by a pointer to the first child and pointers to the sibling frames.
All frames link together in a tree, where the root of the tree corresponds to the main procedure of the program (Figure 5). This tree is also known as a cactus stack. This tree is, in fact, a part of the complete procedure call tree. It is a snapshot of the procedure instances that are currently active.
Full frames are always located near the root of the call tree, while stack frames appear near the leaves of the call tree. In fact, Cilk maintains the following invariants: (i) the parent of a full frame is a full frame, and (ii) a stack frame has at most one child and this child is a stack frame.
Every worker thread operates on its own set of frames, which are organized as spawn deques of call stacks. A worker pushes stack frames on the front of its spawn deque as it executes new procedure instances and it pops them as they finish execution. The organization of the spawn deque is however distinct from the stack used in sequential C/C++ programs in two ways. First, the spawn deque is organized by call stacks. Frames created for normal procedure calls are appended to the same call stack as their parent. Frames created on procedure spawns are inserted in a new call stack. Second, other workers may steal call stacks from the back of the spawn deque when they run out of work. It is not possible to add call stacks to the back of the spawn deque.
Cilk separately maintains a current call stack, which is the call stack currently operated on. The current call stack can never be stolen by another worker. The extended spawn deque consists of the current call stack and the spawn deque.
The oldest frame on an extended deque is always a full frame. All other frames are stack frames. Moreover, every stack frame belongs to a single extended deque. Frames that do not belong to any extended deque are necessarily full frames.
### B. Cilk Runtime Actions
The Cilk runtime manages the extended spawn deques when executing a procedure call and its return, when executing a procedure spawn and its return and when executing a sync statement. We only provide a very short description. Furthermore, we make abstraction of the organization of a spawn deque by call stacks. It complicates the discussion but bears no importance to the contributions of this paper. Full details can be found in [15].
When executing a procedure call or spawn, a new stack frame is allocated and pushed on the extended spawn deque. When executing a return from a call, the frame from which the return leaves is either a full frame or a stack frame. If it is a stack frame, then it is popped from the extended spawn deque and execution continues in its parent, which is guaranteed to belong to the same extended spawn deque. If the frame being left is a full frame, then it is necessarily the top frame on the extended deque. The frame is popped, leaving the extended deque empty. The worker continues by executing an unconditional steal of the parent frame.
When executing a return from a spawn, we again make the distinction between a stack frame and a full frame. If the frame where the return leaves from is a stack frame, then the frame is popped and execution resumes in the parent. If the frame where the return leaves from is a full frame, then it is again the top frame of the extended deque. The frame is popped, leaving the extended deque empty. The worker continues by executing a provably-good steal of the parent frame.
If a sync statement is executed inside a stack frame, then the runtime system does nothing. Otherwise, the current frame is a full frame and the scheduler will perform a counter-intuitive provably-good steal on the frame itself.
Cilk implements multiple stealing actions. Provably-good stealing and unconditional stealing try to continue the execution of the program in the most sensible way by continuing the execution on a frame that is a direct ancestor of the last frame executed. In contrast, random work stealing occurs when the worker has no good idea about what frame to execute next.
In random work stealing the worker randomly selects a victim worker to steal a call stack. Random selection of a victim is repeated until a victim is found with a non-empty deque. The oldest call stack is removed from the deque of the victim and every stack frame on it is converted to a full frame. For every such frame, the frame is added to its parent’s children list and the join counter of the parent frame is incremented. Similarly, the oldest frame in the extended spawn deque is converted to a full frame and its parent’s children list and join counter are updated. Finally, the runtime system executes a resume-full-frame action on the youngest frame that was stolen.
In a provably-good steal, if the join counter of the stolen frame is zero and no worker is working on the frame (it lives outside the spawn deques), the runtime system executes a resume-full-frame action on the frame. Otherwise, the runtime system performs random work stealing.
Unconditional steals occur when returning from a procedure call. In this case, the runtime system executes a resume-full-frame action on the frame. The resume-full-frame action on a frame pushes the frame on the worker’s extended deque and executes its continuation.
### C. Extensions for Task Graph Scheduling
Dependency-aware scheduling requires additional data structures to maintain a list of pending frames. Pending frames are created upon conditional spawns where the dependencies are not satisfied when the spawn statement executes. Pending frames live outside the spawn deques because there is no worker that is executing them. Therefore, pending frames are necessarily full frames.
To support dependency-aware scheduling, we add to each full frame a list of pending frames (Figure 6). We will explain later how this list is organized. Also, stack frames and full frames are extended to store the tickets that tasks acquire, wait on and release.³
³Stack frames need such information in order to acquire tickets when the stack frame is converted to a full frame.
On a conditional procedure spawn executed from a stack frame, the spawned procedure is executed as if it were an unconditional spawn, because we track only dependencies within the scope of a single parent frame. In this case, the parent and its children are executing serially.
On a conditional procedure spawn executed from a full frame, we acquire all objects passed as an argument with a memory access mode. If all objects are ready, then a new stack frame is allocated and it is pushed on the worker’s extended spawn deque. The worker continues by executing the spawned procedure. Otherwise, if not all objects are ready, then a new pending frame is created and it is stored in the pending list of the parent. The worker continues with the execution of the parent, immediately after the spawn statement. Note that, during this process, the parent never becomes stealable by another worker because the child frames are not pushed on the spawn deque. Hereby, we can efficiently generate pending frames.
On a return from a conditional spawn that leaves a stack frame, we perform no additional actions because the spawn is executed as if it were an unconditional spawn. On a return from a conditional spawn that leaves a full frame, all acquired objects are released, potentially waking up pending frames. The extended spawn deque is now empty. The worker continues with a provably-good steal of the parent of the frame that was finished.
Actions for unconditional spawns and calls and the corresponding returns are unmodified from the original scheduler. The main difference in the scheduler is in the implementation of the stealing algorithms.
Random work stealing is modified in two ways. First, when a stack frame is converted to a full frame, we check if it has been created by a conditional spawn. If so, then all objects passed to arguments with dependency attributes are now acquired. These objects must be ready because we track only dependencies between the children of a common parent and in this case, the parent has only one child.
Second, we introduce a new steal situation. In particular, if random stealing selects a victim that has only one call stack on its extended deque, then we investigate the parent of the top frame on the victim's deque. If this parent does not belong to any spawn deque (which means its execution is stuck in a `sync` statement), then we perform a steal-ready-child action on the parent frame. This effectively steals a sibling of the top frame of the victim's deque.
Note that we only attempt a steal-ready-child action when the parent has reached a `sync` statement. In other cases, we prefer to retry random stealing until the worker is found whose extended deque contains the parent frame. (Alternatively, random stealing may identify a victim that is executing a different branch of the computation; this could be a part of the program unrelated to out-of-order execution of tasks, or it could be a different out-of-order section.) The baseline random stealing actions are sufficient to move the parent frame to the thief's extended deque. Continuing the execution of the parent in this case has the advantage that additional pending frames are created, increasing the scope of out-of-order execution. Moreover, it is possible that the parent frame executes spawn statements that are immediately executable.
Provably-good stealing of a frame still resumes the frame if the join counter is zero. Otherwise, if the frame (which is full) has pending children, we perform a retrieve-ready-child action on the frame.
The steal-ready-child action locates a ready frame in the pending list. Because we lazily maintain the pending list and because we do not separately maintain a list of ready frames, the steal-ready-child action must necessarily traverse the pending list in serial fashion. Note however that this action is performed only when the parent has already arrived at a sync statement. As such, all pending frames in this out-of-order execution phase have been generated and the phase is progressing towards its end. The list will thus shrink rapidly, speeding up the search. Moreover, once a ready task has been located this way, the scheduler will perform a retrieve-ready-child action as the result of returning from the spawned task on a full frame. This effectively bypasses the expensive steal-ready-child operation.
The retrieve-ready-child action locates a ready frame in the pending list. In contrast to the steal-ready-child action, this action is executed after an out-of-order task has finished execution. Because finishing tasks wake up other tasks, it is now possible to start the search in the pending list from a position that is near ready frames. We organize the pending list in such a way that this search completes successfully very quickly, as described shortly below.
If either of the actions above finds a ready child in the pending list, the runtime system then executes a resume-full-frame action on the frame. Note that objects have already been acquired when the pending frame was created. If no ready child can be located in the pending list, then the runtime system performs random work stealing. The unconditional steal action is unmodified. Note that the runtime system has been carefully designed such that serial execution of a procedure with conditional spawns proceeds without introducing important overhead. This is a consequence of applying the work-first principle to dependency-aware scheduling.
D. Organization of the Pending List
Frames in the pending list are organized by their depth in the task graph. The depth of a task $T$ is defined as the maximum length of a path of dependent tasks in the task graph that ends in $T$. Tracking the depth of a task is fairly simple while the information is sufficient to quickly retrieve ready tasks.
To track the depth of a task, we extend the metadata of each versioned object with a depth field. Each new version of an object is assigned depth zero. Also, full frames are extended with a depth field. The depth of objects and tasks is updated when objects are acquired. When a task acquires objects passed with access modes \texttt{input} and \texttt{in/out}, then the depth of the object is updated as the newly computed depth of the task, plus one. Using the depth of each task, we organize the pending list as a list of
TABLE I
| Benchmark | Input | Serial $T_S$ | Cilk++ $T_3$ | Unified $T_6$ | $T_S/T_3$ | $T_3/T_6$ | $T_S/T_6$ |
|---|---|---|---|---|---|---|---|
| cholesky | 3000x3000 sparse matrix, 16x16 blocks | 10.10 | 10.09 | 0.65 | 15.53 | 8.85 | 0.65 |
| fft | 16M data points | 11.97 | 14.09 | 0.96 | 12.46 | 12.34 | 0.90 |
| heat | 4Kx4K grid, 100 time steps | 35.66 | 36.48 | 4.19 | 8.51 | 35.95 | 4.25 |
| lu | 4Kx4K matrix, 16x16 blocks | 40.21 | 38.69 | 2.10 | 19.14 | 34.23 | 2.14 |
| rectmul | 4Kx4K dense matrices | 64.64 | 65.61 | 3.44 | 18.79 | 69.55 | 3.92 |
| spacemul | 4Kx4K dense matrices | 64.41 | 62.67 | 3.32 | 19.40 | 65.92 | 3.82 |
| strassen | 4Kx4K dense matrices | 45.18 | 48.05 | 7.18 | 8.72 | 45.33 | 5.10 |
Fig. 7. Scalability graphs for the Cilk benchmarks.
V. EVALUATION
We implemented our unified scheduler as a C++0x library that provides spawn and sync (although with a function call syntax) as well as versioned objects and the memory access types. We use the type introspection facilities of C++0x to analyze the signature of spawned procedures for arguments with memory access modes. Our implementation of the cactus stack differs from that of Cilk-5 in the sense that spawned procedures actually execute on the cactus stack whereas Cilk-5 stores only the data on it that is live across a spawn statement. This is effected by direct manipulation of the stack pointer using inline assembly code. Our system knows only one code version per procedure whereas Cilk-5 uses a micro-scheduled version to resume frames after a steal and a nano-scheduled version that is optimized for serial execution [10].
We experimentally validate the performance of our unified scheduler by comparing its performance to the Cilk++ and SMPSS schedulers. Hereto, we use a set of benchmarks distributed with Cilk and a set of benchmarks distributed with SMPSS. In each case, algorithms are blocked and we retain the original block sizes. We scale up to the problem size to match the performance of current processors.
The experimentation machine contains 4 quad-core AMD Opteron 8350 HE processors clocked at 2GHz and runs the Ubuntu 9.10 operating system. We compile our scheduler using gcc 4.6, we use the Cilk++ compiler which is based on gcc 4.2.4 and we use a custom compiler. We compute speedups relative to the serial elision of the benchmarks, which we compile using gcc 4.6. In each case, the optimization level is set to -O4. We use GotoBLAS2 (rev. 1.13) [17] for the implementation of BLAS kernels, when required by the benchmarks.
A. Comparison to Cilk++
Table I shows the Cilk benchmarks that we use in this study, together with some performance metrics. Figure 7 shows scalability graphs for the Cilk++ scheduler and our unified scheduler. The graphs show the speedup compared to the serial elision of the Cilk++ programs. These results show that the performance of our unified scheduler is quite comparable to Cilk++.
In the cases of rectmul and spacemul, the performance with the Cilk++ scheduler scales better than linear. We suspect that this is due to NUMA effects, as these benchmarks allocate and initialize memory in parallel, distributing it across multiple NUMA nodes. The other benchmarks initialize memory sequentially.
B. Comparison to SMPSS
Table II and Figure 8 show the performance of SMPSS and the unified scheduler on the SMPSS benchmarks. The unified scheduler gives comparable performance to SMPSS on 3 out of 5 benchmarks. It outperforms SMPSS on jacobi, which has very fine-grained tasks (32-word copy). Furthermore, SMPSS suffers performance anomalies when executing transpose with a high thread count. We will show next that SMPSS does not admit as fine-grain tasks as the unified scheduler, which explains the poor results for SMPSS above.
C. Fork/Join vs. Task-Dataflow Style
In this section we compare the fork/join style of programming to the task-dataflow style. We have created a matrix multiplication benchmark in both styles based on the Cilk rectmul benchmark and using BLAS dgemm in the leaf tasks.
Figure 9 shows performance measurements for a varying number of threads and for block sizes of 16x16, 32x32 and 64x64. The task-dataflow style is very sensitive to the block size. The performance of SMPSS degrades for fine-grain leaf tasks on 16x16 blocks. Performance is bad even on a single thread. This is caused by a very high constant overhead in the runtime. There are also performance anomalies on 32x32 blocks. Performance is, however, good for a 64x64 block size.
The unified scheduler also has performance deficiencies on 16x16 blocks but it behaves much better than SMPSS. Note that the single-thread performance on 16x16 blocks is comparable to the performance of Cilk++ on 16x16 blocks. In other words, performance overhead related to dependency tracking is successfully avoided by the design of our scheduler.
Matrix multiplication in the task-dataflow style reaches
implement the final serial stage, while the unified version uses task dependencies. Both versions obtain comparable speedup. The hmmer code is taken from the SPEC CPU2006 integer suite. In this case, the last pipeline stage is a reduction operation. We show two versions on the unified scheduler: one where the last pipeline stage is implemented by means of a lock and one where it is implemented by task ordering. The latter case is more restrictive in terms of parallelism, but the results show that, for two or more threads, both versions obtain comparable results. The Cilk++ version also uses locks but it is somewhat slower, a consequence of building on an older version of the gcc compiler which performs less aggressive optimization of the inner loops of the benchmark.
VI. RELATED WORK
Several task dependency aware languages and schedulers have been described in the recent literature, e.g. SuperMatrix [4], StarPU [5], SMPSS [3], CellSS [20]. All of them detect inter-task dependencies at runtime by comparing memory accesses made by tasks. Similar to OpenCL [21], StarPU allows name-based dependency tracking between tasks. This allows the programmer to explicitly state dependencies between tasks, irrespective of their memory side-effects.
To the best of our knowledge, all schedulers cited above explicitly maintain the task graph. While this approach is sensible, it turns out that it is expensive in terms of locking tasks. In contrast, we designed a scheduler that avoids such overheads, although we have to pay a small price when recovering ready tasks.
SMPSS looks up object metadata by means of a hash table that is indexed with the starting address of an object [3]. As such, metadata lookup and renaming are completely invisible to the programmer, but the hash table lookup implies runtime overhead. StarPU, on the other hand, requires that the programmer registers the objects used in dependency tracking [5]. The runtime system returns a descriptor that contains the object metadata. Tasks must reference this descriptor to enable dependency tracking. This system removes runtime overhead related to looking up object metadata. Our system has similar benefits to StarPU as the metadata always resides with the object, although our language provides a better abstraction.
Some systems require that the complete memory footprints of tasks are specified. This facilitates off-loading tasks on accelerator processors such as the Cell processor [20], [22] and GPUs [23]. In [2], tasks are distributed across nodes that communicate by means of MPI.
StarPU schedules tasks based on the predicted execution time of tasks [23]. Execution time models are calibrated by fitting measurement data to polynomial equations where the free variables describe problem sizes (e.g. matrix dimension). The scheduler assigns tasks to the processor that is predicted to complete the task earliest.
Nabbit [7] is a library that schedules the execution of task graphs using the Cilk++ language. The paper also provides upper bounds on the parallel execution time of task graphs with the Nabbit scheduler.
Concurrent Collections (CnC) is a programming model that allows mixing task and data parallelism [24]. It is an implicitly parallel and deterministic programming model where the user specifies high-level operations along with semantic ordering constraints. Together, these define a CnC graph. CnC does not specify a scheduler by itself, but can be targeted to one of many task dataflow schedulers.
The SMPSS language implements reductions with reduction in/out arguments. The scheduler allows tasks with dependencies arising from reduction arguments to execute simultaneously. It is assumed that such tasks lock the shared variable when it is updated. Cilk++ provides reducer hyperobjects to implement reductions [15]. This construct can also be used in the context of our scheduler, provided that no task dependencies are specified on the reduction variable.
VII. CONCLUSION
This paper presented a language and a scheduler that supports both fork/join parallel programming and task dataflow parallel programming. The scheduler extends work-first scheduling with task dependency-aware scheduling in a way that introduces minimal overhead. We demonstrate that our scheduler is as efficient as the Cilk++ scheduler on fork/join programs and that it is more efficient than SMPSS, a dependency-aware scheduler. In particular, our scheduler is more well-behaved on small task granularities, while SMPSS shows severe performance anomalies.
The language allows algorithms to be structured either in fork/join style or in task-dataflow style, depending on what is more appropriate for the algorithm at hand. As such, the overhead of dependency tracking need not be paid on truly parallel algorithms.
In future work, we plan to extend ticket-based dependency tracking to accesses to partially overlapping memory regions. This is still an open problem in dependency-aware scheduling. Such a system is necessary to mix the fork/join and dependency-aware styles in a single algorithm and may allow higher performance and/or better performance portability.
ACKNOWLEDGMENT
Hans Vandierendonck is a Postdoctoral Fellow of the Research Foundation – Flanders (FWO). This research was performed at FORTH-ICS sponsored by a Travel Grant of the FWO. The research leading to these results has received funding from the European Community’s Seventh Framework Programme [FP7/2007-2013] under the ENCORE Project (http://www.encore-project.eu), grant agreement n° 248647, the TEXT Project (http://www.project-text.eu/), grant agreement n° 261580, and under the European Network of Excellence on High Performance and Embedded Architecture and Compilation (HiPEAC, http://www.hipeac.net), grant agreement n° 217068.
REFERENCES
PROBLEMS OF CONCURRENT PROCESSING
Apart from interprocess communication (which we assume to be expected), processes can interact with each other in many ways. We have seen how the hardware, in conjunction with the operating system, prevents unwanted interference in principle. All requests for resources, including more memory, printers, files etc. are mediated by the operating system. This makes it impossible under most circumstances for one process to use a resource which is currently allocated to another process.
We have also seen, though, that in other situations processes want to share a resource; sharing memory is a prime example. In this case the processes themselves must ensure that the shared resource is used in a sensible manner, without data from one process getting jumbled with data from another process. To coordinate their actions, the processes must communicate with each other. Well, that's all right – they can communicate through the shared resource. Of course, to achieve reliable communication, the processes must ensure that the shared resource is used in a sensible manner...... We have been here before. It is clear that they need help from the operating system. But how exactly does it work?
These two problems are essentially the same. A solution to the first, restricting access to a resource, would enable processes sharing a resource to coordinate their activity on the shared resource. In effect, access to the shared resource is voluntarily restricted in the interests of reliable communication.
MUTUAL EXCLUSION
In the chapter PROCESSES IN COMPUTERS, we noticed that we would require some means of ensuring that resources accessible to more than one process can be temporarily reserved for a single process so that it can perform some operation without any danger of the resource being changed while the operation was in progress.
To give a simple illustration, suppose there is a shared memory buffer which receives characters from one process which manages an input device, and from which characters are taken by a second process which must interpret them. Every time an end-of-line character is received, the interpreting process is activated, and the input process begins again at the beginning of the buffer. It is clearly necessary to ensure that the input process cannot use the buffer while the interpreting process is reading it, for if that were not so the input process could overwrite the characters in the buffer before the interpreter had used them. (It is tempting to rely on the slowness of typing and the high speed of the interpreting process to make the system work without special provision for safety – which is all very well, until someone redirects the input and takes it from a disc file. It never works.) Similarly, we don't really want the interpreter process to use the contents of the buffer until the input process has reached the end of the line.
This sort of relationship between processes which share resources is very common, and we must be able to restrict access to the shared entity to one process for an arbitrary period of time. This restriction of access is known as mutual exclusion. Only one process at a time is allowed to use the resource (so it's also called exclusive access).
Locks.
We will start by looking at what happens within the kernel itself when a resource is about to be allocated to a process. Something like the following code might be used internally by the kernel to ensure restricted access to a resource:
```plaintext
if this resource is in use then
    put the requesting process to sleep;   { it will eventually wake up here }
mark the resource as being used;           { preventing other processes from getting the resource }
```
This code manages transitions between the *runnable* and *waiting* states in the process state diagram. It provides ( or attempts to provide ) the mutual exclusion which we required, and is known as a *lock*. In the simplest case the resource is marked as in use by setting a specific value in a variable, known as a *lock variable*, corresponding to the resource. ( Other housekeeping operations are also likely to be necessary, but here we shall restrict our discussion to the communication issues. ) Every resource which is protected by this method must have a lock variable ( which might be a single lock bit ).
We see that restricting access to any resource, hardware or software, can be done the same way. Here's the same code presented rather more formally in terms of a variable `X.lock` which acts as the lock variable for a resource `X`. We require code for two operations, which we shall call `lock` and `unlock`.
```plaintext
lock( X ) :
if X.lock
then begin
link the PCB to a queue of processes waiting for X;
suspend the current process;
end
else X.lock := true;
unlock( X ) :
if there is a process waiting for X
then make it runnable
else X.lock := false;
```
Though there's a little more to say about it yet, it's clear that this code is doing what we need in order to enforce the mutual exclusion we require. Once a process has the resource, no other process can take it away.
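Outside the kernel, the same lock/unlock pairing is what a thread library exposes to ordinary programs. Here is a minimal sketch using POSIX threads (our own illustration, not part of the original text); pthread_mutex_lock and pthread_mutex_unlock play the roles of lock( X ) and unlock( X ):

```c
#include <pthread.h>

static pthread_mutex_t X_lock = PTHREAD_MUTEX_INITIALIZER;  /* the lock variable for X */

void use_X(void)
{
    pthread_mutex_lock(&X_lock);    /* lock( X ) : may put the caller to sleep         */
    /* ... do things with the shared resource X ... */
    pthread_mutex_unlock(&X_lock);  /* unlock( X ) : wakes one waiting thread, if any  */
}
```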
The code outlined above means well – but, as we know from our daily life, meaning well is not always enough. In practice, we have to guard against possible ill effects on our good intentions from external sources, and it is the same with the operating system. The code is, by definition, running in a multiprogramming system, for if there were no other process active, we wouldn't need to worry about cooperation. There are therefore other things going on, some of which are likely to involve interrupts. Interrupts might suspend this process, and, as we have seen, it might not be resumed immediately – so circumstances might change without the process being aware of it. This can cause trouble.
Let us assume that the resource required is not currently being used when the process ( call it A ) makes its system call to request the resource. The system starts work on the code, and first finds that the resource is not in use, so doesn't suspend the process. Just before it gets as far as marking the resource as being used, an interrupt occurs. This interrupt does whatever it has to do, but then starts a different process – say, B.
B also requests the same resource by similarly making a system call. The system starts work on the code, and first finds that the resource is not in use, so doesn't suspend B. This time, though, no interrupt occurs, so the request succeeds, the resource is marked as being used, and B carries on, confident that all is well for it to use the resource. At some later time, while B is still using the resource, some other pattern of interrupts starts A again, and it carries on, as it should, from exactly where it left off. Because it has already checked that the resource is not being used, it also returns successfully from the system call believing that it can safely proceed to use the resource. In pictures :
```plaintext
Process A      System, working for A                    System, working for B                    Process B

lock( X ) :    if X.lock
               then ( not locked, so doesn't happen )

                        <<<<<< Context switch <<<<<<

                                                         if X.lock                                lock( X ) :
                                                         then ( not locked, so doesn't happen )
                                                         else X.lock := true;
                                                                                                  continues,
                                                                                                  using X ...

                        <<<<<< Context switch <<<<<<

               else X.lock := true;
continues ...
```
In other words the system did not ensure exclusive access to the resource.
The problem is the possibility of a process switch between testing to see if the resource is in use and marking the resource as used. There are many solutions to this problem. The simplest is to notice that the criminal in the story is the interrupt, so if we prevent interrupts the problem is solved. Here's a revised version of the `lock` code (`unlock` works, so doesn't need revision):
```plaintext
lock( X ) :
disable interrupts;
if X.lock
then begin
link the PCB to a queue of processes waiting for X;
suspend the current process;
{ – and don't forget to turn on the
interrupts again when starting a new process ! }
end
else X.lock := true;
permit interrupts;
```
This works provided that the story is sufficiently realistic – which it is, provided that there is only one processor in the system. And that, in turn, works because the hardware absolutely and immutably guarantees mutual exclusion so far as access to the processor is concerned. We are relying on one sort of mutual exclusion to implement another.
But that no longer works if there are several processors, each of which can address the lock variable. (You have to be quite careful in this section not to confuse `processes` – the operating system abstraction, with `processors` – the hardware entities which do the work. The first sentence of this paragraph was referring to multiple processors.) If that's so, disabling interrupts does not stop a process currently running on another processor from reading the variable.
Even in single-processor systems it is not generally a good idea to prevent interrupts if it can be avoided. The longer the time for which interrupts are switched off, the worse the service for any operation which needs interrupts, and the greater the chance that important events might be missed. There is also a greater chance that the interrupts might not get switched on again, which can be catastrophic. Some other method is needed.
The accept-interrupts-but-don't-switch-immediately solution.
One widely used solution to this problem is to postpone context switching. Whenever the system is in supervisor mode no forced context switches are allowed. The diagram shows that this will certainly work for the example given. This is the solution used by Unix. A clock interrupt can be used to mark the time at which the current process has used up its allotted ration of time in execution; if such an interrupt occurs while the processor is in system mode, the system sets a flag to record the occurrence and then continues with the supervisor call. When the system mode operation is complete and it is time to return to user mode the flag is checked; if it is set, the current process is returned to a waiting state and the next process resumed.
Notice that interrupts are not disabled. In some cases the computer can be spending quite a lot of time in kernel mode and many sources of interrupts have to be serviced as soon as possible after generating the interrupt.
In this way our operating system can ensure mutual exclusion over resources using code very similar to that outlined above.
This solution is still only good for a single processor system. There are in fact purely software solutions to this problem even for multiprocessor systems. They are all based on the fact that only one processor can access the contents of a particular memory location at a time – once again, we are using mutual exclusion enforced by the hardware to implement mutual exclusion more generally.
Test-and-set instructions.
Most processors provide indivisible instructions, i.e. instructions which are guaranteed to run to completion without being broken into either by an interrupt or by a memory access from another processor. All we need to make our lock work is an instruction like this which allows the testing of a variable and changing it to a given value ensuring no other access to the variable until the changed value has been successfully stored.
There are a variety of such instructions; we will look at one example, commonly called the test-and-set instruction.
It works like this:
```
testandset( a, b )
```
The value of b is copied into a and b (whatever its previous value) is set to some standard value, which we shall call true. Using this instruction we can implement secure mutual exclusion.
If we want to use resource X and the lock variable for this resource is known as X.lock:
```
repeat
testandset( busy, X.lock )
until not busy;
```
If you are unaccustomed to concurrent processing you should wonder how the Boolean variable busy can ever become false if the first time through the loop it is true, since testandset() always makes X.lock true.
The answer is that if the value of busy is true the first time through the loop, another process has exclusive access to the resource X. In this case there will come a time when it releases its hold on the resource by setting X.lock to false – so the full code for our operation must be something like this:
```
repeat
testandset( busy, X.lock )
until not busy;
...... Do things with X ....
X.lock := false;
```
This solution definitely works, but it is still not the solution we want to use because it causes unfriendly behaviour.
First of all, any process requesting an unavailable resource will "busy wait" : the process keeps executing the repeat loop until the other process releases the resource, or until it is displaced from the processor by an interrupt. In a machine with a single processor, this is almost always a complete waste of time, since the process which currently has control of the resource cannot be running simultaneously. If no other interrupt can happen, it stops the system completely. In a multiprocessor the situation is not so bad, for other processes can continue using other processors, but it is still something we want to avoid.
Locks which work on the principle of busy waiting are known as *spin locks*.
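For concreteness, here is a minimal spin lock written with the C11 atomics library (our own sketch, not from the original text); atomic_flag_test_and_set is the standard library's counterpart of the testandset instruction described above:

```c
#include <stdatomic.h>

static atomic_flag X_lock = ATOMIC_FLAG_INIT;   /* clear = resource X is free */

void spin_lock(void)
{
    /* testandset: atomically read the old value and set the flag to true */
    while (atomic_flag_test_and_set(&X_lock))
        ;                                       /* busy wait until not busy */
}

void spin_unlock(void)
{
    atomic_flag_clear(&X_lock);                 /* X.lock := false */
}
```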
The other major problem with this solution is that there is no way of being fair to processes requesting the resource. Whichever process happens to run straight after the resource is released will pick it up, regardless of the fact that another process might have been waiting a long time for the resource.
To solve these problems we must be able to suspend a process if the resource it requires is not immediately available, and wake it up when it is deemed fair that it should receive the resource. We will postpone a closer look at this until we have studied semaphores.
**Semaphores.**
Sometimes we don't require a specific resource, but are happy to have any device which will do the job we want done – for example, if you want to print something it might not matter which printer you use. In this case it does not make sense to insist that a process wait for a particular printer. By allocating to the process the first acceptable printer which becomes available we will improve the service to the user. Compare queueing in a bank : before the days of the single-queue-to-multiple-tellers all the other queues always moved faster than the queue which you joined. We hasten to add that, apart from this rather subjective evidence, there are good sound reasons from queueing theory to believe that the best overall service is indeed given by multiple servers with a single queue.
This was the original motivation behind *semaphores* introduced by Edsger Dijkstra in 1965. Dijkstra defined a semaphore as an integer counter which is acted on by the indivisible operations P and V, and an initialization. The indivisibility means that only one process can use the semaphore at a time.
Take S as our example semaphore. So far as counting goes, it behaves just like an integer variable, but it has the peculiar characteristic that it never becomes negative. V is an increment operation :
```plaintext
V( S ) :
    S := S + 1;
```
P is a decrement operator which always reduces the value of the counter by 1. What if the counter is zero when P is applied ? If the counter must not become negative, but P must always reduce its value, then P will have to wait until the current value of S is at least 1.
```plaintext
P( S ) :
    wait until S > 0;
    S := S - 1;
```
This is all very well, but what does it have to do with resource allocation and mutual exclusion ? That's easy : think of the integer counter as recording the number of printers ( or whatever ) that are available. ( That should make it clear why the counter can't be negative. ) Whenever a printer is allocated to a process, the count is decremented; and whenever a process finishes with a printer and returns it to the operating system, the count is incremented. That makes it clear what the initial value of the counter must be : it must be set to the number of resources which that semaphore is controlling. Clearly, if the value is to retain this significance, we can't just execute P and V indiscriminately; instead,
every execution of P (corresponding to the allocation of a resource) must normally be followed eventually by a single execution of V (when the resource is released). The normal pattern is therefore:
```plaintext
P( Printers );
............ do things with the allocated printer.
V( Printers );
```
Apart from that, P and V should be left strictly alone, except in two special circumstances, both directly related to the counting function. It is sensible to use V when a resource is added to the system, either when the system is started or at any subsequent time. Using V once during each addition makes sure that the counter ends up with the correct value. Similarly, it is reasonable to use P when a resource is removed from the system (for maintenance or repair, for example).
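As an illustration of this pattern outside the kernel, here is a minimal sketch using POSIX counting semaphores, where sem_wait and sem_post play the roles of P and V; the printer pool and the helper names are our own invention for the example:

```c
#include <semaphore.h>
#include <stdio.h>

#define NUM_PRINTERS 3          /* initial value of the counter: resources available   */

static sem_t printers;          /* the counting semaphore controlling the printer pool */

/* hypothetical stand-in for "do things with the allocated printer" */
static void print_job(const char *text) { printf("printing: %s\n", text); }

void submit_job(const char *text)
{
    sem_wait(&printers);        /* P : allocate a printer, blocking if none are free */
    print_job(text);
    sem_post(&printers);        /* V : release the printer                           */
}

int main(void)
{
    sem_init(&printers, 0, NUM_PRINTERS);   /* counter starts at the number of printers */
    submit_job("hello");
    sem_destroy(&printers);
    return 0;
}
```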
You might notice that the little code fragment above is quite like another little code fragment which we presented while discussing locks. That isn't accidental; consider a semaphore controlling a resource of just one item. Such a semaphore can only take on values of 0 and 1 (so it is called a binary semaphore) and it is exactly equivalent to a lock. Non-binary semaphores are sometimes called counting semaphores or general semaphores to distinguish them, but as there's no real distinction that isn't particularly helpful.
There is no theoretical difference between a lock and a semaphore, but there's a practical difference: it's easier to implement test-and-set as an atomic hardware instruction than increment-and-return-the-value. That's because the new value to be stored in the variable for a lock is independent of the original value, while for a semaphore there is (depending on the sort of semaphore you want) an arithmetic or logical function to perform. Particularly with small processors, it's therefore often easier to implement locks than counting semaphores.
The theoretical identity depends on both locks and semaphores being correctly implemented; there is no guarantee that an incorrectly implemented lock will be equivalent to a semaphore, binary or not! In practice, the main difficulty in managing a correct implementation of a semaphore is just the same as that for a lock: the P and V operations must, by definition, be indivisible, which leads us straight back to the discussion about indivisible locks. This time, there is no easy answer corresponding to test-and-set, because the values of the semaphore variable are not restricted to true and false, so if we get the implementation wrong the result might well be that the semaphore variable ends up with an impossible value, implying that there are too many or too few resources.

And this is just a bit embarrassing, because the obvious answer is to use a lock to implement the semaphore. For example:
```plaintext
P( S ) :
    mine := false;
    repeat
        lock( S );
        if S > 0
            then begin
                S := S - 1;
                mine := true;
            end;
        unlock( S );
    until mine;
```
That will work – provided that we can implement the lock properly. Everything depends on locks; once again, we're unable to implement mutual exclusion unless we have some already.
You might reasonably wonder why the two semaphore operations were given such helpful names as P and V. The reason is that Edsger Dijkstra is Dutch, so, sensibly enough, used Dutch words in his original definition. We do not speak Dutch, but, according to one source, V stood for "verhogen" – to increment – and P stood for "proberen" – to test. Without in any way disparaging the Dutch language, we find these difficult to remember, and you will recall that we believe that instructions should be easy to remember and understand. In the interests of consistency, then, ( and for the sake of remembering which one is which ) we shall henceforth call them signal ( as in "signal that a resource is available" ) and wait ( as in "wait, if necessary, until a resource is available" ). Though not the only alternative names ( some texts call them up and down for reasons which might – or might not – become apparent later ), wait and signal are perhaps the most common.
Now we return to the problem of busy waiting. In Dijkstra's original description of semaphores there was no mention of what happens at the word "wait" in the P ( wait ) operation. What can happen ? So far, all we've seen is the busy wait, but we didn't like that much. The obvious alternative is perhaps to suspend the process for a while, then let it have another try, and keep going until it's lucky. That's an improvement so far as processor use is concerned, but not the way we'd like to run our operating system.
For such reasons, it has become standard to associate a queue with each semaphore. This is a much more disciplined way of controlling the resource allocation, and we have much more control over the rules – so, depending on how we organise the queue, we can offer a first-come-first-served service ( simple queue ), or take account of priorities ( with a priority queue ).
And so signal and wait become :
```plaintext
signal( S ) :
    if anything waiting on S
        then start the first process on the S queue
        else S := S + 1;

wait( S ) :
    if S < 1
        then put this process on the S queue   ( this means the process is stopped )
        else S := S - 1;
```
In this implementation the value of S always tells us how many of the resources are currently available. Also note how much more work the signal operation has to do compared to the simple V operation of Dijkstra.
An even more useful implementation is :
```plaintext
signal( S ) :
    S := S + 1;
    if S < 1
        then start the first process on the S queue;

wait( S ) :
    S := S - 1;
    if S < 0
        then put this process on the S queue;
```
Under this definition the semaphore S indicates two different things. If S is negative it means that there are abs( S ) processes waiting in the queue. If S is not negative it means that there are S of the resources available and that no processes are waiting in the queue.
The only reason semaphores work is that both semaphore operations are indivisible or atomic, i.e. from the programmer's point of view a signal or a wait runs to completion before any other process can access the semaphore. It is possible to provide atomic semaphore instructions in hardware, as with the testandset instruction; however, it is also easy to construct secure signal and wait routines by making entry to them mutually exclusive via the testandset lock we saw earlier.
Apart from the mutual exclusion aspect of semaphores they also provide a convenient method for synchronizing processes, as we mentioned when discussing interprocess communication. In this case one of the processes can signal the other which is waiting.
As an example of this we will look at the classic producer/consumer relationship, in which one process produces results which the other consumes or uses. ( Mutual exclusion is still here: the resource is the buffer which carries the result from the producer to the consumer. )
```pascal
program producerconsumerrelationship;
var
    numberdeposited : semaphore;
    numberreceived : semaphore;
    numberbuffer : integer;

procedure producerprocess;
var
    nextresult : integer;
begin
    while true do begin
        calculate( nextresult );
        wait( numberreceived );
        numberbuffer := nextresult;
        signal( numberdeposited )
    end
end;

procedure consumerprocess;
var
    nextresult : integer;
begin
    while true do begin
        wait( numberdeposited );
        nextresult := numberbuffer;
        signal( numberreceived );
        use( nextresult )
    end
end;

begin
    semaphoreinitialize( numberdeposited, 0 );
    semaphoreinitialize( numberreceived, 1 );
    cobegin
        producerprocess;
        consumerprocess
    coend
end.
```
The cobegin ... coend pseudo-Pascal keywords indicate that all statements between them can theoretically run in parallel. In a multiprocessor both the producerprocess and consumerprocess could actually run simultaneously, with the synchronization being handled by the semaphores.
One nice thing about this implementation of the producer/consumer problem is that it is possible to have several producer and consumer processes without changing our code (except by adding more calls inside the cobegin ... coend). This might be desirable, for example, if the consumption of the next result takes a lot longer than the production. One producer could keep many consumers happy.
In case you didn't notice this is a user level example. The operating system would provide the semaphore routines as system calls. Semaphores can also be used within the kernel to provide the mutual exclusion that operating system resources require.
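To make the system-call flavour concrete, here is a minimal sketch of the same single-slot buffer using POSIX threads and semaphores; the fixed iteration count and the stand-ins for calculate and use are our own simplifications, not part of the original program:

```c
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>

static sem_t numberdeposited;   /* counts results waiting in the buffer (starts at 0) */
static sem_t numberreceived;    /* counts free slots in the buffer (starts at 1)      */
static int   numberbuffer;

static void *producerprocess(void *arg)
{
    for (int next = 1; next <= 5; next++) {   /* stand-in for calculate( nextresult ) */
        sem_wait(&numberreceived);            /* wait for the buffer slot to be free  */
        numberbuffer = next;
        sem_post(&numberdeposited);           /* signal that a result is available    */
    }
    return NULL;
}

static void *consumerprocess(void *arg)
{
    for (int i = 0; i < 5; i++) {
        sem_wait(&numberdeposited);           /* wait for a result                    */
        int next = numberbuffer;
        sem_post(&numberreceived);            /* free the buffer slot                 */
        printf("consumed %d\n", next);        /* stand-in for use( nextresult )       */
    }
    return NULL;
}

int main(void)
{
    pthread_t p, c;
    sem_init(&numberdeposited, 0, 0);
    sem_init(&numberreceived, 0, 1);
    pthread_create(&p, NULL, producerprocess, NULL);
    pthread_create(&c, NULL, consumerprocess, NULL);
    pthread_join(p, NULL);
    pthread_join(c, NULL);
    return 0;
}
```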
Flashback to busy-waiting.
At the end of our discussion on the test-and-set instruction, we promised that the busy-waiting problems would be cleared up. This is it.
All is well if we use a simple lock to guarantee the indivisibility of the semaphore operations. At first sight, this is an outrageous statement: we've seen that the same problems turn up in both cases, so how can one cure the other? But contemplate these two points: first, busy-waiting is only a problem when the resource concerned isn't available (obvious); and, second, access to semaphores is almost always possible, because — unlike the resources which they protect — they are only actively in use momentarily during the allocation and release operations. There is still no universally excellent solution — but there are satisfactory solutions for both cases:
- If there is only one processor, then we can implement the lock by preventing context switches while the semaphore operation is in progress — that is, by ensuring exclusive access to the processor.
- If there are several processors, then we cannot prevent multiple access by that method unless we also stop the other processors, which is bad in principle, and gets worse as the number of processors increases (more processes mean both more interrupts and more disruption per interrupt). A solution which only affects the requesting process is therefore necessary, so we have to fall back on a spin lock. But this is no longer such a dreadful thing; it doesn't stop all processing, because the other processors carry on, and we know that it won't be necessary for very long.
The trick works because we now have a two-level system: the semaphore controls a long-term lock, but can itself be implemented with a short-term lock, for which we can use methods which are not acceptable for long-term control.
Event counters.
Another method for guaranteeing results with concurrent programs, this time without using mutual exclusion, is the event counter, developed by Reed and Kanodia in 1977. The operations which work on event counters are advance, read, and await. Event counters always start at zero.
```plaintext
advance( E ) :
    E := E + 1;
    if any processes awaiting the new value of E then
        start these processes;

read( E ) :
    return the value of E;

await( E, count ) :
    if E < count then
        put this process to sleep, awaiting E = count;
```
Because event counters don't require mutual exclusion in order to work, the processes themselves have to put in a bit more effort, as can be seen in the following producer/consumer solution, modelled on the semaphore solution.
```pascal
program producerconsumerrelationship;
var
    numberdeposited : eventcount;
    numberreceived : eventcount;
    numberbuffer : integer;

procedure producerprocess;
var
    i : integer;
    nextresult : integer;
begin
    i := 0;
    while true do begin
        calculate( nextresult );
        i := i + 1;
        await( numberreceived, i - 1 );
        numberbuffer := nextresult;
        advance( numberdeposited )
    end
end;

procedure consumerprocess;
var
    i : integer;
    nextresult : integer;
begin
    i := 0;
    while true do begin
        i := i + 1;
        await( numberdeposited, i );
        nextresult := numberbuffer;
        advance( numberreceived );
        use( nextresult )
    end
end;

begin
    cobegin
        producerprocess;
        consumerprocess
    coend
end.
```
Event counters alone are not powerful enough to provide generalised mutual exclusion to a resource. The difficulty lies in determining what number the counter should wait for. We saw a way around this in the producer/consumer problem. There is a way to make event counters work as providers of mutual exclusion by adding a new concept of tickets. This is similar to being given a ticket in a shop and waiting for your turn to come up.
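The ticket idea can be sketched directly. Here is a minimal ticket lock using C11 atomics (our own illustration, not from the original text): each arriving thread takes the next ticket number and waits until the now-serving counter, which behaves like an event counter, reaches it, giving first-come-first-served ordering:

```c
#include <stdatomic.h>

typedef struct {
    atomic_uint next_ticket;    /* the "take a ticket" dispenser              */
    atomic_uint now_serving;    /* the event-counter-like "now serving" sign  */
} ticket_lock;

void ticket_lock_init(ticket_lock *l)
{
    atomic_init(&l->next_ticket, 0);
    atomic_init(&l->now_serving, 0);
}

void ticket_lock_acquire(ticket_lock *l)
{
    /* take a ticket: each caller gets a distinct number */
    unsigned int my_ticket = atomic_fetch_add(&l->next_ticket, 1);

    /* wait until our number comes up (still a spin lock, so the same caveats apply) */
    while (atomic_load(&l->now_serving) != my_ticket)
        ;   /* busy wait */
}

void ticket_lock_release(ticket_lock *l)
{
    /* advance the "now serving" counter, releasing the next waiter in arrival order */
    atomic_fetch_add(&l->now_serving, 1);
}
```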
Messages.
Semaphores and event counters are rather low level ways of providing synchronization. Even though it might not be obvious at first it is also possible to coordinate processes with messages. A simple send and receive protocol is adequate.
```
send( toprocess, message );
```
sends the message to the process `toprocess`. `toprocess` might be any process from a group of processes. In other words the receiving process doesn't have to be unique.
```
receive( fromprocess, messagebuffer );
```
waits for a message from `fromprocess` and stores the message in `messagebuffer`. `fromprocess` may be any process from a group of processes.
Using this system our producer/consumer solution becomes:
```pascal
procedure producerprocess;
var
    nextresult : integer;
begin
    while true do begin
        calculate( nextresult );
        send( consumerprocess, nextresult );
    end;
end;

procedure consumerprocess;
var
    nextresult : integer;
begin
    while true do begin
        receive( producerprocess, nextresult );
        use( nextresult )
    end;
end;
```
followed by the same main program as before.
This example seems to show that message passing is simpler than the other methods we have seen. However you need to realize that the producer/consumer problem is concerned with passing information from one process to another – which is exactly what message passing deals with.
It is clear that messages must not be lost if this method is to work. As we have seen, either the send must block until the message has been received, or the message system must buffer messages until they are received. This leads us to the conclusion that although programming a solution with message passing is simpler than the other solutions, there is more going on behind the scenes.
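To suggest what is going on behind the scenes, here is a minimal sketch (ours, not from the original) of a blocking single-slot mailbox built from a POSIX mutex and condition variables; a real message system would also have to deal with queueing, addressing and multiple senders:

```c
#include <pthread.h>

typedef struct {
    pthread_mutex_t lock;
    pthread_cond_t  not_full;
    pthread_cond_t  not_empty;
    int             full;       /* 1 when the slot holds an undelivered message */
    int             value;      /* the single-slot message buffer               */
} mailbox;

void mailbox_init(mailbox *m)
{
    pthread_mutex_init(&m->lock, NULL);
    pthread_cond_init(&m->not_full, NULL);
    pthread_cond_init(&m->not_empty, NULL);
    m->full = 0;
}

/* send blocks until the previous message has been received */
void mailbox_send(mailbox *m, int message)
{
    pthread_mutex_lock(&m->lock);
    while (m->full)
        pthread_cond_wait(&m->not_full, &m->lock);
    m->value = message;
    m->full = 1;
    pthread_cond_signal(&m->not_empty);
    pthread_mutex_unlock(&m->lock);
}

/* receive blocks until a message is available */
int mailbox_receive(mailbox *m)
{
    pthread_mutex_lock(&m->lock);
    while (!m->full)
        pthread_cond_wait(&m->not_empty, &m->lock);
    int message = m->value;
    m->full = 0;
    pthread_cond_signal(&m->not_full);
    pthread_mutex_unlock(&m->lock);
    return message;
}
```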
Compared to semaphores and event counters the message method of providing mutual exclusion has advantages and disadvantages. The most important advantage of using messages is that they can be used easily on distributed systems (i.e. over a network). The major disadvantage is the speed (or lack of it) with which processes can communicate.
Being careful.
Before we carry on we need to talk about the difficulty of correct concurrent programming. The producer/consumer example is one of the simplest and yet even experienced programmers can get it wrong. Here is a semaphore solution to the problem which was published in a very popular textbook (here altered slightly to fit our conventions).
```pascal
program producerconsumerrelationship;
var
    exclusiveaccess : semaphore;
    numberdeposited : semaphore;
    numberbuffer : integer;

procedure producerprocess;
var
    nextresult : integer;
begin
    while true do begin
        calculate( nextresult );
        wait( exclusiveaccess );
        numberbuffer := nextresult;
        signal( exclusiveaccess );
        signal( numberdeposited )
    end;
end;

procedure consumerprocess;
var
    nextresult : integer;
begin
    while true do begin
        wait( numberdeposited );
        wait( exclusiveaccess );
        nextresult := numberbuffer;
        signal( exclusiveaccess );
        use( nextresult )
    end;
end;

begin
    semaphoreinitialize( exclusiveaccess, 1 );
    semaphoreinitialize( numberdeposited, 0 );
    cobegin
        producerprocess;
        consumerprocess
    coend
end.
```
This solution is actually modelled on one in Dijkstra's original semaphore paper. However this implementation of it is incorrect. The author makes sure that the consumer process will always wait until a result has been produced, however there is nothing to prevent the producer process getting out of step. It only has to wait for exclusive access to the buffer. This means there is nothing to stop it going around and around several times if the consumer process is held up for any reason after releasing the buffer. If this happens previously calculated results will be overwritten before they are consumed.
By the way, Dijkstra's solution didn't have this problem; his buffer was infinitely long and could accept results without overwriting.
So we see how easy it is to make a mistake when we try to coordinate activity between two or more processes. Since it is the operating system's job to make using the machine easier, wouldn't it be appropriate to provide assistance in this situation as well?
Forgetting to unlock.
The most common danger is forgetting to unlock something which has been locked, thereby preventing any other process ever accessing that resource. This is especially easy to do when a section of code can be left in several ways.
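As a concrete illustration (ours, not from the original text), here is the kind of C code in which this mistake is easy to make, together with the usual remedy of funnelling every exit through a single unlock point:

```c
#include <pthread.h>

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;

/* Buggy version: the early return leaves table_lock held forever. */
int lookup_buggy(int key, int *value)
{
    pthread_mutex_lock(&table_lock);
    if (key < 0)
        return -1;               /* oops: forgot to unlock before leaving */
    *value = key * 2;            /* stand-in for the real table lookup    */
    pthread_mutex_unlock(&table_lock);
    return 0;
}

/* Safer version: every way out of the locked region passes the unlock. */
int lookup(int key, int *value)
{
    int result = 0;
    pthread_mutex_lock(&table_lock);
    if (key < 0)
        result = -1;
    else
        *value = key * 2;
    pthread_mutex_unlock(&table_lock);
    return result;
}
```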
Monitors.
A monitor is a mutual exclusion construct which can solve this problem along with some other security problems. Rather than giving each process the ability to control their own resource access (which semaphores and event counters do), a monitor is an external construct which completely controls use of a resource. There is no way to use a resource except by asking the monitor.
The word "monitor" is thoroughly overloaded in the vocabulary of computing. There are at least four quite different meanings for the word. We have already mentioned one of them; when looking at the history of operating systems, we discussed monitor systems.
The meaning in the context of this chapter is to do with concurrency. The appropriate meaning should always be obvious from the context – but take this as a warning to be careful!
In a monitor not only are there shared resources, there are also shared routines to deal with those resources. In fact the only way the resources can be manipulated is with the routines provided by the monitor.
Not only does the monitor contain variables and procedures; it also restricts access to one process at a time. In other words, whenever a process wants to execute a shared routine the monitor checks to see if any other process is currently executing code inside the monitor. If there is such a process, the calling process has to wait until the process currently inside the monitor has left it. In fact a number of processes might be trying to access monitor routines at the same time. These processes will be stored in a queue awaiting service.
It is useful to think of the monitor as having one guarded entry point. This entry point queues processes and sends them to their required routines when it is their turn to enter the monitor.
Since only one process is allowed in the monitor at any time and the monitor is the only way to access the shared resources we are guaranteed mutual exclusion.
Sometimes it is necessary for a process to wait whilst still possessing mutual exclusion over a resource. This is a situation fraught with danger as we will see when discussing deadlock. In a monitor it does not make sense to stop all access to the monitor if the process currently running needs to wait for something, especially if that something can only be provided by another process running in the monitor. The solution is to have special variables known as condition variables.
There are wait and signal calls on condition variables just as we had with semaphores. The difference is that they are much simpler to implement, in that they are inside a protected area and there is no count associated with the variable.
A wait on a condition variable will stop the currently running process. In this case another process is then allowed to enter the monitor. The process will continue only after another process calls signal on the same condition variable. This means the associated resource is available. If no processes are waiting when there is a signal, nothing happens, and the signal is lost.
If more than one process is waiting on the same condition variable, the monitor must decide which one is to run.
There is a design problem which must be dealt with when talking about condition variables. When a signal indicates that a waiting process can proceed we will have two processes running in the monitor. One way of dealing with this is to hold all the signals until the currently running process either leaves the monitor or waits for some other condition. In this case the running process must ensure it does not change the condition which it has signalled. A better solution halts the signalling process and restarts the signalled one. The simplest method is to force signalling processes to leave the monitor. In this way a signal is the last thing a process can do in the monitor.

The only way monitors can be used is if they are part of the language. We will see a monitor solution to a classic problem later.

The properties of monitors are those of their components and the interactions between them. Monitors encapsulate all or most of the items listed in this table:
| Component | Role |
| --- | --- |
| Resource | the thing being protected or handed out. |
| Local data | only accessible from inside the monitor. |
| Scheduler | the monitor might have to make decisions about which process should enter next. The scheduler might ensure processes aren't postponed indefinitely. |
| Queues | when a process is inside the monitor and has to wait for something it is put to sleep on one of these. There is a separate queue and associated condition variable for each reason that a process might wait whilst inside the monitor. When a process waiting on a condition variable is released it must run before a new process enters the monitor. |
| Procedures | the routines which do all of the manipulation of the shared variables. |
| Initialization | for example, to set up the number of printers. |
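Most modern thread libraries provide these ingredients directly. As a rough sketch (ours, using POSIX threads rather than a monitor-aware language), a one-printer monitor might look like the following; the mutex plays the part of the guarded entry point and the condition variable plays the part of a queue. POSIX condition variables do not hand the monitor over immediately (the woken thread must re-enter later), which is why the wait is wrapped in a while loop:

```c
#include <pthread.h>

/* A tiny "monitor" guarding one printer. */
static pthread_mutex_t monitor = PTHREAD_MUTEX_INITIALIZER;   /* the guarded entry point */
static pthread_cond_t  printer_free = PTHREAD_COND_INITIALIZER; /* the queue             */
static int printer_in_use = 0;                                 /* the protected resource */

void acquire_printer(void)
{
    pthread_mutex_lock(&monitor);       /* enter the monitor                        */
    while (printer_in_use)
        pthread_cond_wait(&printer_free, &monitor);  /* wait, releasing the monitor */
    printer_in_use = 1;
    pthread_mutex_unlock(&monitor);     /* leave the monitor                        */
}

void release_printer(void)
{
    pthread_mutex_lock(&monitor);
    printer_in_use = 0;
    pthread_cond_signal(&printer_free); /* wake one waiter, if any                  */
    pthread_mutex_unlock(&monitor);
}
```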
Equivalence of solutions.
Now that we have seen a variety of solutions to the problem of providing mutual exclusion it is interesting to wonder which is the best solution. We have already seen some of the advantages semaphores and event counters have over messages and vice versa.

In one sense all of the methods we have looked at, simple locks, semaphores, event counters, messages and monitors, are equivalent. It is possible to implement any one of them using any other one. We will have a look at a couple of examples.
Semaphore implementation of a monitor.
As we have seen a semaphore is a relatively low level device to provide mutual exclusion. Using a semaphore to implement a monitor is almost trivial. The important thing about the monitor is that only one process is allowed to run inside it at a time. All we need to do is to place a semaphore at the entrance to the monitor. This semaphore is initialised to one, restricting the number of processes inside the monitor to one. Each process wanting to enter the monitor does a wait on this semaphore and most processes leaving the monitor signal it.
There is a complication with condition variables. We decided that the simplest way of handling signals on condition variables was to ensure that the signalling process then left the monitor. Now it must leave the monitor in a different way from that outlined above. If it does a semaphore signal on the guarding semaphore another waiting process might run. We have to guarantee that any processes waiting on the condition variable run first. So the signal on the condition variable must check to see if any processes are waiting on this variable. If a process is waiting it must be restarted. One way to accomplish this is by associating a semaphore with each condition variable.
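Here is a sketch of that construction for a monitor with a single condition variable, using POSIX semaphores and our own names (the original gives no code for it): the guard semaphore protects the monitor, the condition gets its own semaphore plus a count of waiters, and a signalling process hands the monitor directly to the process it wakes and then leaves:

```c
#include <semaphore.h>

static sem_t guard;             /* binary semaphore guarding the monitor entrance   */
static sem_t cond_queue;        /* processes waiting on the condition sleep here    */
static int   cond_waiters = 0;  /* how many processes are waiting on the condition  */

void monitor_init(void)  { sem_init(&guard, 0, 1); sem_init(&cond_queue, 0, 0); }
void monitor_enter(void) { sem_wait(&guard); }
void monitor_leave(void) { sem_post(&guard); }

/* wait on the condition variable: give up the monitor and go to sleep */
void condition_wait(void)
{
    cond_waiters++;             /* safe: we still hold the monitor                  */
    sem_post(&guard);           /* let other processes into the monitor             */
    sem_wait(&cond_queue);      /* sleep until signalled; the signaller hands the
                                   monitor straight to us, so we do not re-wait on
                                   the guard here                                   */
}

/* signal the condition and leave the monitor, as described in the text */
void condition_signal_and_leave(void)
{
    if (cond_waiters > 0) {
        cond_waiters--;
        sem_post(&cond_queue);  /* wake a waiter and pass the monitor to it         */
    } else {
        sem_post(&guard);       /* nobody waiting: the signal is lost, just leave   */
    }
}
```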
Message implementation of a semaphore.
As our second example we will construct a lower level mechanism, a semaphore, out of a more complicated one, messages.
The method is to have a controlling process which receives all of the wait and signal messages and sends messages informing processes when they can continue running.
A wait is a send to this process followed by a receive waiting for a reply.
When a wait message is sent to this controlling process it checks the value of what is effectively the semaphore count. If this indicates the resource is free this count is decremented and it sends a reply message straight away to the requesting process. If the resource is not available the reply is not sent until a corresponding signal is received. In the meantime the requesting process is blocked waiting for a reply.
Hopefully these two examples show the approach to take in trying to implement any of the mutual exclusion constructs in terms of any other one.
At a logical level these different constructs are equally powerful. At the practical level they all have their advantages and disadvantages, the most important being the trade-off between flexibility and safety. We have seen that monitors are safer in the sense that a programmer is less likely to make a mistake using them. This safety comes at the price of inflexibility. In certain situations (SIGOS 1992) monitors can lead to far more context switching than a semaphore solution to the same coding problem.
ANOTHER CLASSIC PROBLEM: THE DINING PHILOSOPHERS.
Another very well known problem, not because it occurs often but because it is so elegant, is the problem of the Dining Philosophers. This problem was developed by the father of the semaphore, Edsger Dijkstra.
- We have a round table around which are sitting five philosophers.
- In front of each philosopher is a plate with spaghetti.
- Each philosopher thinks for a while and then gets hungry and wants to eat for a while.
Each philosopher thinks and eats independently of the others. The times each one spends thinking and eating might vary.
The problem is that there are only five forks on the table, one between each pair of philosophers. To eat the spaghetti a philosopher must be holding the two forks immediately to the left and right. (At this point someone usually comments that if that's the way philosophers go about hygiene, then it's no wonder that there are so few of them. Or that it's much easier to eat spaghetti with one fork anyway, so why don't we reformulate the problem with chopsticks? These are both sound comments, but we are bound by the weight of tradition.)
Obviously not all the philosophers can eat at once. We must write a procedure for each philosopher in such a way that they can all carry on with their business with no philosophers starving to death whilst they meditate on the meaning of life or why people enrol in Computer Science courses. All the philosopher procedures are run concurrently.
Our first attempt using semaphores might be something like this:
```plaintext
procedure philosopher( name : integer );
begin
    while true do
    begin
        think;
        wait( name );               /* semaphore associated with the fork to the right */
        wait( ( name+1 ) mod 5 );   /* the fork to the left */
        eat;
        signal( name );
        signal( ( name+1 ) mod 5 )
    end;
end;
```
What happens if by coincidence all philosophers manage to get as far as getting the fork on their right? Unfortunately we end up with five dead philosophers: they all starve.

This is an example of deadlock, where a cycle of processes are each waiting on resources held by other processes in the cycle. We will talk about deadlock in detail later in the course.
To stop this problem we might try doing a wait on both forks at once.
```plaintext
while true do
begin
    think;
    simultaneous_wait( name, ( name+1 ) mod 5 );
    eat;
    simultaneous_signal( name, ( name+1 ) mod 5 )
end;
```
Here we have introduced a new sort of mutual exclusion primitive, a simultaneous wait and signal. In this case the running process is suspended whenever either resource is unavailable. While it is suspended it doesn't hold either of the resources it is waiting for. It is only restarted when both resources are available for it.

Well, this certainly solves our deadlock problem. A philosopher never holds one fork at a time; always either two or none. This means that the philosopher will eventually finish eating and return both forks, allowing those adjoining philosophers to try to grab two forks. The deadlock problem is solved.
Imagine what could happen to some unlucky philosopher though. Assume the philosophers are numbered from 1 to 5 around the table. Say it works out that whenever philosopher 1 stops eating philosopher 3 always starts and vice versa. Poor philosopher 2 will still starve. This is a case of indefinite postponement (sometimes known as starvation). Oh well, I suppose philosopher 2 just has to be philosophical about the situation.
It seems that every new mutual exclusion construct gets applied to the Dining Philosophers to show how elegant the new method is.

There is a nice Monte Carlo solution to this problem which entails two philosophers tossing a coin to solve contention for a fork. This always works in the average case but can still lead to indefinite postponement, even though that is very unlikely.
Apart from the trivial solution of imposing mutual exclusion over any attempt to eat, allowing only one philosopher to eat at a time, most solutions to this problem entail keeping track of the states of the philosophers so that decisions can be made according to what surrounding philosophers are doing.
Here is our monitor solution. We hope this shows the power of monitors.
```pascal
var
    forkAvail : array [ 0..4 ] of boolean;
    forkWait : array [ 0..4 ] of conditionVariable;

procedure GetBothForks( name : integer );
begin
    if not forkAvail[ name ] then
        Wait( forkWait[ name ] );
    forkAvail[ name ] := FALSE;
    if not forkAvail[ ( name + 1 ) mod 5 ] then
        Wait( forkWait[ ( name + 1 ) mod 5 ] );
    forkAvail[ ( name + 1 ) mod 5 ] := FALSE;
end;

procedure PutBackBothForks( name : integer );
begin
    forkAvail[ name ] := TRUE;
    forkAvail[ ( name + 1 ) mod 5 ] := TRUE;
    Signal( forkWait[ name ] );
    Signal( forkWait[ ( name + 1 ) mod 5 ] )
end;

begin
    for i := 0 to 4 do
        forkAvail[ i ] := TRUE
end. { monitor ForkControl }
```
Where each philosopher looks like:
```pascal
procedure Philosopher( name : integer );
begin
    while TRUE do
    begin
        Think;
        GetBothForks( name );
        Eat;
        PutBackBothForks( name )
    end;
end;
```
Note on preemption ( What is this doing here ? ).
To preempt something generally means to get in before it and stop it from happening. When we talk of preemptive scheduling we mean that a currently running process can be stopped wherever it is in its code, and another process can be started. Usually preemptive scheduling is contrasted with non-preemptive scheduling ( obviously enough ) and cooperative scheduling.
Non-preemptive scheduling means that the process doesn't have to give up the processor, except when it wants to. It is tempting to think that this means that all processes would run to completion before another process can begin. In reality this is not the case. Processes don't always have all necessary resources. When a process cannot proceed until it has acquired some resource, typically input, there is no point in the process holding on to the processor, and the request for the resource will lead to a context switch.
Cooperative scheduling is a specialised type of non-preemptive scheduling. With cooperative scheduling the processes are supposed to go out of their way to enable other processes to get a fair share of the processor. This means that someone, or something (the compiler), has inserted code into each process to request a context switch after every few (hundred thousand) instructions. Cooperative scheduling has usually been implemented on single user systems which were not intended to be multiprogramming when originally designed, and where the mechanisms for preempting processes do not exist. (Several systems built on top of MS-DOS are good examples of this. The Macintosh dispatcher is also based on a cooperative scheme.)
Of course there is no guarantee that programmers will follow the rules of cooperative scheduling, which is why all multi-user operating systems have to use preemptive scheduling to ensure that users are treated fairly. On a single user system, if one process is receiving more processor time than it should, this is mitigated by the fact that the work is being done for one person.
Whenever different solutions to the same problem are implemented in operating systems there are also hybrid solutions which try to get the best features of multiple solutions. As an example we have seen that Unix postpones context switches until the process is about to return to the user running state. This means that Unix doesn’t implement strict preemptive process scheduling. There is a period of time that the process keeps running until it is ready to relinquish the processor. Of course the user level programmer has no say over the matter.
COMPARE:
Silberschatz and Galvin: Chapter 6.
REFERENCES.
QUESTIONS.
Consider the "disable interrupts" version of the lock() procedure. How can you guarantee that the interrupts will indeed be switched on when the new process starts?
Modify the semaphore solution to the producer/consumer problem allowing a ring buffer for the output.
Earlier we said that the semaphore solution to the producer/consumer problems works unchanged with multiple consumers and producers. Does the event counter solution also scale correctly? If not, explain what the difficulty is.
Modify the event counter solution to the producer/consumer problem allowing a ring buffer for the output.
How would the message passing producer/consumer problem be generalized to allow multiple producers and consumers?
In some ways the traditional Unix kernel is like a monitor. Make a list of similarities and differences between the two.
Design the mutual exclusion primitives simultaneous_wait and simultaneous_signal mentioned above.
What unwanted limitations are included in our monitor solution to the Dining Philosophers problem?
Message passing can be equivalent in power to semaphores. How could you implement semaphores with an asynchronous message passing system similar to that shown above?
"google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 58940, null]], "pdf_page_numbers": [[0, 2607, 1], [2607, 5738, 2], [5738, 8568, 3], [8568, 10940, 4], [10940, 13512, 5], [13512, 16935, 6], [16935, 20066, 7], [20066, 22659, 8], [22659, 25584, 9], [25584, 28547, 10], [28547, 31666, 11], [31666, 33351, 12], [33351, 36533, 13], [36533, 39572, 14], [39572, 42789, 15], [42789, 44809, 16], [44809, 48094, 17], [48094, 51117, 18], [51117, 54307, 19], [54307, 58012, 20], [58012, 58940, 21]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 58940, 0.0461]]}
|
olmocr_science_pdfs
|
2024-12-06
|
2024-12-06
|
fa0aa02b6f85d83c3c8c46fb786a5368ba7f4cef
|
[REMOVED]
|
{"len_cl100k_base": 15605, "olmocr-version": "0.1.53", "pdf-total-pages": 30, "total-fallback-pages": 0, "total-input-tokens": 67120, "total-output-tokens": 19751, "length": "2e13", "weborganizer": {"__label__adult": 0.0004630088806152344, "__label__art_design": 0.0003888607025146485, "__label__crime_law": 0.00028824806213378906, "__label__education_jobs": 0.00450897216796875, "__label__entertainment": 6.580352783203125e-05, "__label__fashion_beauty": 0.00019741058349609375, "__label__finance_business": 0.0004475116729736328, "__label__food_dining": 0.00035190582275390625, "__label__games": 0.0006504058837890625, "__label__hardware": 0.0005908012390136719, "__label__health": 0.00040602684020996094, "__label__history": 0.0002264976501464844, "__label__home_hobbies": 0.000118255615234375, "__label__industrial": 0.00033092498779296875, "__label__literature": 0.0003788471221923828, "__label__politics": 0.0002777576446533203, "__label__religion": 0.0004761219024658203, "__label__science_tech": 0.00411224365234375, "__label__social_life": 0.00014913082122802734, "__label__software": 0.0035724639892578125, "__label__software_dev": 0.98095703125, "__label__sports_fitness": 0.0003635883331298828, "__label__transportation": 0.0005335807800292969, "__label__travel": 0.0002007484436035156}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 76435, 0.03617]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 76435, 0.29078]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 76435, 0.90751]], "google_gemma-3-12b-it_contains_pii": [[0, 1691, false], [1691, 4890, null], [4890, 7846, null], [7846, 9634, null], [9634, 12486, null], [12486, 15752, null], [15752, 19006, null], [19006, 21162, null], [21162, 22974, null], [22974, 24892, null], [24892, 27167, null], [27167, 29275, null], [29275, 32028, null], [32028, 34884, null], [34884, 37789, null], [37789, 40281, null], [40281, 42178, null], [42178, 45037, null], [45037, 46621, null], [46621, 48220, null], [48220, 51653, null], [51653, 53272, null], [53272, 56005, null], [56005, 58332, null], [58332, 61624, null], [61624, 64925, null], [64925, 65085, null], [65085, 69660, null], [69660, 74617, null], [74617, 76435, null]], "google_gemma-3-12b-it_is_public_document": [[0, 1691, true], [1691, 4890, null], [4890, 7846, null], [7846, 9634, null], [9634, 12486, null], [12486, 15752, null], [15752, 19006, null], [19006, 21162, null], [21162, 22974, null], [22974, 24892, null], [24892, 27167, null], [27167, 29275, null], [29275, 32028, null], [32028, 34884, null], [34884, 37789, null], [37789, 40281, null], [40281, 42178, null], [42178, 45037, null], [45037, 46621, null], [46621, 48220, null], [48220, 51653, null], [51653, 53272, null], [53272, 56005, null], [56005, 58332, null], [58332, 61624, null], [61624, 64925, null], [64925, 65085, null], [65085, 69660, null], [69660, 74617, null], [74617, 76435, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 76435, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 76435, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 76435, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 76435, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 76435, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 76435, 
null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 76435, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 76435, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 76435, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 76435, null]], "pdf_page_numbers": [[0, 1691, 1], [1691, 4890, 2], [4890, 7846, 3], [7846, 9634, 4], [9634, 12486, 5], [12486, 15752, 6], [15752, 19006, 7], [19006, 21162, 8], [21162, 22974, 9], [22974, 24892, 10], [24892, 27167, 11], [27167, 29275, 12], [29275, 32028, 13], [32028, 34884, 14], [34884, 37789, 15], [37789, 40281, 16], [40281, 42178, 17], [42178, 45037, 18], [45037, 46621, 19], [46621, 48220, 20], [48220, 51653, 21], [51653, 53272, 22], [53272, 56005, 23], [56005, 58332, 24], [58332, 61624, 25], [61624, 64925, 26], [64925, 65085, 27], [65085, 69660, 28], [69660, 74617, 29], [74617, 76435, 30]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 76435, 0.18254]]}
|
olmocr_science_pdfs
|
2024-12-11
|
2024-12-11
|
703be88da38029b08456c83f2063738fe49f7505
|
Conversion and Emulation-aware Dependency Reasoning for Curation Services
Yannis Tzitzikas and Yannis Marketakis and Yannis Kargakis
Institute of Computer Science, FORTH-ICS
Computer Science Department, University of Crete, Greece
{tzitzik|marketak|kargakis}@ics.forth.gr
ABSTRACT
A quite general view of the digital preservation problem and its associated tasks (e.g. intelligibility and task-performability checking, risk detection, identification of missing resources for performing a task) is to approach it from a dependency management point of view. In this paper we extend past rule-based approaches to dependency management in order to also model converters and emulators, and we demonstrate how this modeling allows performing the desired reasoning and thus enables offering more advanced digital preservation services. Specifically, these services can greatly reduce the human effort required for periodically checking (monitoring) whether a task on a digital object is performable.
1. INTRODUCTION
In digital preservation there is a need for services that help archivists in checking whether the archived digital artifacts remain intelligible and functional, and in identifying the consequences of probable losses (obsolescence risks). To tackle the aforementioned requirements [14] showed how the needed services can be reduced to dependency management services, and how a semantic registry (compatible with OAIS\(^1\)) can be used for offering a plethora of curation services. Subsequently, [15] extended that model with disjunctive dependencies. The key notions of these works are module, dependency and profile. In a nutshell, a module can be a software/hardware component or even a knowledge base expressed either formally or informally, explicitly or tacitly, that we want to preserve. A module may require the availability of other modules in order to function, be understood or managed. We can denote such dependency relationships as \( t > t' \) meaning that module \( t \) depends on module \( t' \). A profile is the set of modules that are assumed to be known (available or intelligible) by a user (or community of users), and this notion allows controlling the number of dependencies that have to be recorded formally (or packaged in the context of an encapsulation preservation strategy). Subsequently, and since there is not any objective method to specify exactly which are the dependencies of a particular digital object, [10] extended the model with task-based dependencies, where the notion of task is used for determining the dependencies of an object. That work actually introduced an extensible object-oriented modeling of dependency graphs expressed in Semantic Web (SW) languages (RDF/S). Based on that model, a number of services have been defined for checking whether a module is intelligible by a community (or for computing the corresponding intelligibility gap), or for checking the performability of a task. These dependency management services were realized over the available SW query languages. For instance, GapMgr\(^2\) and PreScan\(^3\) are two systems that have been developed based on this model, and have been applied successfully in the context of the EU project CASPAR\(^4\). Subsequently, [16] introduced a rule-based model which also supports task-based dependencies, and (a) simplifies the disjunctive dependencies of [15], and (b) is more expressive and flexible than [10] as it allows expressing the various properties of dependencies (e.g. transitivity, symmetry) straightforwardly. That work actually reduced the problem of dependency management to Datalog-based modeling and query answering.
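To make the notions of dependency, profile and gap more tangible, the following sketch (hypothetical Python with invented module names; it is not part of any of the cited systems) computes the transitive closure of the `t > t'` relation and the gap of a module with respect to a profile.

```python
# Minimal sketch of modules, dependencies (t > t') and profiles.
# All module names are illustrative, not taken from a concrete registry.

DEPENDS_ON = {                      # t -> set of modules t directly depends on
    "HelloWorld.class": {"JVM"},
    "JVM": {"WinOS"},
}

def closure(module):
    """All modules (transitively) required by `module`."""
    needed, stack = set(), [module]
    while stack:
        for dep in DEPENDS_ON.get(stack.pop(), set()):
            if dep not in needed:
                needed.add(dep)
                stack.append(dep)
    return needed

def gap(module, profile):
    """Dependencies of `module` not covered by the community profile."""
    return closure(module) - profile

print(gap("HelloWorld.class", profile={"JVM"}))   # -> {'WinOS'}
```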
However, the aforementioned works did not capture converters and emulators. Since conversion (or migration) and emulation are quite important preservation strategies, a dependency management approach should allow explicitly modeling converters and emulators (and analyzing them from a dependency point of view, since they have to be preserved too), and exploiting them in the offered preservation services. For example, a sequence of conversions can be enough to eliminate an intelligibility gap, or to allow performing a task. Since there is a plethora of emulation and migration approaches that concern various layers of a computer system (from hardware to software) or various source/target formats (e.g. see [3] for an overview), it is beneficial to use advanced knowledge management techniques for exploiting all possibilities that the existing and emerging emulators/converters enable, and for assisting preservation planning (e.g. [1]). This is crucial since the scale and complexity of information assets and systems evolve towards overwhelming the capability of human archivists and curators (whether system administrators, programmers or designers).
\(^1\)Open Archival Information System (ISO 14721:2003).
\(^2\)http://athena.ics.forth.gr/9000/Applications/GapManager/
\(^3\)http://www.ics.forth.gr/isl/PreScan
\(^4\)http://www.casparpreserves.eu/
In a nutshell, the main contributions of this paper are: (a) we extend the rule-based approach of [16] for modeling explicitly converters and emulators; (b) we demonstrate how this modeling, apart from capturing the preservability of converters and emulators, enables the desired reasoning regarding intelligibility gaps, task performability, risk detection etc.; (c) we introduce an algorithm for visualizing the intelligibility gaps and thus assisting their treatment; and (d) we show how the approach can be implemented using recently emerged Semantic Web tools. The rest of this paper is organized as follows. Section 2 discusses the motivation and the context of our work. Section 3 introduces the rule-based modeling and Section 4 discusses the corresponding inference services. Section 5 shows how the approach can be implemented using Semantic Web tools. Finally Section 6 summarizes, discusses related issues and identifies issues for further research.
2. CONTEXT AND BACKGROUND
Migration (according to Wikipedia) is a set of organized tasks designed to achieve the periodic transfer of digital materials from one hardware/software configuration to another, or from one generation of computer technology to a subsequent generation. The purpose of migration is to preserve the integrity of digital objects and to retain the ability for clients to retrieve, display, and otherwise use them in the face of constantly changing technology. Emulation (according to Wikipedia) combines software and hardware to reproduce in all essential characteristics the performance of another computer of a different design, allowing programs or media designed for a particular environment to operate in a different, usually newer environment. Emulation requires the creation of emulators, programs that translate code and instructions from one computing environment so that they can be properly executed in another. Popular examples of emulators include QEMU [2], Dioscuri [17], etc. There is currently a rising interest in emulators for the needs of digital preservation [8]. Just indicatively, [18] overviews the emulation strategies for digital preservation and discusses related issues, and several recent projects have focused on the development of emulators for the needs of digital preservation (e.g. see [17] and [11]).
In brief, and from a dependency perspective, we could say that the migration process changes the dependencies (e.g. the original digital object depends on an old format, while the migrated digital object now depends on a newer format). Regarding emulation we could say that the emulation process does not change the dependencies of digital objects. An emulator essentially makes available the behavior of an old module (actually by emulating its behavior). It follows that the availability of an emulator can “satisfy” the dependencies of some digital objects, but we should note that the emulator itself has its own dependencies that have to be preserved to ensure its performability. The same also holds for converters.
Running Example
James has a laptop where he has installed the NotePad text editor, the javac 1.6 compiler for compiling Java programs and JRE1.5 for running Java programs (bytecodes). He is learning to program in Java and C++ and, to this end, he has created through NotePad two files, HelloWorld.java and HelloWorld.cc, the first being the source code of a program in Java, the second of one in C++. Consider another user, say Helen, who has installed on her laptop the VI editor and JRE1.5.
Suppose that we want to preserve these files, i.e. to ensure that in future James and Helen will be able to edit, compile and run these files. In general, to edit a file we need an editor, to compile a program we need a compiler, and to run the bytecodes of a Java program we need a Java Virtual Machine. To ensure preservation we should be able to express the above.
To this end we could use facts and rules. For example, we could state: A file is editable if it is a TextFile and a TextEditor is available. Since James has two text files (HelloWorld.java, HelloWorld.cc) and a text editor (NotePad), we can conclude that these files are editable by him. By a rule of the form: If a file is Editable then it is Readable too, we can also infer that these two files are readable. We can define more rules in a similar manner to express more task-based dependencies, such as compilability, runnability, etc. For our running example we could use the following facts and rules:
<table>
<thead>
<tr>
<th>Facts and Rules</th>
<th>James</th>
<th>Helen</th>
</tr>
</thead>
<tbody>
<tr><td>NotePad is a TextEditor</td><td>✔</td><td></td></tr>
<tr><td>VI is a TextEditor</td><td></td><td>✔</td></tr>
<tr><td>HelloWorld.java is a JavaFile</td><td>✔</td><td></td></tr>
<tr><td>HelloWorld.cc is a C++File</td><td>✔</td><td></td></tr>
<tr><td>javac1.6 is a JavaCompiler</td><td>✔</td><td></td></tr>
<tr><td>JRE1.5 is a JVM</td><td>✔</td><td>✔</td></tr>
<tr><td>gcc is a C++Compiler</td><td>✔</td><td></td></tr>
</tbody>
</table>
Table 1: Modeling the running examples with Facts and Rules
The last two columns indicate which facts are valid for James and which for Helen. From these we can infer that James is able to compile the file HelloWorld.java and that if James sends his TextFiles to Helen then she can only edit them but not compile them since she has no facts about Compilers.
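Read as plain data, the table already suffices for such simple inferences. A minimal sketch (hypothetical Python, reusing the names of the running example; not part of the systems described here) that derives compilability from the two profiles could look as follows.

```python
# Profiles as sets of (type, module) facts, following Table 1.
james = {("TextEditor", "NotePad"), ("JavaFile", "HelloWorld.java"),
         ("C++File", "HelloWorld.cc"), ("JavaCompiler", "javac1.6"),
         ("JVM", "JRE1.5"), ("C++Compiler", "gcc")}
helen = {("TextEditor", "VI"), ("JVM", "JRE1.5")}

def compilable(profile):
    """Pairs (file, compiler): Compilable(X,Y) :- JavaFile(X), JavaCompiler(Y)."""
    files = {m for (t, m) in profile if t == "JavaFile"}
    compilers = {m for (t, m) in profile if t == "JavaCompiler"}
    return {(f, c) for f in files for c in compilers}

print(compilable(james))                                       # {('HelloWorld.java', 'javac1.6')}
print(compilable(helen | {("JavaFile", "HelloWorld.java")}))   # set(): Helen has no compiler
```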
Let us now extend our example with converters and emulators. Suppose James has also an old source file in Pascal PL, say game.pas, and he has found a converter from Pascal to C++, say p2c++. Further suppose that he has just bought a smart phone running Android OS and he has found an emulator of WinOS over Android OS. It should follow that James can run game.pas on his mobile phone (by first converting it to C++, then compiling the outcome, and finally by running the executable yielded by the compilation over the emulator).
Regarding curation services, we have identified the following key requirements:
**Task-Performability Checking.** To perform a task we may have to perform other subtasks and to fulfill associated requirements for carrying out these tasks. Therefore, we need to be able to decide whether a task can be performed by examining all the necessary subtasks. For example, we might want to ensure that a file is runnable, editable or compilable. This should also exploit the possibilities offered by the availability of converters. For example, the availability of a converter from Pascal to C++, a compiler of C++ over Windows OS and an emulator of Windows OS over Android OS should allow inferring that the particular Pascal file is runnable over Android OS.
**Risk Detection.** The loss or removal of a software module could also affect the performability of other tasks that depend on it and thus break a chain of task-based dependencies. Therefore, we need to be able to identify which tasks are affected by such removals.
**Identification of missing resources to perform a task.** When a task cannot be carried out, it is desirable to be able to compute the resources that are missing. For example, if Helen wants to compile the file `HelloWorld.cc`, her system cannot perform this task since there is no C++ compiler. Helen should be informed that she should install a compiler for C++ to perform this task.
**Support of Task Hierarchies.** It is desirable to be able to define task-type hierarchies for gaining flexibility and reducing the number of rules that have to be defined.
**Properties of Dependencies.** Some dependencies are transitive, some are not. Therefore we should be able to define the properties of each kind of dependency.
**Background: Datalog**
Datalog is a query and rule language for deductive databases that syntactically is a subset of Prolog. As we will model our approach in Datalog this section provides some background material (the reader who is already familiar with Datalog can skip this section).
The basic elements of Datalog are: *variables* (denoted by a capital letter), *constants* (numbers or alphanumeric strings), and *predicates* (alphanumeric strings). A *term* is either a constant or a variable; a term consisting of a constant is called a *ground term*. An *atom* is a predicate applied to a list of terms, and a *literal* is an atom or a negated atom. A *clause* is a finite list of literals, and a *ground clause* is a clause which does not contain any variables. Clauses containing only negative literals are called *negative clauses*, while *positive clauses* are those containing only positive literals. A *unit clause* is a clause with only one literal. *Horn clauses* contain at most one positive literal. There are three possible types of Horn clauses, for which additional restrictions apply in Datalog:
- **Facts** are positive unit clauses, which also have to be ground clauses.
- **Rules** are clauses with exactly one positive literal. The positive literal is called the *head*, and the list of negative literals is called the *body* of the rule. In Datalog, rules also must be *safe*, i.e. all variables occurring in the head also must occur in the body of the rule.
- **Goal clauses** (queries) contain no positive literal; they represent the queries posed to the Datalog program.
In Datalog, the set of predicates is partitioned into two disjoint sets, $\text{Epred}$ and $\text{Ipred}$. The elements of $\text{Epred}$ denote extensionally defined predicates, i.e. predicates whose extensions are given by the facts of the Datalog programs (i.e. tuples of database tables), while the elements of $\text{Ipred}$ denote intensionally defined predicates, where the extension is defined by means of the rules of the Datalog program.
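To make the EDB/IDB distinction concrete, a deliberately naive bottom-up (fixpoint) evaluation can be sketched as follows; the encoding of facts as tuples and rules as Python functions is an assumption of this illustration, not part of Datalog itself.

```python
# Facts (EDB) are tuples (predicate, args...); rules derive new facts (IDB).
facts = {("JavaFile", "HelloWorld.java"), ("JavaCompiler", "javac1.6")}

def rule_compilable(fs):
    # Compilable(X,Y) :- JavaFile(X), JavaCompiler(Y).
    return {("Compilable", x, y)
            for f in fs if f[0] == "JavaFile" for x in [f[1]]
            for g in fs if g[0] == "JavaCompiler" for y in [g[1]]}

def rule_compile(fs):
    # Compile(X) :- Compilable(X,Y).
    return {("Compile", f[1]) for f in fs if f[0] == "Compilable"}

def fixpoint(fs, rules):
    """Apply all rules repeatedly until no new facts are produced."""
    while True:
        new = set().union(*(r(fs) for r in rules)) - fs
        if not new:
            return fs
        fs = fs | new

print(fixpoint(facts, [rule_compilable, rule_compile]))
```

Real Datalog engines use semi-naive evaluation and indexing rather than this brute-force loop, but the fixpoint idea is the same.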
The proposed implementation is described in Section 5.
### 3. THE RULE-BASED MODEL
In accordance to [16], digital files and profiles (as well as particular software archives or system settings) are represented by facts (i.e. database tuples), while task-based dependencies (and their properties) are represented as Datalog rules.
To assist understanding, Figure 1 depicts the basic notions in the form of a rather informal concept map, in the sense that a rule-based approach cannot be illustrated with a graph in a manner both intuitive and precise.
Figure 1: The basic notions, in the form of an informal concept map.
**Digital Files, Type Hierarchies, and Profiles**
Digital files and their types are represented as EDB facts using predicates that denote their types, e.g. for the three files of our running example we can have the facts shown in the left column of the following table. Software components are described analogously (e.g. see right column).
<table>
<thead>
<tr>
<th>Facts for digital files</th>
<th>Facts for software components</th>
</tr>
</thead>
<tbody>
<tr><td><code>JavaFile(HelloWorld.java)</code></td><td><code>TextEditor(vi)</code></td></tr>
<tr><td><code>C++File(HelloWorld.cc)</code></td><td><code>JVM(jre1.5win)</code></td></tr>
<tr><td><code>PascalFile(game.pas)</code></td><td><code>JVM(jre1.6linux)</code></td></tr>
</tbody>
</table>
Each file can be associated with more than one type. In general we could capture several features of the files (apart from types) using predicates (not necessarily unary), e.g. `LastModifDate(HelloWorld.java, 2008-10-18)`.
The types of the digital files can be organized *hierarchically*, and such taxonomies can be represented with rules, e.g. to define that every `JavaFile` is also a `UTF8File` we must add the rule `UTF8File(X) :- JavaFile(X)`.
A profile is a set of facts, describing the modules available (or assumed to be known) to a user (or community). For example, the profiles of James and Helen are the ticked facts in the corresponding columns of Table 1.
Task-Dependencies and Task Hierarchies
We will also use (IPred) predicates to model tasks and their dependencies. Specifically, for each real world task we define two intensional predicates: one (which is usually unary) to denote the (performability of the) task, and another one (with arity greater than one) for denoting the dependencies of the task. For instance, Compile(HelloWorld.java) will denote the compilability of HelloWorld.java. Since its compilability depends on the availability of a compiler (specifically a compiler for the Java language), we can express this dependency using a rule of the form: Compile(X) :- Compilable(X,Y) where the binary predicate Compilable(X,Y) is used for expressing the appropriateness of Y for compiling X. For example, Compilable(HelloWorld.java, javac 1.6) expresses that HelloWorld.java is compilable by javac 1.6. It is beneficial to express such relationships at the class level (not at the level of individuals), specifically over the types (and other properties) of the digital objects and software components, i.e. with rules of the form:
- Compilable(X,Y) :- JavaFile(X), JavaCompiler(Y).
- Compilable(X,Y) :- C++File(X), C++Compiler(Y).
- Runnable(X,Y) :- JavaClassFile(X), JVM(Y).
- Editable(X,Y) :- JavaFile(X), TextEditor(Y).
Relations of higher arity can be employed based on the requirements, e.g.:
- Run(X) :- Runnable(X,Y,Z)
- Runnable(X,Y,Z) :- JavaFile(X), Compilable(X,Y), JVM(Z)
We can express hierarchies of tasks as we did for file type hierarchies, for enabling deductions of the form: “if we can do task A then certainly we can do task B”, e.g. “if we can edit something then certainly we can read it too”, expressed as: Read(X) :- Edit(X).
We can also express general properties of task dependencies, like transitivity. For example, from Runnable(a.class, JVM) and Runnable(JVM, Windows) we might want to infer that Runnable(a.class, Windows). Such inferences can be specified by a rule of the form:
- Runnable(X,Y) :- Runnable(X,Z), Runnable(Z,Y).
As another example, IntelligibleBy(X,Y) :- IntelligibleBy(X,Z), IntelligibleBy(Z,Y). This means that if X is intelligible by Z and Z is intelligible by Y, then X is intelligible by Y. This captures the assumptions of the dependency model described in [14] (i.e. the transitivity of dependencies).
Modeling Converters
Conversions are special kinds of tasks and are modeled differently. In brief to model a converter and a corresponding conversion we have to introduce one unary predicate for modeling the converter (as we did for the types of digital files) and one rule for each conversion that is possible with that converter (specifically one for each supported type-to-type conversion).
In our running example, consider the file game.pas (which contains source code in Pascal PL), and the converter p2c++ from Pascal to C++. Recall that James has a compiler for C++. It follows that James can compile game.pas since he can first convert it to C++ (using the converter), then compile it and finally run it. To capture the above scenario it is enough to introduce a predicate for modeling the converters from Pascal to C++, say ConverterPascal2C++, and adding the following rule:
C++File(X) :- PascalFile(X), ConverterPascal2C++(Y).
Since the profile of James will contain the facts PascalFile(game.pas) and ConverterPascal2C++(p2c++), we will infer C++File(game.pas), and subsequently that this file is compilable and runnable.
Finally we should not forget that a converter is itself a module with its own dependencies, and for performing the intended task the converter has to be runnable. Therefore, we have to update the rule as follows:
C++File(X) :- PascalFile(X), ConverterPascal2C++(Y), Run(Y).
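Under the same toy encoding used earlier (a hypothetical sketch, not the paper's implementation), the converter rule is simply one more rule whose body also requires the converter itself to be runnable; chaining it with the compilability rule derives that game.pas is compilable.

```python
# Run(p2c++) is assumed to have been derived already by the runnability rules.
facts = {("PascalFile", "game.pas"), ("ConverterPascal2C++", "p2c++"),
         ("Run", "p2c++"), ("C++Compiler", "gcc")}

def rule_convert(fs):
    # C++File(X) :- PascalFile(X), ConverterPascal2C++(Y), Run(Y).
    has_running_converter = any(
        ("Run", f[1]) in fs for f in fs if f[0] == "ConverterPascal2C++")
    if not has_running_converter:
        return set()
    return {("C++File", f[1]) for f in fs if f[0] == "PascalFile"}

def rule_compile_cpp(fs):
    # Compile(X) :- C++File(X), C++Compiler(Y).
    if not any(f[0] == "C++Compiler" for f in fs):
        return set()
    return {("Compile", f[1]) for f in fs if f[0] == "C++File"}

fs = facts
for _ in range(3):                      # enough iterations for this small chain
    fs = fs | rule_convert(fs) | rule_compile_cpp(fs)
print(("Compile", "game.pas") in fs)    # True
```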
Modeling Emulators
Emulation is again a special kind of task and is modeled differently. Essentially we want to express the following: (i) If we have a module X which is runnable over Y, (ii) and an emulator E of Y over Z (hosting system=Z, target system=Y), (iii) and we have Z and E, (iv) then X is runnable over Z. For example, consider the case where:
- X=a.exe (a file which is executable in Windows operating system),
- Y=WinOS (the Windows operating system),
- Z=AndroidOS (the Android operating system), and
- E=W4A (i.e. an emulator of WinOS over AndroidOS).
In brief, for each available emulator (between a pair of systems) we can introduce a unary predicate for modeling the emulator (as we did for the types of digital files, as well as for the converters), and writing one rule for the emulation.
For example, suppose we have a file named a.exe which is executable over WinOS. For this case we would have written:
- Run(X) :- Runnable(X,Y)
- Runnable(X,Y) :- WinExecutable(X), WinOS(Y)
and the profile of a user that has this file and runs WinOS would contain the facts WinExecutable(a.exe) and WinOS(mycomputer), and by putting them together it follows that Run(a.exe) holds. Now consider a different user who has the file a.exe but runs AndroidOS. However suppose that he has the emulator W4A (i.e. an emulator of WinOS over AndroidOS). The profile of that user would contain:
- WinExecutable(a.exe)
- AndroidOS(mycomputer) // instead of WinOS(mycomputer)
- EmulatorWinAndroid(W4A)
To achieve our goal (i.e. to infer that a.exe is runnable), we have to add one rule for the emulation. We can follow two approaches. The first is to write a rule that concerns the runnable predicate, while the second is to write a rule for classifying the system that is equipped with the emulator to the type of the emulated system:
A. Additional rule for Runable
This relies on adding the following rule:
Runnable(X,Y,Z):- WinExecutable(X), EmulatorWinAndroid(Y), AndroidOS(Z)
Note that since the profile of the user contains the fact EmulatorWinAndroid(W4A) the body of the rule is satisfied (for X=a.exe, Y=W4A, Z=myComputer), i.e. the rule will yield the desired inferred tuple Runnable(a.exe,W4A,mycomputer).
Note that here we added a rule for the runnable which has 3 variables signifying the ternary relationship between executable, emulator and hosting environment.
B. Additional type rule (w.r.t. the emulated Behavior)
An alternative modeling approach is to consider that if a system is equipped with one emulator then it can also operate as the emulated system. In our example this can be expressed by the following rule:
WinOS(X):- AndroidOS(X), EmulatorWinAndroid(Y).
It follows that if the profile of the user has an emulator of type EmulatorWinAndroid (here W4A) and mycomputer is of type AndroidOS, then that rule will infer that WinOS(mycomputer), implying that the file a.exe will be inferred to be runnable due to the basic rule of runnable which is independent of emulators (i.e. due to the rule
Runnable(X,Y) :- WinExecutable(X), WinOS(Y)).
Both approaches (A and B) require the introduction of a new unary predicate about the corresponding pair of systems, here EmulatorWinAndroid. Approach (A) requires introducing a rule for making the predicate Runnable "emulator-aware", while approach (B) requires a rule for classifying the system to the type of the emulated system. Since emulators are modules that can have their own dependencies, they should be runnable in the hosting system. To require their runnability during an emulation we have to update the above rules as follows (notice the last atom in the bodies of the rules):
A’: Runnable(X,Y,Z) :- WinExecutable(X), EmulatorWinAndroid(Y), AndroidOS(Z), Runnable(Y,Z)
B’: WinOS(X) :- AndroidOS(X), EmulatorWinAndroid(Y), Runnable(Y,X)
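Both modeling options can be mirrored in the toy encoding used earlier (a hypothetical sketch; here Run(Y) stands in for the emulator's runnability in the hosting system).

```python
profile = {("WinExecutable", "a.exe"), ("AndroidOS", "mycomputer"),
           ("EmulatorWinAndroid", "W4A"), ("Run", "W4A")}

def option_a(fs):
    # A': Runnable(X,Y,Z) :- WinExecutable(X), EmulatorWinAndroid(Y), AndroidOS(Z), Run(Y)
    return {("Runnable", x, y, z)
            for f in fs if f[0] == "WinExecutable" for x in [f[1]]
            for g in fs if g[0] == "EmulatorWinAndroid" and ("Run", g[1]) in fs
            for y in [g[1]]
            for h in fs if h[0] == "AndroidOS" for z in [h[1]]}

def option_b(fs):
    # B': WinOS(X) :- AndroidOS(X), EmulatorWinAndroid(Y), Run(Y)
    if not any(f[0] == "EmulatorWinAndroid" and ("Run", f[1]) in fs for f in fs):
        return set()
    return {("WinOS", f[1]) for f in fs if f[0] == "AndroidOS"}

print(option_a(profile))   # {('Runnable', 'a.exe', 'W4A', 'mycomputer')}
print(option_b(profile))   # {('WinOS', 'mycomputer')}
```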
Synopsis. To synopsize, methodologically for each real world task we define two intensional predicates: one (which is usually unary) to denote the performability of the task, and another one (which is usually binary) for denoting the dependencies of the task (e.g. Read and Readable). To model a converter and a corresponding conversion we have to introduce one unary predicate for modeling the converter (as we did for the types of digital files) and one rule for each conversion that is possible with that converter (specifically one for each supported type-to-type conversion). To model an emulator (between a pair of systems) we introduce a unary predicate for modeling the emulator and write one rule for the emulation. Regarding the latter we can either write a rule that concerns the runnable predicate, or write a rule for classifying the system that is equipped with the emulator to the type of the emulated system. Finally, and since converters and emulators are themselves modules, they have their own dependencies, and thus their performability and dependencies (actually their runnability) should be modeled too (as in ordinary tasks).
4. REASONING SERVICES
In general, Datalog query answering and methods of logical inference (i.e. deductive and abductive reasoning) are exploited for enabling the required inference services (performability, risk detection, etc). Here we describe how the reasoning services described in Section 2 can be realized in the proposed framework.
Task-Performability. This service aims at answering if a task can be performed by a user/system. It relies on query answering over the Profiles of the user. E.g. to check if HelloWorld.cc is compilable we have to check if HelloWorld.cc is in the answer of the query Compile(X). As we described earlier, converters and emulators will be taken into account, meaning that a positive answer may be based on a complex sequence of conversions and emulations. This is the essential benefit from the proposed modeling. Furthermore, classical automated planning, e.g. the STRIPS planning method [6], could be applied for returning one of the possible ways to achieve (perform) a task. This is useful in case there are several ways to achieve the task.
Risk-Detection. Suppose that we want to identify the consequences on editability after removing a module, say NotePad. To do so: (a) we compute the answer of the query Edit(X), let A be the returned set of elements, (b) we delete NotePad from the database and we do the same, let B be the returned set of elements\(^5\), and (c) we compute and return the elements in A \ B (they are the ones that will be affected).
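The three-step procedure can be sketched as follows (hypothetical Python; the `editable` function is a simplified stand-in for the query Edit(X)).

```python
def editable(profile):
    # Edit(X) :- TextFile(X), TextEditor(Y)   (simplified stand-in query)
    has_editor = any(t == "TextEditor" for (t, _) in profile)
    return {m for (t, m) in profile if t == "TextFile"} if has_editor else set()

profile = {("TextEditor", "NotePad"), ("TextFile", "HelloWorld.java"),
           ("TextFile", "HelloWorld.cc")}

A = editable(profile)                                    # (a) before the removal
B = editable(profile - {("TextEditor", "NotePad")})      # (b) after removing NotePad
print(A - B)   # (c) modules whose editability is affected
```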
Computation of Gaps (Missing Modules). The gap is actually the set of facts that are missing and are needed to perform a task. There can be more than one way to fill a gap due to the disjunctive nature of dependencies, since the same predicate can be the head of more than one rule (e.g. the predicate TextEditor in the example earlier). One method for informing the curator about the possible ways to fill it is to construct and visualize a graph that contains information about only the related facts and rules. We propose a graph which is actually a form of AND-OR graph. The user can specify the desired depth of that graph, or interactively decide to increase the depth gradually. The graph is actually a compact method for presenting the (possibly numerous) ways to fill a gap. The construction of the graph resembles the way planning algorithms (in particular backwards search-based planners) operate. The algorithm starts from the goal and shows the corresponding rules for achieving that goal. Those atoms of the rules which have a grounding that belongs to (or can be inferred from) the facts of the profile at hand are visualized differently (e.g. colored in green, or enclosed in squares) so that the user can discriminate the missing from the available facts. Figure 2 shows some indicative examples. In all cases the goal is a grounded atom, i.e. k(1), however the rules and the recorded facts are different in each case. In case (I) the graph shows that the gap is a grounded atom (i.e. C(1)), while in case (II) the graph shows that the gap is a non-grounded atom (i.e. C(var)). Case (III) demonstrates a case where more than one rule with the same head is involved, and the depth of the graph is greater than one. The graph makes evident that there are two possible ways to fill the gap.
\(^5\)In an implementation over Prolog, we could use the retract feature to delete a fact from the database.
A recursive algorithm for producing such graphs is given (in pseudocode) at Figure 3. The algorithm takes as input a goal (an atom, grounded or not), a depth (a positive integer \( \geq 1 \)) and a prevNode (the previous node, used only for the recursive calls). Initially, the algorithm is called with the goal of the user (which is a grounded atom), the desired depth, and an empty (null) prevNode. The algorithm constructs and returns the corresponding tree graph (like those of Figure 2), whose layout can be derived by adopting one of the several hierarchical graph drawing algorithms.
Note that with a greater depth the expansion would also make evident which converters (e.g. a Pascal2Java converter) could make the file compilable. If Q denotes the cost of query answering, then the time complexity of the algorithm is in \( O(d \times Q \times |R|) \), where \( |R| \) is the number of rules and \( d \) the requested depth. Since \( |R| \) is usually low and \( d \) is an input parameter which again cannot be very big, we can say that the complexity is low.
5. IMPLEMENTATION
There are several possible implementation approaches. Below we describe one Semantic Web-based implementation using RDF/S and OpenLink Virtuoso which is a general purpose RDF triple store with extensive SPARQL and RDF support [5]. Its internal storage method is relational, i.e. RDF triples are stored in tables in the form of quads \((g, s, p, o)\) where \(g\) represents the graph, \(s\) the subject, \(p\) the predicate and \(o\) the object. We decided to use this system because of its efficient support for SPARQL and its ability to handle large datasets.
Algorithm GapGraph(goal: Atom, depth: Integer, prevNode: Node): Node
  gNode = create node(goal)
  if (prevNode != null) then create link(prevNode -> gNode)
  hrs = all rules having the predicate of the goal as head
  if (hrs is empty) then          // the goal predicate is not head in any rule
      return gNode
  if (|hrs| > 1) then             // there are > 1 rules having the same head
      headNode = create node("OR")
      create link(gNode -> headNode)
  else
      headNode = gNode
  for each hr in hrs
      if (IsGrounded(goal)) then  // e.g. consider the goal k(1)
          ground the corresponding variables in all atoms of the body of hr that contain them
      let BodyAtoms be the resulting set of body atoms
      // if the previous step did not ground anything, BodyAtoms contains the original body atoms
      for each atom in BodyAtoms
          atomNode = create node(atom)
          create link(headNode -> atomNode)
          if (IsGrounded(atom) and (atom exists in the fact set or can be inferred)) then
              Square(atomNode)    // mark the atom as available in the profile
      if (depth > 1) then
          for each atom in BodyAtoms
              if (not Squared(atomNode)) then   // atomNode corresponds to atom
                  GapGraph(atom, depth - 1, atomNode)
  return gNode

Figure 3: The GapGraph algorithm (pseudocode).
Another reason for choosing it is its inference capabilities, namely *backward chaining* reasoning, meaning that it does not materialize all inferred facts, but computes them at query level. Its reasoner covers the related entailment rules of \texttt{rdfs:subClassOf} and \texttt{rdfs:subPropertyOf}, while *user defined custom inference rules* can be expressed using rule sets. Practically this means that transitive relations (i.e. \texttt{subClassOf}, \texttt{subPropertyOf}, etc.) are not physically stored in the knowledge base, but they are added to the result set at query answering. *Transitivity* is also supported in two different ways. Given an RDF schema and a rule set associated with that schema, the predicates \texttt{rdfs:subClassOf} and \texttt{rdfs:subPropertyOf} are recognized and the inferred triples are derived when needed. In case of another predicate, the option for transitivity has to be declared in the query.
For our case, we have to “translate” our facts and rules to quads of the form \((g, s, p, o)\) which are actually RDF triples contained in a graph \(g\). The support of different graphs is very useful for the cases of profiles; we can use a different graph for each profile. We will start by showing how facts can be “translated” to RDF quads and later we will show how inference rules can be expressed using \texttt{ASK} and \texttt{CONSTRUCT} or \texttt{INSERT} SPARQL queries. Note that if we use \texttt{INSERT} instead of \texttt{CONSTRUCT} then the new inferred triples will be stored in the triple store (materialization of inferred triples). Hereafter we will use only \texttt{CONSTRUCT}. For better readability of the SPARQL statements below we omit namespace declarations.
**Modules:** Module types are modeled using RDF classes while the actual modules are instances of these classes. Module type hierarchies can be defined using the \texttt{rdfs:subClassOf} relationship. For example the fact \texttt{JavaFile('HelloWorld.java')} and the rule for defining the module type hierarchy \texttt{TextFile(X) :- JavaFile(X)} will be expressed using the following quads:
g, <JavaFile>, rdf:type, rdfs:Class
g, <JavaFile>, rdfs:subClassOf, <TextFile>
g, <HelloWorld.java>, rdf:type, <JavaFile>
**Profiles:** We exploit the availability of graphs to model different profiles, e.g. we can model the profiles of James and Helen (including only some indicative modules), as follows:
<jGrph>, <NotePad>, rdf:type, <TextEditor>
<jGrph>, <HelloWorld.java>, rdf:type, <JavaFile>
<jGrph>, <javac_1_6>, rdf:type, <JavaCompiler>
<hGrph>, <VI>, rdf:type, <TextEditor>
<hGrph>, <jre_1_5>, rdf:type, <JavaVirtualMachine>
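Assuming the rdflib Python library, such quads could be loaded programmatically into named graphs as sketched below (namespaces and identifiers are illustrative; the system described in the paper uses Virtuoso instead).

```python
from rdflib import Dataset, Namespace, RDF, RDFS, URIRef

EX = Namespace("http://example.org/")          # assumed namespace
ds = Dataset()

# Schema triples (module types and their hierarchy) in their own graph.
schema = ds.graph(URIRef(EX + "schema"))
schema.add((EX.JavaFile, RDF.type, RDFS.Class))
schema.add((EX.JavaFile, RDFS.subClassOf, EX.TextFile))

# One named graph per profile.
jGrph = ds.graph(URIRef(EX + "jGrph"))
jGrph.add((EX["HelloWorld.java"], RDF.type, EX.JavaFile))
jGrph.add((EX.javac_1_6, RDF.type, EX.JavaCompiler))

hGrph = ds.graph(URIRef(EX + "hGrph"))
hGrph.add((EX.VI, RDF.type, EX.TextEditor))

print(len(jGrph), len(hGrph))   # 2 1
```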
**Dependencies:** The rules regarding the performability of tasks and their dependencies are transformed to appropriate SPARQL \texttt{CONSTRUCT} statements which produce the required inferred triples. For example, the rule about the compilability of Java files \(\text{Compilable(X,Y) :- JavaFile(X),JavaCompiler(Y)}\) is expressed as:
CONSTRUCT {?x <compilable> ?y}
WHERE {?x rdf:type <JavaFile> .
       ?y rdf:type <JavaCompiler>}
To capture the compilability of other kinds of source files (i.e. \(\text{C++}, \text{pascal etc.}\)) we extend the previous statement using the \texttt{UNION} keyword (this is in accordance with the Datalog-based rules; multiple rules with the same head have union semantics). For example the case of Java and \(\text{C++}\) is captured by:
CONSTRUCT {?x <compilable> ?y}
WHERE {{?x rdf:type <JavaFile> . ?y rdf:type <JavaCompiler>}
       UNION
       {?x rdf:type <C++File> . ?y rdf:type <C++Compiler>}}
Finally, the unary predicate for the performability of the task, here \texttt{Compile}, is expressed as:
CONSTRUCT {?x rdf:type <Compile>}
WHERE {?x <compilable> ?y}
**Converters:** The rules regarding conversion are modeled analogously, e.g. for the case of a converter from Pascal to \(\text{C++}\) we produce:
CONSTRUCT {?x rdf:type <C++File>}
WHERE {?x rdf:type <PascalFile> .
       ?y rdf:type <ConverterPascal2C++> .
       ?y rdf:type <Run>}
Note that the last condition refers to an inferred type triple (Run). If there is more than one converter that yields a specific module type, then the CONSTRUCT statement is extended with several \texttt{WHERE} groups separated by \texttt{UNION}, as shown previously.
**Services:** To realize the reasoning services (e.g. task performability, risk detection, etc), we rely on SPARQL queries. For example to answer if the file \texttt{HelloWorld.java} can be compiled we can send the \texttt{INSERT} query about the compilability of the files (as shown previously) and then perform the following \texttt{ASK} query on the entailed triples:
ASK {<HelloWorld.java> <compilable> ?y}
If this query returns true then there is at least one appropriate module for compiling the file.
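Continuing the rdflib-based sketch above (an assumption of this illustration, not the Virtuoso setup of the paper), the CONSTRUCT result can be materialized into the graph before posing the ASK query.

```python
from rdflib import Graph, Namespace, RDF

EX = Namespace("http://example.org/")          # assumed namespace
g = Graph()
g.add((EX["HelloWorld.java"], RDF.type, EX.JavaFile))
g.add((EX.javac_1_6, RDF.type, EX.JavaCompiler))

construct = """
CONSTRUCT { ?x <http://example.org/compilable> ?y }
WHERE { ?x a <http://example.org/JavaFile> .
        ?y a <http://example.org/JavaCompiler> } """
for triple in g.query(construct):              # materialize the inferred triples
    g.add(triple)

ask = "ASK { <http://example.org/HelloWorld.java> <http://example.org/compilable> ?y }"
print(bool(g.query(ask).askAnswer))            # True if a compiler is available
```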
The risk-detection service requires \texttt{SELECT} and \texttt{DELETE} SPARQL queries (as discussed in Section 4). For example, to find the modules whose editability will be affected if we remove the module \texttt{NotePad}, we have to perform:
SELECT ?x
WHERE {?x rdf:type <Edit>}

DELETE DATA {<NotePad> rdf:type <TextEditor>}
From the SELECT query we get a set \(A\) containing all modules which are editable. Then we remove the triple about \texttt{NotePad} with the DELETE statement, evaluate the SELECT query again to obtain a set \(B\), and return the elements of \(A \setminus B\), i.e. the modules whose editability is affected.
Based on the above approach we have implemented a prototype system. Its repository, which contains the facts and rules of the examples of this paper and behaves as specified by the theory, is accessible through a SPARQL endpoint: http://139.91.183.78:8890/sparql.
6. CONCLUDING REMARKS
In this paper we have extended past rule-based approaches to dependency management in order to capture converters and emulators, and we have demonstrated that the proposed modeling enables the desired reasoning regarding task performability, which in turn can greatly reduce the human effort required for periodically checking or monitoring whether a task on an archived digital object is performable.
We should clarify that we do not focus on modeling, logging or reasoning over composite tasks in general (as is done, for example, in [4]). We focus on the requirements for ensuring the performability of simple (even atomic) tasks, since this is more aligned with the objectives of long-term digital preservation. Nor do we focus on modeling or logging the particular workflows or derivation chains of the digital artifacts, e.g. using provenance models like OPM or CRM Dig [13]. We focus only on the dependencies for carrying out the desired tasks. Obviously this view is less space consuming; e.g. in our running example we do not have to record the particular compiler that was used for the derivation of an executable (and its compilation time), we just care to have at our disposal an appropriate compiler for future use. However, if a detailed model of the process is available, then the dependency model can be considered as a read-only view of that model.
As regards applicability, note that some tasks and their dependencies can be extracted automatically as it has been demonstrated in [9, 7]. As regards available datasets, [12] describes the P2 registry, which uses Semantic Web technologies to combine the content of the PRONOM Technical Registry, represented as RDF, with additional facts from DBpedia, currently containing about 44,000 RDF statements about file formats and preservation tools.
In the near future we plan to further elaborate on gap visualization methods, while issues for future research include composite objects (e.g. software components, systems), update requirements, and quality-aware reasoning for enabling quality-aware preservation planning.
Acknowledgements
Work done in the context of NoE APARSEN (Alliance Permanent Access to the Records of Science in Europe, FP7, Proj. No 269977), and SCIDIP-ES (SCIence Data Infrastructure for Preservation - Earth Science, FP7).
7. REFERENCES
|
{"Source-Url": "https://services.phaidra.univie.ac.at/api/object/o:293679/download", "len_cl100k_base": 9794, "olmocr-version": "0.1.53", "pdf-total-pages": 8, "total-fallback-pages": 0, "total-input-tokens": 29732, "total-output-tokens": 11282, "length": "2e13", "weborganizer": {"__label__adult": 0.0003383159637451172, "__label__art_design": 0.0009551048278808594, "__label__crime_law": 0.0005273818969726562, "__label__education_jobs": 0.00366973876953125, "__label__entertainment": 0.00016021728515625, "__label__fashion_beauty": 0.0002079010009765625, "__label__finance_business": 0.00063323974609375, "__label__food_dining": 0.000339508056640625, "__label__games": 0.0008459091186523438, "__label__hardware": 0.0010728836059570312, "__label__health": 0.00046324729919433594, "__label__history": 0.000823974609375, "__label__home_hobbies": 0.0001703500747680664, "__label__industrial": 0.0004649162292480469, "__label__literature": 0.0007624626159667969, "__label__politics": 0.00031304359436035156, "__label__religion": 0.0004477500915527344, "__label__science_tech": 0.1971435546875, "__label__social_life": 0.0001933574676513672, "__label__software": 0.048583984375, "__label__software_dev": 0.74072265625, "__label__sports_fitness": 0.00021159648895263672, "__label__transportation": 0.0005536079406738281, "__label__travel": 0.0002677440643310547}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 44975, 0.01023]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 44975, 0.74506]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 44975, 0.88468]], "google_gemma-3-12b-it_contains_pii": [[0, 5573, false], [5573, 11493, null], [11493, 16879, null], [16879, 22755, null], [22755, 29190, null], [29190, 32855, null], [32855, 39026, null], [39026, 44975, null]], "google_gemma-3-12b-it_is_public_document": [[0, 5573, true], [5573, 11493, null], [11493, 16879, null], [16879, 22755, null], [22755, 29190, null], [29190, 32855, null], [32855, 39026, null], [39026, 44975, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 44975, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 44975, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 44975, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 44975, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 44975, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 44975, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 44975, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 44975, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 44975, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 44975, null]], "pdf_page_numbers": [[0, 5573, 1], [5573, 11493, 2], [11493, 16879, 3], [16879, 22755, 4], [22755, 29190, 5], [29190, 32855, 6], [32855, 39026, 7], [39026, 44975, 8]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 44975, 0.0754]]}
|
olmocr_science_pdfs
|
2024-12-09
|
2024-12-09
|
6511311e863743f87ee074a2b3fca14c5b19cfa1
|
Fuzz testing for design assurance levels
Marcus Gustafsson
Oscar Holm
Tutor, Eric Elfving
Examiner, Jonas Wallgren
Copyright
The publishers will keep this document online on the Internet – or its possible replacement – for a period of 25 years starting from the date of publication barring exceptional circumstances.
The online availability of the document implies permanent permission for anyone to read, to download, or to print out single copies for his/her own use and to use it unchanged for non-commercial research and educational purposes. Subsequent transfers of copyright cannot revoke this permission. All other uses of the document are conditional upon the consent of the copyright owner. The publisher has taken technical and administrative measures to assure authenticity, security and accessibility.
According to intellectual property law the author has the right to be mentioned when his/her work is accessed as described above and to be protected against infringement.
For additional information about the Linköping University Electronic Press and its procedures for publication and for assurance of document integrity, please refer to its www home page:
http://www.ep.liu.se/.
© Marcus Gustafsson & Oscar Holm
Fuzz testing for design assurance levels
Oscar Holm
Linköping, Sweden
oscho707@student.liu.se
Marcus Gustafsson
Linköping, Sweden
margu478@student.liu.se
ABSTRACT
With safety critical software, it is important that the application is safe and stable. While this software can be quality tested with manual testing, automated testing has the potential to catch errors that manual testing will not. In addition, there is also the possibility to save time and cost by automating the testing process. This matters for avionics components, as much time and cost is spent on testing and on ensuring that the software does not crash or behave incorrectly. This research paper focuses on exploring the usefulness of automated testing when combining it with fuzz testing. It also focuses on how to fuzz test applications classified into DAL classifications.
INTRODUCTION
In modern airplanes there are parts that need to communicate with each other. In this environment, data needs to be able to flow securely from one part to another. ARINC 653 is a specification for avionics real-time operating systems (RTOS) that explains how data can flow between these parts [1].
Applications reside inside partitions. When a partition is executing, it does not need to finish executing before sending data to a component. The components are parts of an interface which links the partitions with the hardware and operating system. The partitions, interface, operating system and hardware make up a module. Modules have a component for monitoring and handling software errors and crashes, provided by system partitions. This is done to prevent component failures from propagating to other partitions or modules. Since it is important that the airplane can be controlled in a secure and comfortable way, identifying critical sectors is very important. This can be done by using Design Assurance Level (DAL).
DAL is used to classify applications in avionics systems [2]. Classification is based on a risk analysis of the application. There are different DAL-classifications. Higher DAL-classifications have stricter requirements on software reliability; an error at the highest DAL-classification can cause loss of the aircraft. In contrast, at the lowest DAL-classifications a software failure has only a minimal effect on safety and control of the airplane and does not strain the crew [3].
Software that is being tested will be referred to as the Software Under Test (SUT). While studies suggest that manual testing is widely used today [4, 5, 6], fuzz testing can make it possible to do more automated and broader testing of the SUT.
Fuzz testing is usually not specified to a certain part of the SUT, but instead tests random paths throughout the whole SUT which might give unexpected results [7]. This kind of testing is achieved by using a variety of strategies and algorithms to mutate the input data fed to the SUT. American Fuzzy Lop (AFL)\(^1\) is a tool which uses a deterministic, whitebox variant of fuzz testing. This variant of fuzz testing tries to understand how the SUT works by using certain strategies and a feedback loop. It can then target specific paths in the software. Non-deterministic fuzzers\(^2\) will not use deterministic steps during mutation of input data but instead just randomize input.
If a fuzzer can recognize important bytes in input data, it is categorized as a smart fuzzer [8]. Likewise, if it is aware of the application structure it is categorized as a whitebox fuzzer. AFL, for example, learns the application structure when instrumenting the SUT during compilation. This separates AFL from other fuzzers, as it is categorized as a smart, deterministic and whitebox fuzzer. When the deterministic strategies are exhausted, AFL dives into completely non-deterministic, random behaviour. Since deterministic strategies are employed in AFL, the input will be random to a lesser extent than it would be for a non-deterministic fuzzer.
Purpose
The goal of this research paper is to evaluate what fuzz testing can contribute to the testing of applications at different DAL-classifications, measured in the number of errors found, and to implement a solution that can:
1. Generate code from C++-header files and XML-files.
2. Create a layer which links the SUT with AFL where data can flow continuously.
3. Test the generated code with AFL.
4. Log any errors found in the SUT.
We are also evaluating how the architecture of the SUT affects the usefulness of fuzz testing, and drawing conclusions from the correlation between code coverage and outcome.
Research questions
Our main research question is whether fuzz testing can be useful for finding errors in software that is written and tested according to different DAL-classifications. It can be broken down into the following:
- What is the correlation between the number of errors found with fuzz testing and code coverage?
- What is the relation between the number of faults found and DAL-classification?
\(^1\)http://lcamtuf.coredump.cx/afl/
\(^2\)A fuzzer is a program which employs fuzz testing
Limitation
For this research paper we are using AFL because it is a well-established tool in fuzz testing. AFL has found vulnerabilities in for example Mozilla Firefox\(^3\), OpenSSL\(^4\) and SQLite\(^5\). For the code generation we will only use Dextool and its libraries.
Delimitation
This study is not about the code generation of ARINC 653 interfaces; it is about the security of applications that have undergone testing and verification towards their DAL-classification.
THEORY
In this chapter, we will go through the terminology related to our testing environment such as DAL-classifications, ARINC 653, fuzz testing and code generation.
Design Assurance Level
In avionics systems, applications can be classified into Design Assurance Levels (DAL). If the system is using DAL, the classification depends on several factors. One of them is the possible result of a hardware failure, where the DAL-classification depends on the severity of the failure, as seen in figure 1. A risk analysis determines what DAL-classification an application should have.
<table>
<thead>
<tr>
<th>DAL-classification</th>
<th>Severity</th>
</tr>
</thead>
<tbody>
<tr>
<td>A</td>
<td>Catastrophic</td>
</tr>
<tr>
<td>B</td>
<td>Hazardous/Severe</td>
</tr>
<tr>
<td>C</td>
<td>Major</td>
</tr>
<tr>
<td>D</td>
<td>Minor</td>
</tr>
<tr>
<td>E</td>
<td>No Safety Effect</td>
</tr>
</tbody>
</table>
Figure 1. DAL-classifications and their severities.
Severity can for example be based on a failure that would affect handling of the aircraft, cause crew members discomfort, or both. A failure with a severity of minor or no safety effect could for example be a number being incorrectly displayed on the instrument board [2]. DAL A has the highest requirement for the application to be stable at all times. If an application is classified into DAL A it needs to be tested, documented and verified thoroughly. The requirements on testing and verification generally decrease with the DAL-classification, and therefore with the severity of a fault in the application. DAL E is for example tested and verified towards different requirements than DAL A. In theory, if the DAL classification has been done correctly it ensures that no unnecessary testing has been done [2].
ARINC 653
The ARINC 653 specification defines the structure of software partitioning in avionics real-time operating systems.
\(^4\)https://www.openssl.org/news/secadv_20150611.txt
\(^5\)https://www.sqlite.org/src/info/9e6eae660a0230t
Partitions
All partitions are classified as either an application partition or a system partition. When a partition is executing, it does not need to finish executing before sending data to a component in the connected interface. Applications are partitioned (application partitions) to isolate their execution from the rest of the system. System partitions can provide system functions such as fault handling and device drivers that the interface does not have [1].
In order to isolate the partitions, they do not have any information on where their data is being sent or when they receive data. Partitions have ports for receiving and sending data. It is possible for several partitions to receive data from the same port, data sent to this port is received by all partitions listening to that port. In short, data flows in or out of these ports in order for the partitions to communicate with the interface \([1]\).
Modules
A module is made up of partitions, an interface with components, an operating system and the associated hardware. The architecture for a module in ARINC 653 is specified in such a way that applications should be portable between modules without any modification to the interface, operating system or the hardware [1].
Application/EXecutive
The interface which acts as a link between hardware, operating system and partitions is called APplication/EXecutive (APEX). The purpose of APEX is to make it possible for everything in the module to communicate, and to handle important system tasks such as process scheduling and health monitoring of partitions \([9]\). This work is done by several components residing in the APEX interface \([1]\).
- Partition management for starting partitions or changing partition modes \([1]\).

Figure 2. Module structure following ARINC 653 specification.
• Process management for killing, starting and stopping processes. Synchronization for processes within an application is achieved with semaphores. In short, this component provides important functions for managing processes; it does not, however, tell APEX how these processes should be handled [1].
• Time management for scheduling processes into correct time frames. A fixed priority preemptive scheduling strategy is used [10]. Processes are associated with a time frame cost for executing and a period specifying how often the process should execute. Processes also have priorities which tells the scheduler how important it is to finish them. The scheduler will need to ask the process management component whenever it wants processes interrupted.
Lastly, the deadline associated with a process tells the scheduler what to do when a process does not finish in time. Missed deadlines inform the operating system to take action; examples of such actions are killing the process or restarting the partition associated with the process. The time and process management components are what make APEX able to handle context switching and interrupts of processes.
• Memory management for assigning memory to partitions. It is assumed memory is statically allocated to partitions and processes before they run. For this reason, dynamic memory allocation is not allowed during execution of partitions [1].
• Interpartition communication for allowing partitions to communicate with other partitions [1].
• Intrapartition communication for allowing applications in a partition to communicate with each other [1].
• Health monitoring for the status of the partitions.
Code generation
Using genuine avionics real-time operating systems for testing is impractical, as that would require real avionics hardware. For this reason, and to be able to inject the middle layer, code is generated to simulate the testing environment. In order to simulate the partitions, code needs to be generated from the XML-records and the code used to describe the ports. The APEX interface can be represented with a generated interface following the ARINC 653 structure, additionally with the capability of connecting, reading and writing to specified ports, as this is an implementation requirement for the software we are testing.
The generated interfaces need to handle the data generated by AFL and use it to update the data in the ports. A middle layer needs to be implemented to handle the connection between the interfaces and AFL.
Whitebox fuzz testing and blackbox fuzz testing
Whitebox fuzz testing is a variant where the fuzzer has access to the source code and then theoretically knows everything about the SUT. This can be achieved by using instrumentation when compiling the SUT. AFL is able to instrument binaries with both GCC ⁶ and Clang⁷. A blackbox fuzzer does not know anything about the application and will randomly generate or mutate the input data. This approach is not very smart in the sense that it does not know any paths or whether it has been able to reach a certain point in the code. The whitebox fuzzer learns from the output of the SUT; you could say that it knows what code has been tested [11, 12].
Smart and dumb fuzzers
When a fuzzer is aware of the input structure and can understand important bytes in inputs⁸, it is categorized as a smart fuzzer. This is a must if the fuzzer wants to mutate existing input in a smart way, as it needs to understand which bytes in the input are important for the application. It also makes it possible to sort out unnecessary inputs which would yield the same result as an input already tested. When the fuzzer is not able to understand the input structure and just randomly generates or mutates inputs, it is categorized as a dumb fuzzer [8].
Generation or mutation-based fuzzers
There are fuzzers that constantly generate their input from scratch; these are called generation-based fuzzers. Mutation-based fuzzers instead mutate existing input data rather than generating completely new data. Usually, a smart fuzzer will also be mutation-based: since it is capable of understanding the input structure, it can also mutate inputs in a smart way [13, 14, 15]. A minimal sketch of the difference is shown below.
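To make the distinction concrete, the following sketch contrasts a single mutation step with a single generation step. The helper names are hypothetical and the strategies are deliberately naive; real fuzzers such as AFL use far more elaborate mutation strategies.

```cpp
#include <cstddef>
#include <cstdint>
#include <random>
#include <vector>

// Mutation-based step: take an existing test case and flip one random bit.
std::vector<uint8_t> mutate(std::vector<uint8_t> input, std::mt19937& rng) {
    if (input.empty()) return input;
    std::uniform_int_distribution<std::size_t> pos(0, input.size() - 1);
    std::uniform_int_distribution<int> bit(0, 7);
    input[pos(rng)] ^= static_cast<uint8_t>(1u << bit(rng));
    return input;
}

// Generation-based step: build a completely new input of random bytes.
std::vector<uint8_t> generate(std::size_t length, std::mt19937& rng) {
    std::uniform_int_distribution<int> byte(0, 255);
    std::vector<uint8_t> input(length);
    for (auto& b : input) b = static_cast<uint8_t>(byte(rng));
    return input;
}
```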
RELATED WORKS
Fuzz testing can discover vulnerabilities in an application, given an adequate amount of time. In real-world settings there are time constraints, which makes it necessary for us to establish a solid understanding of the correlation between the time a fuzzer spends learning and the errors found in the SUT. Resources are limited, and we want to test the SUT quickly while still finding most of the vulnerabilities and errors in general. Early data-mining approaches have shown promising results for using feedback-based testing to increase the frequency at which errors are discovered when testing software [16].
Patrice Godefroid et al. explain that whitebox fuzz testing has been successful for automated testing at Microsoft. It was able to find several security vulnerabilities in many different kinds of software. A blackbox fuzzer was more efficient if the software had not been tested by a whitebox fuzzer, mainly because it was better at finding simple errors [12].
Once it came to finding the complex bugs, whitebox fuzz testing was found to be the better choice. It was more intelligent because it could find paths in the code in order to provide higher code coverage. This is possible since the whitebox fuzzer knows the software it is testing. Blackbox fuzz testing is testing the software with random input data to see if it crashes. If there is no crash, new input data is generated
---
⁶https://gcc.gnu.org/
⁷https://clang.llvm.org/
⁸For example, image files or plain text
or mutated. It is a common consensus in fuzz testing that it is best to start by looking for the things that are actually wrong. The fuzzer may report that it found errors, which might just be the intended way the software should work and not actual errors [11, 12]. The interesting part is investigating whether a reported error is a real error that would result in a faulty system, or whether the software was able to handle it in a good way.
Covering all code is a complex task for the fuzzer if it is not supplied with relevant data for the SUT. Input data is very specific to the SUT; not all input data can be used with every kind of SUT. Generating usable input data for the SUT could therefore take a long time if the fuzzer is not supplied with relevant input data from the beginning [11, 17]. To be able to generate test cases that are as good as possible, one could generate code coverage reports based on the data generated by AFL. Those can then be analyzed in order to generate better test cases which would cover the entire application, as stated in the work by S. Bekrar et al. [11]. This would reduce the time the fuzzer needs to run before finding any faults in the SUT.
To reach all paths in the application it might be necessary to implement some kind of system that makes the application go through a specific path. Gerlich et al. presented Fault Injection (FI) where they implemented test inputs with NULL in them. This made the branch coverage better as they were able to reach error states in the code [18].
**METHOD**
This chapter goes through our approach for fuzz testing the generated interface following the ARINC 653 specification. It also describes how we research the correlation between the number of errors and code coverage, and the relation between the number of faults found and DAL-classification.
**AFL**
American Fuzzy Lop (AFL) generates binary data which the interface we want to generate will use. AFL data is not usable in its "normal" form, so we need to pick bytes of data to determine the type of the random generator, the number of cycles and the seed. The type of the random generator will be either static or Mersenne Twister [19]. The static variant needs something that tells it which port variable should have what value, and during which cycles in the execution of the binary (not the same thing as an AFL cycle) this should be set, so that we can control and guide the behaviour in AFL.
For this purpose, a configuration file with simple formatting (see figure 3) is used. It defines which port variables should be given static values, during which cycles those values apply, and what the values are. A sketch of how such a file could be parsed follows after figure 3.
```
port1.var1 100–200 5
port1.var2 100–200 7
port3.var1 201–300 10
port4.var4 201–300 15
```
Figure 3. Space delimited file for setting static variables in AFL. First part is the port variable name, second part is which cycles the port variable should have the static value and the third part is the value.
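A minimal sketch of how the space-delimited format in figure 3 could be read; the struct and function names are illustrative, not the actual plugin code, and an ASCII dash is assumed between the cycle bounds.

```cpp
#include <fstream>
#include <sstream>
#include <string>
#include <vector>

struct StaticValueRule {
    std::string variable;  // e.g. "port1.var1"
    int first_cycle = 0;   // e.g. 100
    int last_cycle = 0;    // e.g. 200
    long value = 0;        // e.g. 5
};

// Parses one rule per line: <port variable> <low-high cycle range> <value>.
std::vector<StaticValueRule> parse_config(const std::string& path) {
    std::vector<StaticValueRule> rules;
    std::ifstream file(path);
    std::string line;
    while (std::getline(file, line)) {
        std::istringstream fields(line);
        StaticValueRule rule;
        std::string range;
        if (!(fields >> rule.variable >> range >> rule.value)) continue;
        const auto dash = range.find('-');
        if (dash == std::string::npos) continue;
        rule.first_cycle = std::stoi(range.substr(0, dash));
        rule.last_cycle = std::stoi(range.substr(dash + 1));
        rules.push_back(rule);
    }
    return rules;
}
```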
The number of cycles read from the AFL input is used to determine when the application should quit, i.e. stop generating data to the ports and exit. That constitutes one full execution in AFL.
As there can be many different data types in one port, we need to handle type casts for the different types properly to ensure a port receives valid data. The random generator in each port is then initialized with the seed. It can then generate new data for every item in the port based on the requirements on every item. The requirements are specific types and their ranges. For example, an item can be of type `int` and have the range 1 to 99. The random generator for the port will then generate an `int` in the range 1 to 99. These requirements are specified in the given XML-files. Since the entire input data from AFL can be smaller than the number of bytes needed, such input data will be ignored and the Software Under Test (SUT) will be restarted with new input data from AFL.
For us to be able to realistically simulate an avionics system, we must have changing values. For that reason, random generation is used to get the most out of every cycle the SUT runs. The random generator updates the values with newly generated random data every cycle. When the data updates, the SUT can behave differently than it did before, which lets us reach more branches than we would if the data were static.
The byte range from AFL binary and what each byte represents can be seen in figure 4.
<table>
<thead>
<tr>
<th>Size of input</th>
<th>Randtype</th>
<th>Cycles</th>
<th>Seed</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>1</td>
<td>Variable</td>
<td>Variable</td>
</tr>
</tbody>
</table>
Figure 4. Expected bytes in AFL input.
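A minimal sketch of turning the raw AFL bytes delivered on standard input into the fields of figure 4. The exact byte widths of the cycle count and seed are illustrative assumptions, since only their order is stated; the struct and function names are hypothetical.

```cpp
#include <cstdint>
#include <iostream>
#include <iterator>
#include <optional>
#include <vector>

struct AflInput {
    uint8_t declared_size = 0;  // "Size of input" byte
    uint8_t rand_type = 0;      // e.g. 0 = static values, 1 = Mersenne Twister (illustrative)
    uint32_t cycles = 0;        // how many execute cycles to run
    uint64_t seed = 0;          // seed for the random generator
};

std::optional<AflInput> parse_afl_input(std::istream& in) {
    std::vector<uint8_t> bytes((std::istreambuf_iterator<char>(in)),
                               std::istreambuf_iterator<char>());
    // Too little data: ignore this input and let AFL restart the SUT,
    // as described in the text.
    if (bytes.size() < 2 + 4 + 8) return std::nullopt;

    AflInput parsed;
    parsed.declared_size = bytes[0];
    parsed.rand_type = bytes[1];
    for (int i = 0; i < 4; ++i) parsed.cycles = (parsed.cycles << 8) | bytes[2 + i];
    for (int i = 0; i < 8; ++i) parsed.seed = (parsed.seed << 8) | bytes[6 + i];
    return parsed;
}
```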
The test cases for AFL will be small binary data files, and if possible they will be in a format that resembles the format we use (random generator type, cycles and seed).
Test cases are initially loaded into a FIFO queue. The first test case from the queue is selected and minimized as much as possible. AFL employs deterministic strategies such as sequential bit/byte flipping and arithmetic with known integers to mutate the data. When these strategies are exhausted, it mutates the data using random behaviour. When a mutation reaches a new state in the SUT, the test case is saved at the back of the queue and the algorithm is repeated for the next case in the queue. A simplified sketch of this loop follows below.
https://lcamtuf.blogspot.se/2014/08/binary-fuzzing-strategies-what-works.html
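The following is a highly simplified illustration of that queue-based loop, not AFL's actual implementation: the coverage feedback is replaced by a toy stand-in, and only two mutation stages are shown.

```cpp
#include <cstddef>
#include <cstdint>
#include <deque>
#include <random>
#include <vector>

using TestCase = std::vector<uint8_t>;

// Toy stand-in for AFL's instrumentation feedback; the real check compares
// coverage bitmaps collected from the instrumented SUT.
bool reaches_new_state(const TestCase& tc) {
    static std::size_t longest_seen = 0;
    if (tc.size() > longest_seen) { longest_seen = tc.size(); return true; }
    return false;
}

void fuzz_loop(std::deque<TestCase> queue, std::mt19937& rng) {
    while (!queue.empty()) {
        TestCase current = queue.front();
        queue.pop_front();
        if (current.empty()) continue;

        // Deterministic stage: flip every bit of the input once.
        for (std::size_t i = 0; i < current.size() * 8; ++i) {
            TestCase mutated = current;
            mutated[i / 8] ^= static_cast<uint8_t>(1u << (i % 8));
            if (reaches_new_state(mutated)) queue.push_back(mutated);
        }

        // Non-deterministic stage: a handful of random byte overwrites.
        std::uniform_int_distribution<std::size_t> pos(0, current.size() - 1);
        std::uniform_int_distribution<int> byte(0, 255);
        for (int i = 0; i < 100; ++i) {
            TestCase mutated = current;
            mutated[pos(rng)] = static_cast<uint8_t>(byte(rng));
            if (reaches_new_state(mutated)) queue.push_back(mutated);
        }
    }
}
```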
Code generation
Code generation is a vital part of this research paper, since it is needed for simulating the parts of an avionics system we do not have access to, mainly APEX and the underlying system. We use the generated code for injecting our middle layer into the avionics code. The code generation will be somewhat basic. Dextool\(^\text{10}\) is a tool written by Joakim Brännström which makes it possible to parse C++ header files and generate C++ interfaces.
Dextool uses LLVM/Clang\(^\text{11}\) for static analysis of the source code, parses the data given by LLVM/Clang and provides an easy-to-use interface to handle the code.

The basic functionality of the code generation is not sufficient for our simulation. We must write a plugin for Dextool in order for it to output usable code. A compilation database \(^\text{12}\) with the rules used to compile the complete application is taken as an input parameter. The compilation database is used both to find all relevant header files and to generate a Makefile. To make sure that only relevant interfaces are generated, one or more directories with XML-files are taken as another input parameter to the plugin. These XML-files contain representations of the interfaces.
The header files found from the compilation database and all the XML-files are analyzed. When all the files have been analyzed, a filtering step removes code that does not need to be in the output. What counts as unnecessary is determined by the contents of the XML-files, which specify the relevant interfaces. After the filtering step, code is generated for the interfaces. It is important to note that the generated interfaces are what represent the APEX interface in each test run. When the generation of the interfaces is finished, we use the compilation database to generate a Makefile. This Makefile is used to merge the middle layer with the interfaces. The interfaces are compiled to object files which are then linked with our middle layer.
A header file from ARINC 653 usually contains nested namespaces and, within them, classes. A class represents a port and contains getters and setters for the port variables and a function for creating the port. The following is generated (see the sketch after this list):
- Basic implementations of the functions defined in the files.
- Empty constructors and destructors.
- Functions for the port classes which can generate and set new values for the port variables.
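A minimal sketch of what a generated port class could look like, using hypothetical names (a port Fun with data items V0 and V1, with an illustrative range of 1 to 2000); the real generated code follows whatever the analyzed header files and XML-files describe.

```cpp
#include <random>

class Fun_Port {
public:
    // Getters and setters for the port variables.
    int Get_V0() const { return v0_; }
    int Get_V1() const { return v1_; }
    void Set_V0(int value) { v0_ = value; }
    void Set_V1(int value) { v1_ = value; }

    // Generate new values for every item in the port, respecting the type
    // and range requirements taken from the XML file (here assumed 1..2000).
    void Generate_New_Values(std::mt19937& rng) {
        std::uniform_int_distribution<int> range(1, 2000);
        v0_ = range(rng);
        v1_ = range(rng);
    }

private:
    int v0_ = 0;
    int v1_ = 0;
};
```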
While code generation is a vital part of the research paper, we also need to create a middle layer between AFL and the simulated application. The middle layer is a library with functions that can generate values for the variables in the ports, create a structure from the AFL data and handle the random generation of data. AFL is therefore only a small portion of the entire flow of the application; it is only used to start the application and feed it with data that is used in the SUT. The middle layer takes over the process of running the SUT and feeding it with new data based on the AFL input data. AFL is still used to handle crashes and hangs.
Sanitizers
GCC and Clang have options to detect memory errors and undefined behaviour during runtime. This can be used by AFL to detect potential crashes, since AddressSanitizer (ASAN)\(^\text{13}\) and UndefinedBehaviorSanitizer (UBSAN)\(^\text{14}\) send abort signals when they find undefined behaviour or memory errors, if the SUT is compiled with the AFL compilers. Since AFL registers abort signals as crashes, it is possible to debug these crashes just like any normal crash in AFL. As we use GCC 4.9.4 when compiling with sanitizers, the following checks are made when enabling UBSAN: shift, integer-divide-by-zero, unreachable, vla-bound, null, return, signed-integer-overflow. The LeakSanitizer is enabled by default when using ASAN \(^\text{15}\). Using sanitizers slows down execution speed and increases memory usage, but in general allows more errors to be found [20].
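To make the listed checks concrete, here are minimal illustrative examples (not taken from the SUT) of defects that those UBSAN checks flag at runtime. None of them necessarily crash without a sanitizer, which is why AFL alone may miss them.

```cpp
#include <climits>

int shift_too_far(int x, int n) { return x << n; }       // "shift": undefined when n >= width of int or n < 0
int divide(int a, int b)        { return a / b; }         // "integer-divide-by-zero" when b == 0
int overflow(int x)             { return x + INT_MAX; }   // "signed-integer-overflow" when x > 0
```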
Program flow
Normal applications written for partitions in avionics systems have predefined functions that are not generated; these functions are specified in the ARINC 653 specification. They are used for initialization, execution and sometimes termination. The initialize function runs at the start of the
---
\(^{10}\)https://github.com/joakim-brannstrom/dextool
\(^{11}\)https://clang.llvm.org/
\(^{12}\)https://clang.llvm.org/docs/JSONCompilationDatabase.html
\(^{13}\)https://clang.llvm.org/docs/AddressSanitizer.html
\(^{14}\)https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html
\(^{15}\)https://github.com/google/sanitizers/wiki/AddressSanitizerLeakSanitizer
application and the execute function runs in a loop (see figure 6). After the execute function is finished, the program updates all the port variables. When not simulated, the port variables are in most cases updated through APEX instead of programmatically. This means that the port variables are updated between each execution. If a function for termination exists, it is run after the loop is finished. The steps below summarize the flow; a sketch of the driver follows after the list.
1. The simulation starts by reading from standard input, since AFL sends its data through standard input.
2. Our AFL-parser will parse that data and return the needed parts.
3. The application will initialize, create new ports etc.
(a) The ports created by the application will be saved to a list for easy access.
(b) If a port already exists, it will be reused.
4. Now it will loop as many times as AFL has selected (one of the parts derived from input bytes, also known as "for n cycles").
(a) Update all ports with new values.
(b) Execute the application with the new values.
5. When the loop is finished it will terminate if a function for that exists.
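A self-contained sketch of the driver described by steps 1–5. All names are hypothetical stand-ins: in a real run the stubbed functions are the SUT's own predefined entry points, the port list comes from the generated interfaces, and the field layout of the AFL bytes is simplified.

```cpp
#include <cstdint>
#include <iostream>
#include <iterator>
#include <random>
#include <vector>

struct Port { int v0 = 0; };                    // placeholder for a generated port class
static std::vector<Port> ports(1);              // 3(a): ports saved in a list

static void app_initialize() {}                 // stand-ins for the SUT's predefined
static void app_execute() {}                    // ARINC 653-style entry points
static void app_terminate() {}

int main() {
    // 1-2. Read the raw AFL bytes from standard input and pick out the fields.
    std::vector<uint8_t> bytes((std::istreambuf_iterator<char>(std::cin)),
                               std::istreambuf_iterator<char>());
    if (bytes.size() < 3) return 0;             // too little data: let AFL restart us

    const uint32_t cycles = bytes[1];           // illustrative field layout
    std::mt19937 rng(bytes[2]);                 // seed the random generator

    app_initialize();                           // 3. initialize, create ports

    for (uint32_t cycle = 0; cycle < cycles; ++cycle) {    // 4. "for n cycles"
        std::uniform_int_distribution<int> range(1, 99);
        for (auto& port : ports) port.v0 = range(rng);     // 4(a) update all ports
        app_execute();                                      // 4(b) run with the new values
    }

    app_terminate();                            // 5. terminate if such a function exists
    return 0;
}
```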
Workflow when testing an application
To be able to feed an application with AFL input data we need to merge our own middle layer with the Software Under Test (SUT). This is done using a Dextool plugin. The plugin takes XML directories, a compilation database and an application name as parameters. These are used to produce a fully functioning middle layer. The Dextool plugin also generates a Makefile, which is used to compile everything into an executable file. AFL then needs to be fed input data; to make sure it is correct, we created our own script that generates input data following our data structure. When the executable file has been compiled it can be started with AFL.
For the coverage data to be generated we run AFL-cov. AFL-cov needs a binary that has code coverage enabled. That binary and its dependent files are compiled with GCC and its code coverage option. AFL-cov can run either at the same time as AFL or afterwards; this does not matter in our case since AFL-cov is fast when running the test cases.
16https://github.com/mrash/afl-cov
AFL-cov uses the input data that AFL has saved whenever a new path was found. This ensures that the coverage data is correct and that it does not miss any test cases that AFL has generated.
Code coverage
Coverage for applications is measured in lines executed, functions executed and the number of paths found by AFL. If a line or function in the code is called at any point during fuzz testing, it is added to the code coverage. This is done with GCC's built-in code coverage system (gcov). The number of paths found is something that AFL keeps track of and reports to the user through its interface.
Fault and code coverage correlation
To determine how strong the relation is between the number of faults and code coverage, we must compute the correlation coefficient. There are several methods to do this [21], but given our sample size we use the Pearson correlation coefficient, see figure 8.
\[
r = \frac{n\sum xy - (\sum x)(\sum y)}{\sqrt{\left[n\sum x^2 - (\sum x)^2\right]\left[n\sum y^2 - (\sum y)^2\right]}}
\]
Figure 8. Formula for calculating the fault and code coverage correlation coefficient.
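The same formula expressed as a small function, with x as the number of faults and y as the code coverage for each tested application; the function name and container choice are illustrative.

```cpp
#include <cmath>
#include <cstddef>
#include <vector>

// Assumes x and y have the same, non-zero length.
double pearson_r(const std::vector<double>& x, const std::vector<double>& y) {
    const double n = static_cast<double>(x.size());
    double sx = 0, sy = 0, sxy = 0, sxx = 0, syy = 0;
    for (std::size_t i = 0; i < x.size(); ++i) {
        sx += x[i];
        sy += y[i];
        sxy += x[i] * y[i];
        sxx += x[i] * x[i];
        syy += y[i] * y[i];
    }
    const double numerator = n * sxy - sx * sy;
    const double denominator = std::sqrt((n * sxx - sx * sx) * (n * syy - sy * sy));
    return numerator / denominator;  // NaN when either variance is zero
}
```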
As a sample, we use applications corresponding to DAL A, C and E, where we denote x as the number of faults and y as the code coverage in percent. DAL-classifications A, C and E were chosen because it is difficult to find relevant and interesting applications to fuzz test in DAL-classifications B and D. Nor are all applications large enough to contain faults. We denote the number of applications tested as n. We aim to fuzz test each application with our implementation for 4 days on identical environments to get the number of errors and the code coverage. The 4-day goal is an estimate of how long it will take to find crashes in higher DAL-classifications. We suspect that it will be much harder to find errors in higher DAL-classifications; for that reason, the time needed to fuzz test and get results from an application of a higher DAL-classification sets the precedent for the fuzz testing duration of all programs. Since we are testing a few selected applications, we can fuzz test them for much longer.
Fault and DAL relation
In order to find a relation between the number of faults and DAL-classification, we fuzz test each application in every DAL-classification. We compare the resulting faults found between DAL-classifications. The computation is very basic: we take the sum of all faults $F_n$ over every application $P_n$, divide by the number of applications $n$, and thereby obtain the average number of faults for each DAL-classification $D_n$. See figure 9.
$$D_n = \frac{\sum F_n}{n}$$
Figure 9. Formula for calculating average faults for each DAL-classification.
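The computation in figure 9 as a small function; the container layout (a DAL label mapped to per-application fault counts) is an assumption made for illustration.

```cpp
#include <map>
#include <string>
#include <vector>

// Returns the average number of faults per DAL-classification.
std::map<std::string, double> average_faults_per_dal(
    const std::map<std::string, std::vector<int>>& faults_per_application) {
    std::map<std::string, double> averages;
    for (const auto& [dal, faults] : faults_per_application) {
        double sum = 0;
        for (int f : faults) sum += f;
        averages[dal] = faults.empty() ? 0.0 : sum / faults.size();
    }
    return averages;
}
```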
Logging faults
AFL catches abort signals from the SUT and saves the data that caused the fault. Since the number of faults is the main interest of this research paper, we use the command line interface to collect statistics about the number of faults. In order to verify that the cause of a fault is indeed in the SUT and not in our own middle layer, we check the cause of crashes with the GNU debugger (gdb) and AFL crash triage on all the unique crashes found. Unique crashes are found in the sync directory specified when starting AFL. AFL does not by default catch ASAN or UBSAN errors.
Maturity level
Every application we are testing is associated with a maturity level. The maturity level can be either "done" or "not done". If an application is "done", it means it has undergone enough testing to fulfill all required security standards for its DAL-classification. Which maturity level an application has is taken into consideration when evaluating the results we get from fuzz testing.
Test cases
<table>
<thead>
<tr>
<th>Application</th>
<th>DAL-classification</th>
<th>Maturity level</th>
</tr>
</thead>
<tbody>
<tr>
<td>A1</td>
<td>A</td>
<td>Done</td>
</tr>
<tr>
<td>A2</td>
<td>A</td>
<td>Done</td>
</tr>
<tr>
<td>C1</td>
<td>C</td>
<td>Done</td>
</tr>
<tr>
<td>E1</td>
<td>E</td>
<td>Done</td>
</tr>
</tbody>
</table>
Figure 10. Test cases for fuzz testing.
As shown in figure 10, we have four applications being tested. The two applications that have DAL-classification A and are considered "done" are not expected to crash. A crash in any of these applications would indicate something went wrong during the development, testing and verification process; faults at that level should not remain undetected for that long. For the application that has DAL-classification C (C1), we expect there to be a possibility of faults, although it is very unlikely.
Application C1 is also considered "done", something that may further reduce the chances of finding crashes. Lastly, we have a DAL-classification E application (E1) which we expect to find crashes in. E1 is also considered "done". To our understanding, DAL E applications are tested with the least strict requirements of all DAL-classifications, and for that reason we think the biggest chance of finding errors is in application E1.
RESULTS
In this chapter we present the results derived from the method and answer our research questions.
Fault and code coverage correlation
Each application has been fuzz tested for 4 days. AFL did not find any crash in any of the applications. The expected result was at least one crash in DAL-classification E and maybe one in DAL-classification C. This was not the case.
<table>
<thead>
<tr>
<th>Application</th>
<th>Faults</th>
</tr>
</thead>
<tbody>
<tr>
<td>A1</td>
<td>0</td>
</tr>
<tr>
<td>A2</td>
<td>0</td>
</tr>
<tr>
<td>C1</td>
<td>0</td>
</tr>
<tr>
<td>E1</td>
<td>0</td>
</tr>
</tbody>
</table>
Figure 11. Fault data retrieved from testing of applications.
As shown in figure 11, no faults were found in any of the applications. Therefore it is unnecessary to calculate the Pearson correlation coefficient.
<table>
<thead>
<tr>
<th>Application</th>
<th>Line coverage</th>
<th>Function coverage</th>
<th>Paths</th>
</tr>
</thead>
<tbody>
<tr>
<td>A1</td>
<td>91.7%</td>
<td>90.0%</td>
<td>256</td>
</tr>
<tr>
<td>A2</td>
<td>73.4%</td>
<td>77.7%</td>
<td>532</td>
</tr>
<tr>
<td>C1</td>
<td>63.8%</td>
<td>48.5%</td>
<td>314</td>
</tr>
<tr>
<td>E1</td>
<td>41.7%</td>
<td>72.4%</td>
<td>416</td>
</tr>
</tbody>
</table>
Figure 12. Coverage and path data retrieved from testing of applications.
The code coverage fluctuates a lot; this mainly depends on the total amount of source code in the applications. For example, application A1 has the highest coverage and also has the least amount of source code (around 200 lines of code). Application E1 is a special case since it seems to have functions with many lines of code, quite the opposite of how the coverage for C1 looks.
Fault and DAL relation
No faults were found in any of the applications or their respective DAL-classifications. The result was expected for the DAL A applications, but not for DAL C and DAL E.
<table>
<thead>
<tr>
<th>DAL-classification</th>
<th>Faults</th>
</tr>
</thead>
<tbody>
<tr>
<td>A</td>
<td>0</td>
</tr>
<tr>
<td>C</td>
<td>0</td>
</tr>
<tr>
<td>E</td>
<td>0</td>
</tr>
</tbody>
</table>
Figure 13. Fault data per DAL-classification.
DISCUSSION
In this chapter we evaluate the findings presented in the results, the factors influencing these and how well this answers our research questions.
Threats to validity
Given the method presented in this paper, there are several flaws that can influence the result. There is a bias issue in selecting test cases, as we chose test cases we expected to be worth fuzz testing. Fuzz testing can be a very lengthy process because no real exit condition exists. For that reason we chose to focus on providing longer fuzz testing sessions as data for the research, and less time on compiling many applications. Fuzz testing for a longer time alleviates the lack of an exit condition: we can ascertain that no new paths have been found for a long time and that further testing is unlikely to yield any result.
There is also the issue of how much testing the applications have previously undergone. If an application is considered "done", it should be very hard to find errors. It might even be the case that AFL needs guiding to reach the state machines that may cause errors. These kinds of applications are not desirable to fuzz test if finding crashes is the intention. In theory, our method could be repeated with applications considered "not done" and a bigger sample to yield results that answer our research questions better. Considering the number of crashes found, the test cases chosen were detrimental to our result, and there is also the possibility that AFL cannot find errors in these kinds of applications.
As we started fuzz testing we got some faults, but these turned out to be so-called false positives. A false positive is, in our case, a fault that did not happen in the application itself but in our middle layer. These false positives are registered as crashes in AFL. Since the false positives appeared in our own code, they were easy to fix so that they would not happen again. When a crash appeared, we needed to debug the application with the same AFL input data. Multiple crashes appeared when adding UBSAN and ASAN; these were not found when running AFL without sanitizers.
DAL-classifications that are final could have been changed from a higher to a lower classification during development, which means that some applications have been tested towards higher requirements than their corresponding DAL-classification. This is a problem, as a DAL C or DAL E application could have been tested and verified towards a DAL A classification; this can lead to misrepresentative results on faults at those levels.
Method
The method chosen would have worked better if we had found any faults. As it stands, the method is a problem, since we could neither calculate the Pearson correlation coefficient nor the relation between faults and DAL-classification. Some things could have been changed to better fit both cases: no faults found and faults found.
More iterations
AFL mutated and tested the same input data that we fed it at the beginning of the testing. It would be interesting to see what AFL would have done if we had restarted the testing with new test cases after some amount of time. We could have set up a cron job which restarted the session with new test cases.
Debugging test cases
Since the code coverage was not that good for all applications, it would have been interesting to see what could have been done to reach more states in the code. This could have been done by looking through the code coverage data. We could then have used our configuration file to set static variables which would have guided AFL into those states.
Less time on more applications
Since most of the paths in the code were found at the beginning of the fuzz testing session, it would have been better to test more applications for a shorter time span (say a few hours). The issue with this was that many of the applications had dependencies that needed to be added to the compilation flags, and these were time consuming to find. It took some time to get an application ready for fuzz testing.
Changing research questions
Our research questions were not good in the sense that they presumed that faults would occur. If they had been changed into something less quantitative, we would probably have gotten more interesting results.
Repetitive tasks
A lot of time was spent getting applications to compile. Every application had its own set of dependencies. This was a problem since it took a very long time to get each application to compile the very first time. Once the first compilation was done, the Makefile was complete, which made subsequent compilations easier.
Limitations in the developed system
Our implementation has some flaws that could not be fixed at the moment. There is a problem with how AFL mutates the input data: a small change in the input data will usually not result in a small change in the SUT. This is because we are using a Mersenne Twister random generator seeded by the input data to set variables in the SUT. We tried to make it so that not all input data would be random by implementing the configuration file, which is read if the correct byte is set. This allowed some paths to be explored without waiting for the random generator to reach that path.
Memory safety
AFL without any sanitizers tests the SUT for memory safety. The applications passed this category without any problems. When the programmer follows a code standard that prevents problems with memory safety, that propagates to the lower DAL-classifications as well. This could mean that applications with lower DAL-classifications in general have fewer faults than expected.
Results
No errors were found in the applications, which may make it seem like either our implementation was lacking or the tools we used were not good enough for this purpose. There are some interesting aspects of the results though, such as how untested an application would need to be before we start finding errors.
No faults
Our research questions were aimed at answering whether fuzz testing was a good option or not for testing DAL-classified applications. While our results indicated that it would be hard to find errors in applications that had undergone a lot of testing, it would be interesting to investigate the results from a bigger sample size with more untested applications. Promising code coverage was shown for some applications as well, such as A1. Fuzz testing could perhaps be a viable option for stability testing these applications.
Code generation could be improved
If the code generation in our Dextool plugin is improved further, along with our middle layer, more paths could be targeted in the applications, thus increasing code coverage. We spent some time implementing the possibility to target certain state machines in the SUT by setting static values on variables, something we did not use for the actual tests. It would not be unreasonable to spend some time debugging the paths AFL cannot hit in the SUT, and to target these by setting static values on variables that make us reach those paths. We believe there are great possibilities here, not only for setting up a full test suite for automated testing but also for performing extensive crash and stability testing, all as part of a complete quality assurance process for software with a DAL-classification.
Sanitizers
Sanitizers gave us more interesting results, as we started finding errors when we used them. These errors turned out to be false positives. We had tested our middle layer and the underlying applications for 4 days before using sanitizers. At that time no errors were found, but when using sanitizers, ASAN started picking up errors. These errors were found in our own middle layer. Our middle layer has not been unit tested at all, and since AFL had not found any errors we had always assumed that the middle layer was stable enough. Without ASAN enabled this would never have caused an immediate crash, letting the bug avoid detection until either the SUT crashed by coincidence or the bug was never discovered at all.
Trusting the DAL-classifications
DAL-classifications can change. Some applications can therefore have been tested before the final DAL-classification was selected. This can be the case for the lower DAL-classifications if the developers expected to get an application into a DAL-classification that required more testing than the final one. This makes it uncertain towards what requirements the application has been tested. For example, E1 could have been tested towards requirements for DAL C while it is actually a DAL E application.
Interfaces
The code generated for the interfaces is based on our own interpretation of what each interface should implement and what it should do. This can of course be a misunderstanding of how it actually should work. This can lead to not enough paths being reached by AFL, which would also lead to poor code coverage. While this is probably not an issue, it could make a big difference when trying to reach many of the smaller code blocks.
CONCLUSION
Fuzz testing is a promising alternative for automating testing within avionics software development. It scales well, is fairly smart when testing and can be directed to certain code states if needed, so with knowledge of the SUT it is possible to target certain code paths. It is possible to use AFL within avionics system software testing as well, as shown by our implementation. We do not think the result we got is a good indicator of how useful fuzz testing is for avionics software, as the number of applications tested and our knowledge of the applications were low.
All the applications we tested had a maturity level considered "done", which means that finding errors is unexpected. For our research questions, we would have needed to test many applications which were not considered "done". Had we done that, it is more likely we would have found crashes in, for example, DAL C and DAL E applications, making it relevant to calculate the Pearson correlation coefficient.
Another interesting thought is at what stage in the development of these applications fuzz testing would start finding crashes. This gives further interesting questions to ask in new studies, such as at what stage of software development fuzz testing is most efficient and useful. We suspect application E1 was tested and verified for a higher DAL-classification. Taking that into consideration, it was not unexpected that we did not find any crashes. The sample size and selection were not ideal for our chosen method; we would have needed more applications to test, and applications that had undergone less testing.
The use of sanitizers was great for finding errors in our code, and it found errors that AFL could not find directly. When we tested our code we did a 4-day run with AFL without ASAN and UBSAN. We found no errors at all. As we did not find any errors without sanitizers, we compiled the SUT with ASAN and UBSAN support. Then we got some errors, but these were false positives since the errors were in our middle layer. In that sense, fuzz testing can be used early in the development of an application to find problems with undefined behaviour and memory issues, as it does not require any engineer hours to be allocated to looking for bad code.
We got decent code coverage in our tests, and it could be improved with a better understanding of the applications being tested. The implementation shows that fuzz testing could be used to perform stability tests, not for the purpose of finding errors, but simply to ensure an application is stable. The line coverage in application E1 is questionable; further debugging and understanding would be needed to explain why AFL cannot target more code paths in this case. It is quite possible that it has many state machines which are hard to reach without guiding AFL, something that is possible with our implementation as long as the user has knowledge about the application.
The results were quite disappointing since we did not find any errors; there is more to be done. One problem we had was that we did not have enough knowledge about the applications we tested. The applications we tested have a large number of lines of code (some up to 5000 lines) and it would take a lot of time and effort to find state machines that AFL could not get into. If one had known the structure of the application, it would have been easier to test specific parts known to be problematic. As we only looked at pure crashes of the SUT, another interesting aspect would be to look at what the actual output from the SUT is.
REFERENCES
USER MANUAL
We will go through dependencies and how to build and use our work to fuzz test avionic applications.
Dependencies
- Dextool and its dependencies\(^{17}\)
- GCC 4.9.4+
- American Fuzzy Lop (AFL)
- afl-cov
- lcov
Note: Developed and tested only on Linux and macOS.
Building
To use our implementation, first build all dependencies and make sure they work. The Dextool fork contains our plugin, middle-layer and some examples to test on.
**Building Dextool with fuzz plugin**
Before using commands, make sure you are in Dextool root directory. Then run:
```
$ mkdir build && cd build
$ cmake ..
$ make
```

**Fuzzing the SUT**
Once again, make sure you are in Dextool root directory. To generate the SUT code, run:
```
$ ./build/dextool fuzz --xml-dir yourapp/namespaces
--compile-db compile_commands.json --app-name APP_Name
```

This command will generate a Makefile, fuzz.cpp, fuzz.hpp, main.cpp and main.hpp. The fuzz-files will contain the interfaces that it found. Main.cpp and main.hpp will contain the scheduler that will run the complete application including our middle layer. The Makefile can be used to compile the complete application without code coverage support.
Compile with AFL and start fuzzing:
```
$ make -f Makefile_fuzz
$ afl-fuzz -i input/ -o output/ ./a.out
```

**Sanitizers**
When you are using sanitizers, such as ASAN or UBSAN, you need to add AFL_USE_ASAN=1 to the Makefile_fuzz rules.
**AFL variables**
If you are not using any sanitizers, you can add AFL_HARDEN=1 to the Makefile_fuzz rules, just like you would do for the sanitizers. This setting is useful to find memory errors that do not count as a crash.
**Utilities**
AFL has some useful commands for minimizing the number of input test files and for removing unnecessary data in the files. This does not mean AFL skips testing anything; it just means that the removed files and bytes were already covered by some other test case or byte. The commands are:
- afl-cmin: Use to remove unnecessary files from the corpus.
- afl-tmin: Remove unnecessary bytes in corpus file.
Ideally, you run AFL for some hours and then stop. Run cmin and tmin on all files in the queue and then restart AFL with the new corpus as input. This process can be repeated several times to improve the corpus, yielding a more exhaustive result each iteration.
**Debugging faults**
When a crash or hang has been found by AFL, it just shows up as a number in the interface. To be able to investigate the crash further, AFL saves the input data to a file which can be found in the specified output directory (-o flag). To debug this you can either use gdb or AFL's own script called crash-triage. Crash-triage prints basic information about all the crashes found in the crashes directory. Gdb can only use one input file at a time but gives better information about what is happening in the application, so gdb is the better choice when more investigation is needed.
**Guiding AFL**
Guiding AFL can be necessary when AFL has been running for a while without finding any new paths. This can be the case if there are a lot of state machines that are hard to get into. Which state machines have not been reached can be found out by using AFL-cov, which gives better information about which lines have been reached.
To ease this problem, a configuration file can be written. This file can set variables in ports and specify during which cycles each variable should have its value.
**Configuration file**
```
fun.V0 100--200 1337
fum.V0 100--200 1337
```

The configuration file is straightforward. It is space delimited, which means that there is a space between each of the fields. The first field is the variable that should be given the chosen value. The second field is during which cycles the variable should have the chosen value. The third field is the value that the variable should have during those cycles.
\(^{17}\)https://github.com/ploq/dextool
The example configuration file says that fun.V0 should have the value 1337 between cycles 100 and 200. The same file also says that fum.V0 should have the same value and that it should also be set between cycles 100 and 200.
Example
```xml
<Interface name="Bar">
  <Types>
    <SubType name="MyInt" type="IntT" min="1" max="2000" unit="km"/>
  </Types>
  <ContinuousInterface name="Fun"
      direction="From_Provider">
    <DataItem name="V0" type="IntT"/>
    <DataItem name="V1" type="IntT"/>
  </ContinuousInterface>
  <ContinuousInterface name="Fum"
      direction="To_Provider">
    <DataItem name="V0" type="IntT"/>
  </ContinuousInterface>
</Interface>
```
Figure 18. An XML file representative of an ARINC 653 interface, used by the following code example.
The XML file contains two continuous interfaces, Fun and Fum. Fun contains two data items, V0 and V1, while Fum contains one, V0. The XML file also contains a type with a range specification, which says that the values it handles are between 1 and 2000.
Consider an example application with the following execute function:
```cpp
void APP_Name_Execute()
{
    if (comp_y->Get_Port().Get_Fun().V0 == 1337)
        if (comp_x->Get_Port().Get_Fun().V0 == 1337)
            port_z->Get_Fun().V1 = comp_x->Get_Port().Get_Fun().V1;

    // Division by zero state machine
    if (comp_x->Get_Port().Get_Fun().V0 /
        comp_y->Get_Port().Get_Fun().V0) {
        comp_y->Get_Port().Put_Fun(uni(rng));
        // random number between 0 and 8000
    }
}
```
Figure 19. An example application which uses the interface defined in the example XML file.
We have components x, y and z. They are interfaces for the ports residing within them. The ports have all the functions used for modifying any variables. This application risks crashing on the last if-statement due to a division by zero when the fun.V0 variable in component y is 0. However, the conditions for that to happen are very specific:
- Fun.V0 of component y must be 1337.
- Fun.V0 of component x must be 1337.
- The uniform random generator must generate a 0, and it randomly generates integers between 0 and 8000.
AFL would have a hard time reaching this state often enough for it to crash. So, in order to reach this state machine consistently with AFL, we have to set some static variables. We want to set fun.V0 of component x to 1337 and fun.V0 of component y to 1337. This can be done by creating a "config.txt" file in the root directory of Dextool. See the "Configuration file" section for an example of how the configuration file can look for this example application. The result of doing this is that the application crashes quite often and AFL registers these as crashes.
Code Coverage
gcov
For code coverage support you will need to compile the application with gcc and with the flags -fprofile-arcs -ftest-coverage. This will generate a new executable file and .gcno files. Next time you start your program it will collect code coverage data that you can read with either gcov or lcov. This is used by AFL-cov to get code coverage data from your AFL run.
AFL-cov
AFL-cov is written as a wrapper for lcov. AFL-cov can be used to get coverage data during or after an AFL run. It takes all the input data that AFL has saved and then runs your program with those files.
To use AFL-cov you can start it before you start AFL, during the AFL run, or afterwards. In the first two cases you need the --live flag; otherwise you can omit it.
```bash
$ afl-cov -d output/ --coverage-cmd "./a.out < AFL_FILE" --code-dir .
```
This command will look for AFL input data files in output/. The command used to start your application is given as the coverage-cmd parameter; it is important that you do not omit AFL_FILE, as that is replaced with the correct path to an AFL input data file.
If you have already run AFL on your program and want to get code coverage data for that run, you need to recompile your program with the flags covered in the gcov section. After that you can start AFL-cov with the --live flag removed.
|
{"Source-Url": "http://liu.diva-portal.org/smash/get/diva2:1114878/FULLTEXT01.pdf", "len_cl100k_base": 12773, "olmocr-version": "0.1.53", "pdf-total-pages": 14, "total-fallback-pages": 0, "total-input-tokens": 44541, "total-output-tokens": 14910, "length": "2e13", "weborganizer": {"__label__adult": 0.0003058910369873047, "__label__art_design": 0.0002932548522949219, "__label__crime_law": 0.0002300739288330078, "__label__education_jobs": 0.0006060600280761719, "__label__entertainment": 5.84721565246582e-05, "__label__fashion_beauty": 0.00013113021850585938, "__label__finance_business": 0.0001342296600341797, "__label__food_dining": 0.00025463104248046875, "__label__games": 0.0008220672607421875, "__label__hardware": 0.0010471343994140625, "__label__health": 0.00023365020751953125, "__label__history": 0.00018286705017089844, "__label__home_hobbies": 8.171796798706055e-05, "__label__industrial": 0.00031256675720214844, "__label__literature": 0.0001894235610961914, "__label__politics": 0.00014901161193847656, "__label__religion": 0.00031113624572753906, "__label__science_tech": 0.01274871826171875, "__label__social_life": 6.99162483215332e-05, "__label__software": 0.007110595703125, "__label__software_dev": 0.9736328125, "__label__sports_fitness": 0.00022470951080322263, "__label__transportation": 0.0004930496215820312, "__label__travel": 0.00016248226165771484}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 62989, 0.02355]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 62989, 0.54723]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 62989, 0.92057]], "google_gemma-3-12b-it_contains_pii": [[0, 117, false], [117, 2358, null], [2358, 7395, null], [7395, 11796, null], [11796, 17555, null], [17555, 23029, null], [23029, 27900, null], [27900, 32027, null], [32027, 37238, null], [37238, 43046, null], [43046, 49317, null], [49317, 54593, null], [54593, 58816, null], [58816, 62989, null]], "google_gemma-3-12b-it_is_public_document": [[0, 117, true], [117, 2358, null], [2358, 7395, null], [7395, 11796, null], [11796, 17555, null], [17555, 23029, null], [23029, 27900, null], [27900, 32027, null], [32027, 37238, null], [37238, 43046, null], [43046, 49317, null], [49317, 54593, null], [54593, 58816, null], [58816, 62989, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 62989, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 62989, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 62989, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 62989, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 62989, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 62989, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 62989, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 62989, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 62989, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 62989, null]], "pdf_page_numbers": [[0, 117, 1], [117, 2358, 2], [2358, 7395, 3], [7395, 11796, 4], [11796, 17555, 5], [17555, 23029, 6], [23029, 27900, 7], [27900, 32027, 8], [32027, 37238, 9], [37238, 43046, 10], [43046, 49317, 11], [49317, 54593, 12], [54593, 58816, 13], [58816, 
62989, 14]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 62989, 0.08271]]}
|
olmocr_science_pdfs
|
2024-12-07
|
2024-12-07
|
6ff14687356cd43d885608fe08f525c6d2f7a934
|
C Programming and Numerical Analysis
An Introduction
Synthesis Lectures on Mechanical Engineering
Synthesis Lectures on Mechanical Engineering series publishes 60–150 page publications pertaining to this diverse discipline of mechanical engineering. The series presents Lectures written for an audience of researchers, industry engineers, undergraduate and graduate students. Additional Synthesis series will be developed covering key areas within mechanical engineering.
C Programming and Numerical Analysis: An Introduction
Seiichi Nomura
2018
Mathematical Magnetohydrodynamics
Nikolas Xiros
2018
Design Engineering Journey
Ramana M. Pidaparti
2018
Introduction to Kinematics and Dynamics of Machinery
Cho W. S. To
2017
Microcontroller Education: Do it Yourself, Reinvent the Wheel, Code to Learn
Dimosthenis E. Bolanakis
2017
Solving Practical Engineering Mechanics Problems: Statics
Sayavur I. Bakhtiyarov
2017
Unmanned Aircraft Design: A Review of Fundamentals
Mohammad Sadraey
2017
Introduction to Refrigeration and Air Conditioning Systems: Theory and Applications
Allan Kirkpatrick
2017
Resistance Spot Welding: Fundamentals and Applications for the Automotive Industry
Menachem Kimchi and David H. Phillips
2017
MEMS Barometers Toward Vertical Position Detection: Background Theory, System Prototyping, and Measurement Analysis
Dimosthenis E. Bolanakis
2017
Engineering Finite Element Analysis
Ramana M. Pidaparti
2017
ABSTRACT
This book is aimed at those in engineering/scientific fields who have never learned programming before but are eager to master the C language quickly so as to immediately apply it to problem solving in numerical analysis. The book skips unnecessary formality but explains all the important aspects of C essential for numerical analysis. Topics covered in numerical analysis include single and simultaneous equations, differential equations, numerical integration, and simulations by random numbers. In the Appendices, quick tutorials for gnuplot, Octave/MATLAB, and FORTRAN for C users are provided.
KEYWORDS
C, numerical analysis, Unix, gcc, differential equations, simultaneous equations, Octave/MATLAB, FORTRAN, gnuplot
# Contents
Preface ........................................................................................................... xi
Acknowledgments ........................................................................................... xiii
PART I Introduction to C Programming ...................................................... 1
1 First Steps to Run a C Program ................................................................. 5
1.1 A Cycle of C Programming ................................................................. 5
1.2 UNIX Command Primer ................................................................. 8
1.3 Overview of C Programming ........................................................... 10
1.3.1 Principles of C language ......................................................... 10
1.3.2 Skeleton C program .............................................................. 11
1.4 Exercises ......................................................................................... 14
2 Components of C Language ................................................................. 17
2.1 Variables and Data Types ................................................................. 17
2.1.1 Cast Operators ................................................................... 18
2.1.2 Examples of Data Type ......................................................... 19
2.2 Input/Output ..................................................................................... 20
2.3 Operators between Variables ............................................................ 22
2.3.1 Relational Operators ............................................................. 23
2.3.2 Logical Operators ................................................................. 23
2.3.3 Increment/Decrement/Substitution Operators ..................... 24
2.3.4 Exercises ............................................................................. 25
2.4 Control Statements ........................................................................... 25
2.4.1 if Statement ......................................................................... 26
2.4.2 for Statement ....................................................................... 27
2.4.3 while Statement ................................................................... 31
2.4.4 do while Statement ............................................................. 32
2.4.5 switch Statement .................................................................. 33
2.4.6 Miscellaneous Remarks ........................................... 34
2.4.7 Exercises .......................................................... 38
2.5 Functions ............................................................. 39
2.5.1 Definition of Functions in C .................................... 39
2.5.2 Locality of Variables within a Function ...................... 41
2.5.3 Recursivity of Functions ........................................ 42
2.5.4 Random Numbers, rand() ................................ 44
2.5.5 Exercises .......................................................... 50
2.6 Arrays ................................................................. 52
2.6.1 Definition of Arrays .............................................. 52
2.6.2 Multi-dimensional Arrays ....................................... 54
2.6.3 Examples .......................................................... 55
2.6.4 Exercises .......................................................... 58
2.7 File Handling .......................................................... 60
2.7.1 I/O Redirection (Standard Input/Output Redirection) ....... 60
2.7.2 File Handling (From within a Program) ....................... 61
2.8 Pointers ............................................................... 63
2.8.1 Address Operator & and Dereferencing Operator * .......... 63
2.8.2 Properties of Pointers ........................................... 65
2.8.3 Function Arguments and Pointers ............................. 68
2.8.4 Pointers and Arrays ............................................. 70
2.8.5 Function Pointers ................................................ 72
2.8.6 Summary .......................................................... 73
2.8.7 Exercises .......................................................... 74
2.9 String Manipulation .................................................. 75
2.9.1 How to Handle a String of Characters (Text) ............... 75
2.9.2 String Copy/Compare/Length ................................. 78
2.10 Command Line Arguments ............................................ 80
2.10.1 Entering Command Line Arguments ......................... 80
2.10.2 Exercises .......................................................... 82
2.11 Structures ........................................................... 83
2.11.1 Mixture of Different Types of Variables .................... 83
2.11.2 Exercises .......................................................... 86
## PART II Numerical Analysis
3 Note on Numerical Errors ................................................................. 93
4 Roots of f(x) = 0 ............................................................................. 99
4.1 Bisection Method ......................................................................... 99
4.2 Newton's Method ....................................................................... 102
4.2.1 Newton's Method for a Single Equation ................................. 102
4.2.2 Newton's Method for Simultaneous Equations (Optional) ....... 106
4.2.3 Exercises ................................................................................ 108
5 Numerical Differentiation ............................................................... 109
5.1 Introduction ............................................................................... 109
5.2 Forward/Backward/Central Difference ....................................... 109
5.3 Exercises .................................................................................... 114
6 Numerical Integration ................................................................... 115
6.1 Introduction ............................................................................... 115
6.2 Rectangular Rule ........................................................................ 115
6.3 Trapezoidal Rule ........................................................................ 117
6.4 Simpson's Rule ........................................................................... 118
6.5 Exercises .................................................................................... 121
7 Solving Simultaneous Equations .................................................... 123
7.1 Introduction ............................................................................... 123
7.2 Gauss-Jordan Elimination Method .............................................. 126
7.3 LU Decomposition (Optional) ..................................................... 129
7.4 Gauss-Seidel Method (Jacobi Method) ........................................ 133
7.5 Exercises .................................................................................... 135
8 Differential Equations ................................................................... 137
8.1 Initial Value Problems ................................................................ 137
8.1.1 Euler's Method ....................................................................... 138
8.1.2 Runge-Kutta Method .............................................................. 143
8.2 Higher-order Ordinary Differential Equations ............................ 144
8.3 Exercises .................................................................................... 146
A Gnuplot
B Octave (MATLAB) Tutorial for C Programmers
B.1 Introduction
B.2 Basic Operations
B.2.1 Principles of Octave/MATLAB
B.2.2 Reserved Constants
B.2.3 Vectors/Matrices
B.2.4 Graph
B.2.5 I/O
B.2.6 M-files
B.2.7 Conditional Statement
B.3 Sketch of Comparison Between C and Octave/MATLAB
B.4 Exercises
C FORTRAN Tutorial for C Programmers
C.1 FORTRAN Features
C.2 How to Run a FORTRAN Program
C.3 Sketch of Comparison Between C and FORTRAN
C.4 Exercises
Author's Biography
Index
Preface
This book is aimed at those who want to learn the basics of programming quickly with immediate applications to numerical analysis in mind. It is suitable as a textbook for sophomore-level STEM students.
The book has two goals as the title indicates: The first goal is to introduce the concept of computer programming using the C language. The second goal is to apply the programming skill to numerical analysis for problems arising in scientific and engineering fields. No prior knowledge of programming is assumed but it is desirable that the readers have a background in sophomore-level calculus and linear algebra.
C was selected as the computer language of choice in this book. There have been continuous debates as to what programming language should be taught in college. Until around the 1990s, FORTRAN had been the dominant programming language for scientific and engineering computation, which was gradually taken over by modern programming languages such as PASCAL and C. Today, MATLAB is taught in many universities as a first computer application/language for STEM students. Python is also gaining popularity as a general purpose programming language suitable as the first computer language to be taught.
Despite the availability of many modern computer languages today, adopting C for scientific and engineering computation still has several merits. C contains almost all the concepts and syntax used in modern computer languages except for the paradigm of object-oriented programming (use C++ and Java for that). It has been observed that whoever learns C first can easily acquire other programming languages and applications such as MATLAB quickly. The converse, however, does not hold. C is a compiled language and preferred over interpreted languages for programs that require fast execution.
There is no shortage of good textbooks for the C language and good textbooks for numerical analysis on the market, but a proper combination of both seems to be hard to find. This book is not a complete reference for C and numerical analysis. Instead, the book tries to minimize the formality and limits the scope of C to those essential features that are absolutely necessary for numerical analysis. Some features in C that are not relevant to numerical analysis are not covered in this book. C++ is not covered either, as the addition of object-oriented programming components offers little benefit for numerical analysis. After finishing this book, the reader should be able to work on many problems in engineering and science by writing their own C programs.
The book consists of two parts. In Part I, the general syntax of the C language is introduced and explained in detail. gcc, which is freely available on almost all platforms, is used as the compiler. As the native platform of gcc is UNIX, a minimal introduction to the UNIX operating system is also presented.
In Part II the major topics from numerical analysis are presented and corresponding C programs are listed and explained. The subjects covered in Part II include solving a single equation, numerical differentiation, numerical integration, solving a set of simultaneous equations, and solving differential equations.
In Appendix A, gnuplot, which is a visualization application, is introduced. The C language itself has no graphical capabilities and requires an external program to visualize the output from the program.
In Appendix B, a brief tutorial of Octave/MATLAB is given. This is meant for those who are familiar with C but need to learn Octave/MATLAB in the shortest possible amount of time.
In Appendix C, a brief tutorial of FORTRAN is given. Again, this is meant for those who are already familiar with C to be able to read programs written in FORTRAN (FORTRAN 77) quickly.
This book is based on the course notes used for sophomore-level students of the Mechanical and Aerospace Engineering major at The University of Texas at Arlington.
Seiichi Nomura
March 2018
Acknowledgments
I want to thank the students who took this course for their valuable feedback. I also want to thank Paul Petralia of Morgan & Claypool Publishers and C.L. Tondo of T&T TechWorks, Inc. for their support and encouragement.
All the programs and tools used in this book are freely available over the internet thanks to the noble vision of the GNU project and the Free Software Foundation (FSF).
Seiichi Nomura
March 2018
PART I
Introduction to C Programming
In Part I, the basic syntax of the C language is introduced so that you can quickly write a program for problems in science and engineering to be discussed in Part II. This is never meant to be a complete reference for the C language. It covers only those items relevant to scientific/engineering computation. However, after Part I, you should be able to explore missing topics on your own. Only a minimal computing environment is needed, and all the programs listed should run on any version of gcc.
The only way to learn programming is to write a program by yourself. You never learn programming if you just read books sitting on a sofa.
C H A P T E R 1
First Steps to Run a C Program
In this chapter, the basic cycle of running a C program is explained. To execute a C program, it is necessary to first write a C code using a text editor, save the code with the file extension “.c”, launch a C compiler to translate the text into a binary code, and, if everything goes well, run an executable (called a.out in UNIX). If this is the first time you program in C, it is important that you try every single step described in the following sections.
1.1 A CYCLE OF C PROGRAMMING
There are a variety of ways to access a C compiler and run a C program. Almost all schools run a UNIX server open to the students. You should be able to activate your account on the UNIX server, connect to the server via an ssh\textsuperscript{1} client such as PuTTY over the internet, and run a freely distributed C compiler, gcc.\textsuperscript{2} It is also possible to have a similar setup at home by running your own Linux server or installing a PC/Mac version of gcc. If you come from a Windows or Mac environment, you are accustomed to the graphical user interface (GUI) clicking an icon to open an application. However, to use gcc, you must use the character-based interface (CUI) to compile and run your program in a UNIX shell, in a command line (DOS) window (Windows) or in Terminal App (Mac). To run gcc on the Windows system, you can go to www.mingw.org and download the gcc installer, mingw-get-setup.exe.
In what follows in this book, we use PuTTY\textsuperscript{3} (terminal emulation software) to access a UNIX server and run gcc on the server. Figure 1.1 shows an opening screen when PuTTY is launched on the Windows system. In the box circled, enter the name of a server that runs gcc and press the Open button. It will prompt you to enter your username (case sensitive) and password (won't echo back). Once you are logged on the server, you are prompted to enter a command from the console (see Figure 1.2). If you have never used a UNIX system before, you may want to play with some of the essential UNIX commands. Try the following:
1. Login to the server via PuTTY.
\textsuperscript{1}ssh (Secure Shell) is a networking protocol by which two computers are connected via a secure channel.
\textsuperscript{2}gcc is an abbreviation for GNU Compiler Collection. It is a compiler system produced by the GNU Project.
\textsuperscript{3}PuTTY is a free and open-source terminal emulator available for the Windows system that can be downloaded from www.putty.org. The size of the executable is less than 1 MB and the program loads very fast.

Figure 1.1: Opening screen of PuTTY.
2. Using nano, a simple text editor, compose your C program (Figure 1.3).
```bash
$ nano MyProgram.c
```
The symbol, $, is the system prompt so do not type it. Enter the following text into nano. Note that all the input in UNIX is case-sensitive.
```c
#include <stdio.h>
int main()
{
printf("Hello, World!\n");
return 0;
}
```
\(^4\)nano is a simple editor that comes with all installations of UNIX. It is a clone of another simple text editor, pico.
3. After you finish entering the text, save the file (Control-O) by entering `MyProgram.c`\(^6\) as the file name to be saved and press Control-X to exit from nano. This will save the file you just created permanently under the name of `MyProgram.c`.
4. The file you created with nano is a text file that is not understood by the computer. It is necessary to translate this text file into a code which can be run on the computer. This translation process is called compiling and the software to do this translation is called a compiler. We use `gcc` for this purpose.
At the system prompt ($), run a C compiler (gcc) to generate an executable file (a.out\(^7\)).
`$ gcc MyProgram.c`
If everything works, gcc will create an executable binary file whose default name is a.out.
---
\(^5\)Hold down the control key and press O.
\(^6\)The file name is case sensitive.
\(^7\)a.out is an abbreviation for *assembler* output.
5. Run the executable file.
$ ./a.out
6. If there is a syntax error, go back to item 2 and reissue nano.
$ nano MyProgram.c
7. If there is no syntax error, run the executable file.
$ ./a.out
8. To logoff from the server, enter exit, logout, or hit control-D.
1.2 UNIX COMMAND PRIMER
In a perfect world, you could compose a C program, compile it, run a.out, and be done with it. This scenario may work for a program of less than 10 lines, but as the size of the program grows or the program depends on other modules, it is necessary to manipulate and organize files on the UNIX system. Even though this is not an introductory book on the UNIX operating system, a minimum amount of knowledge about the UNIX operating system is needed. The following are some of the UNIX commands that are used often. Try each command yourself from the system prompt and find out what it does. It won't damage the machine.
---
\(^8\)“./” represents the current directory. If the current directory is included in the PATH environment variable, “./” is not necessary.
- `ls` (Directory listing.)
- `ls -l` (Directory listing in long format.)
- `ls -lt | more` (Directory listing, one screen at one time, long format, chronological order.)
- `dir` (alias for `ls`)
- `ls .` (Lists the current directory.)
- `cd ..` (Moves to the directory one level up.)
- `pwd` (Shows the present working directory.)
- `cd /` (Moves to the top directory.)
- `cd` (Returns to the home directory.)
- `mkdir MyNewFolder` (Creates a new directory)
- `nano myfile.txt` (Creates a new file.)
- `cp program1.c program2.c` (Copies program1.c to program2.c.)
- `mv old.c new.c` (Renames old.c to new.c.)
- `rm program.c` (Deletes program.c.)
- `rm *.c` (Do not do this. It will delete all the files with extension c.)
- `whoami` (Shows your username.)
- `who` (Shows who are logged on.)
- `cal` (Shows this year's calendar.)
- `cal 1980` (Shows the calendar of 1980.)
To quickly move while entering/editing a command line and in nano sessions, master the following shortcuts. `^f` means holding down the control key and pressing the f key.
- `^f` (Moves cursor forward by one character, f for forward.)
- `^b` (Moves cursor backward by one character, b for backward.)
- `^d` (Deletes a character on cursor, d for delete.)
- `^k` (Deletes entire line, k for kill.)
- `^p` (Moves to previous line, same as up arrow, p for previous.)
- `^n` (Moves to next line, same as down arrow, n for next.)
- `^a` (Moves to top of line, a for the first alphabet.)
- `^e` (Moves to end of line, e for end.)
1.3 OVERVIEW OF C PROGRAMMING
Arguably, the most important book on the C language is a book known as “K&R” written by Kernighan and Ritchie\(^9\) who themselves developed the C language. It is concise yet well-written and is highly recommended for reading.
1.3.1 PRINCIPLES OF C LANGUAGE
Surprisingly, the C language is based on a few simple principles. They are summarized as follows:
1. A C program is a set of functions.
2. A function in C is a code that follows the syntax below:
```c
type name(type var)
{
your C code here.....
......
return value;
}
```
3. A function must be defined before it is used.
4. A function must return a value whose type must be declared (one of int, float, double, char). The last line of a function must be a return xx statement where xx is a value to be returned upon exit.
5. A function must take arguments and must have a placeholder () even if there is no argument.
6. The content of a function must be enclosed by “{” and “}”.
7. A special function, int main(), is the one which is executed first. It is recommended that this function returns an integer value of 0.
8. All the variables used within a function must be declared.
1.3.2 SKELETON C PROGRAM
The following program is absolutely the smallest C program that can be written:
```c
int main()
{
return 0;
}
```
You can compile and execute this program by issuing the following commands:
```
$ gcc MyProgram.c
$ ./a.out
```
where `MyProgram.c` is the name under which the file was saved. Even though it is the smallest C program, the program itself is a full-fledged C code. Of course, this program does nothing and when you issue `./a.out`, the program simply exits after being executed and you are returned to the system prompt.
Here is a line-by-line analysis of the program above. Refer to Section 1.3.1 for the list of items. The first line, `int main()`, indicates that a function named `main` is declared that returns an integer value (int) upon exit (Item 4). This function takes no arguments (empty parameters within the parentheses) (Item 5). The program consists of only one function, `main()`, which is executed first (Item 7). The content of the function, `main()`, is the line(s) surrounded by `{` and `}` (Item 6). In this case, the program executes the `return 0` statement and exits back to the operating system, returning a value of 0. As C is a free-form language, the end of each statement has to be clearly marked. A semicolon `;` is placed at the end of each statement. Hence, `return 0;`
The following program is a celebrated code that appeared first in the K&R book in the Getting Started section and was later adapted in just about every introductory book for C as the first C program; it prints “Hello, World!” followed by a new line.
```c
#include <stdio.h>
int main()
{
printf("Hello, World!\n");
return 0;
}
```
Each line in the above program is now parsed. The first line, #include <stdio.h>, is a bit confusing, so let's skip this line for the time being and move to the subsequent lines. If you compile the program and execute a.out, you will find out that the program prints Hello, World! followed by a new line on the screen. Hence, you can guess that the odd characters, \n, represent a new line. As there is no ordinary character that represents a new line, you figure out that \n can be used to print a new line.
Next, note that the part printf is followed by a pair of parentheses and therefore, it is a function in C (Item 5). It is obvious that this function, printf(), prints a string, Hello, World!, and quits. As it is a function in C, it has to be defined and declared before it is used. However, no such definition is found above the function, main(). The first line, #include <stdio.h>, is in fact referring to a file that contains the definition of printf() that is preloaded before anything else. The file, stdio.h, is one of the header (hence the extension, h) files available in the C library that is shipped with gcc. As the name indicates (stdio = Standard Input and Output), this header file has the definition of many functions that deal with input and output (I/O).
Finally, a function must have information about the type of the value it returns such as int, float, double, etc. (Item 4). In this case, the function int main() is declared to return an integer value upon exit. Sure enough, the last statement return 0; is to return 0 when the execution is done and 0 is an integer.
Here is how gcc parses this program line by line:
**Line 1** Before anything else, let’s load a header file, <stdio.h>, that contains the definition of all the functions that deal with I/O from the system area.
**Line 2** This is the start of a function called main(). This function returns an integer value int upon exit. This function has no parameters to pass so the content within the parentheses is empty.
**Line 3** The { character indicates that this is the beginning of the content of the function, main().
**Line 4** This line calls the function, printf(), that is defined in <stdio.h> and prints out a string of Hello, World! followed by a blank line. A semicolon, ;, marks the end of this function.
**Line 5** This is the last statement of the function, main(). It will return the value 0 to the operating system and exit.
**Line 6** The } character indicates the end of the content of the function, main().
You can execute this program by
```
$ nano hello.c
$ gcc hello.c
(If it is not compiled, reedit hello.c.)
$ ./a.out
Hello, World!
```
Here is another program that does some scientific computation.
```c
#include <stdio.h>
#include <math.h>
/* This is a comment */
int main()
{
float x, y;
x = 6.28;
y = sin(x);
printf("Sine of \%f is \%f.\n", x, y);
return 0;
}
```
This program computes the value of $\sin x$ where $x = 6.28$. The program can be compiled as
```
$ gcc MyProgram.c -lm
```
Note that the `-lm` option is necessary when including `<math.h>`.
Here is a line by line analysis of the program:
**Line 1** The program preloads a header file, `<stdio.h>`.
**Line 2** The program also preloads another header file, `<math.h>`. This header file is necessary whenever mathematical functions such as $\sin(x)$ are used in the program.
**Line 3** This entire line is a comment. Anything surrounded by `/*` and `*/` is a comment and is ignored by the compiler.
**Line 4** This is the declaration of a function, `main()`, that returns an integer value but with no parameter.
---
10"-1" is to load a library and "m" stands for the math library.
11`<math.h>` only contains protocol declarations for mathematical functions. It is necessary to locally load the mathematical library, `libm.a` by the `-lm` option.
12A comment can also start with `//`. This for one-line comment originated in C++.
**Line 5** The { character indicates that this is the beginning of the content of the function, main().
**Line 6** Two variables, x and y, are declared, both of which represent floating numbers.
**Line 7** The variable, x, is assigned a floating number, 6.28.
**Line 8** The function, sin(x), is evaluated where x is 6.28 and the result is assigned to the variable, y.
**Line 9** The result is printed. First, a literal string of “Sine of” is printed followed by the actual value of x and “is” is printed followed by the actual value of y, a period and a new line.
**Line 10** The function, main(), exits with a return value of 0.
**Line 11** The } character indicates that this is the end of the content of the function, main().
There are several new concepts in this program that need to be explained. The second line is to preload yet another header file, math.h, as this program computes the sine of a number. In the sixth line, two variables, x and y, are declared. The float part indicates that the two variables represent floating numbers (real numbers with the decimal point). The seventh line says that a number, 6.28, is assigned to the variable, x. The equal sign (=) here is not the mathematical equality that you are accustomed to. In C and all other computer languages, an equal sign (=) is exclusively used for substitution, i.e., the value to the right of = is assigned to the variable to the left of =. In the ninth line, the printf() function is to print a list of variables (x and y) with formatting specified by the double quotation marks (“...”). The way formatting works is that printf() prints everything literally within the double quotation marks except for special codes starting with the percentage sign (%). Here, %f represents a floating number which is to be replaced by the actual value of the variable. As there are two %f's, the first %f is replaced by the value of x and the second %f is replaced by the value of y. The new concepts shown here will be explained in detail in Chapter 2.
1.4 EXERCISES
It is not necessary to know all the syntax of C to work on the following problems. Each problem has a template that you can modify. Start with the template code, keep modifying the code and understand what each statement does. It is essential that you actually write the code yourself (not copy and paste) and execute it.
1. Write a C program to print three blank lines followed by “Hello, World!” Use the following code as a template:
```c
#include <stdio.h>
int main()
{
printf("\nHello, World!\n\n");
return 0;
}
```
\n prints a new line.
2. Write a program to read two real numbers from the keyboard and to print their product. Use the following code as a template. Do not worry about the syntax, just modify one place.
```c
#include <stdio.h>
int main()
{
int a, b; /* to declare that a and b are integer variables */
printf("Enter two integer numbers separated by space =");
scanf("%d %d", &a, &b); /* This is the way to read two integer numbers and assign them to a and b. */
printf("The sum of the two numbers is %d.\n", a+b); /* %d is for integer format. */
return 0;
}
```
3. Write a program to read a real number, \( x \), and output its sine, i.e., \( \sin(x) \). You need to use <math.h> and the `-lm` compile option. Use the following template program that computes \( e^x \).
```c
#include <stdio.h>
#include <math.h>
int main()
{
float x;
printf("Enter a number ="); scanf("%f", &x);
printf("x= %f exp(x)=%f\n", x, exp(x));
return 0;
}
```
You have to use the `-lm` option when compiling:
```
$ gcc MyProgram.c -lm
$ ./a.out
```
C H A P T E R 2
Components of C Language
In this chapter, the essential components of the C language are introduced and explained. The syntax covered in this chapter is not exhaustive but after this chapter you should be able to write a simple C program that can solve many problems in engineering and science.
2.1 VARIABLES AND DATA TYPES
Every single variable used in C must have a type that describes what kind of value the variable represents. There are four variable types, listed in Table 2.1.
Table 2.1: Data types
<table>
<thead>
<tr>
<th>Type</th>
<th>Content</th>
<th>Format</th>
<th>Range</th>
<th>Example</th>
</tr>
</thead>
<tbody>
<tr>
<td>int</td>
<td>Integer</td>
<td>%d</td>
<td>–2147483647 ~ +2147483647</td>
<td>10</td>
</tr>
<tr>
<td>float</td>
<td>Floating number</td>
<td>%f</td>
<td>±2.9387e – 39 ~ ± 1.7014e + 38</td>
<td>3.14</td>
</tr>
<tr>
<td>double</td>
<td>Double precision</td>
<td>%lf</td>
<td>2⁻⁶³ ~ 2⁺⁶³</td>
<td>3.14159265358979</td>
</tr>
<tr>
<td>char</td>
<td>Character</td>
<td>%c</td>
<td>ASCII code</td>
<td>'a'</td>
</tr>
</tbody>
</table>
In Table 2.1, the third column shows the format of each data type which is used in the printf() and scanf() functions.
- int represents an integer value. The range of int depends on the hardware and the version of the compiler. In most modern systems, int represents from -2147483647 to 2147483647.
- float represents a floating number. This will take care of most non-scientific floating numbers (single precision). For scientific and engineering computation, double must be used; a short example follows after this list.
- double is an extension of float. This data type can handle a larger floating number at the expense of the amount of memory used (but not much).
- char represents a single ASCII character. This data type is actually a subset of int in which the range is limited to 0 ~ 255. The character represented by char must be enclosed by a single quotation mark (‘).
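The following short program (an illustrative addition, not taken from the text above) stores the same constant in both a float and a double and prints them; the float copy keeps only about 7 significant digits while the double copy keeps about 15-16. The exact trailing digits depend on the platform.

```c
#include <stdio.h>

int main()
{
    float  f = 3.141592653589793238; /* single precision keeps ~7 significant digits */
    double d = 3.141592653589793238; /* double precision keeps ~15-16 significant digits */

    printf("float : %.18f\n", f); /* digits beyond the 7th are not reliable */
    printf("double: %.18f\n", d); /* many more digits are preserved */
    return 0;
}
```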
2.1.1 CAST OPERATORS
When an operation between variables of different types is performed, the variables of a lower type are automatically converted to the highest type following this order:
\[ \text{int} = \text{char} < \text{float} < \text{double} \]
For example, for \( a \times b \) in which \( a \) is of int type and \( b \) is of float type, then \( a \) is converted to the float type automatically and the result is also of float type. There are times when two variables are both of int type yet the result of the operation is desired to be of float type. For example,
```c
#include <stdio.h>
int main()
{
int a, b;
a=3; b=5;
printf("%f\n", a/b);
return 0;
}
```
The output is
```
$ gcc prog.c
2.c: In function 'main':
2.c:6:10: warning: format '%f' expects argument of type 'double',
but argument 2 has type 'int' [-Wformat=]
printf("%f\n", a/b);
^
$ ./a.out
-0.000000
```
It prints 0 with a warning even though the result is expected to be 0.6. To carry out this operation as intended,\(^1\) a cast operator (an operator that temporarily changes the type of a variable to a specified type) must be used as
```c
#include <stdio.h>
int main()
{
int a, b;
a=3; b=5;
printf("%f\n", (float)a/b);
return 0;
}
```
---
\(^1\)Another way of achieving this is to modify \( a/b \) to \( 1.0 \times a/b \).
The output is
$ gcc prog.c
$ ./a.out
0.600000
The (float)a/b part forces both variables to be of float type and returns 0.6 as expected.
2.1.2 EXAMPLES OF DATA TYPE
1. This program prints a character, “h”.
```c
#include <stdio.h>
int main()
{
char a='h';
printf("%c\n", a);
return 0;
}
```
Note that the variable, a, is declared as char and initialized as “h” on the same line.
2. This program prints an integer 10.
```c
#include <stdio.h>
int main()
{
int a=10;
printf("%d\n", a);
return 0;
}
```
Note that the variable, `a`, is declared as `int` and initialized as 10 on the same line.
3. This program prints a floating number 10.5.
```c
/* Print a floating number */
#include <stdio.h>
int main()
{
float a=10.5;
printf("%f\n",a);
return 0;
}
```
Note that the variable, `a`, is declared as `float` and initialized as 10.5 on the same line.
4. This program prints two floating numbers, 10.0 and -2.3.
```c
/* Print floating numbers */
#include <stdio.h>
int main()
{
float a, b=9.0, c;
a=10.0; c=-2.3;
printf("a = %f\n",a);
printf("c = %f\n",c);
return 0;
}
```
2.2 INPUT/OUTPUT
Almost all C programs have at least one output statement. Otherwise, the program won't output anything on the screen and there is no way of knowing whether the program ran successfully or not. The most common input/output functions are `printf()` and `scanf()`, both of which are defined in the header file `stdio.h`.
Use `printf()` (Print with Format) for outputting data to the console and `scanf()` (Scan with Format) for inputting data from the keyboard.
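As a minimal sketch of the two functions working together (an illustrative addition; the prompt text is arbitrary), the following program reads one floating number with scanf() and echoes it back with printf(). Note that scanf() requires the address of the variable, &x, a point that the discussion of pointers revisits.

```c
#include <stdio.h>

int main()
{
    double x;

    printf("Enter a number = ");
    scanf("%lf", &x);               /* %lf reads a double; &x passes its address */
    printf("You entered %f.\n", x); /* %f prints the value */
    return 0;
}
```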
|
{"Source-Url": "http://www.morganclaypoolpublishers.com/catalog_Orig/samples/9781681733128_sample.pdf", "len_cl100k_base": 9329, "olmocr-version": "0.1.50", "pdf-total-pages": 34, "total-fallback-pages": 0, "total-input-tokens": 75903, "total-output-tokens": 10779, "length": "2e13", "weborganizer": {"__label__adult": 0.0004954338073730469, "__label__art_design": 0.000583648681640625, "__label__crime_law": 0.0002694129943847656, "__label__education_jobs": 0.005741119384765625, "__label__entertainment": 0.00014495849609375, "__label__fashion_beauty": 0.00024271011352539065, "__label__finance_business": 0.00031256675720214844, "__label__food_dining": 0.0007915496826171875, "__label__games": 0.0010385513305664062, "__label__hardware": 0.0020732879638671875, "__label__health": 0.00066375732421875, "__label__history": 0.0003490447998046875, "__label__home_hobbies": 0.0002114772796630859, "__label__industrial": 0.0008745193481445312, "__label__literature": 0.00064849853515625, "__label__politics": 0.00029778480529785156, "__label__religion": 0.0007762908935546875, "__label__science_tech": 0.0694580078125, "__label__social_life": 0.0001728534698486328, "__label__software": 0.00560760498046875, "__label__software_dev": 0.90771484375, "__label__sports_fitness": 0.0004525184631347656, "__label__transportation": 0.00089263916015625, "__label__travel": 0.0002236366271972656}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 37508, 0.05619]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 37508, 0.64675]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 37508, 0.83639]], "google_gemma-3-12b-it_contains_pii": [[0, 53, false], [53, 996, null], [996, 1439, null], [1439, 1439, null], [1439, 1439, null], [1439, 2172, null], [2172, 4760, null], [4760, 7298, null], [7298, 9084, null], [9084, 9855, null], [9855, 12761, null], [12761, 13838, null], [13838, 14257, null], [14257, 14257, null], [14257, 14295, null], [14295, 14295, null], [14295, 14940, null], [14940, 14940, null], [14940, 17548, null], [17548, 18124, null], [18124, 19060, null], [19060, 19934, null], [19934, 21390, null], [21390, 22888, null], [22888, 24842, null], [24842, 27466, null], [27466, 28832, null], [28832, 31326, null], [31326, 32374, null], [32374, 32499, null], [32499, 34349, null], [34349, 35797, null], [35797, 36388, null], [36388, 37508, null]], "google_gemma-3-12b-it_is_public_document": [[0, 53, true], [53, 996, null], [996, 1439, null], [1439, 1439, null], [1439, 1439, null], [1439, 2172, null], [2172, 4760, null], [4760, 7298, null], [7298, 9084, null], [9084, 9855, null], [9855, 12761, null], [12761, 13838, null], [13838, 14257, null], [14257, 14257, null], [14257, 14295, null], [14295, 14295, null], [14295, 14940, null], [14940, 14940, null], [14940, 17548, null], [17548, 18124, null], [18124, 19060, null], [19060, 19934, null], [19934, 21390, null], [21390, 22888, null], [22888, 24842, null], [24842, 27466, null], [27466, 28832, null], [28832, 31326, null], [31326, 32374, null], [32374, 32499, null], [32499, 34349, null], [34349, 35797, null], [35797, 36388, null], [36388, 37508, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 37508, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 37508, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 37508, null]], 
"google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 37508, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 37508, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 37508, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 37508, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 37508, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 37508, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, true], [5000, 37508, null]], "pdf_page_numbers": [[0, 53, 1], [53, 996, 2], [996, 1439, 3], [1439, 1439, 4], [1439, 1439, 5], [1439, 2172, 6], [2172, 4760, 7], [4760, 7298, 8], [7298, 9084, 9], [9084, 9855, 10], [9855, 12761, 11], [12761, 13838, 12], [13838, 14257, 13], [14257, 14257, 14], [14257, 14295, 15], [14295, 14295, 16], [14295, 14940, 17], [14940, 14940, 18], [14940, 17548, 19], [17548, 18124, 20], [18124, 19060, 21], [19060, 19934, 22], [19934, 21390, 23], [21390, 22888, 24], [22888, 24842, 25], [24842, 27466, 26], [27466, 28832, 27], [28832, 31326, 28], [31326, 32374, 29], [32374, 32499, 30], [32499, 34349, 31], [34349, 35797, 32], [35797, 36388, 33], [36388, 37508, 34]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 37508, 0.11262]]}
|
olmocr_science_pdfs
|
2024-11-30
|
2024-11-30
|
5fa6451d220c3b3421b54cac42e5987fc38cd80e
|
PTUNE: A TOOL FOR ONLINE AUTOTUNING WITH OPTIMIZING COMPILERS
THESIS
Presented to the Graduate Council of Texas State University-San Marcos in Partial Fulfillment of the Requirements for the Degree
MASTER OF SCIENCE
by
SANTOSH R. SARANGKAR
San Marcos, Texas December 2011
PTUNE: A TOOL FOR ONLINE AUTOTUNING WITH OPTIMIZING COMPILERS
Committee Members Approved:
________________________
Apan Qasem, Chair
________________________
Carol Hazlewood
________________________
Yijuan Lu
Approved:
________________________
J. Michael Willoughby
Dean of the Graduate College
FAIR USE AND AUTHOR’S PERMISSION STATEMENT
Fair Use
This work is protected by the Copyright Laws of the United States (Public Law 94-553, section 107). Consistent with fair use as defined in the Copyright Laws, brief quotations from this material are allowed with proper acknowledgment. Use of this material for financial gain without the author’s express written permission is not allowed.
Duplication Permission
(Choose one of the two below and type only it on the page)
As the copyright holder of this work I, Santosh Sarangkar, authorize duplication of this work, in whole or in part, for educational or scholarly purposes only.
OR
As the copyright holder of this work I, Santosh Sarangkar, refuse permission to copy in excess of the “Fair Use” exemption without my written permission.
ACKNOWLEDGEMENTS
It is with enormous gratitude that I acknowledge the support and help from Dr. Apan Qasem, who had been my advisor in my thesis, and academics. Especially the support and help I experienced during my master’s degree was extraordinary. I am so fortunate to have him as my thesis advisor. Also, I would like to thank him for always being available to help. It would have been next to impossible to write this thesis without his timely help and guidance.
Also, I want to thank my committee members Dr. Yijuan Lu and Dr. Carol Hazlewood, for their valuable suggestions. This thesis is dedicated to my parents Raosaheb and Chanchal, brother Devidas, sister Mangal, and sister-in-law Namrata who have given me the opportunity of education from the best institutions and support throughout my life. Lastly, I offer my honors and blessings to all of those who supported me in any respect during the completion of my master’s degree.
This manuscript was submitted on August 1, 2011.
# TABLE OF CONTENTS
1: INTRODUCTION ........................................................................ 1
2: RELATED WORK ........................................................................ 6
2.1 Autotuned Libraries ................................................................. 6
2.2 Search-based Tuning of Applications ........................................ 7
2.3 Tunable Transformations ......................................................... 10
2.4 Component Tools ................................................................... 11
3: TUNING FRAMEWORK ................................................................ 14
3.1 Overview ............................................................................... 14
3.2 Search Space Specification ...................................................... 15
3.3 Exposing Optimization Flags in the GNU Compiler Collection (GCC) .................................................. 17
3.4 Search Algorithms .................................................................. 20
3.4.1 Genetic Search .................................................................... 21
3.4.2 Random Search ................................................................... 24
3.4.3 Direct Search ...................................................................... 25
3.4.4 Simulated Annealing ............................................................ 27
4: EXPERIMENTAL RESULTS ........................................................... 28
4.1 Experimental Setup ................................................................. 28
4.2 Overall Performance Improvement ........................................... 29
4.3 Search Space Exploration ........................................................ 31
5: CONCLUSION ........................................................................... 35
REFERENCES ............................................................................... 36
**LIST OF TABLES**
<table>
<thead>
<tr>
<th>Table</th>
<th>Page</th>
</tr>
</thead>
<tbody>
<tr>
<td>1: gcc Compiler Optimizations</td>
<td>18</td>
</tr>
<tr>
<td>2: Benchmark Applications and their Descriptions</td>
<td>29</td>
</tr>
<tr>
<td>3: Platform Specifications</td>
<td>29</td>
</tr>
</tbody>
</table>
# LIST OF FIGURES
<table>
<thead>
<tr>
<th>Figures</th>
<th>Page</th>
</tr>
</thead>
<tbody>
<tr>
<td>1: PTUNE Framework</td>
<td>14</td>
</tr>
<tr>
<td>2: Example configuration file for PTUNE</td>
<td>16</td>
</tr>
<tr>
<td>3: Specifying Flags</td>
<td>20</td>
</tr>
<tr>
<td>4: Randomly Generated Population</td>
<td>23</td>
</tr>
<tr>
<td>5: Generating New Population</td>
<td>24</td>
</tr>
<tr>
<td>6: Performance Improvement over the baseline</td>
<td>30</td>
</tr>
<tr>
<td>7: automotive_bitcount: Random Vs. Genetic</td>
<td>32</td>
</tr>
<tr>
<td>8: Search Space Exploration bzip2e: Random Vs. Genetic</td>
<td>32</td>
</tr>
<tr>
<td>9: Network_dijkstra: Random Vs. Genetic</td>
<td>33</td>
</tr>
<tr>
<td>10: Security_blowfish_d: Random Vs. Genetic</td>
<td>33</td>
</tr>
<tr>
<td>11: telecom_CRC: Random Vs. Genetic</td>
<td>34</td>
</tr>
<tr>
<td>12: telecom_gsm: Random Vs. Genetic</td>
<td>34</td>
</tr>
</tbody>
</table>
CHAPTER 1: INTRODUCTION
Over the last several decades we have witnessed tremendous change in the landscape of computer architecture. New architectures have emerged at a rapid pace with greater computing capabilities that have often exceeded our expectations. However, the rapid rate of architectural innovations has also been a source of major concern for the high-performance computing community. Each new architecture or even a new model of a given architecture has brought with it new features that have added to the complexity of the target platform. As a result, it has become increasingly difficult to exploit the full potential of modern architectures for complex scientific applications. It often requires many person-months to tailor large applications to microprocessor-based parallel systems to achieve a high fraction of peak performance. Manual tuning of applications not only creates problems with code maintainability and verification but also decreases programmer productivity.
The emergence of multicore processors has greatly amplified this problem. Although chip multiprocessors have dramatically increased the performance potential of computer systems, much of the responsibility of harnessing this potential falls on software. In particular, software needs to play a bigger role in discovering hidden parallelism, automatically transforming code for parallel execution and, perhaps most importantly, exploiting parallelism and data locality at multiple levels. Hence, it is likely that in the future even more time and effort will need to be devoted to manual performance tuning. To address this problem, several research groups have proposed methods and techniques for automatic performance tuning [3,12,16,17,19,30,31,38,41,44,49,51]. In an autotuning framework, a code is analyzed, alternate code variants are generated with tuning parameters and then a heuristic search is performed based on execution time feedback to obtain an implementation that yields optimal or near-optimal performance for the target platform. All of these tasks are performed with minimal to no human intervention. Autotuned libraries for specific domains, such as ATLAS [51] for linear algebra and SPIRAL [41] and FFTW [19] for signal processing, have been quite successful. Several other research efforts are under way that aim to apply the ATLAS-model to general scientific codes [12,16]. Although many of these research endeavors hold great promise, for autotuning to gain widespread acceptance in the scientific computing community, several key challenges need to be overcome.
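The analyze/generate/run/measure loop described above can be pictured with a short sketch. This is illustrative only and is not PTUNE: the benchmark file name bench.c, the candidate flag sets, and the use of system() with POSIX wall-clock timing are all assumptions made for the example.

```c
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Illustrative autotuning loop: build a few variants of a hypothetical
   bench.c with different flag sets, time each run, and keep the fastest. */
int main(void)
{
    const char *variants[] = { "-O1", "-O2", "-O3", "-O3 -funroll-loops" };
    int best = -1;
    double best_time = 1e30;

    for (int i = 0; i < 4; i++) {
        char cmd[256];
        snprintf(cmd, sizeof cmd, "gcc %s -o bench bench.c", variants[i]);
        if (system(cmd) != 0)      /* generate the code variant */
            continue;

        struct timespec t0, t1;
        clock_gettime(CLOCK_MONOTONIC, &t0);
        if (system("./bench > /dev/null") != 0)  /* execution-time feedback */
            continue;
        clock_gettime(CLOCK_MONOTONIC, &t1);

        double elapsed = (t1.tv_sec - t0.tv_sec) + (t1.tv_nsec - t0.tv_nsec) / 1e9;
        if (elapsed < best_time) { best_time = elapsed; best = i; }
    }

    if (best >= 0)
        printf("best flags: %s (%.3f s)\n", variants[best], best_time);
    return 0;
}
```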
A main impediment to practical and efficient automatic tuning is the enormous search space of transformation sequences and parameters containing billions of feasible points. Over the years compiler writers have developed a rich array of program transformations, which restructure programs in complex ways. There is strong interaction between many of these transformations and most of them are sensitive to underlying hardware parameters. Some transformations are also sensitive to the input data set of an application. All of these factors combine to give rise to a large and complex multi-dimensional search space. Hence, for autotuning to be more practical, we need a strategy that can both cut down the search space to manageable proportions and apply advanced search heuristics to navigate that space. Another limitation of most autotuning frameworks is that the search module is tightly coupled with the transformation engine within the framework. Thus, exploiting these tools for automatic search space generation and exploration becomes problematic.
This thesis addresses these two challenges and describes the design and implementation of PTUNE, a heuristic search-driven autotuning framework. PTUNE facilitates navigation of the optimization search space with greater flexibility and efficiency through a number of novel features. These features are highlighted below:
(1) Use of Genetic Algorithms: PTUNE implements a Genetic Algorithm to search the space of optimization phase-orderings. Prior to this work, genetic algorithms had only been used to explore a subspace of the optimization search space. We provide an intuitive way to represent optimization sequences as chromosomes in Genetic Algorithms and propose new strategies for mutation and crossover.
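A minimal sketch of the chromosome idea is shown below. It is not PTUNE's implementation: the number of genes, the size of the flag menu, and the mutation rate are invented for illustration. Each gene simply selects which optimization fills one slot of the sequence, and crossover and mutation operate directly on that array.

```c
#include <stdio.h>
#include <stdlib.h>

#define NUM_GENES 16  /* assumed length of an optimization sequence */
#define NUM_FLAGS 32  /* assumed size of the optimization-flag menu */

/* A chromosome: gene i selects which optimization fills slot i of the sequence. */
typedef struct { int gene[NUM_GENES]; } chromosome;

/* One-point crossover: prefix from parent a, suffix from parent b. */
chromosome crossover(const chromosome *a, const chromosome *b)
{
    chromosome child;
    int cut = rand() % NUM_GENES;
    for (int i = 0; i < NUM_GENES; i++)
        child.gene[i] = (i < cut) ? a->gene[i] : b->gene[i];
    return child;
}

/* Mutation: with a small probability, replace a gene by a random flag index. */
void mutate(chromosome *c, double rate)
{
    for (int i = 0; i < NUM_GENES; i++)
        if ((double)rand() / RAND_MAX < rate)
            c->gene[i] = rand() % NUM_FLAGS;
}

int main(void)
{
    chromosome p1, p2;
    for (int i = 0; i < NUM_GENES; i++) {
        p1.gene[i] = rand() % NUM_FLAGS;
        p2.gene[i] = rand() % NUM_FLAGS;
    }
    chromosome child = crossover(&p1, &p2);
    mutate(&child, 0.05);
    for (int i = 0; i < NUM_GENES; i++)
        printf("%d ", child.gene[i]);  /* candidate sequence handed to the compiler driver */
    printf("\n");
    return 0;
}
```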
(2) Enhanced Feedback: The quality of feedback plays a key role in the success of any automatic tuning system. Focusing on one particular performance metric provides a myopic view of application performance and generally does not lead to effective code optimization. To be able to tailor codes to complex systems, the application developer needs to identify the causes behind the performance problems. Looking at a variety of performance metrics to identify performance bottlenecks is common practice in the world of manual tuning. However, in the context of automatic tuning, its application is almost non-existent. Consequently, applying even the most intelligent search methods generally does not lead to the best program variant in any reasonable amount of time. Another issue with feedback in the context of autotuning is granularity. Most search-based systems use whole program execution time as the only feedback metric. Collecting performance measurements at the program level is usually sufficient when tackling small kernels, where one loop nest dominates the entire execution. However, for larger applications in which execution time is distributed over several loop nests, whole program granularity is no longer sufficient. This is particularly true when dealing with loop transformations. Loop transformations such as tiling, if applied to multiple loop nests within a program, can have widely varying effects on each of those nests. Thus, to accurately determine the effects of changing loop transformation parameters, the search algorithms require feedback at a finer granularity. Relying on whole program feedback generally leads to longer search times.
The search engine in PTUNE collects fine-grain feedback, through HPCToolkit [62] and PAPI [63], at procedure-, loop- and statement-levels. These feedback metrics are exploited in parallel search algorithms that explore multiple code regions concurrently, thus reducing overall tuning time. Additionally, a variety of performance metrics is used as a guiding heuristic to speed up steepest descent search methods.
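The sketch below shows what counter-based, per-region feedback looks like with PAPI's event-set API; the two chosen events, the measured loop nest, and the omission of error handling are choices made for this illustration, and the code is not taken from PTUNE (link with -lpapi).

```c
#include <stdio.h>
#include <papi.h>

#define N 1024
static double a[N][N], b[N][N];

int main(void)
{
    int evset = PAPI_NULL;
    long long counts[2];

    PAPI_library_init(PAPI_VER_CURRENT);
    PAPI_create_eventset(&evset);
    PAPI_add_event(evset, PAPI_TOT_CYC); /* total cycles */
    PAPI_add_event(evset, PAPI_L2_TCM);  /* level-2 cache misses */

    PAPI_start(evset);
    /* The code region being measured: one loop nest of interest. */
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++)
            a[i][j] = 2.0 * b[j][i];
    PAPI_stop(evset, counts);

    printf("cycles = %lld, L2 misses = %lld\n", counts[0], counts[1]);
    return 0;
}
```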
(3) Tuning of multi-threaded kernels: By integrating OpenMP and MPI flags in the search space, PTUNE provides the ability to tune multi-threaded kernels. Given the growing trend of placing an increasing number of cores per chip, the ability to tune multi-threaded kernels is of paramount importance. To our knowledge, no other framework explicitly provides this level of control with GCC.
(4) Tool Sharing: PTUNE is designed to work as an independent search engine and provides a search API that can be used by other autotuning frameworks. This modular design of the search engine facilitates greater tool integration within the autotuning community. In particular, PTUNE can be easily integrated into PERI and MILEPOST GCC, two of the most prominent autotuning frameworks. On the other hand, PTUNE leverages the complementary strengths of several open-source software tools, including LoopTool and HPCToolkit, thus increasing their applicability.
CHAPTER 2: RELATED WORK
We divide the discussion of related work into four parts. The first section reviews related work in autotuned libraries, the second section discusses more general autotuning frameworks, the third section provides a review of code transformations commonly used in autotuning and finally, the fourth section briefly discusses some of the tuning tools that can be integrated with PTUNE.
2.1. Autotuned Libraries
A number of successful empirical tuning systems provide efficient library implementations for important scientific domains, such as those for dense and sparse linear algebra \([7,17,51]\), signal processing \([19,41]\) and tensor contraction \([6]\). Among these, ATLAS \([51]\) is the most widely used within the scientific community and has become the *de facto* standard for evaluating other autotuning systems. The ATLAS-model has even found its way into commercial compilers in the form of the Math Kernel Library (MKL) distributed with the Intel compiler suite \([25]\). ATLAS produces highly optimized linear algebra routines by probing the underlying hardware for platform specific information and using a global search to find the best transformation parameters, searching for these parameters one transformation at a time. The transformations considered by ATLAS include multi-level tiling, unroll-and-jam and pipeline scheduling.
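The "one parameter at a time" style of search can be pictured with the sketch below; the tile-size candidates and the dummy timing function are invented for illustration and do not come from ATLAS or from this thesis.

```c
#include <stdio.h>

/* Stand-in for "compile a kernel variant with this tile size and time it".
   A real tuner would rebuild and run the kernel; here it is a dummy cost model. */
static double measure_kernel(int tile)
{
    double miss = (tile < 32) ? (32.0 / tile) : (tile / 32.0); /* fake cost curve */
    return 1.0 + 0.1 * miss;
}

int main(void)
{
    const int candidates[] = { 8, 16, 32, 64, 128, 256 };
    int best_tile = candidates[0];
    double best_time = measure_kernel(best_tile);

    /* Search over one parameter while the other transformations stay fixed. */
    for (int i = 1; i < 6; i++) {
        double t = measure_kernel(candidates[i]);
        if (t < best_time) { best_time = t; best_tile = candidates[i]; }
    }
    printf("chosen tile size = %d (%.3f s)\n", best_tile, best_time);
    return 0;
}
```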
Unlike ATLAS, the SPIRAL [41] and FLAME [7] projects have looked at the problem at a higher level and concentrated more on the issue of algorithmic choice rather than exploring options of alternate implementations of the same algorithm. In the SPIRAL framework, signal-transform routines are generated from mathematical formulas expressed in a special purpose language [55], and a suitable implementation is chosen based on matrix factorization calculations and a simple sequential search. More recently, the Peta Bricks project has adopted the SPIRAL and FLAME approach for general algorithmic tuning [4].
2.2. Search-based Tuning of Applications
The success of automatically tuned domain-specific libraries has sparked considerable interest in applying search-based methods for tuning general applications. Research efforts in whole-application tuning can be broadly classified into two categories based on the parameter search space on which they operate. Several ongoing research projects tackle the phase-ordering problem using empirical methods [3,31,37,38,49]. That is, they aim to find the best sequence of transformations that minimizes some objective function such as execution time or power. On the other hand, some of the work in autotuning concentrates on finding the best parameter values for transformations that use numerical parameters [12,16,30,44]. More recent efforts strive to combine the two methods to provide a more unified solution that involves compile-time tuning with source-to-source transformations and runtime tuning and optimization [5]. Our proposed methods are aligned with this integrated approach to autotuning, since the enhancements to the feedback mechanism can be utilized for both offline and online search. Earlier work in
autotuning mainly focused on finding good search strategies or modifying existing ones to reduce tuning times. Genetic algorithms [3,31], stochastic hill climbers [3] and greedy constructive algorithms [3] have been used to explore the search space of optimization sequences.
In terms of the search space of numerical parameters, there has been work in applying direct search methods [44,45], simulated annealing [20,28,42,45], pyramid search [29,42], window search [28,42], binary search [12] and random search [28,42]. However, none of the search strategies proved particularly effective in the context of autotuning, in most cases yielding at most a 5% improvement over random search [42,59]. The limited success of search algorithms led to research in model-based tuning, where some form of analytical modeling or guidance is used to prune the search space, guide the search heuristic, or reduce the time spent in program evaluation during tuning. The issue of model-guided tuning has been approached from several different angles. Most notable among these is the use of compiler-based analytical models in limiting the search space [13,31,44,48,58]. Chen et al. showed that analytical models can significantly cut down the search space for a set of transformations including tiling, loop interchange and unroll-and-jam [12]. Qasem and Kennedy have used models for pruning the combined search space of loop fusion and tiling [44]. The Active Harmony project focuses on runtime optimizations and uses analytical models to establish an ordering of transformations rather than reducing the size of the search space [23]. The OSE compiler uses static heuristics for generating a pruned search space for optimization sequences [48].
Kulkarni et al. use techniques such as detecting redundant sequences and identifying equivalent code to cut down the number of program evaluations [31]. Apart from compiler models, machine learning techniques have been applied to tune unroll factors [47] and also to select the best optimization set (without re-ordering) [1]. There has been some work in using statistical models to explore the search space of optimization parameters. Vuduc et al. establish early stopping criteria to eliminate less promising search space regions on-the-fly [50]. Pinkers et al. use a statistical method based on orthogonal arrays to choose the optimal sequence of transformations [40]. To avoid running the target program in every search step and to save tuning time, some autotuning systems employ static performance estimators to predict the best program variant. This strategy has proven effective in cutting down tuning time for the OSE compiler [48] and the PEAK tuning system [39]. However, as one would expect, using static estimators inevitably leads to some sacrifice in performance. More recent work advocates using checkpointing to terminate the execution of long-running loops to save tuning time [5].
Although none of the above-mentioned model-guided strategies proved to be the holy grail for autotuning, it is generally agreed that some form of modeling or guidance is required to realize the long-term vision of autotuning. Our framework does not aim to subsume any of the approaches mentioned above; rather, it complements most of these strategies. The use of enhanced feedback in the search algorithm can be used in conjunction with existing pruning strategies to speed up the overall tuning process. Moreover, detailed feedback methods can also be used to validate or verify static estimators, and statistical or analytical models.
2.3. Tunable Transformations
In a world of massively-parallel applications and high availability of computational resources, data locality optimizations still play a crucial role in improving overall application performance. Not surprisingly, the literature on memory hierarchy transformations is vast and spans more than three decades. Here, we briefly discuss the work that is most relevant to the set of transformations supported by our framework. Most data locality optimizations aim to improve the memory hierarchy performance by concentrating on the dominant loop nests in an application. Improved memory performance is achieved either by restructuring loop nests to improve temporal and spatial locality or by modifying the data layout in memory to avoid conflicts at different levels of the hierarchy.
LoopTool supports a wide array of loop nest and data layout transformations. Among the transformations currently supported are loop fusion [26,27,32], tiling [10,11,35,54], unroll-and-jam [2], loop unrolling, array contraction, iteration space splicing [43] and scalar replacement [8]. In addition, LoopTool also supports several enabling transformations such as loop alignment and targeted code motion. For fusion, tiling and unroll-and-jam, LoopTool also provides architecture-aware heuristics for selecting the best tile sizes, unroll factors and fusion configurations in an integrated fashion [42]. Although the literature is replete with heuristics for selecting tile sizes and choosing unroll factors [9,10,11,14,18,35,54], attempts at integrating all these transformations have been less common [43,46,53]. Song et al. [46] present a model that combines loop fusion, loop alignment and array contraction. In their model, the primary goal is reducing
bandwidth requirements by reducing the size of arrays. Although they apply conditions to check for excessive register pressure and cache capacity, they do not address the issue of conflict misses.
Wolf et al. [53] describe a strategy that combines loop distribution, loop fusion, tiling and unrolling. Although they consider a larger class of transformations, their model does not capture all of the interactions between loop fusion and tiling. In their model, the tiling decisions are made after the optimal loop structure has been determined through fusion and distribution. Thus, LoopTool's ability to apply transformations in an integrated fashion is one of its most useful features.
2.4. Component Tools
PTUNE utilizes the complementary strengths of several open-source tools. Each tool can play an important role in the overall effectiveness of our tuning strategy. As background to our proposed work, we provide a description of each tool summarizing its key features.
**HPCToolkit:** This tool suite consists of components for measuring the performance of fully-optimized executables generated by vendor compilers, analyzing application binaries to correlate measurements with program structure, and novel analysis techniques for pinpointing performance bottlenecks in parallel programs [34].
HPCToolkit uses hardware performance counters to measure a variety of performance metrics including the number of cache misses at different levels, TLB misses and the number of stalled cycles. This tool suite is also capable of providing synthetic metrics such as bandwidth consumption. Another key feature of HPCToolkit is its ability to collect performance measurements at a fine granularity. HPCToolkit provides file-, subroutine-, loop- and statement-level performance metrics, which makes it a very useful tool for both manual and automatic tuning of applications. HPCToolkit has been used for performance analysis and manual tuning of several large-scale scientific applications [33].
**LoopTool:** LoopTool is a source-to-source transformation tool that assists expert human programmers by transforming Fortran loop nests for high performance [43]. It enables application developers to apply a complex set of well-known loop transformations to improve data reuse at various levels of the memory hierarchy. The transformations supported by LoopTool include tiling, unroll-and-jam, multi-level loop fusion, array contraction, and iteration space splicing.
The key feature of LoopTool that makes it suitable for use in an empirical tuning system is its ability to provide fine-grain control over transformation parameters through the use of source-level directives. This feature is extremely important for our research, since in order to exploit fine-grain feedback we need a transformation tool that exposes control at a fine level. This level of fine-grain control over transformations is usually not available in commercial compilers and many of the source code restructuring tools used in autotuning systems. For example, MIPSPro allows a user-specified tile size, but
applies it to every loop nest in the compilation unit. Loop-level optimization parameters cannot be specified at the command-line in any useful way. To specify an unroll factor for a particular loop at the command-line, the user would need to specify the index of the loop in lexical order and also its nesting depth. Specifying unique parameters for multiple optimizations and multiple loops would require the user to input a long complicated string that the compiler would then need to parse. Thus, the use of source directives in LoopTool provides a novel and useful way of specifying optimization parameters at loop-level granularity.
CHAPTER 3: TUNING FRAMEWORK
3.1 Overview
Fig. 1 provides an overview of the PTUNE framework and shows its interconnections with other tuning tools. The major components of the framework include a source-to-source transformer (LoopTool), a set of performance measurement tools (HPCToolkit),
and the search module that uses the measurements to guide selection of program transformations. At each step in the tuning process, the search module generates a set of optimization parameters that are applied to the input program by LoopTool. The program is then compiled using the native compiler and run on the target machine. During program execution, the performance measurement tools collect a variety of performance measurements to feed to the search module. The search module uses these metrics in combination with results from previous passes to generate the next set of tuning parameters. This process continues until some pre-specified optimization time limit is reached or the search algorithm converges to a local minimum. Although the structure of our autotuning framework is not dramatically different from that of other systems, there are several key ideas that make our framework unique. Unlike most other automatic tuning systems, our framework uses a full-scale dependence-based transformation tool, which enables us to verify the legality of complex loop transformations. Another unique feature of our system is the use of loop-level performance measurements and the application of transformations at loop-level granularity. In addition, the search module can operate on both the search space of parameterized transformations and the search space of transformation parameters. The rest of this section discusses the core components of our framework in some detail.
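The overall control flow just described can be summarized in a short sketch. The following Python outline is illustrative only, not the actual PTUNE implementation; the callables next_parameters, apply_and_run, and converged are hypothetical stand-ins for the search module, the LoopTool/compile/run pipeline, and the convergence test.

```
import time

def tune(next_parameters, apply_and_run, converged, time_limit_s=3600, max_evals=100):
    """Sketch of a PTUNE-style tuning loop (hypothetical helper names).

    next_parameters(history) -> candidate transformation parameters
    apply_and_run(params)    -> measured execution time (transform + compile + run stand-in)
    converged(history)       -> True when the search has reached a local minimum
    """
    history = []                      # (params, exec_time) pairs from previous passes
    best = (None, float("inf"))
    start = time.time()
    for _ in range(max_evals):
        if time.time() - start > time_limit_s or converged(history):
            break
        params = next_parameters(history)
        exec_time = apply_and_run(params)
        history.append((params, exec_time))
        if exec_time < best[1]:
            best = (params, exec_time)
    return best
```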
3.2 Search Space Specification
Input to PTUNE is a configuration file that describes the search space of optimization parameters. Fig. 2 shows an example configuration file. The syntax for describing a search space is fairly simple. Each line in the configuration file describes one search
dimension. A dimension can be one of three types: range (R), permutation (P) or enumerated (E). Range is used to specify numeric transformation parameters such as tile sizes and unroll factors. Permutation specifies a transformation sequence and is useful when searching for the best phase sequence. An enumerated type is a special case of the range type. It can be used to describe a dimension where only a subset of points is feasible within a given range. An example of an enumerated type is the prefetch distance in software prefetching. In addition, PTUNE supports inter-dimensional constraints for all three dimension types. For example, if the unroll factor of an inner loop needs to be smaller than the tile size of an outer loop then this constraint is specified using a simple inequality within the configuration file.
```
100 # maximum number of program evaluations
1 # number of dimensions in the search space
R 1 10 # range : 1 .. 10
P 4 # permutation : sequence length 4
E 2 8 10 # enumerated : two possible values, 8 and 10
```
**Figure 2: Example configuration file for PTUNE**
For some optimizations, not all integer values within a specified range are legal. For example, the prefetch distance for software prefetching might be specified in multiples of the cache line size within a certain range. An enumerated type may be used
to describe this search dimension. The user may also wish to provide additional information within the configuration file that may be useful to the search engine. One piece of information that can be supplied in the configuration file is an inter-dimensional constraint. For example, if the unroll factor of an inner loop needs to always be smaller than the tile size of an outer loop, then this constraint is specified using a simple inequality within the configuration file. Information specific to a search algorithm is specified elsewhere; for example, for simulated annealing the alpha and beta factors for each dimension are specified in a separate file. The parameters for the search algorithm have been deliberately kept separate to make the search space representation more general. Both the configuration file and the search parameter file can be written by hand or automatically generated by a transformation engine. This feature facilitates the use of PTUNE with model-based search strategies.
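To make the file format concrete, the following sketch parses a configuration file laid out as in Fig. 2. It is a minimal illustration under the simplifying assumptions that each dimension occupies one line and '#' starts a comment; the real PTUNE parser (and its handling of inter-dimensional constraints) may differ.

```
def parse_config(path):
    """Parse a PTUNE-style search space description (simplified Fig. 2 layout)."""
    with open(path) as f:
        lines = [ln.split("#")[0].strip() for ln in f]   # drop comments
        lines = [ln for ln in lines if ln]                # drop blank lines
    max_evals = int(lines[0])
    n_dims = int(lines[1])
    dims = []
    for ln in lines[2:2 + n_dims]:
        fields = ln.split()
        kind = fields[0]
        if kind == "R":                                   # range: low .. high
            dims.append(("range", int(fields[1]), int(fields[2])))
        elif kind == "P":                                 # permutation of a given length
            dims.append(("permutation", int(fields[1])))
        elif kind == "E":                                 # enumerated set of feasible values
            count = int(fields[1])
            dims.append(("enumerated", [int(v) for v in fields[2:2 + count]]))
    return max_evals, dims
```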
3.3 Exposing Optimization Flags in the GNU Compiler Collection (GCC)
A main contribution of this thesis is the development of a software interface that allows us to utilize the GNU compilers within an autotuning framework. GCC supports over a hundred different code optimizations and this number is growing with each new release. Because the optimization set is large and certain optimization flags are subsumed by others (e.g., enabling optimization flag i might enable optimizations i, j and k), leveraging the code transformation framework within GCC for autotuning has been challenging. For instance, in GCC 4.1.2, both -O and -O1, although listed as separate flags, perform exactly the same set of optimizations. Thus, inclusion of both -O and -O1 flags in the search space leads to an unnecessary increase in search space dimensionality and can
potentially skew the search results. On the other hand, applying the -O3 flag enables a wide range of loop transformations in concert, making it difficult to evaluate the effects of individual transformations. To address this problem, we conducted a systematic study of the compiler optimizations in GCC, which enabled us to isolate the command-line flag for each optimization and also to discard flags that are ineffective or subsumed by others. Table 1 lists the set of specific optimization flags that were selected from this study.
To allow tuning of these optimizations, we developed an interface that takes as input a bit-stream representing an optimization sequence and maps each bit in the stream to the corresponding optimization flag in GCC. All optimization flags are then concatenated and output as a command-line string that can be directly embedded into a Makefile. Fig. 3 shows an example of how a bit-stream is translated into a set of optimization flags in GCC.
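A minimal sketch of this bit-stream-to-flags mapping is given below. The list shown holds only a small subset of the Table 1 flags, in bit order, and the function name is illustrative rather than the actual interface.

```
# Illustrative subset of the Table 1 flags, indexed in bit-stream order.
GCC_FLAGS = ["-fdefer-pop", "-fguess-branch-probability", "-fcprop-registers",
             "-floop-optimize", "-fif-conversion", "-fif-conversion2"]

def bits_to_command(bits, source="foo.c"):
    """Map a 0/1 sequence to a GCC command line: bit i set => flag i enabled."""
    enabled = [flag for bit, flag in zip(bits, GCC_FLAGS) if bit == 1]
    return "gcc -O0 -c " + " ".join(enabled) + " " + source

# Example: enable flags 2, 4 and 5 of the subset above.
print(bits_to_command([0, 1, 0, 1, 1, 0]))
```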
Since our approach only uses the command-line interface and is independent of the underlying GCC implementation, it provides a flexible and extensible method of interfacing GCC not just with our tuning framework but other autotuning systems as well. In particular, it can be easily integrated into the PERI autotuning system [64].
**Table 1: GCC compiler optimizations**

| No. | Optimization Flag | No. | Optimization Flag |
|-----|-------------------|-----|-------------------|
| 1 | -fdefer-pop | 27 | -fgcse-lm |
| 2 | -fguess-branch-probability | 28 | -fexpensive-optimizations |
| 3 | -fcprop-registers | 29 | -fstrength-reduce |
| 4 | -floop-optimize | 30 | -frerun-cse-after-loop |
| 5 | -fif-conversion | 31 | -frerun-loop-opt |
| 6 | -fif-conversion2 | 32 | -fcaller-saves |
| 7 | -ftree-ccp | 33 | -fpeephole2 |
| 8 | -ftree-dce | 34 | -fschedule-insns |
| 9 | -ftree-dominator-opts | 35 | -fschedule-insns2 |
| 10 | -ftree-dse | 36 | -fsched-interblock |
| 11 | -ftree-ter | 37 | -fsched-spec |
| 12 | -ftree-lrs | 38 | -fregmove |
| 13 | -ftree-sra | 39 | -fstrict-aliasing |
| 14 | -ftree-copyrename | 40 | -fdelete-null-pointer-checks |
| 15 | -ftree-fre | 41 | -freorder-blocks |
| 16 | -ftree-ch | 42 | -freorder-functions |
| 17 | -funit-at-a-time | 43 | -falign-functions |
| 18 | -fmerge-constants | 44 | -falign-jumps |
| 19 | -fomit-frame-pointer | 45 | -falign-loops |
| 20 | -ftree-sra | 46 | -falign-labels |
| 21 | -fthread-jumps | 47 | -ftree-vrp |
| 22 | -fcrossjumping | 48 | -ftree-pre |
| 23 | -foptimize-sibling-calls | 49 | -finline-functions |
| 24 | -fcse-follow-jumps | 50 | -funswitch-loops |
| 25 | -fcse-skip-blocks | 51 | -fgcse-after-reload |
| 26 | -fgcse | | |
Specifying flags in PTUNE:

E.g. gcc -O0 -c -ftree-dse -ftree-ter -ftree-lrs -ftree-sra ... -ftree-copyrename foo.c

**Figure 3: Specifying Flags**
### 3.4 Search Algorithms
PTUNE implements a number of search strategies, including a genetic algorithm, direct search, window search, tabu search, simulated annealing and random search. This section provides a brief description of each implemented strategy.
3.4.1 Genetic Search
Our genetic search algorithm (GA) finds better sequences of optimizations with respect to execution time. It uses the previous set of transformation sequences to determine the next set to apply, producing new candidates by merging parts of previous optimization sequences and randomly changing a few flags. This makes it a good fit for iterative self-tuning, and it finds a better sequence in a fair amount of time [60].
We compare the final optimization sequence obtained from the genetic algorithm with three other searches: random search, direct search and simulated annealing, and we find that genetic search does better than all of them. See the experimental section for the results.
In our framework we have 51 optimizations, and each optimization corresponds to one gene in a chromosome, so the length of a chromosome is 51. Each gene is either 0 or 1: a value of 1 means the corresponding optimization is ON, and 0 means it is OFF.
The population is a set of chromosomes. Initial solutions are randomly selected to form an initial population. The default population size in PTUNE is 5; the user can specify a different size on the command line. For each individual in the population, the program is compiled with the corresponding transformations and run, and its execution time is measured. The fitness value of an individual is this execution time: the individual with the highest fitness value (the longest execution time) has the worst performance, and the individual with the lowest fitness value has the best performance.
The population is sorted in ascending order of fitness value (the best individual at position 0 and the worst at position 4).
The population for the next generation is produced by merging individuals from the previous population and randomly flipping one bit in each new individual. The worst individual from the previous generation is discarded, and each remaining individual is divided into two halves. For the first two individuals, the second half of the first is replaced with the first half of the second; the same is done for the third and fourth individuals. One bit of each resulting individual is then changed at random. A randomly generated individual replaces the fifth member, which had the highest fitness value.
Each generation in the experiments consists of the following steps (a code sketch of one generation is given after Step 5):
**Step 1: Initial Generation**
The user can select the population size; the default is five. We use a random number generator to generate five random numbers and pass them to a function that converts each number into a binary sequence. The length of each generated binary sequence is 51, equal to the chromosome length.
Figure 4: Randomly Generated Population
**Step 2:** Rearranging the population
Each individual is converted into a sequence of optimizations according to its 0/1 genes. The code being compiled is passed through the compiler and optimized with the sequence defined by that individual, and the corresponding fitness value (execution time) is stored in an array. The individual with the highest fitness value is considered the worst performer and vice versa. The population is then stored in ascending order of fitness: the lowest execution time at position 0 of the array and the highest at position 4. Thus the comparatively best individual is population #1 and the worst is population #5.
**Step 3:** New Generation
We generate the new population by exchanging chromosome halves between the previous individuals and then randomly flipping one bit in each new individual. We divide each individual into two halves and exchange the second half of the first individual with the first half of the second; the same is done for the third and fourth individuals.
One individual is generated at random and replaces the fifth. The exchange of chromosome halves is shown by the arrows in the following figures.
**Population 1:**
```
0 1 0 1 1 1 0 0 1
```
**Population 2:**
```
1 1 0 1 0 0 0 0 0
```
**Population 3:**
```
1 1 0 0 1 1 1 1 1
```
**Population 4:**
```
1 0 1 0 0 1 0 1 1
```
**Population 5:**
```
1 0 0 0 0 0 1 1 1
```
**Figure 5: Generating New Population**
**Step 4:** Repeat the same procedure for the specified number of iterations.
**Step 5:** Done.
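As noted above, the reproduction scheme of Steps 1-5 can be sketched in code. The sketch below is a simplified reading of the crossover and mutation described in this section (the exact half-exchange is slightly ambiguous, so one plausible interpretation is used), and fitness is assumed to be a user-supplied function that compiles and runs the program with the flags a chromosome enables (e.g. via the bit-stream interface of Section 3.3) and returns its execution time.

```
import random

N_FLAGS = 51      # one gene per optimization flag in Table 1
POP_SIZE = 5      # default PTUNE population size

def random_chromosome():
    return [random.randint(0, 1) for _ in range(N_FLAGS)]

def mutate(chrom):
    """Flip one randomly chosen bit."""
    chrom = chrom[:]
    chrom[random.randrange(N_FLAGS)] ^= 1
    return chrom

def crossover(a, b, half=N_FLAGS // 2):
    """One reading of the exchange in Step 3: the second half of each parent is
    replaced by (a prefix of) the first half of the other parent."""
    return a[:half] + b[:N_FLAGS - half], b[:half] + a[:N_FLAGS - half]

def next_generation(population, fitness):
    """One GA step: rank by execution time, cross pairs (1,2) and (3,4),
    mutate one bit in each child, and replace the worst individual with a random one."""
    ranked = sorted(population, key=fitness)        # lowest execution time first
    c1, c2 = crossover(ranked[0], ranked[1])
    c3, c4 = crossover(ranked[2], ranked[3])
    children = [mutate(c) for c in (c1, c2, c3, c4)]
    children.append(random_chromosome())            # replaces the worst individual
    return children

# Example usage (fitness must be supplied by the caller):
# population = [random_chromosome() for _ in range(POP_SIZE)]
# population = next_generation(population, fitness=measured_execution_time)
```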
### 3.4.2. Random Search
We include random search in our framework as a benchmark search strategy: a search algorithm is considered effective only if it does better than random on a given search space. The chief reason for including random search is to let us compare the performance of the different search strategies. Random search picks random points within the search space and keeps track of the best value found at every step.
Unlike the other search strategies described above, random search does not use any heuristics and it does not have any convergence criteria. The search is terminated after a pre-specified number of evaluations.
3.4.3 Direct Search
There are two main flavors of direct search that have been used for exploring the optimization search space. The simplex method is usually applied for a continuous search space, whereas the pattern-based method is used for discrete search spaces. Since the search space of transformation parameters is discrete we implemented the pattern-based direct search method in our framework.
The variant of direct search implemented in our framework is the pattern-based method, originally proposed by Hooke and Jeeves [24]. This algorithm works on a discrete space and aims to find the optimal point in the search space using a method of steepest descent. The algorithm proceeds by making a set of exploratory moves and pattern moves. By visiting neighboring locations, the exploratory moves identify a promising direction of movement from the current position. Once this direction has been identified, the search takes a leap in that direction (pattern move) and then explores neighbors of that new location. This process continues until the exploratory moves fail to find a new promising direction for some point. This point is returned as the optimal location.
The major steps of the algorithm are listed below; a code sketch of the full procedure follows Step 5:
• N denotes an n-dimensional search space, where each dimension represents a transformation parameter that is being tuned
• p = (p1, p2, ..., pn) denotes a point in the search space, where pi is the value of the i-th parameter
• f(p1, p2, ..., pn) denotes the execution time for the program compiled with transformation parameters p1, p2, ..., pn
• s denotes the step size; this value determines the size of the subspace that is explored during the exploratory moves
Step 1: Pick an initial base point p. This is done by choosing the midpoint within the range for each parameter.
Step 2: Make exploratory moves. For each parameter pi we first increment its value by the step size s and evaluate the program at p' = (p1, ..., pi + s, ..., pn). If the execution time at p' is less than the current minimum then we set the value of parameter pi to (pi + s) and move on to the next parameter. Otherwise we decrement the value of the parameter by s and evaluate the program at p' = (p1, ..., pi − s, ..., pn). If f(p') is less than the current minimum then we set the value of parameter pi to (pi − s). Otherwise the value of the parameter remains unchanged. Once all the parameters have been explored, we move to Step 3.
Step 3: Make pattern move. The series of exploratory moves gives us a new point p' in N where we are likely to find a value that is less than the current minimum. The pattern move shifts the base point in the direction of p', that is, along the vector p' − p. The execution time at this new point is evaluated. If this execution time is less than the current base point execution time then we go to Step 2. Otherwise we move to Step 4.
**Step 4:** Reduce step size. If we have reached the minimum step size then we move to Step 5. Otherwise, we reduce the step size by the step size reduction factor and go back to Step 2.
**Step 5:** Done.
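The following sketch summarizes Steps 1-5 as Hooke-Jeeves-style pattern search over integer parameters. It is a simplified illustration rather than the exact PTUNE implementation: clamping to the parameter ranges is added for safety, and f stands for the program evaluation described above.

```
def pattern_search(f, lower, upper, step, min_step=1, shrink=2):
    """Hooke-Jeeves-style search over integer transformation parameters.
    f(point) returns the measured execution time for the parameter tuple `point`;
    `lower` and `upper` give the per-dimension parameter ranges."""

    def clamp(p):
        return tuple(max(lo, min(hi, v)) for v, lo, hi in zip(p, lower, upper))

    def explore(base, base_val, s):
        """Step 2: exploratory moves around `base` with step size `s`."""
        point, val = list(base), base_val
        for i in range(len(point)):
            for cand in (point[i] + s, point[i] - s):
                trial = clamp(tuple(point[:i] + [cand] + point[i + 1:]))
                t = f(trial)
                if t < val:
                    point, val = list(trial), t
                    break
        return tuple(point), val

    # Step 1: start from the midpoint of each parameter range.
    base = clamp(tuple((lo + hi) // 2 for lo, hi in zip(lower, upper)))
    base_val = f(base)
    while step >= min_step:
        new, new_val = explore(base, base_val, step)
        if new_val < base_val:
            # Step 3: pattern move along the promising direction (new - base).
            trial = clamp(tuple(2 * n - b for n, b in zip(new, base)))
            base, base_val = new, new_val
            trial_val = f(trial)
            if trial_val < base_val:
                base, base_val = trial, trial_val
        else:
            step //= shrink               # Step 4: reduce the step size
    return base, base_val                 # Step 5: best point found
```

In PTUNE's setting, f would compile and run the program variant produced for a given set of transformation parameters (e.g. tile sizes and unroll factors) and return its measured execution time.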
### 3.4.4 Simulated Annealing
Initially, a random point is selected in the search space and its neighboring points are explored. At each step, the search moves to a neighboring point with a lower value or, with a probability that depends on the current temperature, to a point with a higher value. The temperature is decreased over time, and the search converges when no further moves are possible.
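A minimal sketch of this acceptance rule is shown below, assuming a user-supplied objective f (measured execution time) and a neighbors function that yields at least one nearby point; the geometric cooling schedule and constants are placeholders rather than PTUNE's per-dimension alpha/beta settings.

```
import math
import random

def simulated_annealing(f, start, neighbors, t_start=1.0, t_min=1e-3, cooling=0.95):
    """f(point) is the measured execution time; neighbors(point) yields nearby points."""
    current, current_val = start, f(start)
    best, best_val = current, current_val
    t = t_start
    while t > t_min:
        cand = random.choice(list(neighbors(current)))
        cand_val = f(cand)
        delta = cand_val - current_val
        # Always accept improvements; accept uphill moves with probability exp(-delta / t).
        if delta < 0 or random.random() < math.exp(-delta / t):
            current, current_val = cand, cand_val
            if current_val < best_val:
                best, best_val = current, current_val
        t *= cooling                      # decrease the temperature over time
    return best, best_val
```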
CHAPTER 4: EXPERIMENTAL RESULTS
4.1 Experimental Setup
For the experiments, we used five cBench benchmark applications and one SPEC benchmark application. cBench is a collection of open-source sequential programs with many datasets, assembled by the cTuning community [61]; it is partially based on a modified MiBench benchmark and has been used for benchmarking compilers including GCC, LLVM, Intel, Open64, and PathScale. We selected five cBench applications for our experiment. Table 2 provides a short description of each benchmark.
SPEC CPU2006 is an industry-standardized, CPU-intensive benchmark suite emphasizing a system's processor, memory subsystem and compiler [62]. SPEC designed CPU2006 to provide a comparative measure of compute-intensive performance across the widest practical range of hardware, using workloads developed from real user applications. These benchmarks are provided as source code. We used the bzip2e application for our experiments.
### Table 2: Benchmark Applications and their Descriptions
| No. | Benchmark Application | Description |
|-----|-----------------------|-------------|
| 1. | Automotive_bitcount | print bit pattern of bytes formatted to string |
| 2. | network_dijkstras | Shortest Path Algorithm |
| 3. | network_blowfish | Symmetric block cipher with a variable length key |
| 4. | telecom_CRC32 | Telecommunication Application |
| 5. | telecom_gsm | Telecommunication Application |
| 6. | bzip2e | Compression Algorithm |
We use a Core 2 Duo machine as our main platform; its specifications are given in Table 3.
### Table 3: Platform Specifications
| No. | Platform | Details |
|-----|----------|---------|
| 1. | Compiler | GCC version 4.1.2 |
| 2. | Architecture | Model: Intel i386, CPU E6550 @ 2.33 GHz; Cache size: 4096 KB; Address sizes: 36 bits physical, 48 bits virtual |
| 3. | Operating System | Red Hat 4.1.2-33 |
4.2 Overall Performance Improvement
We first consider the overall performance achieved on the selected benchmarks using our search-based strategy. Fig. 6 shows the speedup obtained using four different search strategies: genetic algorithm (GA), random search (random), direct search (direct), and simulated annealing (anneal). The speedup reported is that achieved over the fully optimized variant generated by GCC and is computed using the following formula:
\[
\text{Speedup} = \frac{\text{Execution time for fully optimized code}}{\text{Execution time of tuned code}}
\]
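As a small numeric illustration of the formula (with made-up timings): if the fully optimized baseline runs in 11.6 s and the tuned variant in 10.0 s, the reported speedup is 11.6 / 10.0 = 1.16.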
To keep the comparisons fair, each search algorithm is initiated from a random point in the search space and allowed to converge independently, with a maximum ceiling of 100 evaluations.
Overall, GA yields the best performance across all benchmarks, achieving as much as a 1.16 speedup for bzip2e and telecom and a 1.09 speedup on average. Moreover, unlike the other search methods, GA obtains at least some performance improvement for all applications. direct performs well on telecom and bitcount but causes performance degradation on dijkstra. Similarly, anneal does well on blowfish and telecom but fails on bitcount and bzip2e. Somewhat surprisingly, random performs quite well and delivers performance close to that obtained through GA. One reason for random producing better results than direct and anneal is that both these searches tended to converge to a local minimum much sooner than the maximum allotment of 100 evaluations. Thus, these results reiterate the difficulties of using steepest descent methods for exploring the optimization search space.
4.3 Search Space Exploration
In this section, we examine the manner in which the search space is explored by GA and random. Figs. 7-12 show the best values found by GA and random after each program evaluation. As before, both searches are terminated after 100 evaluations. The Y-axis in these figures indicates program execution time in seconds; thus, a lower y-value indicates a better performance point.
For automotive_bitcount, GA finds the best value after 50 iterations, and the largest improvements in performance come between iterations 30 and 50. The trend line for random reveals that only 4 probes among 100 lead to a better execution time. These results indicate a relatively smooth search space for automotive_bitcount with few peaks and troughs. They also suggest that terminating the search at an earlier phase might be beneficial.
Figure 7: automotive_bitcount: Random Vs. Genetic
For bzip2e, GA finds the best value after 60 iterations, and the largest improvements in performance come between iterations 50 and 60. The graph indicates a relatively smooth search space for bzip2e with few peaks and troughs.
Figure 8: bzip2e: Random Vs. Genetic
For Network_dijkstra, GA finds the best value after 80 iterations, and the largest improvements in performance come between iterations 70 and 80. The trend line for genetic search shows it finding better performance gradually. These results indicate a relatively smooth search space for Network_dijkstra with few peaks and troughs, and suggest that continuing to explore the search space would have yielded further performance improvement.
**Figure 9: Network_dijkstra: Random Vs. Genetic**
For security_blowfish, genetic search found the minimum value at the 40th iteration, but random search could not find the minimum value.
**Figure 10: Security_blowfish_d: Random Vs. Genetic**
For telecom_CRC, the graph shows how genetic search finds the minimum value step by step.
Figure 11: telecom_CRC: Random Vs. Genetic
For telecom_GSM, genetic search found the minimum value after 40 iterations, but it took around 80 iterations for random search to find the minimum.
Figure 12: telecom_gsm: Random Vs. Genetic
CHAPTER 5: CONCLUSION
This thesis described the implementation and experimental evaluation of genetic search in PTUNE, an automatic performance tuning tool. We provided a way to represent optimization sequences as chromosomes in a genetic algorithm and proposed new strategies for mutation and crossover. We conducted experiments to demonstrate the effectiveness of the genetic search algorithm and the tuning tool, and compared the results of genetic search with other search techniques: random search, simulated annealing and direct search. The results show that genetic search outperforms all the other searches in both the application performance improvement achieved and the tuning time required.
REFERENCES
63. L. Adhianto, S. Banerjee, M. Fagan, M. Krentel, G. Marin, J. Mellor-Crummey, and N. R. Tallent. HPCToolkit: Tools for performance analysis of optimized parallel programs. Concurrency and Computation: Practice and Experience, 22(6):685–701, 2010.
64. D. Terpstra, H. Jagode, H. You, and J. Dongarra. Collecting Performance Data with PAPI-C.
..., P. Hovland, S. Moore, K. Seymour, J. Shin, A. Tiwari, S. Williams, and H. You. PERI auto-tuning. doi:10.1088/1742-6596/125/1/012089.
Santosh Raosaheb Sarangkar was born into a farmer's family in Rajuri, Maharashtra, India on December 25, 1982, the son of Raosaheb Babaji Sarangkar and Chanchal Raosaheb Sarangkar. He received the degree of Bachelor of Engineering (B.E.) in Electronics and Tele-Communication from Vidya Pratishthan College of Engineering, Baramati, Maharashtra, India in 2006. He entered the Master of Science (M.S.) degree program at Texas State University-San Marcos in August 2008. During his master's studies he was employed as an HPC programmer, research assistant, and lab instructor with Texas State University-San Marcos. He also received awards for graduate research excellence, graduate academic excellence, an outstanding graduate student nomination, and excellence in service at Texas State University. During the summers of 2010 and 2011 he was employed as a summer research intern at Freescale Semiconductor Inc., Austin, Texas and at the National Center for Atmospheric Research (NCAR), Boulder, Colorado, respectively.
Permanent Email Address: santosh.sarangkar@gmail.com
This thesis was typed by Santosh R. Sarangkar.
DOI: https://doi.org/10.1006/ijhc.1997.0119
Link to record in KAR: http://kar.kent.ac.uk/21542/
Comparison of contrasting Prolog trace output formats
MUKESH J. PATEL
Department of Psychology, University of Newcastle, Newcastle upon Tyne, UK
BENEDICT DU BOULAY†
School of Cognitive and Computing Sciences, The University of Sussex, Brighton, BN1 9QN, UK
CHRIS TAYLOR
Computer Science Department, City University, London, UK
(Received 19 March 1996 and accepted in revised form 10 February 1997)
This paper reports on a comparative study of three Prolog trace packages. Forty-three students of an introductory Prolog course solved five different Prolog programming problems in each of three different conditions (using isomorphic problem variants to disguise recurring tasks). Each of the three conditions provided subjects with static screen-snapshot-mockups derived from one of three different trace packages ("conventional" Spy; "graphical AND/OR tree-based" TPM*; "informative textual" EPTB). When traces explicitly displayed the information asked for in the problem, subjects solved the problems more quickly. Conversely, when trace output obscured the required information (or necessitated difficult detective work to uncover the information), solution times were longer and answers less accurate. Deciding on a "good" format for display is thus a task-dependent decision, and impacts directly on the user's cognitive ability to solve a problem.
1. Introduction
The ongoing development of visual programming languages and program visualization techniques is underpinned by the assumption that program design and debugging is made easier by diagrammatic or graphic representations (see Price, Baecker & Small, 1993, for a review). Theoretical accounts of how external representations work suggest that diagrammatic or graphical representations are not automatically more effective than informationally equivalent sentential or textual representations. A diagrammatic representation is likely to be more computationally efficient than a sentential representation if it reduces search among, aids recognition of or assists inference from information items in the external problem representation (Larkin & Simon, 1987; Larkin, 1989). The interplay between the internal cognitive processes of the problem-solver and the external representations on which they operate is still an active area of research (see Scaife & Rogers, 1996, for a critical review of this issue). Whether such efficiency gains are realized in practice depends on the nature of the problem-solving task and the degree of expertise and experience which the problem-solver can bring to bear on that task (see
† Corresponding author.
In the case of programming language notations, the empirical evidence for the predicted value of graphic program language notations is not strong (see e.g. Green, Petre & Bellamy, 1991). By contrast, Cunniff and Taylor (1987) have shown that comprehension of graphically represented program segments was both faster and more accurate than that of textually represented equivalents though performance may have been affected by visual aptitude.
This paper is concerned with program trace output, an external representation typically used to assist in understanding and debugging the behaviour of a program, itself also typically available as an external representation. For an expert programmer the trace will often offer no information that could not, in principle, be inferred by scrutiny of the program itself.† In practical terms however, especially in debugging, the program trace plays a crucial role in that it enables a comparison between what the programmer believes a program should do and what it actually does. Reasoning about programming behaviour may not only be “forwards” or “backwards” (Green, 1977), but may be abductive, from the trace to the program (e.g. “which bit of the program produced this behaviour and why”) or deductive, from the program to the trace (e.g. “where is the effect of this piece of code?”). The usefulness of the trace will depend on the programmer’s declarative knowledge of the programming language and “strategic knowledge” of what to use the trace for and how to use it, together with processing constraints such as working memory capacity, visual discrimination capability and so on (Davies, 1993b).
Green and his colleagues have delineated many of the factors (such as “role expressiveness”) which make a programming language notation psychologically effective (see e.g. Gilmore & Green, 1988; Green, 1989, 1991). Less effort has been expended on the effectiveness of representation of trace outputs from programs, but it is clear that they can be understood in similar terms and in terms of display-based reasoning.
The role of external representations in programming has been explored by Green, Bellamy and Parker (1987) in their “Parsing/Gnisrap” model of program development and by Davies (1993a) in his experiments on the tradeoff between the use of working memory and the display for storing fragments of an evolving program. Davies’ experiments, in particular, suggest that one facet of programming expertise is precisely the ability to make good use of (and, indeed, to depend on) the display as an external representation. While his work was not concerned with trace outputs it indirectly underlines their importance because it elucidates how display-based strategies, in general, provide a means for coping with the complexity of programming tasks.
Two issues are explored here in relation to trace outputs for Prolog programs. The first concerns the relative efficiency of informationally equivalent textual and graphic trace notations in support of the kind of information lookup and inference tasks that occur in debugging simple programs. The second issue concerns the relationship between the trace notation and the program notation from which it derives. In a comparison of informationally equivalent textual and graphic traces, we might expect to observe the kinds of speedup in information access predicted by Larkin’s models for a well-designed graphic trace notation. But we might also expect to see some counteracting slowdown for
† There are cases where this is obviously untrue, for example, debugging programs involving timing interactions or those affected by external events.
graphic trace notations where the debugging task required trace/program comparisons, if the graphic trace notation items were less easily matched than textual trace notation items to their corresponding textual Prolog program components.
1.1. PROLOG AND PROLOG TRACER OUTPUT FORMAT
In the study reported here we focus on the extent to which the format of Prolog tracer output helps to determine its usefulness to novice Prolog programmers. It is assumed that format is an important (though clearly not the sole) determiner of the clarity of information in programming notations (Green, 1991). For a programming language as internally complex as Prolog this is especially the case because the format of the tracer output can be a significant determiner, amongst other things, of both the overall perspective and the level of detail about program execution. Both these aspects, perspective and the level of detail, accordingly affect the ease of access to the information given by a particular combination of a tracer and its format (Patel, du Boulay & Taylor, 1991a, b). Other significant determiners of information access are the control interface to the tracer and the user herself; no matter how clear or concise the information presented is, if it fails to match the user's cognitive model of the program domain and the language it is unlikely to be optimally useful. By “control interface to the tracer” we mean the manner in which the user may control the tracer itself—choosing which predicates to examine, how to set breakpoints, methods of controlling the amount and level of information both globally and at different points in the execution, ways of moving around the trace output and so on. What might otherwise be an excellent format could be marred by poor user control and what otherwise might be regarded as an impoverished format may continue to be effective where control issues work well. For example, Davies’ manipulation of the control interface to an editor has indicated how important this aspect of a tool is in increasing or reducing programming errors (Davies, 1993a).
This study concentrates on format rather than on control or user characteristics. Our research aim was not to find out why some novices have problems with learning Prolog but to evaluate the relative usefulness of certain trace outputs in solving simple problems. While a better understanding of the ways in which tracers as a whole are useful (rather than simply their static outputs) would no doubt lead to a better cognitive model of Prolog novices’ problems, this too was beyond the scope of this work.
Prolog is a complex and powerful programming language. As with all programming languages many key aspects remain implicit (or “hidden”) during program execution, which adds to the difficulty in comprehension and confusion among novice programmers. Prolog is special in that the internal mechanisms governing flow of control and variable binding are more complex than many procedural languages (such as Basic or Pascal) for which there is ample evidence of novice confusion about flow of control and variables (see e.g. du Boulay, 1986, for a brief review). Prolog on-line help systems can be typically divided into a number of groups which include both high-level debugging and tutoring systems or tracers.
Tracers, in general, are used by novices for a variety of tasks such as learning the language as well as program development and debugging (see e.g. Mann, Linn & Clancy, 1994). In the case of Prolog, trace output can enhance a novice's understanding of the implicit aspects of Prolog execution such as flow of control, backtracking, variable binding and variable unbinding and the general relationship between the static code of the program and the dynamic behaviour it engenders via a query. Of course, tracers do not provide information on how to write Prolog programs to achieve specific goals nor do they provide guidance and reasons for a program's failure, though they are very helpful in debugging tasks. Some of these areas are covered by more sophisticated debugging systems (see e.g. Brna, Brayshaw, Bundy, Dodd, Elsom-Cook & Fung, 1991) or tutoring systems (see e.g. Looi, 1991; Gegg-Harrison, 1991) which were not evaluated in this study. For a more extended account of criteria for evaluating Prolog tracers, see

The hidden mechanisms of Prolog can be described and/or explained in more than one way (Pain & Bundy, 1987) with different perspectives emphasising different aspects, such as variable binding, flow of control, recursion, search space, etc. Often it is not possible to present information about all aspects both simultaneously and equally clearly. This is partly because of the nature of the languages; emphasis on one aspect, such as flow of control, often precludes the possibility of other aspects without loss of clarity.

Format is another major determinant of clarity of information. For example, a perspective focusing on the flow of control in program execution can be presented inter alia as a graphical AND/OR tree rooted at the top and growing downwards (as in TPM; Eisenstadt & Brayshaw, 1988) or as a left-to-right, sideways, textual tree (as in Mellish, 1984). Independent of the perspective, tracers provide differing amounts of information necessary for reconstructing the whole "story" of a program's execution, though in some sense each tracer is a more or less equally valid (and theoretically adequate) description. In some tracers the user has to make more complex inferences about what has happened than in others.

Apart from perspective and format, tracers vary in terms of information content. As with perspective, up to a certain extent, information about how a program is executed does not vary across trace outputs (even if perspective and format do). The formal properties of Prolog leave no room for ambiguity in execution; so, given an adequate grasp of the underlying logic and the source code, it is usually possible to comprehend a trace output.† What does vary, however, is the level of detail and explicitness of information. Together, these affect the ease of access to information, and it is this property of trace outputs that we regard as an important determiner of tracer usefulness. Usually, it is far more appropriate to compare tracers in terms of the extent to which information about key aspects of Prolog is more or less explicit rather than whether it is absent or present. Of course, the ease of access to information will also be partly determined by format. We will return to this issue in the next section.

† There are minor differences in the way that different versions of Prolog implement, e.g. clause matching. Some systematically scan through all clauses with a given functor and the correct arity, others employ various pre-matching techniques. These differences ought to show up in any trace output that worked at that level of detail.
2. Details of tracer outputs
Three tracer output formats were compared: Spy (or Byrd Box) (Byrd, 1980), Enhanced Prolog Tracer for Beginners (EPTB) (Dichev & du Boulay, 1989) and a variant on the Transparent Prolog Machine (TPM) (see e.g. Eisenstadt, Brayshaw & Paine, 1991).
The tracers were not used as true tracers in a dynamic way. In each case an appropriate screen dump of the relevant tracer output was shown in its entirety, so users could not “grow” the trace nor add or delete information from it. Thus, variation in method of control between the tracers was eliminated. As we have already indicated, our intention was not to examine the tracers “in the round” in the manner of Mulholland (1994, 1995) but to focus on the influence of format and level of detail in static trace outputs.
2.1. SPY TRACE OUTPUT
Spy is a basic, linear† textual tool; it is included in this study because subjects were familiar with it and in order to provide a baseline for comparison with the others. The version used in this study (POPLOG Prolog) did not show system goals and did not emphasize trace structure via indentation. This tracer provides most of the basic information necessary for programming or debugging in Prolog, but much of it is implicit. In particular, the relationship between the source code and the trace output is not as clearly displayed as it is in TPM* and EPTB. Given this basic lack of clarity in information presentation, Spy was not expected to perform better than TPM* and EPTB. Figure 1 shows a simple program (top left) and query together with its Spy trace (top right).
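To fix ideas, the kind of output Spy produces can be sketched with a small constructed example (this is our own illustration, not one of the experimental programs, and only an approximation to the exact POPLOG output). For the program and query

r(X) :- p(X), q(X).
p(1).
p(2).
q(2).

?- r(X).

a Byrd-box style trace involving one episode of backtracking would look roughly as follows:

** (1) Call : r(_1)
** (2) Call : p(_1)
** (2) Exit : p(1)
** (3) Call : q(1)
** (3) Fail : q(1)
** (2) Redo : p(1)
** (2) Exit : p(2)
** (4) Call : q(2)
** (4) Exit : q(2)
** (1) Exit : r(2)
X = 2 ?
yes

Each goal passes through the Call, Exit, Redo and Fail ports, and everything beyond the goal term itself, such as which clause was matched, has to be inferred from the interleaving of these lines.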
2.2. EPTB TRACE OUTPUT
EPTB (Enhanced Prolog Tracer for Beginners) is a linear, textual tracer developed by Dichev and du Boulay (1989). Its most characteristic feature is an emphasis on providing information about data structures and variable binding in the trace output. It also has some other features not present in a Spy tracer, such as a facility for distinguishing between different reasons for failure of goals plus a wide range of options for adjusting the degree of detail shown. The tracer generally makes use of labels and symbols to describe different sections of a trace output (like PTP, see Eisenstadt, 1984). Hence, it is expected to be more helpful than Spy for solving problems involving data structures and variable binding. The tracer can be set to include or exclude messages about various aspects of Prolog execution history. Figure 1 (bottom left) shows the EPTB trace for the same program and query, with the trace options set in one particular way.
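One consequence of this labelling, visible in the EPTB trace reproduced later as Figure 3, is that a call line is immediately followed by a line displaying the clause being tried together with a clause number, for example

**(1) Call: k
1) k:-l,m

so the reader can see which clause matched without having to infer it from the pattern of subsequent calls (the exact wording and level of detail depend on the option settings mentioned above).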
2.3. TPM* TRACE OUTPUT
The TPM (Transparent Prolog Machine) is a tracing and tutoring tool which makes use of a modified and extended AND/OR tree representation, known as the “AORTA” representation. TPM exists in a number of forms: as a notation used in courses offered by the Open University (e.g. Eisenstadt & Dixon, 1988) and as described in the research literature (see e.g. Eisenstadt & Brayshaw, 1988); and as implementations available on various machines in differing degrees of conformance with the printed versions. At the time of the experiment we had access to a commercially available version of the TPM, marketed by Chemical Design Limited and hence referred to as CDL-TPM. We used the “detailed view” tree notation from this tracer as the graphical notation for the experiment. However, in this detailed notation, the CDL-TPM tracer could only display at once a single node plus its parent and immediate children. Thus, the tracer itself could not generate “detailed view” trees that were big enough for the experimental examples, which involved up to 20 nodes or so. Consequently, the static traces used were constructed artificially, using the same notation, but applied to bigger trees than the tracer could actually show in that notation.† To emphasize the fact that the “traces” were not actually generated from the CDL-TPM tracer, the tree notation used is referred to as “TPM*”.
† A linear tracer always adds new trace statements to the growing end of the trace and does not either overwrite or insert material elsewhere within it.
Figure 1. Simple program (top left) with Spy trace (top right), EPTB trace (bottom left) and TPM* trace (bottom right).
The spatial layout of TPM* provides a clearer and less cluttered view of the structure of the search space than the two linear textual tracers. On the other hand, the use of a vertical tree format limits the horizontal space available between sibling nodes for displaying textual information such as predicate names, arguments and variable bindings. (By contrast, in linear, unindented textual tracers, virtually the entire window width is available for displaying such information.) This point is not relevant to the simple examples in the experimental study, in which the call terms were all fairly short syntactically. However, in the CDL-TPM tracer, it rapidly becomes a problem when long predicate names or long terms (e.g. lists) are used, necessitating the use of expandable and scrollable sub-windows for textual information, with a resultant decrease in the efficiency of access to textual information. (Note: In other versions of the TPM, a simultaneous textual trace using a Spy-like notation is available in a separate sub-window—although this takes up further screen space.) A tracer output in TPM* for the same program above is shown in Figure 1 (bottom right).
3. Trace outputs: information and format
All three tracers have slightly different perspectives, though this is not assumed to have any significant effect on their usefulness for our experimental task. For the purpose of this study it was assumed that the three tracers provide the minimal—in the case of Spy this could be very minimal indeed—information, and that any main differences in their usefulness are due to format and access to information. More realistically, it is obvious that in most cases there would be some interaction between format and information content, and so any explanation of the helpfulness of tracers would have to give an account of such an interaction.
3.1. INFORMATION ACCESS
Apart from the effect of perspective, how do trace outputs vary in terms of overall information content? In this context, the term information is used in a specific way; it refers to information about when, how and which clauses are matched, how variables are bound to (and unbound from) values at particular points and the overall flow of control, including backtracking, together with the success or failure of goals. Information about the operations of a Prolog program, that is, the states it passes through, the variable bindings at each important step and the amount of backtracking involved, is useful in understanding and debugging Prolog programs. Trace outputs are designed to provide access to this information, but they can vary in terms of the exact nature of the information provided. Part of the variation can be due to the level of detail. For example, the Spy trace does not indicate which clause of a predicate is being used at any point, whereas EPTB and TPM* do. Spy refers to program variables by their internal names such as “_405”, while CDL-TPM systematically labels variables with letters (current implementations of TPM and printed TPM diagrams make use of the programmers’ variable names, suitably subscripted), and, unlike both, EPTB uses the names chosen by the programmer appended with a numerical subscript to distinguish between copies. Though all three methods serve the same function, they are not equally efficient in providing relevant information for the sorts of problems that were used in this evaluative study.
† With the wisdom of hindsight, it might well have been better to have used detailed, printed trace diagrams from the TPM literature.
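To make the contrast concrete, consider a familiar clause such as the second clause of append below; the clause and the exact renderings are our own illustration, not part of the experimental materials. The third-argument variable Zs of a call might appear as an internal name of the “_405” kind in a Spy trace, as a systematically assigned letter such as C in CDL-TPM, and as the programmer’s own name with a numerical subscript, such as Zs_1, in EPTB.

append([], Ys, Ys).
append([X|Xs], Ys, [X|Zs]) :- append(Xs, Ys, Zs).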
Further, trace outputs have different degrees of explicit information. For example, information about the number of sub-goals of a clause can be highly implicit, as in a Spy trace, or fairly explicit, as in TPM* (as long as the clause succeeds) and in EPTB (independent of whether the clause succeeds or not). While it is not difficult to provide examples to refine our notion of level of detail and explicitness of information, in reality these two aspects are often closely inter-related. However, this is not a serious drawback as long as it is clear that whatever the level of detail and the degree of information explicitness, it is the effect on access that determines a trace output’s usefulness. Thus, it follows that simply having more detailed or explicit information does not necessarily increase usefulness, because too much detail may be a hindrance in some cases. This tension between being explicit and overwhelming the user with “unnecessary” detail (or redundant information) is an important determiner of ease of access to information and therefore tracer usefulness. Hence, one of the questions that this study addresses is the amount and sort of information that is useful to novice programmers.
We are taking a rather “all or nothing” view here because of the nature of the experiment, which was based on static snapshots of trace outputs. In employing a tracer for a realistic task the user would expect to be able to adjust the level of detail dynamically both globally and locally. Note that with real tracers solution times might well have been longer.
3.2. FORMAT: GRAPHIC AND TEXTUAL
Aesthetic aspects of graphic representation can improve communication efficiency (Shu, 1988), but such improvement often depends on the nature of the information being displayed. In the case of Prolog programs there is a limited number of ways in which information about flow of control and backtracking can be graphically displayed. For example, flow of control can be represented as an AND/OR tree or an OR-tree (Hook, Taylor & du Boulay, 1990) or in terms of flow of satisfaction arrows (Clocksin & Mellish, 1981). In such cases, the spatial arrangement of certain aspects of information plays a crucial role in capturing essential and important relationships and would be expected to be differentially helpful in solving problems related to flow of control and backtracking. So, assuming that the information content of a tracer is fixed for the most significant aspects of Prolog, how can format affect its usefulness?
Trace outputs can be displayed in mainly graphic or mainly textual format. This basic
distinction will be described before a detailed consideration of their effect on trace output
usefulness. The following list of advantages of non-linear graphic format also serves to
highlight the disadvantages of linear text format. It is included to anchor some of the
basic differences between non-linear graphic and linear text formats. It is, of course,
possible to have non-linear textual formats—which have some of the advantages of both
(Taylor et al., 1991; Taylor, du Boulay & Patel, 1994).
1. Correlation between spatial display and information, e.g. relative distance and proportion, or bringing together related information items.
2. Clarity in display of non-linearly ordered information such as loops and backtracking.
3. Enables zooming in and out to access the essential relationship between information points.
4. Can display more than one perspective, if information points are related across more than one dimension. Simultaneous display of different perspectives is not necessary.
5. Greater possibility of displaying dynamic processes (animation) with real-time updates. Though possible in text formats, these may lack impact and clarity.
Note that some of the foregoing advantages are not exclusive to graphic formats (since
non-linear textual formats share some of the same features), and others are of limited
value unless augmented with textual information. The format of trace outputs can have
an important effect on the extent to which different aspects of Prolog can be presented
clearly and simultaneously. Its interaction with perspective and effect on ease of access to
information has already been described. For example, TPM*’s graphic format supports a clearer presentation of flow-of-control information but renders temporal (historical) information about clause matching a bit less accessible; broadly speaking, the opposite is the case for EPTB.
Aside from trivial cases, the distinction between graphic and textual format is rarely
clear cut. Even in a standard, linear textual format the serial ordering of information
about states at each step in a program run reflects an implicit notion of temporal
ordering; information at any step $n$ is partly dependent on information at step $n - 1$ and
will itself determine the information content of step $n + 1$. Serial ordering of information
in such a case is not textual but graphical. Conversely, a graphical tracer would be of
limited value without embedded textual information such as the values of bound
variables. However, a crude distinction between the two categories of tracer formats is
sufficient here because our aim is to concentrate on differences in usefulness due to very
broad parameters; e.g. the difference due to the representation of flow of control as a graphical AND/OR tree or as a linear ordering of textual information.
The relative advantages of graphic formats over linear textual formats might intuitively appear to be considerable. However, the major advantages of a graphic format may
be adversely affected by the modality mismatch between textual program code (assuming
traditional textual Prolog) and its graphic trace. The difference in modalities between
Prolog source code and its representation in a graphic trace is a likely source of ambiguity, which may influence ease of access to information. Thus, the advantages of graphic format cannot be assumed to be uniformly beneficial in all applications.
To recap, assuming that all the traces could be used to obtain the information required (in some cases in combination with a display of the corresponding source code and a basic knowledge of Prolog execution), but that they varied in terms of access to the information, how would this variation in access affect the subject’s ability to obtain the information? In this study, the task required subjects to study trace outputs in order to solve problems. Typically, they would have to work out whether a particular clause was matched or whether it affected the execution of another part of the program. These problems required the subjects to make inferences based on trace output. It was assumed that the more explicit the relevant information, the fewer inferences necessary and therefore the less time spent on solving the problem and the less likelihood of errors in the solution.
4. Task and motivation
The trace output format evaluation task was primarily designed to check for the effect of format on the relative usefulness of traces in solving simple Prolog problems. The task was presented on a workstation and response time and errors were collected. Each subject was assumed to know about the underlying principle of Spy traces (e.g. Byrd Box) and was given a tutorial on the other two trace outputs.
The task was a very simplified and abstract version of part of a normal debugging task. The subject group, while adequately proficient in Prolog, had to learn about two new trace outputs (TPM* and EPTB) in order to do the task. Because of this learning load, it was felt that subjects should not have to learn to manipulate actual tracers. Thus the problem solving task was not interactive. Subjects were given a static copy of the final trace output of the relevant program to enable them to solve the problems. Solution of the problems did not depend on being able to run the programs, which were very simple, as the examples show. A subject was presented with a problem and a single window of trace output and required to solve the problem with the aid of that trace. Hence, it was a static version of a normal debugging task but adequate to check for the impact of format in solving one sort of Prolog problem.
Since there are information content differences between the trace outputs, in general, the stimuli problems were designed to evaluate access to information which was common to all three trace types. Solutions to problems did not depend on obvious major differences such as the “cut”, which can be very explicitly and graphically displayed in TPM* but is totally obscure in Spy. However, there are minor differences between the tracers and the purpose of this paper is to investigate the problem $\times$ tracer interaction.
The problems were limited by the non-interactive nature of the task and by the requirement that they should be concerned largely with accessing information from the tracers as opposed to focusing on obscure features of the problem domain. That is, they were concerned with activities that form part of the debugging process but were not supposed to be tests of the subjects’ debugging ability.
A more sophisticated study than this might have extracted the problems from actual course materials in order to ensure their ecological validity. This was not done, but we
tried to ensure that the problems chosen were representative of the kinds of sub-tasks that might occur when novice programmers are trying to reconcile a program, its behaviour and a trace. We ensured as far as possible that solutions depended on basic understanding of Prolog rather than high levels of expertise.
The null hypothesis was that it would be possible to answer each question with the help of each trace output without significant differences in either response time or mean error.
4.1. PROBLEMS
The stimuli consisted of five problems. Three were very simple problems which could be solved with information on backtracking and the clauses tried. The solutions to the other two problems depended on information about recursion, system goals, goals with variables and list manipulation. Each problem was presented three times in overall random order, once with each trace output. Care was taken to disguise any similarity between questions across the different trace output types. This was achieved by altering words and phrases in the problem statements, as well as predicate and variable names. For examples of “disguised” EPTB and TPM* questions, see the appendix.†
Each of the five problems used is described and illustrated in this section in its Spy trace form, together with the corresponding traces for EPTB and TPM*. Note that, because of the disguising, the predicate and variable names in the corresponding EPTB and TPM* traces are not the same. Each of the problems was presented with a multiple choice question, the order of whose answers was randomised on each presentation of the question.
Problem 1: To solve this simple problem, subjects have to work out how often particular procedures are called. To ensure that subjects used the trace output rather than working from the program, the problem is presented without the program, see Figure 2.
The Spy textual trace largely requires linear scanning and counting. The first option requires that the subject establishes that there is only a single line of the form ** (nn) Call : t, where “nn” means a small integer. The second option requires that the subject count up lines of the form ** (nn) Call : f1. The third option requires the subject to establish that there are no lines of the form ** (nn) Call : h. The fourth option requires that the subject find two lines of the form ** (nn) Exit : g.
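For example, applying the first of these strategies to the trace in Figure 2, the only matching line is ** (15) Call : t, which confirms the first (correct) option, while scanning for lines of the form ** (nn) Call : f1 turns up four of them ((3), (8), (11) and (13)), which rules out the second option.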
The EPTB textual trace also required linear scanning and counting, see Figure 3. While there are more lines to scan, the task is essentially similar to the Spy version.
For the TPM* trace the subjects have to count the number of call boxes labelled with the name of the procedure in question, see Figure 4. Checking the success of a call requires noticing that its clause status box shows a tick.
Problem 2: This problem, also presented without the program, requires subjects to find out how many subgoals a particular clause contained, see Figure 5. The same traces as in problem 1 are used.
† It is possible that variations in the wording of the questions or small changes in the names of variables and predicates may have contributed to reducing the equivalence of the questions.
Given the trace to be shown, which one of the following statements is CORRECT?
1. There is one call of t*
2. f1 is called thrice
3. There are no calls of h
4. Two calls of g succeed
?- e.
** (1) Call : e
** (2) Call : f
** (3) Call : f1
** (3) Exit : f1
** (2) Exit : f
** (4) Call : g
** (5) Call : h
** (5) Exit : h
** (6) Call : prolog_error(UNDEFINED PREDICATE, [i])
** (6) Fail : prolog_error(UNDEFINED PREDICATE, [i])
** (5) Redo : h
** (5) Fail : h
** (7) Call : j
** (8) Call : f1
** (8) Exit : f1
** (7) Exit : j
** (9) Call : k
** (10) Call : j
** (11) Call : f1
** (11) Exit : f1
** (10) Exit : j
** (12) Call : f
** (13) Call : f1
** (13) Exit : f1
** (12) Exit : f
** (9) Exit : k
** (14) Call : l
** (15) Call : t
** (15) Exit : t
** (16) Call : f2
** (16) Exit : f2
** (14) Exit : l
** (4) Exit : g
** (1) Exit : e
yes
Figure 2. Spy Question 1 (top) and trace (bottom). The starred answer is correct.
??- k.
**(1) Call: k
1) k:-l,m
=> k:-l,m
...
**(2) Call: l
1) l:-l1
=> l:-l1
...
**(3) Call: l1
1) l1
=> l1
...
**(4) Call: m
1) m:-n,o,a
=> m:-n,o,a
...
**(5) Call: n
1) n
=> n
...
**(6) Exit: n
1) n
=> n
...
**(7) Call: o
1) o
=> o
...
**(8) Fail-match: o
1) o
=> o
...
**(4) Retry: m
1) m:-p,q,r
=> m:-p,q,r
...
**(6) Call: p
1) p:-l1
=> p:-l1
...
**(6) Call: l1
1) l1
=> l1
...
**(6) Exit: l1
1) l1
=> l1
...
**(7) Call: q
1) q:-p,l
=> q:-p,l
...
**(8) Call: p
1) p:-l1
=> p:-l1
...
**(9) Call: l1
1) l1
=> l1
...
**(9) Exit: l1
1) l1
=> l1
...
**(8) Exit: p
1) p:-l1
=> p:-l1
...
Key to Spy
question 1
Spy EPTB
e = k
f = l
f1 = l1
g = m
h = n
i = o
k = q
t = z
Figure 3. EPTB Question 1 trace.
The picture of the trace shows the output for the goal
?- e.
for a program which contains several simple rules of which the following is a single example
g :- j, k, l.
Which one of the following statements is TRUE?
1. The first clause for k has two subgoals*
2. The first clause for g has five subgoals
3. The second clause for g has five subgoals
4. e is defined by a single recursive clause
Figure 5. Spy Question 2.
The picture of the trace shows the output for the goal
?- e.
for a program which contains several simple rules of
which the following is a single example
g :- j, k, l.
Which one of the following statements is FALSE?
1. The first clause for g may have more than two subgoals
2. The second clause for g has exactly three subgoals
3. The second clause for f is not called
4. i succeeds*
Figure 6. Spy Question 3.
In the Spy trace output for the first three options this information is not explicitly stated and needs to be inferred. Because Spy does not assign numbers to matching clauses during execution, this simple problem is rendered more complicated. Subjects have to infer that the first “call” line for a procedure corresponds to the matching of the first clause of that procedure and locate a corresponding “exit” line further down. All calls to the sub-goals appear between these two lines, so subjects have to count all sub-goal “call” and “exit” line pairs at the relevant level. While doing so they have to ensure that calls to sub-sub-goals of the relevant sub-goals which also appear in the same intervening section are not included in the count. The fourth option asks the subject to ascertain whether “e” is defined by a single recursive clause. Given that the goal “e” succeeds, there would have to be an “e” sub-goal for this to be true.
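For example, in the Figure 2 trace the call to k at (9) is bracketed by ** (9) Call : k and ** (9) Exit : k, and between these lines there are exactly two Call/Exit pairs at the immediately subsidiary level, (10) j and (12) f; the f1 pairs at (11) and (13) lie one level further down and must be excluded. Taking the clause used to be the first clause of k, this confirms the correct first option that the first clause for k has two sub-goals.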
With an EPTB trace output the subject can locate a line showing that clause in the trace and count off the number of sub-goals shown on that line. This information is explicitly displayed. The more long-winded counting strategy, described for Spy, can also be applied.
In TPM* the required information is available explicitly in the trace and subjects need only count the number of sub-goal nodes that are child-nodes of a box for the relevant clause. In the case of the fourth option, one of the child nodes of “e” (in fact “a” in the TPM* trace) would also have to be “e”.
Problem 3: For this problem subjects are presented with the same traces as Problems 1 and 2, but have to choose a false statement from a choice of four. The required false answer (the fourth option) is that the call to “i” succeeds, where, in fact, the call fails because no procedure with the corresponding name and arity has been defined; the Prolog program was not given, see Figure 6.
Suppose the goal
?- trundle([7,11], [5,7,10,12], L).
is evaluated against the program
trundle([], _, []).
trundle(_, [], []).
trundle([X|Xs], [X|Ys], [X|Zs]):-
trundle(Xs, Ys, Zs).
trundle([X|Xs], [Y|Ys], Zs):-
X < Y, trundle(Xs, [Y|Ys], Zs).
trundle([X|Xs], [Y|Ys], Zs):-
X > Y, trundle([X|Xs], Ys, Zs).
From the trace you are shown, how many times does the head of the 4th clause of "trundle" match a call to "trundle"?
1. Once
2. Three times*
3. Four times
4. Not at all
?- trundle([7,11], [5,7,10,12], L).
** (1) Call : trundle([7, 11], [5, 7, 10, 12], _1)?
** (2) Call : trundle([7, 11], [7, 10, 12], _1)?
** (3) Call : trundle([11], [10, 12], _2)?
** (4) Call : trundle([11], [12], _2)?
** (5) Call : trundle([], [12], _2)?
** (5) Exit : trundle([], [12], [])?
** (4) Exit : trundle([11], [12], [])?
** (3) Exit : trundle([11], [10, 12], [])?
** (2) Exit : trundle([7, 11], [7, 10, 12], [7])?
** (1) Exit : trundle([7, 11], [5, 7, 10, 12], [7])?
L = [7] ?
yes
Figure 7. Spy Question 4 (top) and trace (bottom).
For the Spy trace the subject’s procedure should be largely the same as in Problem 2, except for the correct/false inversion. The first two options involve establishing the number of sub-goals in a clause. In the third option the subject needs to check whether there is a “fail” at the immediate subsidiary level between a ** (nn) Call : f and its corresponding ** (nn) Exit : f or ** (nn) Fail : f. In the case of the fourth option, once the relevant subsection of the trace has been identified, the subject needs only to find the phrase “UNDEFINED PREDICATE” in order to confirm the statement.
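In the Figure 2 trace the relevant lines are

** (6) Call : prolog_error(UNDEFINED PREDICATE, [i])
** (6) Fail : prolog_error(UNDEFINED PREDICATE, [i])

which show directly that the call to i fails rather than succeeds, making the fourth option the required false statement.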
As for Problem 2, the first two options in EPTB's textual format require either systematic linear scanning, or looking up the information which is explicitly stated in the trace. For the third option, the subject can apply the same procedure as for the Spy trace or look for an explicit ** (nn) Retry : f line (in fact "l"). In order to solve the fourth option, the subject has to locate the relevant procedure call for "i" (in fact "o") and check its success or failure to match on the following line.
With the TPM* trace, the first three options are dealt with by counting within the groupings of sub-goals of the relevant clause box. In order to verify the statement in the
Suppose the goal
?- j([2, [7, 2]], X).
is evaluated against the program below:
p([], X, X).
p([A|R], X, Y) :- j(A, B), p(R, [B|X], Y).
j(E, E) :- atomic(E).
j(X, Y) :- p(X, [], Y).
You are shown a number of statements, only one of which is correct. With the help of the trace you are shown, pick the CORRECT statement.
(N.B. A clause is "invoked" if its head matches a call - it will then succeed if all its subgoals succeed, and fail otherwise.)
1. 2nd clause of p is invoked 4 times*
2. 1st clause of p is invoked 3 times
3. 2nd clause of j is invoked 3 times
4. 1st clause of j is invoked 3 times
?- j([2, [7, 2]], X).
** (1) Call : j([2, [7, 2]], _1)
** (2) Call : p([2, [7, 2]], [], _1)
** (3) Call : j(2, _2)
** (3) Exit : j(2, 2)
** (4) Call : p([[7, 2]], [2], _1)
** (5) Call : j([7, 2], _3)
** (6) Call : p([7, 2], [], _3)
** (7) Call : j(7, _4)
** (7) Exit : j(7, 7)
** (8) Call : p([2], [7], _3)
** (9) Call : j(2, _5)
** (9) Exit : j(2, 2)
** (10) Call : p([], [2, 7], _3)
** (10) Exit : p([], [2, 7], [2, 7])
** (8) Exit : p([2], [7], [2, 7])
** (6) Exit : p([7, 2], [], [2, 7])
** (5) Exit : j([7, 2], [2, 7])
** (11) Call : p([], [[2, 7], 2], _1)
** (11) Exit : p([], [[2, 7], 2], [[2, 7], 2])
** (4) Exit : p([[7, 2]], [2], [[2, 7], 2])
** (2) Exit : p([2, [7, 2]], [], [[2, 7], 2])
** (1) Exit : j([2, [7, 2]], [[2, 7], 2])
X = [[2, 7], 2]?
yes
Figure 10. Spy Question 5 (top) and trace (bottom).
fourth option subjects have to either identify a box with the relevant successful call to disprove the statement or seek out a horizontal bar which indicates that no clauses matching the call exist.
Problem 4: To solve this problem, which includes the program source code, subjects are asked about the number of times a particular clause is invoked when the program is executed to prove a given query. The correct answer is three times, see Figure 7.
Figure 12. TPM* Question 5 trace.
Key to Spy question 5
Spy TPM*
j = e
p = k
2 = g
7 = h
X = A
When attempting to solve this problem with the help of the Spy trace output, subjects have to look through the trace and count up the call lines in which either the call term unifies with the fourth clause but with no earlier clauses, or the call term was unifiable with both the fourth and fifth clauses and it can then be deduced that the fourth clause failed for that call.
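For example, in the Figure 7 trace the first list elements differ for the calls at (1), (3) and (4), so the third clause's head cannot match and the fourth clause is tried for each of them; its guard X < Y then fails for (1) (7 < 5) and (3) (11 < 10) but succeeds for (4) (11 < 12). For the call at (2) the third clause matches and succeeds (both lists begin with 7), so the fourth clause is never tried there, and the call at (5) has an empty first list. This yields the correct total of three matches of the fourth clause's head.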
In the EPTB trace, as in Question 2 above (which is a less complicated version of this type of problem), subjects can either use the same strategy as for the Spy trace or, more simply, count the lines showing an invocation of the specified clause, indicated in the EPTB format by the display of the clause itself, preceded by an explicit clause number (i.e. "4) mayrphi..."), see Figure 8.
In the TPM* trace version the subjects have to scan clause boxes which explicitly show clause number 4, see Figure 9. They must also note that the initial call, whose clause box shows clause 5, must have matched clause 4 earlier and failed.
Problem 5: This problem is presented with the program code and subjects are asked to pick out a true statement referring to the number of times a particular clause is invoked given a particular query, see Figure 10.
In the Spy trace version subjects must systematically scan through the trace identifying how call lines have unified with clauses. The task requires the subject to recognize that certain call lines for “j” refer to more than a single invocation and involve both clauses, see Figure 10.
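For example, in the Figure 10 trace the calls to p at (2), (4), (6) and (8) all have a non-empty list as their first argument, so only the second clause of p can have been invoked for them, while the calls at (10) and (11) have an empty first argument and are taken by the first clause; this confirms the correct first option that the second clause of p is invoked four times.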
With the EPTB trace it is necessary to find explicit references to the particular clause, recognizing that both a call line and a retry line involve an invocation, see Figure 11.
With the TPM* trace, subjects need to count the number of boxes labelled with that procedure which contain the relevant clause number. They also have to count the number of boxes labelled with the same procedure but with a later clause number with a failed sub-tree corresponding to the relevant clause, see Figure 12.
5. Method
5.1. DESIGN
The experiment was a within-subjects design: all subjects attempted to solve all the stimuli problems (see Figure 13). Problems were presented in a pseudo-random order; no problem was allowed to be followed immediately by another of the same type but with a different trace. Subjects responded by selecting from multiple choice answers. These choices were randomized for order. The choice of “giving up” with no selection was also
provided for each problem. Data on time taken to solve a problem (or give it up) as well as the chosen response were collected.
5.2. SUBJECTS
Forty-three undergraduate novice Prolog programmers at Sussex University fully completed the problem solving task, and were paid five pounds for taking part in both parts of the experiment. The students were taking a 10 week course in Prolog as part of degrees in computer science or artificial intelligence and had learned at least one other programming language prior to learning Prolog. The students were familiar with the Spy tracer from their Prolog course and learned about EPTB and TPM* as part of the experiment.
5.3. PROCEDURE
The instructions, tutorial and problem solving task were presented on Sun workstations in three stages. The entire process was self-administered by the subject, who responded by pressing a few keys on the keyboard. Following the preliminary instructions explaining the aim of the study and the nature of the problem solving task, subjects were given a tutorial on non-interactive modified trace outputs based on TPM* and EPTB tracers. Descriptions of various features of both types of trace output assumed a basic understanding of Spy traces. Subjects who felt that they had an inadequate understanding of Spy tracers did not participate any further in the study. In the next stage subjects were required to pass a criterion test designed to ensure that they had the necessary understanding of TPM* and EPTB to be able to attempt solving the problems. The criterion test had 11 questions on various features of both tracers, and subjects had to get at least 9 correct in order to proceed to the main part of the experimental task. Subjects were allowed two attempts to reach this criterion. Following an incorrect response, subjects were given feedback explanations of the correct response. Those who failed to meet the criterion did not take part in the rest of the study. This procedure tried to ensure that only subjects with an adequate understanding of Prolog as well as of TPM* and EPTB were included in the results reported here. Of the original 64 subjects who embarked on the experiment, 18 failed to meet the criterion or withdrew before the next stage.
The third stage was the main problem-solving task. Each problem was presented on one side of the screen as a multiple choice question which the subjects were requested to read through before pressing a key to see the accompanying trace, which then appeared on the other side of the screen. This enabled us to collect data on time spent reading the question separately from time spent in trying to solve the problem with the aid of a trace output. Subjects picked a response (or a "give up" response) which they had to confirm by pressing an appropriate key, which ensured the possibility of altering unintended responses. Response data were recorded, but subjects were given no feedback on them. Subjects were asked to complete the task as fast and as accurately as possible. Forty-six subjects embarked on the test, but two failed to finish and one chose the "give up" response to every single question, and their data were discarded. This left 43 subjects who completed the problem solving task (though some of these subjects “gave up” on some of the individual problems).
6. Results
6.1. SOLUTION TIMES
An ANOVA of solution times was carried out with subjects as the random factor and Trace Output (3 levels) and Problem (5 levels) as fixed factors. Solution time was the overall time from first being exposed to the question until the moment that the chosen response was confirmed, see Table 1. There is a significant main effect of trace output, $F(2,84) = 11.32, p \leq 0.001$, indicating that time required to access relevant information varied between trace outputs. There is a significant main effect of problems, $F(4,168) = 42.14, p \leq 0.001$. This was expected since problems varied in difficulty.
There is also a significant interaction between trace outputs and problems, $F(8,336) = 3.53, p \leq 0.01$. Given the interaction, a problem by problem post hoc analysis of simple effects was carried out. Two kinds of comparison were made. (i) A comparison of the mean time of solution for the Spy type trace output compared to the mean of the times for the TPM* and EPTB trace outputs combined. (ii) A comparison of the mean time of solution for the TPM* and EPTB trace outputs.
<table>
<thead>
<tr>
<th>Trace</th>
<th>Problem 1</th>
<th>Problem 2</th>
<th>Problem 3</th>
<th>Problem 4</th>
<th>Problem 5</th>
</tr>
</thead>
<tbody>
<tr>
<td>TPM*</td>
<td>55.2 (23.1)</td>
<td>87.1 (54.3)</td>
<td>60.2 (28.9)</td>
<td>75.3 (54.9)</td>
<td>131.9 (62.6)</td>
</tr>
<tr>
<td>Spy</td>
<td>63.8 (30.2)</td>
<td>102.7 (58.5)</td>
<td>77.3 (44.5)</td>
<td>107.4 (56.3)</td>
<td>163.9 (100.0)</td>
</tr>
<tr>
<td>EPTB</td>
<td>70.4 (34.5)</td>
<td>75.8 (61.1)</td>
<td>81.7 (51.9)</td>
<td>69.7 (36.8)</td>
<td>112.4 (63.6)</td>
</tr>
<tr>
<td>Mean</td>
<td>63.1 (30.1)</td>
<td>88.6 (58.7)</td>
<td>73.1 (43.5)</td>
<td>84.1 (52.4)</td>
<td>136.1 (79.7)</td>
</tr>
</tbody>
</table>
Table 1
Solution times mean (SD) in seconds of all responses (n = 43)
The resultant F values are shown in Table 2. In order to contain familywise (FW) error the Scheffé test was used to compute critical values of F. The table shows that at $p = 0.05$ Spy type trace outputs produced significantly slower times than combined TPM* and EPTB times for Problems 4 and 5. It also shows that at $p = 0.05$, TPM* trace outputs produced faster solution times than EPTB trace outputs for Problem 1 but slower solution times for Problem 3.
Table 3 illustrates mean solution times of correctly solved problems only. The overall pattern of differences between trace outputs is similar to that in Table 1 which includes all solution times, and on which the solution time ANOVA and analysis of simple effects is based.
6.2. RESPONSE ACCURACY
An ANOVA for response accuracy was also carried out. The percentage correct response for each condition is given in Table 4. Given that an individual response was either correct or not, the basic data is essentially binary. Moreover, the overall response accuracy levels in each cell were well over 75% in many cases and under 25% in one case, so the data do not completely conform to the assumptions underlying an analysis of variance. With this proviso in mind, there was a significant main effect of problems, $F(4,152) = 32.89, p \leq 0.001$, which reflected the varying level of difficulty of problems. There was a significant main effect of trace outputs, $F(2,76) = 23.06, p \leq 0.001$, and there was a significant interaction between trace output and problem, $F(8,304) = 8.07, p \leq 0.001$.
Given the interaction, a problem by problem post hoc analysis of simple effects was carried out. Again two kinds of comparisons were made. (i) A comparison of mean response accuracies for the Spy type trace output compared to the mean of the response accuracies for the TPM* and EPTB trace outputs combined. (ii) A comparison of the mean response accuracies for the TPM* and EPTB trace outputs.
The resultant F values are shown in Table 5. In order to contain familywise (FW) error the Scheffé test was used to compute critical values of F. The table shows that at $p = 0.05$ Spy-type trace outputs produced significantly lower response accuracy than combined TPM* and EPTB response accuracies for Problems 2, 4 and 5. This is a similar result to that for solution times, see Table 2, though the solution time difference for Problem 2 does not reach significance. It also shows that at $p = 0.05$, EPTB trace outputs produced higher response accuracies than TPM* trace outputs for Problems 4 and 5, though this is not mirrored in the response time differences, see Table 2.
There is a possibility that problem statements for Problems 4 and 5 were not true isomorphs, see Figure A1 in the appendix. Some problems were couched in terms of “matching” and others in terms of “invocation”. In some cases an extra explanatory note was added about the meaning of “invocation”.
This extra explanatory sentence appeared in the EPTB version of Problem 4 and in both the Spy and the EPTB versions of Problem 5. This may partly account for the trend in the data suggesting better performance on Problem 5 compared to Problem 4 in their Spy versions, and for some of the significant disparity between TPM* and EPTB on Problems 4 and 5.
6.3. WRONG CHOICES
Problem 1 was answered correctly by most subjects so the following analysis of wrong choices concentrates on Problems 2–5. Table 6 gives the number of times that each of the responses was chosen for each problem and trace. Note that there was a “give up” option in each case and this is shown as option “0” in the table.
Problem 2: In the Spy trace version, eight subjects gave up, six chose option 4 (that “e” is defined by a single recursive clause) and four chose option 2 (that the first clause of “g” has five sub-goals, see Figure 5). None of the Spy subjects chose option 3 (that the second clause of “g” has five sub-goals). In the EPTB version 5 subjects also chose option 4.
Problem 3: In both the Spy and the TPM* versions, no subject chose option 3 (that the second clause of “f” is not called), roughly similar numbers divided between options 1 and 2 and giving up. Option 1 states that the first clause of “g” may have more than two sub-goals and option 2 says that the second clause of “g” has exactly three sub-goals. An extra complication in this question was that the subject had to select the option which was false.
Problem 4: To solve this problem, which included the program source code, subjects were asked about the number of times a particular clause is invoked when the program is executed to prove a given query. The correct answer was option 2: Three times, see Figure 7. Table 6 shows that the most frequently chosen incorrect response for both Spy (19) and TPM* (20) was option 1: Once. In Spy the remaining responses were roughly evenly divided amongst the other responses including giving up. In TPM* the next most frequent incorrect choice was option 3: Four times. In EPTB the incorrect choices excluded option 4: Not at all, and divided evenly among the other two and giving up.
Problem 5: This problem was presented with the program code and subjects were asked to pick out a true statement referring to the number of times a particular clause was invoked given a particular query. On average this problem took the longest time to solve with all the trace outputs. More subjects (13) gave up on this problem than on any other. The most preferred incorrect response for both Spy and TPM* was option 4: “1st clause of j is invoked 3 times”. In both cases the other incorrect responses divided fairly evenly among the remaining incorrect options and giving up. In EPTB no subject chose option 2: “1st clause of p is invoked 3 times”, and the other incorrect choices were split among the other possibilities.
6.4. MODALITY MISMATCHES
In addition to being harder than Problems 1–3, Problems 4 and 5 included the Prolog program as part of the problem statement whereas the simpler problems did not. In order to solve Problems 4 and 5 subjects would have needed to reconcile the program with its trace. In the case of Spy and EPTB the dominant features of lines in the trace bear a resemblance to lines in the program. This is rather less the case with TPM*, where the dominant features in the trace are the nodes of the AORTA graph itself, so
<table>
<thead>
<tr>
<th>Problem</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
</tr>
</thead>
<tbody>
<tr>
<td>Option</td>
<td>0</td>
<td>1*</td>
<td>2</td>
<td>3</td>
</tr>
<tr>
<td>TPM*</td>
<td>1</td>
<td>38</td>
<td>1</td>
<td>2</td>
</tr>
<tr>
<td>Spy</td>
<td>8</td>
<td>25</td>
<td>4</td>
<td>0</td>
</tr>
<tr>
<td>EPTB</td>
<td>2</td>
<td>33</td>
<td>1</td>
<td>2</td>
</tr>
</tbody>
</table>
Table 6
Choices of each option: ‘0’ is ‘give up’, starred option is correct
there is a modality mismatch between standard Prolog textual programs and their traces. For both Spy and EPTB, search within both the program and the trace is largely up and down the page, whereas in TPM* search needs to follow the links in the AORTA graph. A modality mismatch hypothesis would predict that solution times for Problems 4 and 5 compared to Problems 1–3 would increase to a greater extent for TPM* than for EPTB. It would also predict that response accuracy for Problems 4 and 5 compared to Problems 1–3 would decrease to a greater extent for TPM* than for EPTB.
The hypothesis is only partially supported by the data. Thus, from Tables 1 and 2 we note that TPM* trace output produces faster solution times than EPTB trace output on Problems 1 and 3 but produces only similar solution times for Problems 4 and 5 (i.e. a relative slowing). From Tables 4 and 5 we note that TPM* trace output produces similar response accuracy to EPTB trace outputs for Problems 1–3 and lower response accuracy for Problems 4 and 5 (i.e. a relative decrease in accuracy). However, comparative solution time results for Problem 2 disrupt this picture.
It could be argued that the subjects were not skilled enough in Prolog and that this combined with the extra difficulty of Problems 4 and 5 to produce poor response accuracy for TPM*. To investigate this issue further, the data on correct solution times was reanalysed. Only data from subjects who successfully solved both Problem 4 and Problem 5 in their TPM* versions were selected. There were eight such “good” subjects and their overall response accuracy is shown in Table 7. These eight subjects all gave the correct answers for Problems 1, 2, 4 and 5 in their TPM* version, for Problems 2, 4 and 5 in their EPTB version and for Problem 3 in the Spy version. None of these eight subjects answered Problem 4 correctly in its Spy version. The correct solution times for these eight subjects are shown in Table 8.
An analysis of simple effects on a problem by problem basis for Table 8 shows that none of the differences in solution times between TPM*-type trace outputs and EPTB-type trace outputs reaches significance under the Scheffé test. The high variance in the data for Problem 2 in its EPTB version is caused by one subject who answered this question very slowly. Excluding this subject from the analysis does not clarify the overall picture. So, overall, the data do not readily support the modality mismatch hypothesis, not least because of the confounding effect of problem difficulty.
### Table 7
Percentage correct response for “good” subjects (n = 8)
<table>
<thead>
<tr>
<th>Trace</th>
<th>Problem 1</th>
<th>Problem 2</th>
<th>Problem 3</th>
<th>Problem 4</th>
<th>Problem 5</th>
</tr>
</thead>
<tbody>
<tr>
<td>TPM*</td>
<td>100</td>
<td>100</td>
<td>75</td>
<td>100</td>
<td>100</td>
</tr>
<tr>
<td>Spy</td>
<td>87.5</td>
<td>62.5</td>
<td>100</td>
<td>0</td>
<td>37.5</td>
</tr>
<tr>
<td>EPTB</td>
<td>87.5</td>
<td>100</td>
<td>87.5</td>
<td>100</td>
<td>100</td>
</tr>
<tr>
<td>Mean</td>
<td>91.7</td>
<td>87.5</td>
<td>87.5</td>
<td>66.7</td>
<td>79.2</td>
</tr>
</tbody>
</table>
Table 8
Solution times mean (SD) in seconds of correct responses only from “good” subjects (n = 8)
<table>
<thead>
<tr>
<th>Trace</th>
<th>Problem 1</th>
<th>Problem 2</th>
<th>Problem 3</th>
<th>Problem 4</th>
<th>Problem 5</th>
</tr>
</thead>
<tbody>
<tr>
<td>TPM*</td>
<td>54.1 (17.0)</td>
<td>71.1 (40.0)</td>
<td>52.9 (22.6)</td>
<td>67.4 (28.7)</td>
<td>142.5 (45.2)</td>
</tr>
<tr>
<td>Spy</td>
<td>57.5 (32.9)</td>
<td>126.0 (51.3)</td>
<td>64.0 (31.7)</td>
<td>129.6 (30.0)</td>
<td>207.2 (71.9)</td>
</tr>
<tr>
<td>EPTB</td>
<td>59.5 (32.2)</td>
<td>101.4 (117.7)</td>
<td>96.0 (52.8)</td>
<td>71.0 (48.1)</td>
<td>104.4 (49.4)</td>
</tr>
<tr>
<td>Mean</td>
<td>57.0 (27.2)</td>
<td>99.5 (77.7)</td>
<td>71.0 (40.7)</td>
<td>89.3 (45.6)</td>
<td>151.4 (69.4)</td>
</tr>
</tbody>
</table>
7. Discussion
Before discussing the likely reasons for the observed results, certain caveats should be restated. The results are based on a non-interactive use of trace outputs for small programs whose traces would fit in a single screen. It is not at all clear how these results could be generalized to much larger programs, to more (natural) interactive debugging, programming and problem solving activities, and therefore no attempt will be made to do so.
To summarize, the results show a major effect of format of trace output on subjects’ ability to solve Prolog problems in terms of both the time to solve problems and the accuracy of response.
Overall, as expected, Spy had the most shortcomings. On Problems 4 and 5 it produced longer solution times, and on Problems 2, 4 and 5 it produced lower accuracy than the other two tracers. It did not provide explicit information on system goals, and the simple, non-indented, linear text format is not good at conveying information about backtracking, more complicated retrying of clauses and information about variables involving list manipulation. Much of this implicit information required a large degree of inference on the part of the user. Novice programmers found this very difficult and ended up performing worse than they did with other tracers.
7.1. TEXT VERSUS GRAPHICS
Our study illustrates that format and perspective have a significant effect on information access and that the balance of advantage between textual and graphical format is far from clearcut. Graphic TPM* produced faster solution times than textual EPTB on Problems 1 and 3, though response accuracy was similar to EPTB. By contrast, the response accuracy for textual EPTB was better than graphical TPM* for Problems 4 and 5, though the solution times were similar to those for TPM*. On Problem 2 TPM* and EPTB produced similar results.
We have already noted that Problems 4 and 5 were both harder and were presented together with their program code. We investigated the possibility of some modality mismatch effect between lines in the program and “lines” in the trace, but the data were equivocal. So it seems more likely that the solution time and response accuracy disparities are a result of a particular interaction between the problems and the amount of inference required from the trace. These results suggest that hidden state information rather than a modality mismatch was the dominant factor.
7.2. HIDDEN STATE INFORMATION
Larkin (1989) draws attention to the problem of hidden state information in a representation. Both Problems 4 and 5 are distinguished from the others in that in each case a single one of the incorrect answer options accounted for a large proportion of the errors in the cases of both Spy and TPM*. It would be necessary to conduct further experiments to establish exactly why subjects chose the options that they did.
A working hypothesis for both Spy and TPM* is that the particular incorrect options were selected because the subjects had to make an inference from the trace notation, since the required state information is hidden. The fact that they were novices would have increased the chance of error.† In the case of Spy traces clause matching information is particularly hard to deduce. It is more readily available in TPM*, but the subjects still had to deduce that, in a clause box showing clause 5 together with an earlier failure, that earlier failure came from a match to clause 4. In both the case of Spy and TPM* that inference is only easily made by understanding the potential unification between the goal in question and the heads of the program clauses.
8. Conclusion
This study has shown that representation format has an effect in display-based problem-solving and that the effects are problem-dependent. The study has also shown that the case for graphical representations as compared to textual representations is not absolute but depends, as others have argued, on the nature of the problem-solving being undertaken.
Several lines of development of this work suggest themselves. First, the study should be rerun using a different set of problems. These would be drawn more carefully from programming courses so as to ensure ecological validity and be more carefully disguised to ensure that their statements are isomorphic. The modality mismatch hypothesis needs further work by comparing traces with equivalent hidden state information for problems of equivalent difficulty but offered in different modalities. The analysis of response times, accuracy and particularly errors would benefit from additional protocol “think aloud” data in the manner of Kessler and Anderson (1989).
The study should be extended to the use of tracers as interactive devices and to the use of more realistically sized programs. Some progress has been made on the former, again showing interesting differential effects between tracers and problems (Mulholland, 1994, 1995).
There is scope for further trace notation design. We have made some initial steps in designing and evaluating a Prolog trace notation (TTT) that attempts to marry what we believe are the best features of TPM* and EPTB (Taylor et al., 1991, 1994). The evaluation of TTT with a small number of subjects (n = 13) produced broadly similar results to those reported here, confirming the relative effectiveness of TPM* compared to Spy (Patel, du Boulay & Taylor, 1994).
† The results also suggest that the initial instruction and the criterion test may not have been sufficiently rigorous.
We thank R. Noble, M. Eisenstadt and the referees for most helpful comments. This work was supported by a grant from the UK Joint Research Council Initiative in Cognitive Science/HCI. The experimental work was conducted using the POPLOG programming environment.
References
Appendix
This section includes TPM* and EPTB questions (Figure A1) for Question 4 to illustrate the degree of disguise between question isomorphs. The correct answer choice is starred.
Given the program below, and the trace output shown, how many times is the 4th clause invoked (whether successfully or unsuccessfully) during the execution of the following goal (N.B. A clause is "invoked" if its head matches a call - it will then succeed if all its subgoals succeed, and fail otherwise.)
?- mayrphi([9,19], [1,9,[11,30]], L).
mayrphi([], _, []).
mayrphi(_, [], []).
mayrphi([H1|T1], [H1|T2], [H1|Others]):-
mayrphi(T1, T2, Others).
mayrphi([H1|T1], [H2|T2], Others):-
H1 < H2, mayrphi(T1, [H2|T2], Others).
mayrphi([H1|T1], [H2|T2], Others):-
H1 > H2, mayrphi([H1|T1], T2, Others).
1. Once
2. Three times*
3. Four times
4. Not at all
Let "zwysick" be defined by the following five clauses.
zwysick([], _, []).
zwysick(_, [], []).
zwysick([A|P], [A|Q], [A|R]):-
zwysick(P, Q, R).
zwysick([A|P], [B|Q], R):-
A < B, zwysick(P, [B|Q], R).
zwysick([A|P], [B|Q], R):-
A > B, zwysick([A|P], Q, R).
What is the number of invocations (successful or otherwise) of the 4th clause when the following goal is computed?
?- zwysick([3,7], [2,3,5,8], I).
1. One
2. Three*
3. Four
4. None
Figure A1. Question 4 EPTB (top), TPM* (below).
HexType: Efficient Detection of Type Confusion Errors for C++
Yuseok Jeon
Purdue University
jeon41@purdue.edu
Priyam Biswas
Purdue University
biswas12@purdue.edu
Scott Carr
Purdue University
carr27@purdue.edu
Byoungyoung Lee
Purdue University
byoungyoung@purdue.edu
Mathias Payer
Purdue University
mathias.payer@nebelwelt.net
ABSTRACT
Type confusion, often combined with use-after-free, is the main attack vector to compromise modern C++ software like browsers or virtual machines. Typecasting is a core principle that enables modularity in C++. For performance, most typecasts are only checked statically, i.e., the check only tests if a cast is allowed for the given type hierarchy, ignoring the actual runtime type of the object. Using an object of an incompatible base type instead of a derived type results in type confusion. Attackers abuse such type confusion issues to attack popular software products including Adobe Flash, PHP, Google Chrome, or Firefox.
We propose to make all type checks explicit, replacing static checks with full runtime type checks. To minimize the performance impact of our mechanism HexType, we develop both low-overhead data structures and compiler optimizations. To maximize detection coverage, we handle specific object allocation patterns, e.g., placement new or reinterpret_cast which are not handled by other mechanisms.
Our prototype results show that, compared to prior work, HexType has at least 1.1 – 6 times higher coverage on Firefox benchmarks. For SPEC CPU2006 benchmarks with overhead, we show a 2 – 33 times reduction in overhead. In addition, HexType discovered 4 new type confusion bugs in Qt and Apache Xerces-C++.
CCS CONCEPTS
• Security and privacy → Systems security; Software and application security;
KEYWORDS
Type confusion; Bad casting; Type safety; Typecasting; Static_cast; Dynamic_cast; Reinterpret_cast
1 INTRODUCTION
C++ is well suited for large software projects as it combines high level modularity and abstraction with low level memory access and performance. Common examples of C++ software include Google Chrome, MySQL, the Oracle Java Virtual Machine, and Firefox, all of which form the basis of daily computing uses for end-users.
The runtime performance efficiency and backwards compatibility to C come at the price of safety: enforcing memory and type safety is left to the programmer. This lack of safety leads to type confusion vulnerabilities that can be abused to attack programs, allowing the attacker to gain full privileges of these programs. Type confusion vulnerabilities are a challenging mixture between lack of type and memory safety.
Generally, type confusion vulnerabilities are, as the name implies, vulnerabilities that occur when one data type is mistaken for another due to unsafe typecasting, leading to a reinterpretation of the underlying type representation in semantically mismatching contexts.
For instance, a program may cast an instance of a parent class to a descendant class, even though this is neither safe nor allowed at the programming language level if the parent class lacks some of the fields or virtual functions of the descendant class. When the program subsequently uses the fields or functions, it may use data, say, as a regular field in one context and as a virtual function table (vtable) pointer in another. Such type confusion vulnerabilities are not only wide-spread (e.g., many are found in a wide range of software products, such as Google Chrome (CVE-2017-5023), Adobe Flash (CVE-2017-2095), Webkit (CVE-2017-2415), Microsoft Internet Explorer (CVE-2015-6184) and PHP (CVE-2016-3185)), but also security critical (e.g., many are demonstrated to be easily exploitable due to deterministic runtime behaviors).
Previous research efforts tried to address the problem through runtime checks for static casts. Existing mechanisms can be categorized into two types: (i) mechanisms that identify objects through existing fields embedded in the objects (such as vtable pointers) [6, 14, 29, 38]; and (ii) mechanisms that leverage disjoint metadata [15, 21]. First, solutions that rely on the existing object format have the advantage of avoiding expensive runtime object tracking to maintain disjoint metadata. Unfortunately, these solutions only support polymorphic objects which have a specific form at runtime that allows object identification through their vtable pointer. As most software mixes both polymorphic and non-polymorphic objects, these solutions are limited in practice — either developers must manually blacklist unsupported classes or programs end up having unexpected crashes at runtime. Therefore, recent state-of-the-art detectors leverage disjoint metadata for type information. Upon object allocation, the runtime system records the true type of the object in a disjoint metadata table. This approach indeed does not suffer from non-polymorphic class issues, because type information can be accessed without referring vtable pointers.
However, disjoint metadata schemes have to overcome two challenges: (i) due to C++'s low-level nature it is hard to identify all object allocations, and (ii) the lookup through the disjoint metadata table results in prohibitive overhead. Existing approaches with disjoint metadata exhibit precisely these drawbacks. Because it is difficult to handle all C++ language quirks imposed by developers, they only protect a small fraction of typecasts in practice. Due to the complexity of metadata tracking, existing approaches introduce prohibitive overheads (TypeSan [15] has up to 71.2% overhead for Firefox with a geometric mean of 30.8%; note that TypeSan already improves performance over CaVer). Orthogonal defenses address related problems but not type confusion itself. CFI techniques [20, 34–36] verify all indirect control-flow transfers within a program to detect control-flow hijacking. However, these techniques address the type confusion problem only partially and only if control flow is hijacked, i.e., they detect usage of the corrupted vtable pointer, ignoring any preceding data corruption. Similarly, type protection schemes [14, 38] protect virtual calls from vtable hijacking attacks but do not block type confusion attacks. Memory safety mechanisms [24, 26, 32] protect against spatial and temporal memory safety violations but incur prohibitively high overhead in practice. Also, these mechanisms do not protect against type confusion, e.g., they do not stop an int array of the correct size from being used in place of an object. Control-flow hijacking protection and memory safety are therefore orthogonal to type confusion detection. Type confusion may be used to cause a memory safety violation; detecting type confusion allows earlier detection of security violations in these cases.
We propose HexType, a mechanism that protects C++ software from type confusion by making all casts explicit. Each cast in the source language (explicit or implicit, static or dynamic) is turned into a dynamic runtime check. HexType records the type of each object and specific casts are replaced with our instrumentation. We fundamentally address the challenges of earlier work by (i) increasing coverage of typecasting checks and (ii) drastically reducing overhead.
Our prototype implementation of HexType vastly outperforms state-of-the-art type confusion detectors, increasing coverage and often lowering overhead. Our reduced overhead is the result of novel optimization techniques and an efficient type metadata structure. We leverage an analysis that identifies types that are used in typecasting, allowing us to remove tracing overhead for any objects that are never cast. For the type metadata structure, we design a two-layered data structure that combines a hash table (fast path) and a red-black tree (slow path) to reduce object tracing overhead. Beyond performance, our mapping scheme also overcomes limitations of existing work, such as relying on fixed addresses for metadata, which may run into compatibility issues if applications try to reuse the same addresses.
To address the low coverage of related work, we developed allocation detectors that track reuse of pre-allocated memory space cases for new objects (through placement new) and transferring objects through reinterpret_cast. Additionally, HexType increases coverage for dynamic_cast and reinterpret_cast and goes beyond static_cast unlike all the previous works. In the case of dynamic_cast, HexType replaces the existing inefficient typecasting verification routine with a fast lookup using our metadata. HexType supports reinterpret_cast to increase object tracing coverage and find additional bugs.
Due to our increased coverage, we discovered four new type confusion vulnerabilities (which evaded previous approaches) in two widely-used open source libraries (Qt Base library and Apache Xerces-C++) during our evaluation. For the Firefox benchmarks, HexType increases coverage by 1.1 – 6 times compared to TypeSan with some increased performance overhead due to the vast increase in coverage. For SPEC CPU2006 benchmarks with overhead, we show a 2 – 33 times reduction in overhead.
Our major contributions can be summarized as:
1. An open source type confusion detector with low overhead and high coverage (outperforming state-of-the-art detectors);
2. A novel optimization that greatly reduces the number of objects that need to be tracked (as much as 54% – 100% on SPEC CPU2006), thus reducing overhead;
3. Design of efficient data structures that use a fast-path (O(1) time complexity) for type information insertion and lookup (with a hit rate of 94.09% and 99.99% on the SPEC CPU2006 and 98.76% and 95.20% for Firefox respectively);
4. Robust allocation identification implementation that greatly increases coverage (1.1 – 6 times over TypeSan on Firefox) combined with also covering alternate casting methods such as placement new;
5. Discovery of four new vulnerabilities in QT Base library and Apache Xerces-C++;
2 BACKGROUND
In this section, we provide background information on C++’s type system, various cast operations, and previous type confusion detection tools necessary to understand the design and implementation of HexType.
2.1 C++ Classes and Inheritance
C++ is an object-oriented programming language, with classes as the primary abstraction. Classes allow the programmer to define new types. A class can inherit from multiple ancestor classes. The descendent class has all the same members (methods and variables) as its ancestor(s) and optionally additional members defined in the descendent class definition.
In C++, a pointer of type A can be cast into a pointer of another type, type B. This effectively tells the compiler to treat the pointed-to object as being of type B.

The crucial question is: when is a typecast safe? The answer depends on the type of the pointed-to object and the destination type (type B in the previous example). Focusing on casting between class types, the security objective of this work, casting from a descendant class to an ancestor class is always safe, since the members of the descendant class are a superset of the members of the ancestor class. This operation is called upcasting. For example, as shown in Figure 1, if we visualize the type hierarchy with the ancestor class at the top and descendants at the bottom, moving up the hierarchy (upcasting) is safe. On the other hand, downcasting, casting from an ancestor to a descendant, may not be safe if the ancestor misses any member of the descendant class. This is depicted in Figure 2. Such downcasting has been abused by attackers in a wide range of popular C++ programs, leading to complete compromise of the underlying system, as recently shown for, e.g., Google Chrome (CVE-2017-5023), Adobe Flash (CVE-2017-2095), Webkit (CVE-2017-2415), Microsoft Internet Explorer (CVE-2015-6184), and PHP (CVE-2016-3185).
2.2 C++ Cast Operations
The C++ syntax allows four different types of casts to meet different requirements of the developer. Each cast type performs unique casting operations, imposing non-trivial security implications. In the following, we provide detailed information on each cast type, particularly focusing on its security aspects in terms of type confusion issues.

The example in Figure 1 shows a cast using static_cast, but there are other casts in C++ and their details are important to this work. The other cast types we are concerned with are dynamic_cast, reinterpret_cast, and C-style typecasting.
```cpp
static_cast<type>(expression)
dynamic_cast<type>(expression)
reinterpret_cast<type>(expression)
const_cast<type>(expression)
```
Static Cast. A static_cast casts an object of type A to an object of type B. The check is executed purely at compile time and no runtime check is performed. Due to the static nature of this cast, the runtime type of the object is not considered and the check is limited to testing whether the two types are compatible, i.e., whether there is a path in the type hierarchy from expression's type to type that involves upcasting and/or downcasting.

While not incurring any performance overhead, the safety guarantees of static casts are limited. The programmer is therefore responsible for ensuring that an object of the correct type is used, e.g., guaranteeing that the downcast object is actually an object of the derived type. In practice, since it is challenging to establish such compatibility at compile time, this has led to the unfortunate fact that type confusions are among the dominating vulnerabilities in modern C++ programs [23].
Dynamic Cast. A dynamic_cast can safely convert types between classes in the same class hierarchy. Whereas static_cast only performs a compile-time check, dynamic_cast performs an additional runtime check using heavy-weight metadata, Run-Time Type Information (RTTI). As, in general, the dynamic runtime type of an object cannot be determined statically, dynamic_cast must leverage runtime type information such as RTTI. RTTI encodes all type-related information, and the compiler generates RTTI per type such that each type has a dedicated RTTI entry in the compiled binary. The RTTI entries essentially form a recursive structure in that each RTTI entry points to other RTTI entries to represent the class hierarchy. The compiler further appends a reference to the RTTI entry at the end of each virtual function table, so that the RTTI entry can be retrieved at runtime from any virtual address pointing to an object. In other words, since the first field of a polymorphic object is typically filled with a virtual function table pointer, dynamic_cast can find the RTTI entry for a given object address via the virtual function table pointer. After locating the corresponding RTTI entry, dynamic_cast recursively traverses the RTTI to verify the casting correctness (i.e., that the types are compatible). If there is a path in the type hierarchy between expression's type and the target type, then the types are compatible. The types are compatible whenever the type of expression is a descendant of type (upcast). The types can also be compatible when type is the exact type of the object pointed to by expression. If the cast is incorrect (i.e., the type of expression and type are incompatible), the cast fails in one of two ways:

- If type is a pointer type, dynamic_cast returns NULL.
- If type is a reference type, it throws a pre-defined exception (i.e., std::bad_cast).

Due to the design of dynamic_cast, its usage is strictly limited to polymorphic objects. As mentioned before, dynamic_cast relies on a virtual function table to locate the RTTI, but the virtual function table is only present in polymorphic objects. Given this limitation, compilers simply generate a compile-time error if a dynamic_cast is used on a non-polymorphic type. Note that runtime errors are still possible.
Reinterpret Cast. A reinterpret_cast converts between any two (potentially incompatible) types. It instructs the compiler to reinterpret the underlying bit pattern of the cast object. Because it neither creates a copy nor performs any runtime check, a reinterpret_cast always incurs zero overhead. From the security standpoint, programmers are responsible for ensuring the correctness of a reinterpret_cast, similar to the case of static_cast. Since reinterpret_cast only changes the object's type, it simply returns the same address. This behavior can cause problems for polymorphic classes or classes with multiple inheritance. For polymorphic classes, reinterpret_cast returns a pointer to an object with a potentially wrong vtable pointer, as reinterpret_cast does not change the memory of the object. If the object uses multiple inheritance, then a pointer to a base class may hold the wrong value (not a pointer to the sub-object itself) [5]. However, if the exact source object type is known, then reinterpret_cast can be used to: (1) efficiently construct an object without executing the constructor (reusing an old object of the same type) and (2) restore the actual type if a function returns a void* pointing to an object.
class Ancestor { public: int x; };
class Descendant : public Ancestor {
public:
    double y;
};

Ancestor *A = new Ancestor();
Descendant *D;
D = static_cast<Descendant*>(A);  // unchecked downcast: *A is not a Descendant
D->y;                             // type confusion: y lies past the Ancestor object
Figure 2: A code example and diagram of a type confusion problem where an ancestor class is incorrectly accessed using a pointer to a descendent class. The static cast results in type confusion and accessing the field D->y results in a memory safety violation.
Figure 3: A system overview of HexType. HexType consists of several modules that analyze type relationship information and insert object tracing and typecasting instrumentation to verify typecasting operation.
**2.3 Defenses against type confusion**
Type confusion is a pressing problem and several mechanisms have been proposed to detect and protect against type confusion. As mentioned earlier, the existing defenses can be grouped into two categories: (i) those based on identifying objects based on existing fields embedded in the object themselves (such as vtable pointers) [6, 14, 29, 38]; and (ii) those based on disjoint metadata [15, 21].
CaVer [21] uses disjoint metadata for all allocated objects to support non-polymorphic classes without blacklisting. CaVer is the first typecasting detection tool (based on disjoint metadata) that can verify type-casting for non-polymorphic objects. However, CaVer suffers from both security and performance issues — low safety coverage on castings and high runtime overhead.
TypeSan [15] reduces the performance overhead by a factor of 3 – 6 compared to CaVer and increases detection coverage by including C-style allocation (e.g., malloc). However, the overhead of both disjoint metadata approaches is still high due to inefficient metadata tracking, e.g., tracking most live objects. Also, while increasing coverage compared to CaVer, TypeSan still has an overall low coverage rate; in particular, TypeSan achieves only a 12 – 45% coverage rate for Firefox. These limitations motivated us to design HexType, which overcomes them — reducing per-cast check overhead, increasing coverage, and providing additional features.
**3 THREAT MODEL**
Our threat model assumes that the underlying application is benign but contains a type confusion error that an attacker can find and exploit. The primary goal of our defense mechanism is to prevent such type confusion attacks. Our defense mechanism automatically detects such exploitation attempts, avoiding any negative security ramifications. We further assume that the attacker may read arbitrary memory, and thus our detection mechanism is designed not to rely on information hiding or randomization. Attacks not based on type confusion, including control-flow hijacking, integer overflow, and memory corruption, are out of scope and can be mitigated by other security hardening techniques. We assume that our instrumentation cannot be removed by the attacker, i.e., our instrumented code resides on non-writable pages. The underlying operating system, program loader, and system libraries are in the Trusted Computing Base (TCB).
4 HEXTYPE DESIGN AND IMPLEMENTATION
HexType is a Clang/LLVM-based type confusion detector for C++ programs. During compilation of a target program, HexType generates a HexType-hardened program. During runtime, if HexType detects a type confusion error, the program is terminated with a detailed bug report.
Figure 3 illustrates an overview of HexType. Given the source code as input, HexType generates a type table containing all type relationship information (§4.1) and, at runtime, information about the true types of each allocated object is collected in the object mapping table (§4.2). HexType verifies the correctness of each cast using both the type relationship information and object mapping table (§4.3). HexType leverages a set of optimization techniques to reduce performance overhead during the above processes (§4.4).
4.1 Type Relationship Information
In order to verify typecasting operations, HexType needs to know a valid set of destination types that can be cast from a given source type. Note that compilers keep this information readily available during compilation to check the validity of casts statically, but such checks are inherently limited as the true source type of an object is only known at runtime. C++ applications generally do not keep explicit information about the type hierarchy. This subsection describes how HexType generates and maintains a hierarchical type information for executables and shared libraries. We call this information type table.
During compilation, HexType extracts all type relationship information and prepares metadata for each type. For example, as shown in Figure 1, for the type DOMElementImpl, HexType first collects all types that it is allowed to be cast to (i.e., DOMElement and DOMNode), each of which is a parent class of DOMElementImpl. Instead of simply storing a type name in the type table, HexType stores a string hash of the type name to avoid expensive string match operations, enabling O(1) comparisons. HexType exports, per type, a list of hash values as a global variable during compilation, allowing other libraries to reuse this information. Each list of hash values is sorted so that, during runtime typecasting verification, a destination type hash can be located in the list with a binary search. HexType generates one such global variable per type.
DOMElementImpl: H(DOMElement), H(DOMNode), ...
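The lookup this enables can be sketched as follows; the hash function, entry layout, and names below are illustrative stand-ins, not HexType's actual implementation:

```cpp
#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

// Illustrative 64-bit FNV-1a hash of a (mangled) type name; any stable
// string hash that allows O(1) comparison of the resulting values would do.
static uint64_t hash_type_name(const std::string& name) {
    uint64_t h = 0xcbf29ce484222325ULL;
    for (unsigned char c : name) {
        h ^= c;
        h *= 0x100000001b3ULL;
    }
    return h;
}

// Per-type entry in the type table: a sorted list of the hashes of all types
// this type may legally be cast to (its ancestors plus phantom classes).
struct TypeTableEntry {
    uint64_t self_hash;
    std::vector<uint64_t> allowed_targets;  // kept sorted for binary search
};

// Emitted once per type at compile time (sketch).
TypeTableEntry make_entry(const std::string& type,
                          const std::vector<std::string>& ancestors) {
    TypeTableEntry e{hash_type_name(type), {}};
    for (const auto& a : ancestors)
        e.allowed_targets.push_back(hash_type_name(a));
    std::sort(e.allowed_targets.begin(), e.allowed_targets.end());
    return e;
}

// Runtime verification: may an object whose true type is described by
// `true_type` be cast to the destination type identified by `dst_hash`?
bool cast_is_allowed(const TypeTableEntry& true_type, uint64_t dst_hash) {
    return dst_hash == true_type.self_hash ||
           std::binary_search(true_type.allowed_targets.begin(),
                              true_type.allowed_targets.end(), dst_hash);
}
```

For the DOMElementImpl entry shown above, the list would hold the hashes of DOMElement and DOMNode, so a cast to either of them is accepted with a hash comparison plus at most an O(log n) binary search.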
To provide compatibility, HexType allows phantom classes and records them in the type table. A phantom class is a parent-child relationship in which the data layout of the child is equivalent to the data layout of the parent. HexType allows downcasts from such a parent to its phantom child, as phantom classes are frequently used in practice to support interoperability between C and C++.
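As an illustration (with invented names), a phantom class might look like the following; the child adds no data members or virtual functions, so the downcast never touches memory the object does not have:

```cpp
// Hypothetical phantom class: EventView adds no fields and no virtual
// functions, so its data layout is identical to that of Event.
struct Event {
    int type;
    int payload;
};

struct EventView : Event {
    // only non-virtual convenience methods, no new state
    bool has_payload() const { return payload != 0; }
};

EventView* view_of(Event* e) {
    // Technically a downcast, but safe for phantom classes; HexType records
    // this relationship in the type table and allows the cast.
    return static_cast<EventView*>(e);
}
```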
To manage the type table efficiently, HexType records each type's relationship information only once, following the one definition rule (ODR) [19] of the C++ standard. According to this rule, the definition of each type must be identical (each type's parent information is always the same) across all source files that are merged. Therefore, each type has a unique list of hash values across all source files, except for phantom classes. Since a type may gain phantom (derived) classes that cannot be determined at the point where the type itself is defined (the information only becomes available where the derived class is defined), HexType only needs to update this phantom class information later.
4.2 Object Type Tracing
In order to verify typecasting operations at runtime, HexType needs to locate the type information based on the underlying object identified by the source pointer address in the casting operation. Unlike dynamic_cast, HexType does not utilize RTTI to retrieve type information due to the following limitations of RTTI: (i) RTTI only provides type information for polymorphic objects (not supporting typecasting verification of non-polymorphic objects); (ii) RTTI incurs expensive typecasting verification costs due to its recursive structure; and (iii) RTTI significantly blows up the size of the compiled binary.
For these reasons, HexType designs a new set of techniques, which aims at maximizing security coverage and minimizing performance overhead. In the following, we first describe how HexType captures the underlying memory semantics with respect to the type information. HexType systematically identifies all object allocation sites, which significantly elevates the coverage for typecasting operations (§4.2.1). Next, we illustrate how HexType maintains such memory semantics at runtime. In order to perform efficient lookup operations, HexType employs a new data structure, type table, which supports both a fast-path for performance efficiency and a slow-path for completeness (§4.2.2).
4.2.1 Tracing Object Type Allocation. The C++ type system is not strongly constrained and thus developers can easily change an object's type at runtime as required. This flexibility, though it is one of the main reasons for C++'s popularity, introduces several challenges when tracking type information. More precisely, HexType must identify the correct type information imposed on certain runtime memory objects, but dynamic type changes complicate the identification process.
HexType comprehensively identifies all the sites that assign types, which can be categorized into the following two cases depending on when the type assignment is performed: (1) at the time of creating an object and (2) at the time of transferring an object. The first case includes the well-known new operator, which allocates object memory through the system memory allocator (i.e., malloc) and initializes the object by invoking its associated constructor function. The first case also includes placement new, which reuses specified memory space and simply invokes the constructor for initialization. For these type allocation sites at object creation time, HexType registers the type of the object in the type table by passing the type information and the base pointer to a registration function. The runtime library function updates the type table with this information. We describe in more detail how HexType maintains this information in §4.2.2.
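Conceptually, the inserted instrumentation amounts to a registration call right after the constructor runs; the hook names, the hash constant, and the cleanup placement below are illustrative assumptions, not HexType's actual runtime API:

```cpp
#include <cstdint>
#include <new>

// Illustrative runtime hooks; HexType's real entry points live in its
// compiler-rt library and may use different names and signatures.
static void hextype_update_type(void* addr, uint64_t type_hash) { (void)addr; (void)type_hash; }
static void hextype_remove_type(void* addr) { (void)addr; }

struct Node { int key = 0; Node* next = nullptr; };
constexpr uint64_t HASH_NODE = 0x9e3779b97f4a7c15ULL;  // placeholder type hash

void allocation_sites(void* pool_slot /* suitably sized and aligned storage */) {
    // (1) ordinary new: allocate and construct, then register the true type.
    Node* n = new Node();
    hextype_update_type(n, HASH_NODE);

    // (2) placement new: the memory already exists and only the constructor
    //     runs, so the same registration call is inserted right after it.
    Node* p = new (pool_slot) Node();
    hextype_update_type(p, HASH_NODE);

    // When the objects die, the entries are removed so stale metadata
    // cannot later be confused with a live object.
    hextype_remove_type(p);
    p->~Node();
    hextype_remove_type(n);
    delete n;
}
```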
The second case of type assignment happens when hard-copying objects that have already been constructed. In C++, it is common to copy or move memory objects, e.g., for object marshaling or when passing objects between allocation spaces. Once the memory object has been relocated, the developer is responsible for reassigning the type of the underlying memory object.
template<class T, std::size_t N>
class static_vector
{
    // properly aligned uninitialized storage for N T's
    typename std::aligned_storage<sizeof(T), alignof(T)>::type d[N];
    std::size_t _size = 0;
    ......
public:
    template<typename ...Args> void insert(Args&&... args)
    {
        ......
        // Create an object using placement new
        new(d + _size) T(std::forward<Args>(args)...);
        ++_size;
        ......
    }
    const T& operator[](std::size_t pos) const
    {
        // Access an object using reinterpret_cast
        return *reinterpret_cast<const T*>(d + pos);
    }
    ......
};
Figure 4: Code example for std::aligned_storage using placement new and reinterpret_cast to manage type allocation.
Developers can rely on move or copy operators in C++ if the underlying object is a C++ class object constructed through new or placement new operators. Alternatively, they can explicitly specify the type of underlying memory objects using reinterpret_cast. This second case is commonly used to work around system constraints. For example, when an object is marshaled and unmarshaled to pass it between different components, reinterpret_cast can efficiently construct an object without explicitly executing the class constructor again. To handle reinterpret_cast, HexType instruments reinterpret_cast to call a runtime function with two pieces of information: (i) destination type and (ii) source address information. In the runtime library function, HexType inserts this information into the type table only if there is no matching entry with reinterpret_cast’s source address.
For example, Figure 4 shows how std::aligned_storage is used to create and access objects. In the initial step, static_vector reserves uninitialized, properly aligned storage (the aligned_storage member d). Objects are then created inside this uninitialized storage using placement new in insert(). Finally, the stored objects are accessed through reinterpret_cast in operator[].
In fact, previous work including UBSan, CaVer, and TypeSan all fail to generally handle type assignment sites. In the case of UBSan, it cannot capture the type information of non-polymorphic objects as it has to rely on RTTI, resulting in unexpected crashes at runtime. In the case of CaVer and TypeSan, they only consider new operator as type assignment sites and thus they miss all other assignment sites mentioned above. As we will clearly demonstrate in the evaluation section, HexType showed 1.1 - 6 times higher coverage on Firefox benchmarks compared to TypeSan.
4.2.2 Mapping Objects to type table. HexType maintains an object mapping table, which maps runtime objects to their associated type information in the type table. More specifically, a key in the object mapping table is an object address and its mapped value is an address pointing to the associated entry within the type table. It is performance critical to design this object mapping table efficiently, because a lookup in the object mapping table is performed every time HexType verifies a typecasting operation.
We examined the object tracking methods used in previous work [15, 21]. TypeSan [15] uses a memory shadowing scheme to track global, heap, and stack objects. However, TypeSan's memory shadowing scheme has three limitations: (i) TypeSan uses a fixed address for the metadata table (to enable faster lookups), which may result in compatibility problems if applications reuse the same address, e.g., due to ASLR, which we observed in practice; (ii) TypeSan only updates the object-to-type mapping table when objects are allocated and does not delete information when an object is freed, so stale metadata can create additional problems; and (iii) TypeSan's memory shadowing scheme uses more memory than non-shadowing schemes. CaVer [21] uses a red-black tree to keep track of global and stack objects. However, the overhead becomes prohibitive for, e.g., stack objects, which incur frequent insertions and deletions, since a red-black tree takes O(log N) time to delete, insert, and search.
To this end, HexType leverages a new data structure to reduce the performance overhead of mapping operations. The key insight behind the object mapping table is that some objects are accessed much more frequently than others. We therefore designed a data structure that splits object lookup into a fast path using a hash table and a slow path using a red-black tree, see Figure 5.
Figure 5: A snapshot example of object mapping table, showing how it maps an object using a combination of fast-path and slow-path slots. When HexType looks up type information, an object address is used to obtain the reference to the corresponding object mapping table entry. HexType first matches the fast-path slot. If not present in the fast-path slot, HexType then searches the corresponding red-black tree to find type information (slow-path) and updates the fast-path accordingly.
class Base1 { ... };
class Base2 { ... };
// multiple inheritance
class Derived: public Base1, public Base2 { ... };
Derived obj;
Derived* dp = &obj;
// indicates the Derived's Base2 object
Base2* b2p = dp;
// static_cast restores the original pointer value
Derived* dps = static_cast<Derived*>(b2p);
// reinterpret_cast preserves the new pointer value
Derived* dpr = reinterpret_cast<Derived*>(b2p);
Figure 6: An example of how reinterpret_cast results in a type confusion problem.
Each hash table entry holds two slots: (1) the fast-path slot for the most recently cast object, which holds a reference to the object (to check whether the object matches), the hash of the object's type, and a reference to the object's type relationship information (the set of destination types the object may be cast to); and (2) the slow-path slot, which holds a reference to a per-slot red-black tree maintaining the complete set of objects that map to this hash table entry. In other words, once HexType locates a hash table entry, it simply reuses the value in the fast-path slot if the object's address matches the fast-path slot. Otherwise, HexType walks the red-black tree pointed to by the slow-path slot to resolve collisions. Whenever a lookup in the red-black tree is performed, the fast path is updated with the most recent object. As a result, our mapping scheme with the object mapping table imposes $O(1)$ time complexity for fast-path accesses and $O(\log N)$ for slow-path accesses (where $N$ is the number of values in the per-slot red-black tree). In the SPEC CPU2006 C++ benchmarks, our approach uses the fast path 99.68% of the time to update metadata and 100% of the time to look up information from the type table. We demonstrate that these design choices for the object mapping table are reasonable in the evaluation section (§5).
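A simplified model of this two-level structure is sketched below, using std::map as a stand-in for the per-slot red-black tree and an invented address hash; HexType's actual implementation differs in detail:

```cpp
#include <cstddef>
#include <cstdint>
#include <map>

struct TypeInfo;  // entry in the type table (§4.1), details elided

// One hash-table entry: a fast-path slot caching the most recently used
// object, plus a slow-path red-black tree (std::map here as a stand-in)
// holding the complete set of objects that hash to this entry.
struct MapEntry {
    uintptr_t fast_addr = 0;
    const TypeInfo* fast_type = nullptr;
    std::map<uintptr_t, const TypeInfo*> slow;
};

constexpr std::size_t kTableSize = 1 << 16;           // illustrative size
static MapEntry g_table[kTableSize];

static std::size_t slot_of(uintptr_t addr) {
    return (addr >> 4) & (kTableSize - 1);            // illustrative address hash
}

const TypeInfo* lookup(uintptr_t addr) {
    MapEntry& e = g_table[slot_of(addr)];
    if (e.fast_addr == addr)                          // O(1) fast path
        return e.fast_type;
    auto it = e.slow.find(addr);                      // O(log N) slow path
    if (it == e.slow.end())
        return nullptr;                               // untracked object
    e.fast_addr = addr;                               // cache for the next lookup
    e.fast_type = it->second;
    return it->second;
}

void insert_object(uintptr_t addr, const TypeInfo* ti) {
    MapEntry& e = g_table[slot_of(addr)];
    e.slow[addr] = ti;                                // complete set lives in the tree
    e.fast_addr = addr;                               // and the fast path caches it
    e.fast_type = ti;
}

void remove_object(uintptr_t addr) {
    MapEntry& e = g_table[slot_of(addr)];
    e.slow.erase(addr);
    if (e.fast_addr == addr) e.fast_addr = 0;         // invalidate the cache
}
```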
4.3 Type Casting Verification
We now describe the final step of HexType, typecasting verification, which checks the safety of casting. HexType instruments typecasting operations with additional verification code at compile time. At runtime, this instrumentation locates the object’s true type information in the type table and then compares the target type with the expected type at the cast site to determine if the cast is legal.
HexType instruments all typecasting operations related to type confusion issues. As described in §2.2, these include static_cast sites whose casting operation performs a downcast. More precisely, HexType inserts code that invokes a runtime verification function, passing the information necessary for casting verification (i.e., the base pointer of the object being cast and the hash value of the destination type).
Additionally, HexType also verifies reinterpret_cast. As mentioned in §2.2, reinterpret_cast forces the casting operation by copying the bit pattern of the pointer value even when the types are not compatible. Thus, this operation is security critical if misused. For example, as shown in Figure 6, since reinterpret_cast simply returns the same unchanged address (dpr), a pointer to a base class ends up pointing to a semantically different object, which results in access to an unexpected memory area. In other words, reinterpret_cast does not adjust the pointer according to the class hierarchy as it simply hard-copies the to-be-cast value, whereas static_cast (dps) adjusts the pointer.
Once the runtime verification function is invoked, HexType first consults the object mapping table. Given the base pointer address of an object, HexType computes the hash index within the object mapping table, which yields a reference to the corresponding type table entry via either the fast path or the slow path (§4.2). Using this type table entry as well as the provided destination type information, HexType reasons about whether the underlying object can indeed be a sub-object of the destination type, i.e., whether the cast is correct. If HexType detects type confusion at runtime, it prints a detailed report that includes the allocated object type, the expected object type, and the type casting location. This information allows the developer to triage the type casting issue quickly.
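Putting the pieces together, the check inserted at a downcast site conceptually reduces to something like the following; the function names and signatures are illustrative and echo the sketches above rather than HexType's real instrumentation:

```cpp
#include <cstdint>

struct TypeInfo;  // type table entry (§4.1)

// Illustrative helpers in the spirit of the earlier sketches.
const TypeInfo* lookup(uintptr_t addr);
bool cast_is_allowed(const TypeInfo& true_type, uint64_t dst_hash);
void report_type_confusion(void* obj, const TypeInfo* true_type, uint64_t dst_hash);

// Source code:   DOMElement* e = static_cast<DOMElement*>(node);
// Hardened code conceptually runs this check before the result is used:
void verify_cast(void* base_ptr, uint64_t dst_type_hash) {
    const TypeInfo* ti = lookup(reinterpret_cast<uintptr_t>(base_ptr));
    if (ti == nullptr)
        return;                               // untracked object: no decision possible
    if (!cast_is_allowed(*ti, dst_type_hash))
        report_type_confusion(base_ptr, ti, dst_type_hash);
}
```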
4.4 Optimization
Type casting verification supported by HexType may impose non-negligible performance overhead as it involves additional computation. In order to make HexType a truly practical security tool, we implement a set of performance optimization techniques, namely only tracing unsafe objects, only verifying unsafe casting, and efficient dynamic casting.
Only Tracing Unsafe Object. HexType only traces type information on potentially unsafe objects and does not trace safe objects. We define $T$ as a safe object type if and only if $T$ is never subject to typecasting. $T$ is a potentially unsafe object type otherwise. Since safe object types will never be used for casting at all, HexType does not need to keep track of them to check casting validity. We assume that the source pointer of a safe object always references an object of the correct type as no casting operation in the program exists that breaks this assumption. As illustrated in Figure 7, HexType performs the following two steps to identify unsafe objects. First, it identifies a typecasting-related object set, which can be used for typecasting operations. HexType identifies all type information both at the casting site and for the cast object. An object that is cast can be of type $X$ or any of the child classes of type $X$. The type casting site therefore must accept all possible subtypes. Next, when instrumenting object allocation sites, HexType selectively instruments allocation only for typecasting-related objects.
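A sketch of this unsafe-type-set computation is shown below: every type that appears as the source of a cast is collected, and the set is closed over all descendant classes, since any of them may be the runtime type at a cast site. The hierarchy representation and names are illustrative:

```cpp
#include <map>
#include <set>
#include <string>
#include <vector>

// Illustrative class-hierarchy representation: type name -> direct subclasses.
using Hierarchy = std::map<std::string, std::vector<std::string>>;

// cast_sources: every type observed as the static source type of some cast.
// Returns that set closed over all descendants: any subclass may be the
// runtime type of an object reaching one of those cast sites.
std::set<std::string> unsafe_types(const Hierarchy& children,
                                   const std::set<std::string>& cast_sources) {
    std::set<std::string> unsafe;
    std::vector<std::string> work(cast_sources.begin(), cast_sources.end());
    while (!work.empty()) {
        std::string t = work.back();
        work.pop_back();
        if (!unsafe.insert(t).second)
            continue;                       // already processed
        auto it = children.find(t);
        if (it == children.end())
            continue;
        for (const auto& c : it->second)
            work.push_back(c);
    }
    return unsafe;                          // allocations of any other type need no tracing
}
```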
While evaluating HexType, we found that tracking stack objects is the most critical performance bottleneck. Thus, considering allocation characteristics of stack objects, we apply a special optimization scheme to conservatively distinguish safe stack objects from unsafe stack objects.
First, we apply CaVer's optimization technique, which is based on the observation that the lifetime of a stack object is relatively well defined with respect to the set of functions in which the object is active: the function in which the stack object is declared and all of its callee functions, provided there are no out-going indirect calls. Only if there are no out-going indirect calls do we perform an escape analysis for this set of functions, so as to ensure that no reference to the stack object ever leaves the analyzed functions. Furthermore, if there are no typecasting operations within these clustered and side-effect-free functions, then the analyzed stack objects will never be used for typecasting. In this case, we have a truly safe stack object that does not need to be tracked at runtime.

class D : public B { .... };
class F : public C { .... };

B *pB = new B;
C *pC = new C;

static_cast<D*>(pB);
static_cast<F*>(pC);

Figure 7: An example of how HexType creates a potentially unsafe object type set. In the example, we assume that objects of type B and C are typecast. HexType will identify these potentially unsafe types and all of their child types as unsafe.
We apply a more fine-grained analysis to functions that did not pass the previous check: (i) we check whether each stack object in the function is a local variable using SafeStack, a component of CPI/CPS [20], since SafeStack supports the detection of local variables; and (ii) if these local stack objects are not used in any typecasting operation within the function, we do not need to trace them.
Only Verifying Unsafe Casting. Clearly, HexType does not need to perform runtime verification for a casting operation if it can be proven safe during compilation. We call such a provably safe cast operation a safe casting, and unsafe casting otherwise. Since HexType supports runtime casting verification, we can leverage an optimization that relies on an imprecise yet conservative static analysis to distinguish these two categories. In other words, given a casting operation, HexType marks it as safe casting only if HexType can be completely certain at compile time. If HexType cannot determine that it is safe at compile time, it simply considers it unsafe casting and falls back to a runtime check.
HexType leverages a conservative backward dataflow analysis to identify safe casting. Starting from a casting site, HexType reasons about type information of an underlying object, i.e., how the underlying object has been allocated. To answer this question, we perform an inter-procedural use-def chain analysis, where the use point is defined as a casting site and the def point is defined as any object allocation sites.
For example, as shown in Figure 8, if the source of the typecasting operation uses the address-of operator (&) or an array name directly to obtain the address of the object, HexType can determine the source object type and verify the typecasting operation at compile time. We can also predict the object type through the use-def chain analysis. In these cases, HexType removes the typecasting verification instrumentation and verifies the cast during compilation. However, if we cannot determine the source type, HexType again falls back to a runtime check.
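A hypothetical cast in the spirit of Figure 8 (which is not reproduced here) that this analysis can discharge at compile time:

```cpp
class Base            { public: int id = 0; };
class Derived : public Base { public: int extra = 0; };

int statically_safe() {
    Derived d;
    Base* b = &d;                           // def: address-of a Derived object
    // use: the only reaching definition of b is &d, so this downcast is
    // provably correct and the runtime check can be elided.
    Derived* back = static_cast<Derived*>(b);
    return back->extra;
}
```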
Efficient Dynamic Casting. Since HexType offers efficient runtime casting verification, the existing dynamic_cast can be optimized accordingly. HexType therefore replaces each dynamic_cast with our fast lookup. In order to preserve the runtime semantics of dynamic_cast as dictated by the C++ standard, HexType takes additional steps in response to an incorrect casting detected in runtime. As described in §2.2, HexType returns NULL for a pointer-typed casting and throws an exception for a reference-typed casting. This optimization can be especially useful if applications heavily rely on dynamic casting.
4.5 Implementation
We have implemented HexType, as shown in Figure 10, based on the LLVM Compiler infrastructure project [22] (version 3.9.0). The HexType implementation consists of 4,677 lines of code that we added to Clang, an LLVM Pass, and our compiler-rt runtime library. HexType’s LLVM Pass (i) creates type relationship information, and (ii) instruments allocations of unsafe objects to record allocated object type information into our object mapping table. Also, we modify Clang to (i) instrument all downcast sites (the pointer type of casting operation is one of the parent objects of destination type),
### 5 EVALUATION
In this section, we evaluate HexType focusing on following aspects: (i) the detection coverage (§5.1); (ii) newly discovered vulnerabilities by HexType (§5.2); (iii) the efficiency of object mapping table (§5.3); and (iv) runtime overhead (§5.4).
#### Experimental Setting.
All evaluations were performed on Ubuntu 16.04.2 LTS with a quad-core 3.60GHz CPU (Intel i7-4790), 250GB SSD-based storage, 1TB HDD, and 16GB RAM.
#### Evaluation Target Programs.
We have applied HexType to the following programs: all seven C++ benchmarks from SPEC CPU2006 [7] and Firefox [11]. For Firefox, we use Octane [13] and Dromaeo [10] benchmark suites. Moreover, in order to compare HexType with previous work, we applied TypeSan as well, and ran these programs under the same configuration. For CaVer, we use the numbers from the paper since CaVer was developed almost three years ago and we encountered compatibility issues with the current test environment and software (i.e., Firefox).
#### 5.1 Coverage on Typecasting
One of the primary goals of HexType is to increase typecasting coverage such that HexType can ensure that all typecasting operations are performed correctly. To evaluate typecasting coverage, we counted how many typecasting operations were verified at runtime (shown in Table 1). We used two versions of HexType in this experiment, with the optimization techniques from §4.4 turned off or on (denoted HexType-no-opt and HexType, respectively). For TypeSan, we used the evaluation numbers reported in the paper [15].
For SPEC CPU2006, HexType verifies almost all typecasting operations — 100% for omnetpp, dealII, and soplex, and 99.8% for xalancbmk. Compared to TypeSan, HexType improves the coverage number on xalancbmk (i.e., from 89% to 99.8%). This is because xalancbmk heavily uses placement new to allocate objects, for which TypeSan loses information about the object at runtime. Thus TypeSan fails to resolve the type information associated with such objects. However, as described in §4.2.1, HexType correctly handles placement new allocations, which significantly raises the coverage ratio.
For Firefox, depending on the benchmark suite, HexType successfully covers typecasting operations: ranging from 73% to 88% with HexType-no-opt; and ranging from 54% to 59% with HexType. During our evaluation, we found that HexType’s coverage rate drops after applying our optimizations due to interactions with Firefox’s complex object allocation patterns and how our optimizations handle and track allocations in LLVM/Clang. While we are investigating and plan to fix this issue in the future, HexType with optimization still shows better coverage rate than TypeSan. Most of the missing type casts in xalancbmk and Firefox result from application-specific
---
```cpp
class T : public S { ... };

// (1) if the target is a pointer type and the downcast is invalid,
//     dynamic_cast returns NULL
S *c_obj = new S;
T *d = dynamic_cast<T*>(c_obj);
if (!d) {
    // invalid downcast
}

// (2) if the target is a reference type and the downcast is invalid,
//     dynamic_cast throws an exception
try {
    S b;
    T &rd = dynamic_cast<T&>(b);
}
catch (std::bad_cast &bc) {
    // invalid downcast
}
```
**Figure 9:** An example of how type confusion errors are handled using `dynamic_cast`. HexType provides the same error-handling behavior when it detects a type confusion error at a replaced `dynamic_cast` site.
---
<table>
<thead>
<tr>
<th>Benchmark</th>
<th>TypeSan</th>
<th>HexType-no-opt</th>
<th>HexType</th>
</tr>
</thead>
<tbody>
<tr>
<td>spec</td>
<td>%</td>
<td>%</td>
<td>%</td>
</tr>
<tr>
<td>ff-drom-dom</td>
<td>73.3</td>
<td>61.1</td>
<td>56.5</td>
</tr>
<tr>
<td>ff-drom-js</td>
<td>80.1</td>
<td>3.4</td>
<td>59.4</td>
</tr>
<tr>
<td>dealII</td>
<td>100.0</td>
<td>0</td>
<td>100.0</td>
</tr>
<tr>
<td>soplex</td>
<td>100.0</td>
<td>0</td>
<td>100.0</td>
</tr>
<tr>
<td>xalancbmk</td>
<td>89.5</td>
<td>11.1</td>
<td>99.8</td>
</tr>
<tr>
<td>omnetpp</td>
<td>100.0</td>
<td>0</td>
<td>100.0</td>
</tr>
</tbody>
</table>
**Table 1:** The evaluation of typecasting verification coverage against SPEC CPU2006 and browser benchmarks. Columns with % present a coverage ratio and columns with x present a coverage improvement ratio (i.e., HexType’s coverage divided by TypeSan’s coverage).
allocation patterns. More specifically, Firefox creates a custom storage pool (typed as an array of char) and manipulates the pool using memcpy or direct object initialization (e.g., data.key = key; data.index = index; . . .). Xalancbmk also uses a special storage pool (SerializeEngine) that manages objects directly without calling memory allocation functions. As these allocation patterns cannot be detected by HexType during the instrumentation phase, HexType cannot track the runtime types of objects allocated through these patterns. Handling these missing allocations is challenging. A naive approach would trace the custom storage pool (allocated as a char array) and its low-level allocation patterns using memcpy or direct object initialization; this would unfortunately result in high overhead. Alternatively, we propose to modify the few affected locations in Firefox and annotate the object allocations accordingly. Although the coverage ratio in Firefox may not be as impressive as HexType's results on the SPEC CPU2006 benchmarks, we emphasize that it is significantly improved over the state-of-the-art tool, TypeSan. TypeSan only covered 27.75% of Firefox's typecasting operations on average, 52.98 and 29.18 percentage points less than HexType-no-opt and HexType, respectively, highlighting HexType's advantage in identifying allocation sites (§4.2.1).
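A condensed illustration of this kind of allocation pattern, with invented names, is shown below; no new expression or constructor call is visible to the instrumentation, so the objects remain untracked:

```cpp
#include <cstddef>
#include <cstring>

struct PoolData { int key; int index; };

// Custom storage pool typed as a raw char array: objects come into being via
// direct field writes or memcpy, so there is no new expression or constructor
// call for the compile-time instrumentation to hook. (Bounds checks omitted.)
class CustomPool {
    alignas(PoolData) char storage_[64 * sizeof(PoolData)];
    std::size_t used_ = 0;
public:
    PoolData* emplace(int key, int index) {
        PoolData* data = reinterpret_cast<PoolData*>(storage_ + used_);
        used_ += sizeof(PoolData);
        data->key = key;        // direct object initialization, no constructor
        data->index = index;
        return data;
    }
    PoolData* clone_into(const PoolData& src) {
        void* dst = storage_ + used_;
        used_ += sizeof(PoolData);
        std::memcpy(dst, &src, sizeof(PoolData));   // object copied with memcpy
        return static_cast<PoolData*>(dst);
    }
};
```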
5.2 Newly Discovered Vulnerabilities
During the course of evaluating HexType by running the set of target programs, we discovered four new type confusion vulnerabilities: two in the Qt base library (found while evaluating Wireshark, which builds on Qt) and two in Apache Xerces-C++, all of which have been confirmed and patched by the corresponding developer communities. The two Apache Xerces-C++ vulnerabilities occur due to type confusion between DOMNodeImpl (referenced through a DOMNode pointer) and DOMTextImpl. Since the DOMNodeImpl object is allocated using placement new from a pre-allocated memory pool, previous approaches cannot trace these objects. Therefore, these vulnerabilities were not detected by previous schemes such as CaVer or TypeSan.
In addition, HexType found two new vulnerabilities in the Qt base library. The Qt team has already patched our reported type confusion bugs [30]. HexType reported type confusion issues when Qt performs a cast from QMapNodeBase (base class) to QMapNode (derived class). Since the object is not a subobject of QMapNode, this violates rule 5.2.9/11 of the C++ standard [19] (a downcast is undefined if the object the pointer points to is not a subobject of the destination type) and causes undefined behavior.
These new vulnerabilities discovered by HexType clearly demonstrate the security advantage of HexType, especially compared to other previous work including TypeSan and CaVer. We would like to further point out that these new type confusion vulnerabilities were discovered only with basic benchmark workloads. In the future, we plan to run HexType under a fuzzing framework such as AFL [37], to discover more security critical vulnerabilities related to type confusions.
5.3 Efficiency of Object Tracing
Recall that the key runtime functions that HexType performs are (1) keeping track of object types (at the time of object allocation) and (2) looking up an object type (at the time of type casting). As described in §4.2.2, we designed object mapping table to efficiently handle these operations leveraging both a fast-path and a slow-path. Therefore, the performance efficiency of object mapping table clearly relies on the hit ratio of the fast-path (i.e., the number of operations that only access the hash table) such that HexType does not need to consult the slow-path (i.e., accessing not only the hash table but also the corresponding red-black tree) in most cases.
Table 2 lists the fast-path hit ratios while running the set of evaluation target programs. Overall, most operations showed high fast-path hit ratios, ranging from 98.82% to 99.99% for updating the object mapping table and from 94.09% to 100% for looking up the object mapping table. This high fast-path hit ratio was also maintained when HexType was running large-scale programs such as Firefox, which creates more than 32,000 million objects at runtime. This result implies that the design of the object mapping table is efficient enough to support a wide range of programs, which in turn significantly helped HexType reduce its runtime impact.
### 5.4 Performance Overhead
In order to understand the performance impact imposed by HexType, this subsection measures performance overhead in terms of runtime speed. Table 4 shows the performance overhead on SPEC CPU2006 and Firefox, with handling of placement new and `reinterpret_cast` enabled. For all seven C++ benchmarks in SPEC CPU2006, HexType outperformed previous work in all cases. This is largely because of HexType's optimization algorithms (§4.4) as well as the object mapping table design (§4.2). To understand this more clearly, Table 3 reports how many objects HexType identified as safe objects. With the help of the optimization algorithm, HexType was able to dramatically reduce the number of objects to be traced — by 83% to 100% in all cases except omnetpp. For omnetpp, the number of casting-related classes (unsafe objects) is higher than in the other cases; still, we can reduce the object tracing overhead by almost 54%. Interestingly, out of the seven SPEC CPU2006 C++ benchmarks that we ran, povray, astar, and namd do not perform any typecasting operation that HexType has to verify at runtime. This implies that HexType has zero overhead for these cases since there are no object tracing and typecasting operations. In comparison, TypeSan imposes 26.73% overhead for povray.
In the case of omnetpp and dealII, HexType shows significantly better performance due to HexType's optimization of replacing `dynamic_cast` (§4.4). This optimization technique yields strong performance improvements, particularly for applications that rely heavily on `dynamic_cast`. We analyzed the programs in our evaluation set and found that two SPEC CPU2006 C++ benchmarks, dealII and omnetpp, perform a huge number of `dynamic_cast` operations, 206 M and 47 M respectively. Replacing `dynamic_cast` with our verification routine reduced dealII's performance overhead by 4%.
For Firefox, HexType showed similar or higher overhead than TypeSan. Note, however, that HexType vastly extends coverage compared to TypeSan, so the overhead numbers are not directly comparable given the differences in coverage. Moreover, while HexType reduced object tracing by nearly 52 – 100% in SPEC CPU2006, it only reduced the number of traced objects by about 21 – 44% in Firefox. We suspect this is because of Firefox's runtime characteristics — as shown in Table 2, almost all objects in Firefox are allocated on the stack. We also note that TypeSan's object mapping scheme comes with a security risk as it never removes object type information. As a result, if the stack location of a formerly properly allocated object is used in a casting operation, it may be interpreted as a valid object. Since HexType properly deletes those entries when the stack frame returns, HexType does not suffer from this issue.
### 6 RELATED WORK
In this section, we summarize previous research on typecast verification. HexType focuses on type confusion attacks that violate pointer semantics in typecasting operations. CaVer [21] first addressed such exploits through typecasting verification and identified eleven security vulnerabilities caused by bad casting. TypeSan [15] subsequently improved performance and coverage over CaVer. Similar to HexType, both CaVer and TypeSan are implemented on top of the LLVM compiler framework and instrument code at compile time. For metadata allocation, both CaVer and TypeSan use a disjoint metadata scheme; TypeSan uses shadow memory for the heap. However, these schemes extend the identification of overall object allocation and increase the overhead. HexType uses a global, whole-address-space, two-layer object-to-type mapping scheme to reduce overhead and supports additional object allocation patterns through placement new and reinterpret_cast. Hence HexType vastly increases coverage compared to the aforementioned approaches, while performance remains comparable despite the increased coverage. UBSan [29], another typecast verification framework, works only for polymorphic classes. It relies on runtime type information (RTTI), instruments only static_cast, and checks the cast at runtime. Thus it can only handle polymorphic classes and requires manual source modification, which makes it difficult to use in large projects.
Several Control-Flow Integrity (CFI) techniques [2, 3, 8, 12, 34, 40] enforce integrity by detecting invalid control-flow transfers within the program. However, these techniques address the type confusion problem only partially, detecting it only when the type exploitation is used to hijack control flow. Similarly, defenses [17, 31, 38, 39] that protect virtual calls from vtable hijacking attacks consider only the types involved in virtual calls. These schemes do not address the overall bad casting problem. Another control-flow hijack mitigation technique is Code Pointer Integrity (CPI) [4, 20], which guarantees the integrity of all code pointers in a program. This approach prevents corrupted code pointers from being used, but does not block type casting attacks.
Bad type casting can lead to memory corruption attacks in which an attacker gains access to memory outside the bounds of the cast object. Such attacks can be identified by existing mechanisms: defense techniques focusing on memory corruption [9, 16, 18, 25, 27, 28, 33] can detect exploits if a type confusion attack leads to a memory access past the cast object. These techniques efficiently detect such attacks, but unlike HexType they cannot address type confusion itself, another kind of memory corruption.
8 CONCLUSION
Type casting vulnerabilities are a prominent attack vector that allows exploitation of large, modern software written in C++. While allowing encapsulation and abstraction, object-oriented programming as implemented in C++ does not enforce type safety. C++ offers several kinds of type casts; some are checked only statically and others not at all, so at runtime an object of a different type can incorrectly pass a type cast. To detect these illegal type casts, defenses need to both track the true allocated type of each object and replace all casts with an explicit check. HexType tracks the true type of each object by supporting various allocation patterns, several of which (such as placement new and reinterpret_cast) were not handled in previous work. While previous work focused only on static_cast, HexType also covers dynamic_cast and reinterpret_cast. To limit the overhead of these online type checks, HexType both reduces the number of checks incurred, by removing checks that can be proven correct statically, and limits the overhead per check through a set of optimizations.
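As a hedged illustration (this snippet is not taken from the paper), the following minimal C++ program shows why runtime checking is needed: the `static_cast` downcast compiles without complaint, yet the pointed-to object was allocated as `Base`, so the subsequent field access is undefined behavior that a checker such as HexType would flag as a bad cast.

```cpp
#include <iostream>

struct Base {
    virtual ~Base() = default;
    int id = 0;
};
struct Derived : Base {
    int extra = 42;  // not present in the allocated Base object
};

int main() {
    Base *b = new Base();                    // true allocated type: Base
    // No runtime check is performed here: the cast "succeeds" statically.
    Derived *d = static_cast<Derived *>(b);  // type confusion
    std::cout << d->extra << "\n";           // reads past the Base object: undefined behavior
    delete b;
}
```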
Our prototype results show that HexType achieves 1.1 to 6 times higher coverage on the Firefox benchmarks. For the SPEC CPU2006 benchmarks that exhibit overhead, we show a 2 to 33 times reduction in overhead. In addition, HexType discovered four new type confusion bugs in Qt and Apache Xerces-C++. The open-source version of HexType is available at https://github.com/HexHive/HexType.
ACKNOWLEDGMENTS
We would like to thank the anonymous reviewers, Nathan Burrow, and Scott Carr for their detailed and constructive comments. This material is based in part upon work supported by the National Science Foundation under awards CNS-1513783 and CNS-1657711, by ONR award N00014-17-1-2513, and by Intel Corporation. Any opinions, findings, and conclusions or recommendations expressed in this material are those of the authors and do not necessarily reflect the views of our sponsors.
REFERENCES
[34] Caroline Tice, Tom Roeder, Peter Collingbourne, Stephen Checkoway, Úlfar Erlingsson, Luis Lozano, and Geoff Pike. 2014. Enforcing Forward-Edge Control-Flow Integrity in GCC & LLVM. In USENIX Security. http://dl.acm.org/citation.cfm?id=2671225.2671285
[38] Chao Zhang, Scott A Carr, Tongxin Li, Yu Ding, Chengyu Song, Mathias Payer, and Dawn Song. 2016. VTrust: Regaining Trust on Virtual Calls. In NDSS.
[39] Chao Zhang, Chengyu Song, Kevin Zhijie Chen, Zhaoqiang Chen, and Dawn Song. 2015. VTint: Protecting Virtual Function Tables’ Integrity. In NDSS.
A PSEUDO CODE
# (1) Insert typecasting verification instrumentation (Clang)
def emitHexTypeCastVerification():
    for targetTypeCast in allTypeCastSet:
        emitTypeCastVerifyInstrumentation(targetTypeCast)

# (2) Verify typecasting (runtime library)
def verifyTypecasting(src_addr, dst_addr, dstTyHashValue):
    # Check parent set (safe cast):
    # if the destination type is one of the source object's parent types,
    # this is a SAFECAST.
    srcType = getTypeInfoFromObjTypeMap(src_addr)
    srcParentSet = srcType.ParentSetRef
    if srcParentSet.binarysearch(dstTyHashValue):
        return SAFECAST
    # Check phantom classes (safe cast):
    # although the destination type is not one of the source object's
    # parent types, it may be a phantom class of the source type.
    dstPhantomSet = getPhantomObjSet(dstTyHashValue)
    if dstPhantomSet.binarysearch(srcType.hashValue):
        return SAFECAST
    return BADCAST
Figure 11: Algorithm for verifying typecasting
# (1) Stack object tracing (LLVM pass)
def stackObjTracing():
    for function in module:
        for block in function:
            for inst in block:
                if isStackObjAllocaInst(inst):
                    addObjUpdateInstrumentation(getAllocTypeInfo(inst))
                    stackObjTracingSet.insert(getAllocTypeInfo(inst))

def stackObjRemove(function, stackObjTracingSet):
    for targetObjInfo in stackObjTracingSet:
        addObjRemoveInstrumentation(targetObjInfo)

# (2) Heap object tracing (LLVM pass)
def heapObjTracing():
    for function in module:
        for block in function:
            for inst in block:
                if isHeapAllocCall(inst):
                    addObjUpdateInstrumentation(getAllocTypeInfo(inst))
                if isFreeCall(inst):
                    addObjRemoveInstrumentation(getAllocTypeInfo(inst))

# (3) Global object tracing (LLVM pass)
def globalObjTracing():
    GlobalFun = FunctionCreate()
    for targetObjInfo in getAllGlobalsObjInfo():
        addObjUpdateInstrumentation(targetObjInfo)
    appendToGlobalCtors(module, GlobalFun)
B NEW TYPE CONFUSION BUGS
// New Apache Xerces-C++ type confusion vulnerability
// (Code location) xercesc/dom/impl/DOMCasts.hpp, line 146
// (Description) p points to an object allocated as DOMTextImpl,
// and it is cast to DOMElementImpl. Since DOMElementImpl is not a
// parent (base class) of DOMTextImpl, the cast violates C++ standard
// rule 5.2.9/11 in [expr.static.cast] (a downcast is undefined if the
// object that the pointer points to is not a subobject of an object
// of the destination type) and causes undefined behavior.
static inline DOMNodeImpl *castToNodeImpl(const DOMNode *p) {
DOMElementImpl *pE = (DOMElementImpl *) p;
return &(pE->fNode);
}
// HexType type confusion report
== Type confusion Report ==
FileName : xercesc/dom/impl/DOMCasts.hpp Line: 99
[From] (hashValue: 1670590304: DOMTextImpl)
[To] (hashValue: 2789966681: DOMElementImpl)
(Call Stack Info)
0x7f1206da5c92:
(xercesc_3_1::castToNodeImpl(xercesc_3_1::DOMNode const *)+0x42)
............... 0x7f1206da643d:
(xercesc_3_1::DOMNodeImpl::appendChildFast(xercesc_3_1::DOMNode*)+0x2d)
............... 0x7f1203335830:
(__libc_start_main+0xf0)
0x4096e9:
(_start+0x29)
Figure 13: A type confusion bug in Apache Xerces-C++ discovered by HexType
// New Qt type confusion vulnerability
// (Code location) qt5/QtCore/qmap.h, line 189
// (Description) The header (a QMapNodeBase) is cast to QMapNode.
// However, since QMapNode is not a parent (base class) of
// QMapNodeBase, this cast violates C++ standard rule 5.2.9/11
// and causes undefined behavior.
template <class Key, class T>
struct QMapData : public QMapDataBase
{
typedef QMapNode<Key, T> Node;
..............
const Node *end() const { return static_cast<const Node *>(&header); }
Node *end() { return static_cast<Node *>(&header); }
..............
}
// HexType type confusion report
== Type confusion Report ==
FileName : /usr/include/x86_64-linux-gnu/qt5/QtCore/qmap.h Line: 189
[From] (hashValue: 980699179: QMapNodeBase)
[To] (hashValue: 1458345177: QMapNode)
(Call Stack Info)
0x62fa7c:
(QMapData<double, QCData>::end()+0x3c)
............... 0x7fc274403830:
(__libc_start_main+0xf0)
0x47ed19:
(_start+0x29)
Figure 14: A type confusion bug in QT discovered by HexType
GUARDS: A GENERIC UPGRADEABLE ARCHITECTURE FOR REAL-TIME DEPENDABLE SYSTEMS
D. Powell\(^{(1)}\), J. Arlat\(^{(1)}\), L. Beus-Dukic\(^{(2)}\), A. Bondavalli\(^{(3)}\), P. Coppola\(^{(4)}\), A. Fantechi\(^{(5)}\), E. Jenn\(^{(6)}\), C. Rabéjac\(^{(7)}\), A. Wellings\(^{(2)}\)
Abstract. The development and validation of fault-tolerant computers for critical real-time applications are currently both costly and time-consuming. Often, the underlying technology is out-of-date by the time the computers are ready for deployment. Obsolescence can become a chronic problem when the systems in which they are embedded have lifetimes of several decades. This paper gives an overview of the work carried out in a project that is tackling the issues of cost and rapid obsolescence by defining a generic fault-tolerant computer architecture based essentially on commercial off-the-shelf (COTS) components (both processor hardware boards and real-time operating systems). The architecture uses a limited number of specific, but generic, hardware and software components to implement an architecture that can be configured along three dimensions: redundant channels, redundant lanes and integrity levels. The two dimensions of physical redundancy allow the definition of a wide variety of instances with different fault-tolerance strategies. The integrity level dimension allows application components of different levels of criticality to co-exist in the same instance. The paper describes the main concepts of the architecture, the supporting environments for development and validation, and the prototypes currently being implemented.
Keywords: computer architecture, generic architecture, embedded systems, fault tolerance, real-time, integrity levels
1. Introduction
Most ultra-dependable real-time computing architectures developed in the past have been specialized to meet the particular requirements of the application domain for which they were targeted. This specialization has led to very costly, inflexible, and often hardware-intensive solutions that, by the time they are developed, validated and certified for use in the field, can already be out-of-date in terms of their underlying hardware and software technology. This problem is exacerbated in some application domains since the systems in which the real-time architecture is embedded may be deployed for several decades, i.e., almost an order of magnitude longer than the typical lifetime of a generation of computing technology.
A consortium of European companies and academic partners has been formed to design and develop a Generic Upgradable Architecture for Real-time Dependable Systems (GUARDS), together with an associated development and validation environment. The end-user companies in the consortium all currently deploy ultra-dependable real-time embedded computers in their systems, but with very different requirements and constraints resulting from the diversity of their application domains: nuclear submarine, railway and space systems. The overall aim of the GUARDS project is to significantly decrease the lifecycle costs of such embedded systems. The intent is to be able to configure instances of a generic architecture that can be shown to meet the very diverse requirements of these (and other) critical real-time application domains. A three-pronged approach is being followed to reduce the cost of validation and certification of instances of the architecture: a) design for validation, so as to focus validation obligations on a minimum set of critical components; b) re-use of already-validated components in different instances; and c) the support of software components of different criticalities.
The paper is structured as follows. Section 2 sketches the rationale for the design of the generic architecture, which is then summarized in Section 3. Central to the architecture is an inter-channel communication network, which is described in Section 4. Section 5 details the inter-channel fault-tolerance mechanisms while Section 6 discusses the scheduling issues raised by active replication of real-time tasks. Sections 7 and 8 discuss respectively the development and validation environments that accompany the architecture. Section 9 describes the prototypes currently being implemented. Finally, Section 10 concludes the paper.
2. Design Rationale
To merit the epithet “generic”, the architecture must be able to meet the widest possible spectrum of dependability and real-time requirements. To this end, we first consider some key non-functional requirements of typical applications in each of the three end-user domains. We then discuss the issues of fault classes and real-time scheduling.
2.1. Key Non-Functional Requirements
A typical instance of the architecture in the railway domain would be a fail-safe control system. Standards in this domain dictate extremely low catastrophic failure rates for individual subsystems (e.g., less than $10^{-11}$/hour with respect to physical faults). In railway applications, it is common to physically segregate subsystems responsible for vital (safety-critical) functions from non-vital functions. We decided to investigate the possibility of a single instance supporting both high-integrity vital functions and low-integrity non-vital functions.
In the nuclear submarine domain, an instance of the architecture would typically be used to support secondary protection functions, which are required to be ready to react in case of (rare) incidents. Two requirements from this application domain impose quite severe restrictions on the design space. First, it must be possible to separate redundant elements of the architecture by several meters so as to tolerate physical damage. Second, to avoid obsolescence during the submarine’s lifetime, the use of unmodified commercial off-the-shelf operating system(s) is mandatory.
A particularly challenging application in the space domain is that of an autonomous spacecraft carrying out missions containing phases that are so critical that tolerance of several faults may be required (e.g., target fly-by or docking). During non-critical phases, the redundant elements may be powered down to save energy. Moreover, it is necessary for an instance to be able to support software of different integrity levels: high-integrity critical software that is essential for long-term mission reliability and potentially unreliable payload software.
2.2. Fault Classes
The architecture aims to tolerate permanent and temporary physical faults (of both internal and external origins) and should provide tolerance or confinement of software design faults. This wide spectrum of fault classes [42] has several consequences beyond the basic physical redundancy necessary to tolerate permanent internal physical faults. Tolerance of permanent external physical faults (e.g., physical damage) requires geographical separation of redundancy. Temporary external physical faults (transients) can lead to rapid redundancy attrition unless their effects can be undone. This means that it must be possible to recover corrupted processors. Temporary internal physical faults (intermittents) are treated as either permanent or transient faults according to their rate of recurrence.
Many design faults can also be tolerated like intermittents if their activation conditions are sufficiently diversified [29] (e.g., through loosely-coupled replicated computations). However, design faults that are activated systematically for a given sequence of application inputs can only be tolerated through diversification of design or specification. Due to limited resources, the project has not considered diversification of application software beyond imposing the requirement that no design decision should preclude that option in the future. However, we have studied the use of integrity level and control-flow monitoring mechanisms to ensure that design faults in non-critical
application software do not affect critical applications. Moreover, we have considered diversification for tolerating design faults in off-the-shelf operating systems. We also encourage activation condition diversification to provide some tolerance of design faults in replicated hardware and replicated applications.
2.3. Real-Time Models
In keeping with the genericity objective, the architecture must be capable of supporting a range of real-time computational and scheduling models.
The computational model defines the form of concurrency (e.g., tasks, threads, asynchronous communication, etc.) and any restriction that must be placed on application programs to facilitate their timing analysis (e.g., bounded recursion). Applications supported by GUARDS may conform to a time-triggered, event-triggered or mixed computational model.
Three scheduling models are considered [69]:
- Cyclic — as typified by the traditional cyclic executive.
- Cooperative — where an application-defined scheduler and the prioritized application tasks explicitly pass control between one another to perform the required dispatching.
- Pre-emptive — the standard pre-emptive priority scheme.
We have focused primarily on the pre-emptive scheduling model since this is the most flexible and the one that presents the greatest challenges.
3. The Generic Architecture
The diversity of end-user requirements and fault-tolerance strategies led us to define a generic architecture that can be configured into a wide variety of instances. The architecture favors the use of commercial off-the-shelf (COTS) hardware and software components, with application-transparent fault-tolerance implemented primarily by software. Drawing on experience from systems such as SIFT [47], MAFT [37], FTPP [30] and Delta-4 [52], the generic architecture is defined along three dimensions of fault containment (Figure 1) [53]:
- Integrity levels, or design-fault containment regions.
- Lanes, or secondary physical-fault containment regions.
- Channels, or primary physical-fault containment regions.
A particular instance of the architecture is defined by the dimensional parameters \( \{ C, M, I \} \), a reconfiguration strategy, and an appropriate selection of generic hardware and software GUARDS components. These generic components implement mechanisms for:
- Inter-channel communication.
- Output data consolidation.
- Fault-tolerance and integrity management.
Fault-tolerance and integrity management are software-implemented through a distributed set of generic system components (shown as a “middleware” layer on Figure 1). This layer is itself fault-tolerant (through replication and distribution of its components) with respect to faults that affect channels independently (e.g., physical faults). However, the tolerance of design faults in this system layer is not explicitly addressed.
### 3.1. The Integrity Dimension
The integrity dimension aims to provide containment regions with respect to software design faults. The intent is to protect critical components from the propagation of errors due to residual
---
1 Note, however, that correlated faults are included in the models used to assess the dependability of instances of the architecture: see Section 8.2.
design faults in less-critical components. Each application object is classified within a particular integrity level according to how much it can be trusted (the more trustworthy an object is, the higher its integrity level). The degree to which an object can be trusted depends on the evidence that is available supporting its correctness, and the consequences of its failure (i.e., its criticality).
The required protection is achieved by enforcing an integrity policy to mediate the communication between objects of different levels. Basically, the integrity policy seeks to prohibit flows of information from low to high integrity levels, like in the Biba policy [15]. However, this approach is inflexible. An object can obtain data of higher integrity than itself, but the data must then inherit the level of integrity of this object. This results in a decrease in the integrity of the data, without any possibility of restoring it. We deal with this drawback by providing special objects (Validation Objects) whose role is to apply fault tolerance mechanisms on information flows. The purpose of these objects is to output reliable information by using possibly corrupted data as input (i.e., with a low integrity level). Such objects upgrade the trustworthiness of data and hence allow information flows from low to high integrity levels [67].
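As a rough sketch of this idea (the class names, the voting rule, and the tagging scheme below are illustrative assumptions, not the mechanisms specified in [66, 67]), a Validation Object can be viewed as an object that consumes redundant low-integrity inputs and emits a single value that the policy then treats as higher integrity:

```cpp
#include <algorithm>
#include <array>

// Illustrative only: data tagged with an integrity level, and a Validation
// Object that upgrades integrity by voting over redundant low-integrity inputs.
enum class Integrity { Low = 0, High = 1 };

template <typename T>
struct Tagged {
    T value;
    Integrity level;
};

class MedianValidationObject {
public:
    // Consumes three possibly corrupted low-integrity readings and emits a
    // value that higher-integrity objects are allowed to read.
    Tagged<double> validate(const std::array<Tagged<double>, 3> &in) const {
        std::array<double, 3> v{in[0].value, in[1].value, in[2].value};
        std::sort(v.begin(), v.end());
        return {v[1], Integrity::High};  // the median masks one erroneous input
    }
};
```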
It must be ensured that it is not possible to by-pass the controls put into place to enforce the policy. This is achieved by spatial and temporal isolation, which are provided respectively by memory management hardware and resource utilization budget timers [66]. Furthermore, for the most critical components (the topmost integrity level) and a core set of basic components (i.e., the integrity management components and the underlying hardware and operating systems), it must be assumed either that there are no design faults, or that they can be tolerated by some other means (e.g., through diversification).
In this paper, we do not detail the integrity dimension any further; the interested reader should refer to references [66, 67].
3.2. The Lane Dimension
Multiple processors or lanes are used essentially to define secondary physical fault containment regions. Such secondary regions can be used to improve the capabilities for fault diagnosis within a channel, e.g., by comparison of computation replicated on several nodes. There is also scope for improving coverage with respect to design faults by using intra-channel diversification.
Alternatively, lanes can be used to improve the availability of a channel, e.g., by passivating a node that is diagnosed to be permanently faulty. The required fault diagnosis could be triggered either by the error-processing mechanisms within a channel or through an error signal from the inter-channel voting mechanisms.
Further reasons for defining an instance with multiple lanes include parallel processing to improve performance and isolation of software of different integrity levels. To aid the timing
analysis of such software we require that the multiple processors within a channel have access to shared memory (see Section 6).
3.3. The Channel Dimension
Channels provide the primary fault containment regions that are the ultimate line of defense within a single instance for physical faults that affect a single channel. Fault tolerance is based on active replication of application tasks over the set of channels. It must be ensured that replicas are supplied with the same inputs in the same order, despite the occurrence of faults. Then, as long as replicas on fault-free channels behave deterministically, they should produce the same outputs. Error processing can thus be based on comparison or voting of replica outputs.
Not all instances require the same number of channels. In fact, one could imagine an instance with just one channel. This would be the case for an application that only requires multiple integrity levels, or for which the fault-tolerance mechanisms implemented within a channel are judged to be sufficient. It should be expected, however, that most applications require instances with several channels. Important cases are:
- Two channels: motivated either by a requirement for improved safety (using inter-channel comparison) or improved reliability (based on intra-channel self-checking to provide crash failure semantics).
- Three channels: the well-known triple modular redundancy (TMR) strategy that enables most $^2$ faults in one channel to be masked. In addition any disagreements are detected and used as inputs for error diagnosis and fault treatment.
- Four channels: to enable masking of completely arbitrary faults or to allow a channel to be isolated for off-line testing while still guaranteeing TMR operation with the remaining on-line channels.
Instances of the architecture with more than four channels are not currently envisaged.
4. Inter-Channel Communication Network
Central to the architecture is an inter-channel communication network (ICN), which fulfills two essential functions:
- It provides a global clock to all channels.
- It allows channels to achieve interactive consistency (consensus) on non-replicated data.
The ICN consists of an ICN-manager for each channel and unidirectional serial links to interconnect the ICN-managers. In the current implementation, the ICN-manager is a Motorola
\(^2\) The exception is that of Byzantine clock behavior (see Section 4.1).
68040-based board with a dual-port shared memory for asynchronous communication with the intra-channel VME back-plane bus. Serial links are provided by two Motorola 68360-based piggy-back boards\(^3\). Each such board provides two Ethernet links. One link is configured as transmit only, the other links are configured as receive only. An ICN-manager can thus simultaneously broadcast data to the remote ICN-managers over its outgoing serial link and receive data from the remote ICN-managers over the other links.
### 4.1. Clock Synchronization
The ICN-managers constitute a set of fully interconnected nodes. Each node has a physical clock and computes a global logical clock time through a fault-tolerant synchronization algorithm. Such an algorithm is classically defined as one that satisfies both the agreement and accuracy properties:
- The *agreement* condition is satisfied if and only if the skew between any non-faulty logical clocks is bounded.
- The *accuracy* condition is satisfied if and only if all non-faulty logical clocks have a bounded drift with respect to real time.
Since COTS-based solutions are preferred within GUARDS, we focused on software-implemented algorithms. In particular, we considered both convergence averaging and convergence non-averaging algorithms [57].
In a convergence averaging algorithm, each node resynchronizes according to clock values obtained through periodic one-round clock exchanges. On each node, the other clocks can be taken into account through a mean-like function [40], or a median-like function [46]. The worst-case skew of these algorithms is dominated by the uncertainty on transmission delay. They can tolerate \( f \) arbitrarily faulty nodes in a (fully connected) network of \( n \) nodes, under the sufficient condition that \( n > 3f \).
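A generic convergence-averaging step in this style can be sketched as follows. This is a simplified illustration of the family of algorithms cited above, not the exact algorithm used in GUARDS, and the tick-based representation is an assumption.

```cpp
#include <algorithm>
#include <cstddef>
#include <vector>

// Simplified convergence-averaging resynchronization step (illustrative).
// readings: this node's view of every node's clock at the exchange, in ticks.
// f: number of arbitrarily faulty nodes to tolerate (requires n > 3f).
// Returns the correction to apply to the local logical clock.
double convergenceCorrection(std::vector<double> readings,
                             double localClock, std::size_t f) {
    std::sort(readings.begin(), readings.end());
    // Discard the f smallest and f largest readings so that faulty clocks
    // cannot pull the result outside the range of the correct clocks.
    double sum = 0.0;
    std::size_t kept = 0;
    for (std::size_t i = f; i + f < readings.size(); ++i) {
        sum += readings[i];
        ++kept;
    }
    double target = (kept > 0) ? sum / kept : localClock;
    return target - localClock;
}
```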
In a convergence non-averaging algorithm, each node periodically seeks to be the system synchronizer. To deal with possible Byzantine behavior, the exchanged messages can be authenticated [63]. The worst-case skew of these algorithms is dominated by the maximum message transit delay. When authentication is used for inter-node message exchanges, they can tolerate \( f \) arbitrarily faulty nodes with only \( n > 2f \) nodes.
The GUARDS architecture uses a convergence-averaging solution based on [46] and applied to up to four nodes (i.e., ICN-managers in our architecture). This choice was motivated mainly by reasons of performance and design simplicity. It implies that the probability of occurrence of a Byzantine clock must be carefully evaluated in a three-channel configuration. This probability is
---
\(^3\) For a two-channel instance, only one ICN piggy-back board is necessary.
expected to be very small, since the ICN serial links are broadcast media and the ICN-managers can check that they receive a syntactically correct synchronization message in a well-defined local time window.
The global clock maintained by the set of ICN-managers is broadcast, via the intra-channel back-plane busses, to the processors and I/O boards local to a channel.
4.2. Interactive Consistency
The issue of exchanging private data between channels and agreeing on a common value in the presence of arbitrary faults is known as the interactive consistency problem (the symmetric form of the Byzantine agreement problem) [50]. The two fundamental properties that a communication algorithm must fulfill to ensure interactive consistency are:
- **Agreement**: if channels $p$ and $q$ are non-faulty, then they agree on the value ascribed to any other channel.
- **Validity**: if channels $p$ and $q$ are non-faulty, then the value ascribed to $p$ by $q$ is indeed $p$’s private value.
In the general case, the necessary conditions to achieve interactive consistency in spite of up to $f$ arbitrarily faulty channels are [39]:
- At least $3f+1$ channels.
- At least $2f+1$ disjoint inter-channel communication links.
- At least $f+1$ rounds of message exchange.
- Bounded skew between non-faulty channels.
Under the assumption of authenticated messages, which can be copied and forwarded but not undetectably altered by a relayer, the condition on the minimal number of channels can be relaxed to $f+2$. Nevertheless, at least $2f+1$ channels are still necessary if majority voting must be carried out between replicated application tasks.
The interactive consistency protocol used in GUARDS is based on the ZA algorithm [28], which was derived from the Z algorithm [64] by adding the assumption of authentication. In particular, authentication precludes the design fault in the Z algorithm identified in [45]. Following the hybrid fault model described in [45], the protocol allows for both arbitrarily faulty channels and channels affected by less severe kinds of faults (e.g., omission faults).
For performance reasons, and since by assumption the architecture only needs to tolerate accidental faults and not malicious attacks, we preferred to use a keyed checksum scheme for message authentication rather than resorting to true cryptographic signatures. Under this scheme, multiple checksums are appended to each (broadcasted) message. Each checksum is computed over
the concatenation of the data part of the message and a private key that is known only to the sender and to one of the broadcast destinations.
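The general shape of such a keyed-checksum scheme is sketched below. The hash function (FNV-1a) and the message layout are placeholders chosen for illustration; they are not the checksum actually used in the project.

```cpp
#include <cstdint>
#include <vector>

// Illustrative keyed checksum: one checksum per broadcast destination,
// computed over the message data concatenated with the private key shared
// between the sender and that destination.
uint32_t keyedChecksum(const std::vector<uint8_t> &data, uint32_t key) {
    uint32_t h = 2166136261u;  // FNV-1a offset basis, placeholder only
    auto mix = [&h](uint8_t byte) { h = (h ^ byte) * 16777619u; };
    for (uint8_t b : data) mix(b);
    for (int i = 0; i < 4; ++i) mix(static_cast<uint8_t>(key >> (8 * i)));
    return h;
}

struct AuthenticatedMessage {
    std::vector<uint8_t> data;
    std::vector<uint32_t> checksums;  // one entry per destination channel
};

AuthenticatedMessage buildMessage(const std::vector<uint8_t> &data,
                                  const std::vector<uint32_t> &destKeys) {
    AuthenticatedMessage m{data, {}};
    for (uint32_t key : destKeys) {
        m.checksums.push_back(keyedChecksum(data, key));
    }
    return m;
}
```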
4.3. Scheduling
The ICN is scheduled according to a table-driven protocol. The schedule consists of a frame (corresponding to a given application mode) that is subdivided into cycles and slots. The last slot of a cycle is used for clock synchronization so the length of a cycle is fixed either by the required channel synchronization accuracy or by the maximum I/O frequency in a given mode. The other slots of a cycle are of fixed duration and can support one fixed-sized message transmission (and up to three message receptions). In the current implementation, each message may contain 1000 bytes.
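Such a table-driven schedule can be represented as static configuration data. The structure below is only a sketch with hypothetical field names and dimensions; it is not taken from the GUARDS implementation.

```cpp
#include <array>
#include <cstddef>
#include <cstdint>

// Hypothetical representation of a table-driven ICN schedule (sketch only).
constexpr std::size_t kSlotsPerCycle   = 8;    // last slot reserved for clock sync
constexpr std::size_t kCyclesPerFrame  = 4;
constexpr std::size_t kMaxPayloadBytes = 1000;

struct SlotEntry {
    uint16_t txMessageId;    // message this channel broadcasts in the slot
    bool     clockSyncSlot;  // true for the last slot of each cycle
};

using CycleTable = std::array<SlotEntry, kSlotsPerCycle>;
using FrameTable = std::array<CycleTable, kCyclesPerFrame>;  // one frame per application mode
```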
5. Inter-Channel Error Processing and Fault Treatment
From a conceptual viewpoint, it is common to consider fault tolerance as being achieved by error processing and fault treatment [2, 41]:
- **Error processing** is aimed at removing errors from the computation state, if possible before failure occurrence. In general, error processing involves three primitives: error detection, error diagnosis and error recovery.
- **Fault treatment** is aimed at preventing faults from being activated again and also involves three primitives: fault diagnosis, fault passivation and reconfiguration.
In GUARDS, error recovery is achieved primarily by error compensation, whereby the erroneous state contains enough redundancy to enable its transformation into an error-free state. This redundancy is provided by active replication of critical applications (although diversification is not precluded) over the $C$ channels; it is application-transparent and managed by software, including comparison or voting of computed results. Error processing thus relies primarily on $N$-modular redundancy to detect disagreeing channels and (when $C \geq 3$) to mask errors occurring in the voted results at run-time. When $C=2$, two possibilities are offered, as already mentioned in Section 3.3:
- Error detection (locally by a channel) and compensation (by switching to a single channel configuration).
- Error detection (by channel comparison) and switching to a safe state (a degenerate form of forward recovery).
Figure 2 illustrates the replicated execution of an iterative task in the case of a three-channel configuration. After reading the replicated sensors, the input values are consolidated across all channels after a two-round interactive consistency exchange over the ICN. The application tasks are then executed asynchronously, with pre-emptive priority scheduling allowing different interleavings of their executions on each channel. This diversifies the activities of the different
channels, thereby allowing many residual design faults to be tolerated as if they were intermittents (cf. Section 2.2).

Figure 2 — TMR execution of an application function split in sequential threads
Application state variables (which contain values that are carried over between iterations) are used together with consolidated inputs to compute the output values, which are exchanged in a single round over the ICN and voted. The voted results are then written to the actuators, possibly via output consolidation hardware, which allows the physical values to be voted.
Since neither the internal state variables of the underlying COTS operating systems nor the totality of the application state variables are voted, further error recovery is necessary to correct any such state that becomes erroneous (note that this may be the case even in the event of a transient fault). However, this is a secondary, non-urgent error recovery activity since, until another channel is affected by a fault, the error compensation provided by output voting or switching can be relied upon to ensure that correct outputs are delivered to the controlled process. Consequently, this secondary error recovery can be viewed as part of fault treatment.
In the next section, we describe the GUARDS diagnosis mechanisms, which include both error diagnosis, to decide whether the damage to a channel’s state warrants further action, and fault diagnosis, to decide the location and type of the fault and thus the necessary corrective action.
Then, in Section 5.2, we describe the state recovery procedure that allows reintegration of a channel after a transient fault or repair of a permanent fault. Finally, Section 5.3 discusses mechanisms for output consolidation.
### 5.1. Diagnosis
The first step in diagnosis is to collect error reports generated during the interactive consistency and consolidation exchanges (majority voting discrepancies, timing errors, ICN bus transmission errors, protocol violations, etc.) and then to filter them to assess whether the extent of damage warrants further action. Indeed, some reported errors may not have resulted in any change to the state of a channel. Alternatively, if only a small part of the state has become erroneous, then an erroneous channel might correct itself autonomously by overwriting the erroneous variables during
continued execution. If such fortuitous recovery does not occur, an explicit forward recovery action is necessary to reconstruct a correct state.
The filtering of errors is done using a software-implemented mechanism known as an α-count, which was originally proposed for the discrimination of transient versus intermittent-permanent faults [17]. Error reports are processed on a periodic basis, giving lower weights to error reports as they get older. A score variable $\alpha_x$ (initially set to 0) is associated to each component $x$ to record information about the errors attributed to that component. The $L$-th judgment is accounted for as follows:
$$\alpha_x(L) = \alpha_x(L-1) + 1 \quad \text{if component } x \text{ is perceived as faulty}$$
$$\alpha_x(L) = k \cdot \alpha_x(L-1) \quad \text{if component } x \text{ is perceived as correct (with } 0 < k < 1)$$
When $\alpha_x(L)$ becomes greater than a given threshold $\alpha_T$, the damage to the state of component $x$ is judged to be such that further diagnosis is necessary.
The appropriate filtering action can be provided by several different heuristics for the accumulation and decay processes (where $\alpha_x(L)$ takes slightly different expressions) [17, 55, 56]. For a given error distribution, the parameters of the heuristics can be determined through a dependability evaluation (for example, see [17]).
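The basic accumulation/decay rule above maps directly onto a small filter. The sketch below implements only that basic heuristic, not the variants of [17, 55, 56].

```cpp
// Minimal alpha-count filter: increment the score on an error judgment,
// decay it geometrically otherwise, and flag the component once the score
// exceeds the threshold alpha_T.
class AlphaCount {
public:
    AlphaCount(double k, double threshold) : k_(k), threshold_(threshold) {}

    // Record the L-th judgment on the monitored component.
    void judge(bool perceivedFaulty) {
        alpha_ = perceivedFaulty ? alpha_ + 1.0 : k_ * alpha_;
    }

    bool needsFurtherDiagnosis() const { return alpha_ > threshold_; }
    double score() const { return alpha_; }

private:
    double k_;           // decay factor, 0 < k < 1
    double threshold_;   // alpha_T
    double alpha_ = 0.0; // initially 0
};
```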
A distributed version of α-count is used in GUARDS to provide the error syndrome that is input to inter-channel fault diagnosis. Each channel $i$ maintains $C$ α-count variables: one, $\alpha_{ii}$, representing its opinion of its own health, and $C-1$ variables, $\alpha_{ij}$, $j \neq i$, representing its opinions of the health of the other channels. The α-counts are updated and processed cyclically. Each cycle, called an α-cycle, has a duration $N_\alpha$ chosen such that $N_\alpha \cdot n_1 = N_{frame}$, where $n_1$ is an integer and $N_{frame}$ is the duration of the ICN frame (see Section 4.3).
Since each channel may have a different perception of the errors created by other channels, the α-counts maintained by each channel must be viewed as single-source (private) values. They are consolidated at the end of each α-cycle through an interactive consistency protocol so that fault-free channels have a consistent view of the status of the instance (a consistent matrix $A$ of α-count values). During the next α-cycle, fault diagnosis can thus be performed using $A$. The resulting diagnosis consists of a vector $D$ whose elements $D_i$ represent the diagnosed state of each channel (correct or requiring passivation and isolation).
The fault diagnosis problem has been extensively studied in the literature. An ideal diagnosis should be both correct and complete:
- A diagnosis is correct if any channel that is diagnosed as faulty is indeed faulty.
- A diagnosis is complete if all faulty channels are diagnosed as faulty.
In the current case, the inter-channel tests have imperfect coverage so a channel requiring passivation is not necessarily accused by all correct channels [16, 43]. The algorithm in the current
implementation diagnoses a channel as faulty if it is accused of being faulty by a majority of channels or, of course, if it accuses itself. This algorithm is correct and complete under the assumption that not more than one channel at a time is accused by a fault-free channel. However, due to the memory effect of the $\alpha$-count mechanism, this assumption can be violated if near-coincident faults occur on different channels. In this situation, there is thus a trade-off between the probability of incorrect diagnosis caused by a long memory effect (high value of $k$) and the probability of having an incorrect majority vote due to slow elimination of a faulty channel (low value of $k$). This trade-off is the subject of ongoing research.
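Once the consolidated matrix A has been thresholded into per-channel accusations, the diagnosis rule just described (self-accusation, or accusation by a majority of channels) is straightforward to express. The sketch below assumes the accusations are already consistent across fault-free channels.

```cpp
#include <cstddef>
#include <vector>

// Sketch of the inter-channel diagnosis rule. accuses[i][j] is true if
// channel i's consolidated alpha-count for channel j exceeds alpha_T.
// Channel j is diagnosed as requiring passivation if it accuses itself
// or is accused by a majority of channels.
std::vector<bool> diagnose(const std::vector<std::vector<bool>> &accuses) {
    const std::size_t C = accuses.size();
    std::vector<bool> requiresPassivation(C, false);
    for (std::size_t j = 0; j < C; ++j) {
        std::size_t votes = 0;
        for (std::size_t i = 0; i < C; ++i) {
            if (accuses[i][j]) ++votes;
        }
        requiresPassivation[j] = accuses[j][j] || (votes > C / 2);
    }
    return requiresPassivation;
}
```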
Once a channel has been diagnosed as requiring passivation, it is isolated (i.e., disconnected from the outside world) and reset (with the re-initialization of operating system structures). A thorough self-test is then carried out. If the test reveals a permanent fault, the channel is switched off and (possibly) undergoes repair. Whenever a channel passes the test (i.e., the fault was transient), or after having repaired a channel having suffered a permanent fault, it must be reintegrated to avoid unnecessary redundancy attrition.
It should be noted that the error filtering action of the $\alpha$-count can effectively be turned off by setting its threshold $\alpha_T=1$. In this case, any transient fault leading to a self-detected error (or to errors perceived by a majority of channels) will cause that channel to go through the possibly lengthy self-test and reintegration procedure, irrespective of the extent of the actual damage to the channel’s state. A fault affecting another channel before reintegration of the former will induce a further decrease in the number of active channels. When transients are common, this policy can thus cause rapid switching to a safe state if the number of active channels becomes insufficient for error compensation to remain effective. The choice of whether filtering is used or, more generally, the value of the $\alpha$-count threshold, thus leads to a classic trade-off between safety and reliability.
### 5.2. State Restoration
For a channel to be reintegrated, it must first resynchronize its clock, then its state, with the pool of active channels. Since not all state variables are necessarily consolidated through ICN exchanges, the state (or channel context) cannot be retrieved by simply observing the traffic on the ICN, but must be explicitly copied from the active channels. This is achieved by a system state restoration (SR) procedure, called Running SR, applied to the channel context, i.e., the set of application state variables whose values are carried over successive iterations without consolidation.
A minimum level of service must be ensured, even during the SR procedure, so a limited number of vital application tasks must be allowed to continue execution on the active channels. Running SR is therefore a multi-step algorithm where, at each step, only a fraction of the state is exchanged. Furthermore, vital application tasks may update state variables while SR progresses.
The basic behavior of Running SR is the following (more details with variations and optimizations are given in [18, 19]). The channel context is arranged in a single (logical) memory block managed by a “context object”. When the state of a channel needs to be restored, the system enters an “SR mode”. The \( C - 1 \) active channels enter a “put state” sub-mode while the joining channel enters a “get state” sub-mode.
To take advantage of the parallel links of the ICN, the whole block of memory storing the channel context is split into \( C - 1 \) sub-blocks of similar size, each managed by one of the active channels. Each active channel \( i \) propagates to the joining channel any updates to state variables belonging to block \( i \). A Sweeper task is executed to transfer the \( i \)-th block of the context. In the joining channel, transferred data are received and processed by a Catcher task. This task has most of the CPU time available since no application tasks are executed on that channel.
Switching from normal computation to the SR mode occurs at the beginning of an ICN frame, with a corresponding change in task scheduling, and SR completion always occurs at the end of a frame. After completion, signatures of the entire channel state are taken in each channel and exchanged through the interactive consistency protocol. State restoration is considered successful if all signatures match. Normal application scheduling is then re-activated on the next frame.
Since a deterministic, finite time is required to copy the memory block, and any updates to already copied state variables are immediately propagated, the whole (parallel) state restoration is performed in a deterministic, finite time. The state restoration tasks are assigned a priority and a deadline, and for schedulability analysis are treated the same as vital application tasks. Note that, during SR, the ICN has to support: a) the normal traffic generated by the vital (i.e., non-stoppable) applications, b) the extra traffic due to state variable updates, and c) the traffic generated by the Sweeper task. SR will therefore normally require a mode change to suspend non-vital application tasks so as to release processor time and ICN slots for SR execution and communication.
5.3. Output Data Consolidation
The purpose of the output data consolidation system (cf. Figure 1) is to map the replicated logical outputs of each channel onto the actual physical outputs to the controlled process, in such a way that the latter are either error-free or in a safe position. Such consolidation, placed at the physical interface with the controlled process, is the ultimate error confinement barrier, and is a complement to any software-implemented voting of the logical outputs.
A given instance of the architecture could have several different output consolidation mechanisms according to its various interfaces with the controlled process. Ideally, an output data consolidation mechanism should extend into the controlled process itself, to prevent the physical interface to the process from becoming a single point of failure. A typical example would be a control surface (e.g., in a fly-by-wire application) that can act as a physical voter by summing the
forces produced by redundant actuators. Alternatively, a single channel can be designated to control a given actuator. Failures of that actuator can be detected at the application level by means of additional sensors allowing each channel to read back the controlled process variable and check it against the requested output. Recovery can then be achieved by switching to an alternative actuator. Other process-specific output data consolidation mechanisms used in the GUARDS end-user application domains include combinatorial logic implemented by relay or fluid valve networks, and the “arm-and-fire” technique commonly used to trigger space vehicle pyrotechnics (one channel sends an "arm" command, which is checked by the other channels, then all channels send matching "fire" commands; the pyrotechnics are triggered only if a majority of the latter concur with the former).
Output consolidation mechanisms such as these may be used for various end-user instances of the architecture. By definition, such process-specific techniques cannot be generic so no specific research has been carried out in this direction. However, the project has considered generic output consolidation mechanisms for networked and discrete outputs. A prototype consolidation mechanism is being implemented for discrete digital or analog outputs that can be electrically isolated from each other and then connected through a wired-OR to the output devices. Consolidation is achieved by having each channel read back its own output and those of the other channel(s) so that a vote can be carried out in software. Each channel then sends selection signals to a hard-wired voter (one per channel) that allows or disconnects that channel’s outputs. This approach relies on the assumption that the output devices can tolerate the short but inevitable output glitch caused by the read-back, vote and disconnection delay.
6. Real-Time Scheduling
The architecture is capable of supporting a range of scheduling models (cf. Section 2.3). In this section, we focus on the standard pre-emptive priority-based scheme. We also discuss the consequences on scheduling of the ICN network.
Our timing analysis is based upon the Response-time Analysis [7, 44]. We assume that any communication between applications is asynchronous through the shared memory. The use of round-robin scheduling on the intra-channel VME bus allows all shared memory accesses to be bounded. This is adequate because it is assumed that the number of hosts within a channel is small. Furthermore, we assume the use of a non-blocking algorithm such as that proposed in [62] to avoid the problems associated with remote blocking.
6.1. Inter-Channel Replication of Applications
For an application task to be replicated, it must behave deterministically and each replica task must process the same inputs in the same order. At any point where there is potential for replica divergence, the channels must perform an interactive consistency agreement. Unfortunately, the
cost of executing interactive consistency agreement protocols can be significant. There is therefore a need to keep their use to a minimum.
In our approach, we force all replicated tasks to read the same internal data. We can thus trade-off fewer agreement communications (and therefore greater efficiency) against early detection of errors. If we assume that each replica does not contain any inherently non-deterministic code, replica determinism and error masking (or detection) can be ensured by:
- Performing interactive consistency agreement or Byzantine agreement on single-sourced data.
- Ensuring that all replicas receive the same inputs when those inputs are obtained from other replica tasks (replicated inputs).
- Voting on any vital output.
6.1.1. Agreement on Sensor Inputs
To reduce the complexity of the input selection algorithm, which processes the vector of redundant values consolidated through the interactive consistency exchange, it is important to minimize the error between the redundant input values. However, since the tasks are independently scheduled on each channel, they could read their corresponding sensors at significantly different times. This is similar to the input jitter problem where a task ($\tau$) implementing a control law has to read its input on a regular basis. If jitter is a problem, the solution is to split the task into two tasks ($\tau^p, \tau'$). $\tau^p$ has a release time$^4$ and a deadline appropriate for the dynamics (and the allowable jitter) of the physical quantity being measured by the sensor. Task $\tau'$ has the original $\tau$'s deadline and is executed at an offset from the release time of $\tau^p$. We will discuss what value this offset should have in Section 6.2.
6.1.2. Identical Internal Replicated Input
Two cases need to be considered when reader and writer tasks share the same data, according to whether or not there is an explicit precedence constraint between the writer and the reader. When there is such a constraint, then it can be captured by the scheduling. When tasks share data asynchronously (and therefore there is no explicit precedence constraint between the writer and the reader), there are four types of interaction:
- Periodic writer — Periodic reader: the periods of the two tasks do not have a simple relationship.
- Periodic writer — Sporadic reader: there is no relationship between the period of the writer and the release of the reader.
$^4$ We assume that all I/O is periodic in nature.
- Sporadic writer — Sporadic reader: there is no relationship between the release of the writer and the release of the reader.
- Sporadic writer — Periodic reader: there is no relationship between the release of the writer and the period of the reader.
In all of these cases, to ensure each replica reads the same value, we keep more than one copy of the data (usually two is enough) and use timestamps [9, 51]. The essence of this approach is to use off-line schedulability analysis [7] to calculate the worst-case response times of each replicated writer. The maximum of these values is added to the release time of the replicas (taking into account any release jitter) to give a time by which all replicas must have written the data (in the worst case). To allow for clock drift between replicas, the maximum skew, $\varepsilon$, is also added. This value is used as a timestamp when the data is written.
A reader replica simply compares its release time with the data timestamp. If the timestamp is earlier, then the reader can take the data. If the timestamp is later than its release time, then the reader knows that its replicated writer has potentially executed before the other replicated writers. It must therefore take a previous value of the data (the most recent) whose timestamp is earlier than its release time. All reader replicas undertake the same algorithm and consequently get the same value.
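The two-copy, timestamped buffer can be sketched as follows. The timestamp stored with each value is the writer's release time plus its worst-case response time plus the maximum skew ε, as described above; the class and field names are illustrative assumptions.

```cpp
#include <array>
#include <cstdint>

// Two-copy timestamped buffer for asynchronous, replica-deterministic data
// sharing (illustrative sketch). All times are global-clock ticks.
struct TimedValue {
    double  value = 0.0;
    int64_t timestamp = -1;  // writer release + worst-case response + skew
};

class ReplicatedRegister {
public:
    // Writer side: the timestamp components are known from off-line analysis.
    void write(double value, int64_t releaseTime,
               int64_t worstCaseResponse, int64_t epsilon) {
        slots_[next_] = TimedValue{value, releaseTime + worstCaseResponse + epsilon};
        next_ ^= 1;  // keep the previous copy available for "early" readers
    }

    // Reader side: every replica takes the most recent copy whose timestamp
    // is not later than its own release time, so all replicas read the same value.
    double read(int64_t readerReleaseTime) const {
        const TimedValue *best = nullptr;
        for (const TimedValue &c : slots_) {
            if (c.timestamp >= 0 && c.timestamp <= readerReleaseTime &&
                (best == nullptr || c.timestamp > best->timestamp)) {
                best = &c;
            }
        }
        return best ? best->value : 0.0;  // 0.0 stands in for the initial value
    }

private:
    std::array<TimedValue, 2> slots_{};
    int next_ = 0;
};
```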
6.1.3. Output Voting
Where output voting is required, it is again necessary to transform the replicated task writing to the actuators into two tasks ($\tau'$ and $\tau^{op}$): $\tau'$ sends the output value across the ICN for voting, and $\tau^{op}$ reads the majority vote and sends this to the actuator. The deadline of $\tau'$ will determine the earliest point when the ICN manager can perform the voting. The offset and deadline of $\tau^{op}$ will determine when the voted result must be available and the amount of potential output jitter. Hence, the two tasks have similar timing characteristics to the tasks used for input agreement (cf. Section 6.1.1). The main difference is that there is a simple majority vote rather than an agreement protocol involving three separate values.
6.2. Handling Offsets
A real-time periodic transaction model has been developed in which periodic transaction $i$ consists of three tasks $\tau^1_i$, $\tau^2_i$ and $\tau^3_i$. Task $\tau^1_i$ reads a sensor and sends the value to the ICN manager. Task $\tau^2_i$ reads back from the ICN manager the set of values received from all the replicas; it consolidates the values and processes the consolidated reading and eventually produces some data. It sends this data for output result consolidation to the ICN manager. Task $\tau^3_i$ reads the consolidated result from the ICN manager and sends it to the actuator.
This form of real-time transaction is implemented by timing offsets. Analysis of task sets with offsets is NP-complete [44] and even sub-optimal solutions are complex [6, 8, 65]. The approach we take is based on [10], modified to take into account the fact that the computational
times of $\tau_i^1$ and $\tau_i^3$ (respectively $C_i^1$ and $C_i^3$) are much smaller than $C_i^2$, the computational time of $\tau_i^2$, i.e., $C_i^2 >> \max(C_i^1, C_i^3)$.
Once offsets have been assigned, a check must be made to ensure that: (a) the response times of the individual tasks are less than the offsets of the next task in the transaction, (b) there is enough time before the offset and after the response to transmit data on the ICN network, and (c) the deadline of the transaction has been met. If any of these conditions is violated, then it may be possible to modify the offsets of the transaction violating the condition in an attempt to satisfy all the requirements [10].
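A minimal sketch of checks (a)–(c) follows (illustrative Python; the attribute names and the fixed ICN transfer time are assumptions, and a real analysis would use the worst-case response times produced by the off-line schedulability tool).

```python
from dataclasses import dataclass
from typing import List

@dataclass
class SubTask:
    offset: float          # release offset within the transaction
    response_time: float   # worst-case response time from the off-line analysis
    deadline: float        # deadline relative to the transaction release

def transaction_feasible(subtasks: List[SubTask],
                         transaction_deadline: float,
                         icn_transfer_time: float) -> bool:
    """Check conditions (a)-(c) for a transaction tau_i^1, tau_i^2, tau_i^3."""
    for current, nxt in zip(subtasks, subtasks[1:]):
        finish = current.offset + current.response_time
        # (a) each sub-task completes before the next sub-task is released
        if finish > nxt.offset:
            return False
        # (b) enough time between completion and the next offset for the ICN transfer
        if nxt.offset - finish < icn_transfer_time:
            return False
    last = subtasks[-1]
    # (c) the transaction deadline is met by the final sub-task
    return last.offset + last.response_time <= transaction_deadline

# Example transaction: input acquisition, processing, output actuation.
tx = [SubTask(0.0, 2.0, 5.0), SubTask(10.0, 20.0, 35.0), SubTask(40.0, 2.0, 45.0)]
print(transaction_feasible(tx, transaction_deadline=45.0, icn_transfer_time=5.0))  # True
```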
6.3. Scheduling the ICN Network
Following the individual schedulability analysis of each channel, the following characteristics are known for each task participating in replicated transactions:
- Period
- Response-time
- Offset
- Deadline
The ICN tables can be built from this information — in the same way as cyclic executive schedules can be constructed [22]. Since all communication through the channels’ shared memory is asynchronous, the ICN manager can take the data any time after the producing task’s deadline has expired.
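The following sketch (illustrative Python; the slot granularity and the greedy first-fit policy are assumptions, not the GUARDS off-line tool) shows how transfer slots could be assigned in a cyclic-executive style, with each task's data transferred in the first free slot after its deadline and before the offset of the consuming task.

```python
from dataclasses import dataclass
from typing import Dict, List, Optional

@dataclass
class Transfer:
    name: str
    ready: float      # producing task's deadline: data may be taken after this
    needed_by: float  # consuming task's offset: data must be consolidated before this

def build_icn_table(transfers: List[Transfer], slot_length: float,
                    cycle_length: float) -> Optional[Dict[int, str]]:
    """Greedy first-fit allocation of ICN transfer slots within one cycle."""
    n_slots = int(cycle_length / slot_length)
    table: Dict[int, str] = {}
    # Serve the most urgent transfers first.
    for tr in sorted(transfers, key=lambda t: t.needed_by):
        first = int(tr.ready / slot_length)          # earliest slot starting at or after 'ready'
        if first * slot_length < tr.ready:
            first += 1
        placed = False
        for slot in range(first, n_slots):
            if slot not in table and (slot + 1) * slot_length <= tr.needed_by:
                table[slot] = tr.name
                placed = True
                break
        if not placed:
            return None  # no feasible ICN schedule: the application design must be revisited
    return table

transfers = [Transfer("sensor_A", ready=4.0, needed_by=10.0),
             Transfer("sensor_B", ready=4.0, needed_by=12.0)]
print(build_icn_table(transfers, slot_length=2.0, cycle_length=20.0))
# {2: 'sensor_A', 3: 'sensor_B'}  (time windows 4-6 and 6-8)
```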
Of course, there is a close relationship between the scheduling of the channels and the scheduling of the ICN network. If the off-line tool fails to find an ICN schedule, it is necessary to re-visit the design of the application.
7. Architecture Development Environment
The generic architecture is supported by an Architecture Development Environment [49] consisting of a set of tools for designing instances of the architecture according to a coherent and rigorous design method. The toolset allows collection of the performance attributes of the underlying execution environment and the analysis of the schedulability of hard real-time threads, not only within each processing element of the system, but also among them. This allows in particular a rigorous definition of critical communication and synchronization among the redundant computers.
7.1. Design Method
The design and development of a GUARDS software application are centered on a hard real-time (HRT) design method, which allows real-time requirements to be taken into account and
verified during the design. The method also addresses the problem of designing replicated, fault-tolerant architectures, where a number of computing and communication boards interact for the consolidation of input values and output results.
The design of a GUARDS application is defined as a sequence of correlated activities, that may be re-iterated to produce a software design that complies with both the functional and non-functional requirements of the application. Three design activities are identified:
- **Functional** architecture design, where the software application is defined through an appropriate design method and according to its functional requirements and its performance requirements (task periods, deadlines, etc.).
- **Infrastructure** architecture design, where the required hardware boards and generic GUARDS software components are identified. They constitute the underlying computing environment of the application software.
- **Physical** architecture design, where the functional architecture is mapped onto the infrastructure and analyzed according to the performance requirements. This is done not only for the processors within each replicated channel, but also at the inter-channel level, to determine the ICN exchanges needed to consolidate input values and output results.
7.2. Inter-Channel Schedulability
According to the dependability requirements, each critical application task replica needs to consolidate its inputs and its output results with those of the corresponding replicas on the other channels (Figure 3). Each application task $\tau_i$ is structured as a real-time transaction, consisting of three sub-tasks, or threads, $\tau_{i1}$, $\tau_{i2}$ and $\tau_{i3}$ responsible for input acquisition, result calculation and output actuation (cf. Section 6.2).

For each application task $\tau_i$, a deadline is set that defines the time by which the final value must be sent to the actuator(s) (corresponding to the third thread of the transaction). Intermediate deadlines $D_{\tau_1}$ and $D_{\tau_2}$ are also introduced for the first and second threads. They define the time by which the input or output results are (or must be) ready for transfer through the ICN (after a fixed intra-channel transfer time) and consolidated. The transfer and consolidation of each value over the ICN must take place at pre-defined transfer slots (to synchronize such activities on each channel) and the needed duration determines an offset for the activation of the following thread (Figure 4).
The functional architecture design is supported by an extension of the HRT-HOOD design method that introduces the concept of Virtual Nodes, similar to that in the HOOD 3.1 method [31]. The extended method can take into account the lane dimension of GUARDS (by allocating objects to different processors within a channel) and the integrity dimension (by defining spatial firewalls around objects of a given criticality). The HRT-HoodNICE toolset has been accordingly enhanced.
The infrastructure architecture design is supported by a specific toolset that manages an archive of hardware and software components. Such components are described by their relations, compatibilities and performance attributes. The tool selects the needed components according to the characteristics of the required GUARDS instance.
As part of the physical architecture design, the application tasks (i.e., HRT objects) identified in the functional architecture are mapped onto the infrastructure architecture. They are coupled with the real-time models of the selected components, in order to analyze and verify their schedulability properties. This is done by the Temporal Properties Analysis toolset, which analyzes the performance of the resulting distributed software system.
The Temporal Properties Analysis toolset includes a Schedulability Analyzer and a Scheduler Simulator, based on those available in HRT-HoodNICE. They have been enhanced to provide a more precise and realistic analysis (by taking into account the concept of thread offsets) and to cope with the specific needs of a redundant fault-tolerant architecture (by allowing the analysis of the interactions over the ICN).
A further result of the physical architecture design is that, on the basis of the real-time models produced by the verification tools, the critical interactions among software functions on different channels are scheduled in a deterministic way. The ICN transfer slots allocated to them and a set of pre-defined exchange tables are produced automatically.
As a final step of the design phase, the overall structure of the software application is extracted from the HRT-HOOD design and the related code is automatically generated. To this end, a set of mapping rules has been defined to translate the HRT-HOOD design in terms of threads implemented in a sequential programming language (which could be C or the sequential subset of Ada) and executed by a POSIX compliant microkernel [68].
8. Validation
The validation strategy implemented within GUARDS has two main objectives [3]:
- A short-term objective: the validation of the design principles of the generic architecture, including both real-time and dependability mechanisms.
- A long-term objective: the validation of the development of instances of the architecture implementing specific end-user requirements.
A large spectrum of methods, techniques and tools has been considered to address these validation objectives and to account for the validation requirements expressed by the emerging trans-application domain standard IEC 1508 [33].
Following the comprehensive development model described in [42], the validation strategy is closely linked to the design solutions and the proposed generic architecture. The validation environment that supports the strategy includes components for verification and evaluation, using both analytical and experimental techniques. Figure 5 illustrates the relationship between the components of the validation environment, and their interactions with the architecture development environment.
Besides the three main validation components (namely, formal verification, model-based evaluation and fault injection), the figure explicitly identifies the role played by the methodology and the supporting toolset being developed for schedulability analysis (cf. Section 7.3). The figure also depicts the complementarity and relationships among the three validation components. In particular, fault injection (carried out on prototypes) complements the other validation components by providing means for: a) assessing the validity of the necessary assumptions made by the formal verification task, and b) estimating the coverage parameters included in the analytical models for dependability evaluation. The following three subsections briefly describe the related validation activities.
8.1. Formal Verification
Formal approaches were used both for specification and as a design-aid. We concentrated our effort on four dependability mechanisms, which constitute the basic building blocks of the architecture: a) clock synchronization, b) interactive consistency, c) fault diagnosis, and d) multi-level integrity.
The formal approaches that have been applied included both theorem proving and model checking. Table 1 summarizes the main features of the verifications carried out for each of the target mechanisms.
**Table 1 — Formal verification of the GUARDS dependability mechanisms**

<table>
<thead>
<tr>
<th>Target Mechanism</th>
<th>Clock Synchronization</th>
<th>Interactive Consistency</th>
<th>Fault Diagnosis</th>
<th>Multi-level Integrity</th>
</tr>
</thead>
<tbody>
<tr>
<td>Properties Verified</td>
<td>Agreement, Accuracy</td>
<td>Agreement, Validity</td>
<td>Correctness, Completeness</td>
<td>Segregation Policy (Multi-level Objects)</td>
</tr>
<tr>
<td>Approach</td>
<td>Theorem Proving</td>
<td colspan="3">Model Checking</td>
</tr>
<tr>
<td>Description and Specification</td>
<td>Higher Order Logic</td>
<td colspan="3">Process Algebra (CCS) and Temporal Logic (ACTL)</td>
</tr>
<tr>
<td>Supporting Tool</td>
<td>PVS</td>
<td colspan="3">JACK</td>
</tr>
</tbody>
</table>
The work carried out on the verification of clock synchronization relied heavily on PVS (Prototype Verification System) [48]. It led to the development of a general theory for averaging and non-averaging synchronization algorithms [60]. The verification of the synchronization solution used in GUARDS (cf. Section 4.1) was derived as an instantiation of this general theory.
The verifications concerning interactive consistency [12, 14], fault diagnosis [13] and multi-level integrity [27, 61] were all based on model checking using the JACK (Just Another Concurrency Kit) toolset [21]. This integrated environment provides a set of verification tools that can be used separately or in combination. Due to the complexity of the required models, the toolset was extended to include a symbolic model checker for ACTL [26].
These studies demonstrated the feasibility and the benefits of formal methods on realistic industrial problems using state-of-the-art tools. We believe this is an important outcome that can significantly facilitate the acceptance of the GUARDS generic architecture for critical applications. It is also expected that further exploitation of the complementarity between theorem proving and model checking could facilitate a wider industrial acceptance of formal methods.
8.2. Dependability Evaluation
Model-based dependability evaluation is widely recognized as a powerful means to make early and objective design decisions by assessing alternative architectural solutions. Nevertheless,
fault-tolerant distributed systems (such as GUARDS instances) pose several practical modeling problems (“stiffness”, combinatorial explosion, etc.). Moreover, due to the variety of the application domains being considered, the dependability measures of interest encompass reliability, availability and safety.
To cope with these difficulties, we adopted first a divide-and-conquer approach, where the modeling details and levels are tailored to fit the needs of the specific evaluation objectives. This was achieved by focusing either on generic or specific architectural features, or on selected dependability mechanisms. Then, elaborating on previous related work (e.g., [36]), an incremental approach proposing modular constructs has been devised. Finally, a third viewpoint was considered that aims to provide a global framework for configuring instances to meet specific application dependability requirements.
Table 2 identifies the various dependability evaluation activities carried out according to these three modeling viewpoints.
**Table 2 — Dependability evaluation viewpoints and studies**
<table>
<thead>
<tr>
<th>Modeling Level</th>
<th>Focused</th>
<th>Abstract</th>
<th>Detailed</th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>Targeted Mechanisms, Strategies, Instances</strong></td>
<td>- $\alpha$-count mechanism<br>- Phased missions<br>- Intra-channel error detection mechanism</td>
<td>- Railway prototype instance<br>- Nuclear submarine prototype instance<br>- Space prototype instance</td>
<td>- Overall design and interactions (Nuclear submarine prototype instance)</td>
</tr>
<tr>
<td><strong>Formalism</strong></td>
<td>Stochastic activity networks and generalized stochastic Petri nets</td>
<td>Generalized stochastic Petri nets</td>
<td>Stochastic Petri nets</td>
</tr>
<tr>
<td><strong>Supporting Tools</strong></td>
<td>UltraSAN, SURF-2</td>
<td>SURF-2</td>
<td>MOCA-RP</td>
</tr>
<tr>
<td><strong>Resolution Methods</strong></td>
<td>Analytical and Monte-Carlo simulation</td>
<td>Analytical and method of stages</td>
<td>Monte-Carlo simulation</td>
</tr>
</tbody>
</table>
The focused models addressed several issues concerning the analysis of generic mechanisms (e.g., $\alpha$-count [17]) and of specific features for selected instances (phased missions, for the space prototype instance [20], intra-channel error detection for the railway prototype instance).
The second viewpoint aims to establish a baseline set of models for the three instances of the architecture described in Section 9 (see also [54]). A general notation is introduced that allows for a consistent interpretation of the model parameters (layers, correlated faults, etc.) for each prototype instance. This work provides the foundation of a generic modeling approach to guide the choice of a particular instantiation of the architecture, according to the dependability requirements of the end-user application. A large number of parameters (proportion of transient vs. permanent faults, correlated faults in the hardware and software layers, coverage factors, error processing rates, etc.) have been included in the models, allowing intensive sensitivity analyses to be carried out. As an example of the results obtained, Figure 6 compares reliability and safety for the three instances.
considered$^5$, for one set of values of the parameters included in the models. The ranking of the reliability curves simply reflects the redundancy at the channel level (C = 4, 3, and 2 for the space, railway and nuclear instances, respectively). However, in the case of safety, the ranking of the nuclear and railway instances is reversed. This is mainly due to the fact that, in the nuclear instance, correlated design faults in either lane of the executive layer can be detected by the inter-lane comparison within each channel.
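The effect of channel-level redundancy on reliability can be illustrated with a deliberately simplified calculation (illustrative Python; it assumes independent channel failures, a constant failure rate and perfect error detection and coverage, whereas the GUARDS evaluations themselves use stochastic Petri net models with many more parameters).

```python
import math

def channel_reliability(failure_rate: float, t: float) -> float:
    """Reliability of a single channel with constant failure rate (exponential law)."""
    return math.exp(-failure_rate * t)

def system_reliability(channels: int, failure_rate: float, t: float) -> float:
    """Probability that at least one channel is still operational at time t,
    assuming independent channel failures and perfect coverage."""
    r = channel_reliability(failure_rate, t)
    return 1.0 - (1.0 - r) ** channels

# More channels give higher reliability, which is the ranking of the curves
# discussed above for C = 2, 3 and 4.
for c in (2, 3, 4):
    print(c, round(system_reliability(c, failure_rate=1e-4, t=10_000.0), 4))
```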
Figure 6 — Comparison of the dependability of selected instances
Detailed models are needed to allow for a more comprehensive analysis of the behavior of the instances (dependencies, error propagation, etc.). Specific work has addressed hierarchical modeling with the aim of mastering the complexity of such detailed models [35]. This work is directed mainly at: a) enforcing the thoroughness of the analysis, b) helping the analyst (i.e., a design engineer who is not necessarily a modeling expert). It is currently being applied and refined on the nuclear submarine prototype instance.

Although they were supported by different tools, namely UltraSAN [59], MOCA-RP [25] and SURF-2 [11], the modeling efforts all rely on the stochastic Petri net formalism. This should facilitate re-use of the results of the various studies (both models and modeling methodology).

$^5$ As safety is only a minor concern for the application targeted by the space instance, only the reliability curve is shown for that instance.
8.3. Fault Injection
The main objectives for the planned fault injection activities are twofold: a) to complement the formal verification of GUARDS mechanisms (i.e., removal of residual deficiencies in the mechanisms), and b) to support the development of GUARDS instances by assessing its overall behavior in the presence of faults, in particular by estimating coverage and latency figures for the built-in error detection mechanisms [4].
Indeed, as an experimental approach, fault injection provides a pragmatic means to complement formal verification by overcoming some of the behavioral and structural abstractions made, especially regarding the failure mode assumptions. Fault injection is to be carried out on complete prototypes so the mechanisms are tested globally when they have been integrated into an instance. In particular, the interactions between the hardware and software features are taken into account.
Although available tools could have been used — albeit with some extensions — a specific fault injection toolset (FITS) is being developed. Such a toolset is a major feature of the validation environment made available to support the end-users in the development of specific instances of the generic architecture.
Both for cost-effectiveness and flexibility, the fault injection environment is based on the software-implemented fault injection (SWIFI) technique [32]. This also allows tests to be conducted more efficiently, since: a) a limited number of errors can simulate the consequences of a large number of faults, b) it is less likely that the injected error fails to exercise the dependability mechanisms.
Two main levels of injection are being considered, depending on whether the targeted mechanisms are implemented by the ICN-manager board or by the intra-channel processors. In practice, the implementations differ significantly: whereas fault injection on the intra-channel processors can be assisted by the resident COTS operating systems and debug facilities [24, 38], the ICN-manager only has a very simple cyclic executive.
We concentrate here on the verification objective, which is the main focus of the current implementation of FITS; further features are needed to address the evaluation objective (e.g., see [5]). Some examples of the experiments aimed at testing various mechanisms are given in Table 3.
Table 3 — Fault injection-based testing of the GUARDS dependability mechanisms
<table>
<thead>
<tr>
<th></th>
<th>Fault/Error Type</th>
<th>Trigger Event</th>
<th>Observation</th>
</tr>
</thead>
<tbody>
<tr>
<td>ICN Mechanisms:<br>- Clock Synchronization<br>- Interactive Consistency<br>- α-count and Diagnosis</td>
<td>- Omit/Delay Synch. Mess.<br>- Alter ICN Table<br>- Repeat the Same Error</td>
<td>Specific Frame/Cycle/Slot</td>
<td>ICN Status Vectors</td>
</tr>
<tr>
<td>Host Mechanisms:<br>- Multi-level Integrity<br>- Control Flow Monitoring<br>- HRT Scheduling</td>
<td>- Issue a Forbidden Call<br>- Provoke Illegal Branch<br>- Modify Task Priorities</td>
<td>Condition (Flag/Counter) on the Host</td>
<td>- Integrity Kernel Activity on the Host<br>- Control Flow Monitor<br>- ICN Status Vectors</td>
</tr>
</tbody>
</table>
Besides injecting specific fault/error types, FITS allows injection to be synchronized with the target system by monitoring trigger events. Of course, the observations depend on the targeted mechanisms. While it is primarily intended to inject on a single channel, observations are carried out on all channels. Initial experiments will focus on the ICN mechanisms.
9. Prototypes
Several practical instances of the generic architecture have been studied, and a prototype for each of the three end-user domains is under development. The basic building blocks are practically identical in each instance. However, the configurations of the instances are very different and offer quite different fault-tolerance strategies. Moreover, although the operating systems chosen by each end-user are POSIX-compliant, they are not identical, neither are the end-users' preferred system development environments. Consequently, although there is a single specification of the generic software components of the fault-tolerant and integrity management layer, they have different practical instantiations in each instance.
9.1. Railway Instances
One instance studied for the railway domain is a fairly classic triple modular redundant (TMR) architecture with one processor per channel (Figure 7). If a channel is diagnosed to be permanently faulty, the system degrades to a two-out-of-two mode. If a fault should occur while in this mode, the instance is switched to a safe state if the errors caused by the fault are detected (either locally within a channel or by two-out-of-two comparison).
This instance would employ Motorola 68040 or 68360 processors, each running a POSIX-compliant VxWorks operating system. Compared to currently deployed systems, the innovative aspect of this instance is the co-existence of two levels of application software integrity corresponding to very different degrees of criticality:
- Highly-critical interlocking logic or safety nucleus, which must be of the highest integrity.
- Non-critical monitoring, diagnostic and supervision functions.
This is a significant departure from current practice in railway applications, where these two levels of criticality would normally be implemented on separate instances. However, there is an appreciable economic advantage to be gained when it is possible to share the same hardware between both levels (e.g., for small railway stations).
A second railway instance has also been considered for an embedded train control system and is currently being prototyped. This is a straightforward duplex fail-safe configuration.
9.2. Nuclear Submarine Instance
The targeted nuclear submarine application is a secondary protection system. The instance considered for this application is a dual-channel architecture with two Pentium processors in each channel (Figure 8). To prevent common-mode failures of both channels due to physical damage, the channels are geographically separated by a distance of several meters. Like the railway triplex system, this instance hosts two levels of integrity.
An innovative aspect of this instance is the use of two processors in each channel, with two different POSIX-compliant operating systems: QNX and VxWorks. Apart from the operating systems, both processors in each channel run identical application software. The copies of application components executing on each lane form self-checking pairs to provide detection of errors due to faults activated independently on each lane. In particular, this includes physical faults (of the processors) and design faults of the processors and their operating systems. It is assumed that design faults of the operating systems are activated independently, based on the fact that their designs are diversified. Although the processors are identical, we also assume that faults in their design will be independently activated, based on their diversification of utilization (due to loose coupling and diversification of operating systems).
As long as both channels are operational, they operate in a two-out-of-two mode. Results of computations that are declared as error-free by the intra-channel mechanisms are compared and, in case of disagreement, the instance is put into a safe state. However, if errors are detected locally, by intra-channel mechanisms, the channel declares itself to be faulty and the instance switches to single channel operation. Note that this strategy is different to that of the two-channel configurations of the railway instances (duplex instance, or triplex instance degraded to duplex), which switch to a safe state whether the error is detected locally or by comparison.
9.3. Space Instance
The instance of the architecture for space applications is the most complex of those considered. It is a full four-channel instance of the architecture capable of tolerating arbitrary faults at the inter-channel level (Figure 9). Degradation to three-, two- and one-channel operation is possible. This instance also features two levels of integrity.
Figure 8 — Nuclear submarine duplex instance \((C=2, M=2, I=2)\)
Like the instance intended for the nuclear submarine application, this instance also possesses two lanes, but for a different reason. For the nuclear application, the aim was to allow diversified but equivalent operating systems to be used so that errors due to design faults could be detected. Here, the objective is to have one of the lanes (the secondary lane) act as a back-up for the other lane (the primary lane). Each lane supports a different operating system and different application software:
- The primary lane runs a full-functionality version of VxWorks and a nominal application that provides full control of the spacecraft and its payload. The application includes built-in self-monitoring based on executable assertions and timing checks.
- The secondary lane runs a much simpler, restricted version of VxWorks and a safety-monitoring and simple back-up application. The purpose of the latter is to provide control of the spacecraft in a very limited “survival” mode (e.g., sun-pointing and telemetry/telecontrol functions).
The idea is that neither the full VxWorks nor the nominal application supported by the primary lane can be trusted to be free of design faults. However, the restricted version of VxWorks and the application software supported by the back-up lane are assumed to be free of design faults and thus trustable. The aim is to allow continued (but severely degraded) operation in the face of a correlated fault across all processors of the primary lane. Errors due to such a correlated fault can be detected in two ways:
- By the self-monitoring functions (essentially control-flow monitoring and executable assertions) included within the nominal application.
- By a safety-monitoring application executed by the secondary lane while the primary lane is operational.
In view of the differing levels of trust of the applications supported by the primary and secondary lanes, they are placed at different levels of integrity. The nominal application (on the primary lane) is not trusted, so it is assigned to the lower integrity level. The back-up application is assumed to be free of design faults and is placed at the higher integrity level. This separation of the integrity levels on different lanes provides improved segregation (firewalling) between the two levels of integrity.
10. Conclusions and Future Work
The GUARDS project is an ambitious one. We have defined a generic fault-tolerant architecture based on COTS components and a small set of purpose-designed hardware and software building blocks. This architecture can be configured along three different dimensions (channels, lanes, integrity levels) to meet the dependability requirements of a wide variety of end-user applications.
The design of the architecture has shamelessly borrowed ideas from previous work (in particular, SIFT [47], MAFT [37], FTPP [30] and Delta-4 [52]). Like SIFT and Delta-4, the focus has been on software-implemented fault-tolerance, with a minimum of special-purpose hardware. Like SIFT and MAFT, the architecture uses a fully-connected broadcast bus network for inter-channel communication. Furthermore, the architecture uses the ZA algorithm for interactive consistency [28], which resulted from work done in the MAFT project. Like FTPP, the architecture allows parallel processing within each channel (the M dimension). As in Delta-4, the focus has been on the use of COTS operating systems. The architecture also includes several completely innovative aspects: support for multiple levels of integrity, a novel error-filtering technique ($\alpha$-count) and support for a wide range of scheduling models, including pre-emptive scheduling. At the time of writing, the first prototypes are nearing completion and it is hoped that performance measurements and fault injection results will soon be available.
A particularly constraining design requirement was that only COTS operating systems were to be used. This requirement appears to leave just two options to the fault-tolerant system designer. The first option is to use a hardware-intensive approach so that the hardware, although fault-tolerant, presents a standard interface to an unmodified COTS operating system, e.g., as in [1]. This approach, which precludes the use of COTS hardware boards, might nevertheless be necessary in some performance-critical applications. The second option, which is the one followed in GUARDS, is to use high-granularity replication managed by software above the COTS operating systems of interconnected COTS processor boards. One consequence of this choice is that the operating systems themselves cannot be protected from errors. This means that even a transient fault might require a processor to be completely re-initialized. Furthermore, the fault-tolerance management software cannot access data structures that are internal to the operating systems. This leads to a non-trivial channel reintegration procedure that relies on programmer-defined context objects. One interesting direction for future research on this aspect would be to explore how
compile-time reflection could be used to render context definition transparent to the application programmer [58].
Acknowledgments
GUARDS is partially financed by the European Commission as ESPRIT project n° 20716. The consortium consists of three end-user companies: Technicatome (France), Ansaldo Segnalamento Ferroviario (Italy) and Matra Marconi Space (France); two technology-provider companies: Intecs Sistemi (Italy), Siemens AG Österreich PSA (Austria); and three academic partners: LAAS-CNRS (France), Pisa Dependable Computing Centre (Italy) and the University of York (United Kingdom). The University of Ulm (Germany) also participated in the first phase of the project as a subcontractor.
References
Distributed Real-Time Systems”, in *Dependable Computing for Critical Applications 6*, (M. Dal Cin, C. Meadows and W. H. Sanders, Eds.), pp.103-119, IEEE Computer Society
[53] D. Powell, *Preliminary Definition of the GUARDS Architecture*, LAAS-CNRS, Toulouse, France, Research Report, N°96277, January 1997 (ESPRIT Project 20716 GUARDS Report N° D1A1.A0.5000.D).
CNRS, Toulouse, France, Research Report, N°98136, May 1998 (ESPRIT Project 20716
[56] C. Rabéjac, *Inter-Channel Fault Treatment Mechanism*, Matra Marconi Space, France,
Checkpointing using Compile-Time Reflection”, in *Workshop on Embedded Fault-Tolerant
Conf. on Fault-Tolerant Computing (FTCS-23)*, (Toulouse, France), pp.674-679, IEEE
[60] D. Schwier and F. von Henke, “Mechanical Verification of Clock Synchronization Algorithms”, in *Design for Validation*, ESPRIT Long Term Research Project 20072: DeVa -
Levels of Criticality*, PDCC, Pisa, Italy, ESPRIT Project 20716 GUARDS Report,
|
{"Source-Url": "http://bonda.cnuce.cnr.it/Documentation/Reports/Doc1999/PDF99/TPDS99-dis.pdf", "len_cl100k_base": 15463, "olmocr-version": "0.1.50", "pdf-total-pages": 38, "total-fallback-pages": 0, "total-input-tokens": 91413, "total-output-tokens": 21816, "length": "2e13", "weborganizer": {"__label__adult": 0.0004091262817382813, "__label__art_design": 0.0012388229370117188, "__label__crime_law": 0.0003619194030761719, "__label__education_jobs": 0.0012531280517578125, "__label__entertainment": 0.00016629695892333984, "__label__fashion_beauty": 0.0002453327178955078, "__label__finance_business": 0.0005207061767578125, "__label__food_dining": 0.0004239082336425781, "__label__games": 0.001373291015625, "__label__hardware": 0.01007080078125, "__label__health": 0.0004935264587402344, "__label__history": 0.000843048095703125, "__label__home_hobbies": 0.00022423267364501953, "__label__industrial": 0.0015840530395507812, "__label__literature": 0.00040602684020996094, "__label__politics": 0.0004100799560546875, "__label__religion": 0.0007920265197753906, "__label__science_tech": 0.383056640625, "__label__social_life": 8.445978164672852e-05, "__label__software": 0.01459503173828125, "__label__software_dev": 0.57958984375, "__label__sports_fitness": 0.00030732154846191406, "__label__transportation": 0.0012149810791015625, "__label__travel": 0.0002994537353515625}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 90395, 0.03188]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 90395, 0.5085]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 90395, 0.90145]], "google_gemma-3-12b-it_contains_pii": [[0, 1747, false], [1747, 4695, null], [4695, 7944, null], [7944, 10014, null], [10014, 11200, null], [11200, 14216, null], [14216, 16693, null], [16693, 19401, null], [19401, 21893, null], [21893, 24625, null], [24625, 26998, null], [26998, 30115, null], [30115, 33320, null], [33320, 36567, null], [36567, 39579, null], [39579, 42080, null], [42080, 45186, null], [45186, 47471, null], [47471, 49345, null], [49345, 50066, null], [50066, 52788, null], [52788, 54300, null], [54300, 56901, null], [56901, 59968, null], [59968, 61654, null], [61654, 63996, null], [63996, 66576, null], [66576, 68052, null], [68052, 70079, null], [70079, 71885, null], [71885, 75161, null], [75161, 77421, null], [77421, 79753, null], [79753, 81932, null], [81932, 84162, null], [84162, 86458, null], [86458, 88820, null], [88820, 90395, null]], "google_gemma-3-12b-it_is_public_document": [[0, 1747, true], [1747, 4695, null], [4695, 7944, null], [7944, 10014, null], [10014, 11200, null], [11200, 14216, null], [14216, 16693, null], [16693, 19401, null], [19401, 21893, null], [21893, 24625, null], [24625, 26998, null], [26998, 30115, null], [30115, 33320, null], [33320, 36567, null], [36567, 39579, null], [39579, 42080, null], [42080, 45186, null], [45186, 47471, null], [47471, 49345, null], [49345, 50066, null], [50066, 52788, null], [52788, 54300, null], [54300, 56901, null], [56901, 59968, null], [59968, 61654, null], [61654, 63996, null], [63996, 66576, null], [66576, 68052, null], [68052, 70079, null], [70079, 71885, null], [71885, 75161, null], [75161, 77421, null], [77421, 79753, null], [79753, 81932, null], [81932, 84162, null], [84162, 86458, null], [86458, 88820, null], [88820, 90395, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 90395, null]], 
"google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 90395, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 90395, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 90395, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 90395, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 90395, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 90395, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 90395, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 90395, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 90395, null]], "pdf_page_numbers": [[0, 1747, 1], [1747, 4695, 2], [4695, 7944, 3], [7944, 10014, 4], [10014, 11200, 5], [11200, 14216, 6], [14216, 16693, 7], [16693, 19401, 8], [19401, 21893, 9], [21893, 24625, 10], [24625, 26998, 11], [26998, 30115, 12], [30115, 33320, 13], [33320, 36567, 14], [36567, 39579, 15], [39579, 42080, 16], [42080, 45186, 17], [45186, 47471, 18], [47471, 49345, 19], [49345, 50066, 20], [50066, 52788, 21], [52788, 54300, 22], [54300, 56901, 23], [56901, 59968, 24], [59968, 61654, 25], [61654, 63996, 26], [63996, 66576, 27], [66576, 68052, 28], [68052, 70079, 29], [70079, 71885, 30], [71885, 75161, 31], [75161, 77421, 32], [77421, 79753, 33], [79753, 81932, 34], [81932, 84162, 35], [84162, 86458, 36], [86458, 88820, 37], [88820, 90395, 38]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 90395, 0.06199]]}
|
olmocr_science_pdfs
|
2024-11-28
|
2024-11-28
|
d6bf8bf49a148cc9415674fd2d2267fff77a91e4
|
Classical realizability in the CPS target language
Frey, Jonas
Published in:
Electronic notes in theoretical computer science
DOI:
10.1016/j.entcs.2016.09.034
Publication date:
2016
Document version
Publisher’s PDF, also known as Version of record
Document license:
CC BY-NC-ND
Citation for published version (APA):
Classical Realizability in the CPS Target Language
Jonas Frey
Department of Computer Science
University of Copenhagen, Denmark
jofr@di.ku.dk
Abstract
Motivated by considerations about Krivine’s classical realizability, we introduce a term calculus for an intuitionistic logic with record types, which we call the CPS target language. We give a reformulation of the constructions of classical realizability in this language, using the categorical techniques of realizability triposes and toposes.
We argue that the presentation of classical realizability in the CPS target language simplifies calculations in realizability toposes, in particular it admits a nice presentation of conjunction as intersection type which is inspired by Girard’s ludics.
Keywords: Classical realizability, ludics, topos, tripos, CPS translation.
1 Introduction
The relationship between continuation passing style (CPS) translations of the λ-calculus, negative translations of classical into intuitionistic logic, control operators in abstract machines, and evaluation order (call-by-value vs. call-by-name) was uncovered during the 70’s, 80’s, and early 90’s of the past century. The first step was Plotkin [22] recognizing that CPS translations can be used to simulate different evaluation orders within one another. In the 80’s, Felleisen and his collaborators [5] made the connection between control operators in abstract machines and CPS translations, observing that the behavior of a control operator like call/cc in the source language of a CPS translation can be implemented by a purely functional expression in the target language. Griffin [13] observed the analogy of CPS translations and negative translations via the proofs-programs-correspondence, and through this analysis he discovered that the natural type for call/cc is Peirce’s law.
1 This work is supported by the Danish Council for Independent Research Sapere Aude grant “Complexity via Logic and Algebra” (COLA).
http://dx.doi.org/10.1016/j.entcs.2016.09.034
1571-0661/© 2016 Published by Elsevier B.V.
This is an open access article under the CC BY-NC-ND license (http://creativecommons.org/licenses/by-nc-nd/4.0/).
i.e. the propositional schema \(((A \Rightarrow B) \Rightarrow A) \Rightarrow A\). Since Peirce’s law when added to constructive logic yields full classical logic, his observation was celebrated as the unexpected discovery of an \textit{algorithmic meaning of classical logic}.
Negative translations do not require full intuitionistic logic as target logic, and – inspired by Girard’s [11] – Lafont, Reus, and Streicher identified the \((\neg, \land)\)-fragment of intuitionistic logic as sufficient [18,19]. Although in this representation negation is taken as primitive, it is often useful to think of negation as given by the intuitionistic encoding \(\neg A \equiv A \Rightarrow \bot\), and when constructing models in cartesian closed categories or \textit{response categories} [24,23] \(\mathbb{C}\), one has to interpret \(\bot\) by an object \(R \in \mathbb{C}\) other than the initial object to avoid degeneracy. This \(R\) is called the \textit{response type}, and is comparable to the parameter \(A\) in Friedman’s \(A\)-translation [9].
Krivine’s \textit{classical realizability} [17] is a realizability interpretation of classical logic which builds on the algorithmic understanding of classical logic arising from Griffin’s insight. It is formulated using an extension of the \(\lambda\)-calculus with \texttt{call/cc}, with an operational semantics provided by the \textit{Krivine abstract machine} (KAM) [16]. To interpret logic, the interpretation utilizes a parameter called the \textit{pole}, which plays a role comparable to the response type \(R\), and to Friedman’s \(A\), as has been pointed out by Miquel [20].
A motivation of the present work is to make more explicit in which sense the pole plays the role of the response type, by giving a formulation of classical realizability in the \textit{target language} instead of the source language, in which Krivine’s work takes place. To this end, we introduce a term language for a minimal intuitionistic logic based on negation and disjunction (not \textit{conjunction} as Lafont, Reus and Streicher proposed). A design goal is to get a \textit{minimalistic system with a simple operational semantics}, and this is achieved by combining negation and disjunction into a ‘synthetic’ finitary multi-disjunction which should be understood as something like \(\neg(A_1 \lor \cdots \lor A_n)\), but we write as \(\langle \ell_1(A_1), \ldots, \ell_n(A_n) \rangle\), where \(\ell_1, \ldots, \ell_n\) are elements of a countable set \(\mathcal{L}\) of \textit{labels}, comparable to \textit{biases} in Girard’s ludics [12]. The CPS target language is a term language of a natural deduction system based on this type constructor scheme.
Instead of presenting the system as a minimal intuitionistic logic based on negation and disjunction, we could also have chosen a presentation as a \textit{dual-intuitionistic} (i.e. using sequents with many formulas on the right and at most one on the left) [27] system based on negation and \textit{conjunction}, which would be closer to Carraro, Salibra, and Ehrhard’s \textit{stack calculus} [1], a system which was introduced for similar reasons (as an analysis of Krivine realizability), but is based on \textit{implication} rather than negation. I have chosen the intuitionistic – rather than dual-intuitionistic – presentation for the simple reason that it is easier to handle and does not require as much ‘backward thinking’, but it is good to keep the alternative point of view in mind when comparing with Krivine realizability. In particular, the terms of the CPS target language are \textit{records}, i.e. a kind of tuples, and should be viewed in analogy to \textit{stacks} on the Krivine machine, which fits with the fact that we use sets of \textit{terms} as truth values where Krivine uses sets of \textit{stacks}.
However, we reverse the order on truth values relative to Krivine’s account, and
take the empty set as falsity (rather than the set of all stacks as Krivine does), since we use a call-by-value translation of classical logic into the target language instead of the call-by-name translation that is implicit in Krivine’s approach. This difference is immaterial from a model-theoretic point of view since it only reverses the order on predicates, which are symmetric as Boolean algebras, but it changes the implementation of classical connectives: where in Krivine realizability, universal quantification is the primitive operation that is given by unions of truth values (and the encoding of $\exists$ is indirect and involves dualization), in our presentation existential quantification is the primitive operation. Moreover, in Section 4 we describe how conjunction can be represented as an intersection type under certain (mild) conditions, and together we get a simple representation of the connectives of regular logic (i.e. the $(\exists, \land, \top)$-fragment of first order logic) not involving the pole at all. This is desirable since regular logic is all that is required for the tripos-to-topos construction [14], and a simpler representation of its connectives greatly facilitates calculations in classical realizability toposes.
1.1 Related work
The CPS target language is similar in spirit to Thielecke’s CPS calculus [26], which can also be motivated as a term calculus for a type system with a kind of multi-negation. The main difference is that in Thielecke’s system the basic type constructor is a negated $n$-ary conjunction, and not an $n$-ary disjunction as in the CPS target language.
Although different in objective, Curien et al.’s work on term calculi for classical logic [2,3] was inspirational for the present article, and so were Mellies and Tabareau’s tensor logic [21] and Zeilberger’s analysis of polarized logic [28].
Terui’s computational ludics [25] is a term calculus for ludics designs with a notion of head reduction analogous to the CPS target language. Specifically, the CPS target language can be understood as a non-linear version of the purely additive fragment of the syntax of computational ludics.
Finally – and rather unsurprisingly – there is a strong analogy to Hyland and Ong’s innocent strategies [15]. Specifically, the $\eta$-expanded closed normal forms of a type $A$ without variables are precisely the innocent strategies on $A$ viewed as tree.
2 The CPS target language
The syntax of the CPS target language, given in Table 1, distinguishes two syntactic classes called terms and programs.
A term is either a variable or a record, i.e. a family $\langle \ell_1(x_1, p_1), \ldots, \ell_n(x_n, p_n) \rangle$ of programs $p_i$ – the methods of the record, each abstracted by a variable $x_i$ – indexed by a finite subset $\{\ell_1, \ldots, \ell_n\} \subseteq L$ of a countable set of labels, which we take to be the set $L = \{a, \ldots, z\}^*$ of lower case strings (in practice we will only use strings of length 1). The use of different fonts is important: curly $k, \ell$ are placeholders for generic labels, whereas sans-serif k,l are specific labels. The order in which the methods of a record are listed is not important – we view them abstractly as
functions from finite sets $F \subseteq_{\mathrm{fin}} \mathcal{L}$ of labels to programs with a distinguished free variable. In accordance with this viewpoint, we use ‘family notation’ $\langle \ell(x.p) \mid \ell \in F \rangle$ for records when convenient (in particular in Section 4). We refer to the set of labels indexing the methods of a record $t$ as the domain of the record and denote it $\text{dom}(t)$ – thus $\text{dom}(\langle \ell_1(x_1.p_1), \ldots, \ell_n(x_n.p_n) \rangle) = \{\ell_1, \ldots, \ell_n\}$ and $\text{dom}(\langle \ell(x.p) \mid \ell \in F \rangle) = F$.
A program is an expression of the form $t_{\ell}u$, with the intended meaning that the program (or method) labeled $\ell$ in $t$ is called with $u$ as an argument. This reading suggests the reduction rule $\langle \ell_1(x_1.p_1), \ldots, \ell_n(x_n.p_n) \rangle_{\ell_i} t \succ p_i[t/x_i]$ (provided $1 \leq i \leq n$), which gives the operational semantics of the language. We use the symbol ‘$\succ$’ only for top-level reduction of programs (i.e. weak head reduction), and write ‘$\rightarrow_{\beta}$’ for the compatible closure (i.e. the closure under term and program formers) of $\succ$ on terms and programs. A redex is a program $t_{\ell}u$ where $t$ is a record (not a variable). A redex $t_{\ell}u$ with $\ell \not\in \text{dom}(t)$ cannot be reduced and is said to be blocked. A normal form is a term or program that does not contain any redexes, i.e. in every application $t_{\ell}u$ the term $t$ is a variable.
We define the sets $\text{FV}(t)$ and $\text{FV}(p)$ of free variables of a term or program in the usual way, where the distinguished variable $x$ of a method $\ell(x.p)$ in a record $t$ is considered bound in $p$. There are no closed normal programs (since the term in head position cannot be a variable) but there are blocked closed programs like $\langle \rangle_k \langle \rangle$.
Expressions:

- Terms: $s, t, u ::= x \;\mid\; \langle \ell_1(x_1.p_1), \ldots, \ell_n(x_n.p_n) \rangle$
- Programs: $p, q ::= t_{\ell}\,u$

Reduction:

$$\langle \ell_1(x_1.p_1), \ldots, \ell_n(x_n.p_n) \rangle_{\ell_i}\, t \;\succ\; p_i[t/x_i] \qquad \text{if } 1 \leq i \leq n$$

Types:

$$A ::= X \;\mid\; \langle \ell_1(A_1), \ldots, \ell_n(A_n) \rangle$$

Typing rules:

- (Var) $\Gamma \vdash x_i : A_i$, where $\Gamma \equiv x_1 : A_1, \ldots, x_n : A_n$ and $1 \leq i \leq n$
- (Abs) from $\Gamma, y : B_1 \vdash p_1 \;\;\ldots\;\; \Gamma, y : B_m \vdash p_m$ infer $\Gamma \vdash \langle \ell_1(y.p_1), \ldots, \ell_m(y.p_m) \rangle : \langle \ell_1(B_1), \ldots, \ell_m(B_m) \rangle$
- (App) from $\Gamma \vdash t : \langle \ell_1(B_1), \ldots, \ell_m(B_m) \rangle$ and $\Gamma \vdash u : B_i$ infer $\Gamma \vdash t_{\ell_i}\,u$, for $1 \leq i \leq m$

Table 1 — The CPS target language.
There are also diverging closed programs, like $\langle k(x.x_kx) \rangle_k \langle k(x.x_kx) \rangle$.
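To make the operational semantics concrete, here is a small interpreter sketch in Python (an illustration of the reduction rule above, not part of the paper; the class names and representation choices are my own, and no alpha-renaming is performed, which is adequate for closed arguments). Records are finite maps from labels to (bound variable, body) pairs, and `step` performs one top-level reduction of a program $t_{\ell}\,u$.

```python
from dataclasses import dataclass
from typing import Tuple, Union

@dataclass(frozen=True)
class Var:
    name: str

@dataclass(frozen=True)
class Record:
    # label -> (bound variable, method body)
    methods: Tuple[Tuple[str, Tuple[str, "Program"]], ...]

Term = Union[Var, Record]

@dataclass(frozen=True)
class Program:
    head: Term   # term in head position
    label: str   # label selecting the method to call
    arg: Term    # argument passed to the method

def subst_term(t: Term, x: str, u: Term) -> Term:
    if isinstance(t, Var):
        return u if t.name == x else t
    # Do not substitute under a binder that shadows x (no alpha-renaming).
    return Record(tuple((l, (y, p if y == x else subst_prog(p, x, u)))
                        for (l, (y, p)) in t.methods))

def subst_prog(p: Program, x: str, u: Term) -> Program:
    return Program(subst_term(p.head, x, u), p.label, subst_term(p.arg, x, u))

def step(p: Program) -> Program:
    """One top-level reduction: <..., l(x.q), ...> applied at l to t reduces to q[t/x]."""
    if isinstance(p.head, Record):
        for (l, (x, body)) in p.head.methods:
            if l == p.label:
                return subst_prog(body, x, p.arg)
    raise ValueError("blocked or normal program")

# The diverging program <k(x. x k x)> k <k(x. x k x)> reduces to itself.
delta = Record((("k", ("x", Program(Var("x"), "k", Var("x")))),))
omega = Program(delta, "k", delta)
assert step(omega) == omega
```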
To allow the construction of non-trivial classical realizability models, the syntax has to be extended by non-logical constructs like *constants* or *instructions* to perform side effects\(^2\). This is achieved by extending the clause for programs in the grammar. To have a model for idealized shell-programs, for example, one can extend the definition of programs to be
\[
p, q ::= t \ell u | r(p, q) | \text{w0}(p) | \text{w1}(p) | 0 | 1
\]
with the intended meaning that the program \( r(p, q) \) reads a bit from standard input and continues with \( p \) or \( q \) depending on its value, \( \text{w0}(p) \) and \( \text{w1}(p) \) write a 0 or 1, respectively, to standard output before continuing with \( p \), and 0 and 1 represent successful and unsuccessful termination. For example, \( \langle k(x.x_kx) \rangle_k \langle k(x.r(x_kx, 0)) \rangle_k \) is a program that reads bits from standard input until it encounters a 1, whereupon it terminates successfully.
Formally, such an extension of the syntax has to be accompanied by an extension of the operational semantics, which in the case of the above example can either be given as a labeled transition system or as a transition relation on programs with *state*. This is explained in detail in [8] using Krivine’s syntax, where it is also explained how in such a setting specifications on program behavior give rise to poles and thus to realizability triposes and toposes. These ideas all transfer to the reformulation of classical realizability given in this article, but instead of formulating our results in this generality – which would require a lot of repetition – we use as running example only a single non-logical constant \( \text{end} \) which represents termination and is comparable to Girard’s *daimon* \( ✠ \). Thus, from now on we assume that programs are of the form
\[
p, q ::= t \ell u | \text{end}.
\]
We denote the sets of closed terms and programs generated by this grammar (together with the rule for terms in Table 1) by \( T \) and \( P \), and more generally we denote by \( T[x_1, \ldots, x_n] \) and \( P[x_1, \ldots, x_n] \) the sets of terms and programs whose free variables are contained in \( \{x_1, \ldots, x_n\} \). The analogous sets of *pure* terms and programs (i.e. those not containing \( \text{end} \)) are denoted by \( T_0, P_0, T_0[x_1, \ldots, x_n] \), and \( P_0[x_1, \ldots, x_n] \).
We consider a Curry-style type system for the CPS target language, whose types are generated from type variables and for each finite set \( \{\ell_1, \ldots, \ell_n\} \) an \( n \)-ary constructor which forms the record type \( \langle \ell_1(A_1), \ldots, \ell_n(A_n) \rangle \) out of types \( A_1, \ldots, A_n \). There are two kinds of typing judgments corresponding to the two syntactic classes:
- **terms** \( t \in T_0[x_1, \ldots, x_n] \) are typed by sequents \( (x_1 : A_1, \ldots, x_n : A_n \vdash t : B) \), and
- **programs** \( p \in P_0[x_1, \ldots, x_n] \) are typed by sequents \( (x_1 : A_1, \ldots, x_n : A_n \vdash p) \).
---
\(^2\) Essentially because of [8, Lemma 26].
\(^3\) A referee points out that a concept comparable to the daimon already appears in Coquand’s *evidence semantics* [4].
Thus, programs are not associated to types, but we think of them as having response type (or type ⊥).
There are three rules (Var), (Abs) and (App), typing variables, records, and applications, respectively. Furthermore, the typing relation is closed under a number of admissible rules.
**Lemma 2.1** The derivable typing judgments are closed under the rules in Table 2.
**Proof.** Each of the four pairs of rules can be shown to be admissible by simultaneous induction on the structure of \( t \) and \( p \).
A consequence of the admissibility of (Cut) is subject reduction.
**Lemma 2.2 (Subject reduction)** If $\Gamma \vdash \langle \ell_1(x_1.p_1), \ldots, \ell_n(x_n.p_n) \rangle_{\ell_i}\, t$ is derivable for some $1 \leq i \leq n$, then $\Gamma \vdash p_i[t/x_i]$ is derivable.
**Proof.** Inspection of the typing rules shows that $\Gamma \vdash \langle \ell_1(x_1.p_1), \ldots, \ell_n(x_n.p_n) \rangle_{\ell_i}\, t$ can only be derived by a deduction
\[
\frac{\dfrac{\Gamma, x_1 : A_1 \vdash p_1 \quad \cdots \quad \Gamma, x_n : A_n \vdash p_n}{\Gamma \vdash \langle \ell_1(x_1.p_1), \ldots, \ell_n(x_n.p_n) \rangle : \langle \ell_1(A_1), \ldots, \ell_n(A_n) \rangle} \qquad \Gamma \vdash t : A_i}{\Gamma \vdash \langle \ell_1(x_1.p_1), \ldots, \ell_n(x_n.p_n) \rangle_{\ell_i}\, t}
\]
and applying (Cut) to the hypotheses $\Gamma, x_i : A_i \vdash p_i$ and $\Gamma \vdash t : A_i$ yields the claim.
\[\Box\]
3 Realizability
Classical realizability models are always defined relative to a pole, which is a set \( \bot \subseteq \mathbb{P} \) of closed programs satisfying
\[ p \succ q,\; q \in \bot \;\Rightarrow\; p \in \bot \qquad (1) \]
for all \( p, q \in \mathbb{P} \). The deliberations that follow are valid for arbitrary poles satisfying this condition (relative to reasonable extensions of the pure language with non-logical instructions such as in \([8,10]\)), but to have something to hold on to, we fix a
pole $\perp$ by
$$\perp = \{p \mid p \to^* \text{end}\},$$
which is the set of all programs $p$ whose weak reduction sequence ‘terminates’, i.e. leads to the constant $\text{end}$.$^4$
A truth value is a set $S \subseteq T$ of closed terms. We define as semantic counterparts of the type constructors for each set $\{\ell_1, \ldots, \ell_n\}$ of labels an $n$-ary connective on the set $P(T)$ of truth values.
**Definition 3.1** Given truth values $S_1, \ldots, S_n \in P(T)$ and labels $\ell_1, \ldots, \ell_n \in \mathcal{L}$, the truth value $\langle \ell_1(S_1), \ldots, \ell_n(S_n) \rangle$ is defined by
$$\langle \ell_1(S_1), \ldots, \ell_n(S_n) \rangle = \{t \in T \mid \forall i \in \{1, \ldots, n\} \forall s \in S_i . t \ell_i s \in \perp\}.$$
We introduce realization judgments as semantic counterparts of typing judgments.
**Definition 3.2** Given truth values $S_1, \ldots, S_n, T \subseteq T$, and a term $t \in T[x_1, \ldots, x_n]$ or program $p \in P[x_1, \ldots, x_n]$,
the notation $x_1 : S_1, \ldots, x_n : S_n \vdash t : T$ stands for $\forall s_1 \in S_1, \ldots, s_n \in S_n . t[s_1/x_1, \ldots, s_n/x_n] \in T$ (2)
and the notation $x_1 : S_1, \ldots, x_n : S_n \vdash p$ stands for $\forall s_1 \in S_1, \ldots, s_n \in S_n . p[s_1/x_1, \ldots, s_n/x_n] \in \perp$. (3)
We call expressions of the form (2) and (3) realization judgments. Slightly redundantly, we also say ‘the realization judgment $(\Gamma \vdash t : T)$ is valid’ instead of simply asserting the judgment itself.
The following result is an analogue of Krivine’s adequation lemma [17, Theorem 3].
**Lemma 3.3** Valid realization judgments are closed under the rules in Table 3.
**Proof.** The only nontrivial case is (Abs). Assume that $\Gamma, y : T_k \vdash p_k$ for $1 \leq k \leq m$, and that $s_i \in S_i$ for $1 \leq i \leq n$. We have to show that
$$(\langle \ell_1(y.p_1), \ldots, \ell_m(y.p_m) \rangle[\vec{s}/\vec{x}])_{\ell_j}\, t \in \bot$$
for every $1 \leq j \leq m$ and $t \in T_j$. For fixed $j$ and $t$ we have
$$(\langle \ell_1(y.p_1), \ldots, \ell_m(y.p_m) \rangle[\vec{s}/\vec{x}])_{\ell_j}\, t = \langle \ell_1(y.p_1[\vec{s}/\vec{x}]), \ldots, \ell_m(y.p_m[\vec{s}/\vec{x}]) \rangle_{\ell_j}\, t \;\succ\; p_j[\vec{s}/\vec{x},\, t/y]$$
where the reduct is in $\perp$ by assumption, and the claim follows from (1). $\square$
---
\(^4\) The classical realizability model arising from this pole has some interesting properties, as the author learned from Krivine [7].
Table 3
Admissible rules for realization judgments, where $S_1, \ldots, S_n, S, T_1, \ldots, T_m, T \subseteq \mathbb{T}$, $\Gamma \equiv x_1 : S_1, \ldots, x_n : S_n$, and $\sigma$ is a permutation.

$$(\mathrm{Var})\ \frac{}{\Gamma \vdash x_i : S_i} \qquad\qquad (\mathrm{App})\ \frac{\Gamma \vdash t : \langle \ell_1(T_1), \ldots, \ell_m(T_m) \rangle \quad \Gamma \vdash u : T_i}{\Gamma \vdash t_{\ell_i}\, u}$$

$$(\mathrm{Abs})\ \frac{\Gamma, y : T_1 \vdash p_1 \quad \cdots \quad \Gamma, y : T_m \vdash p_m}{\Gamma \vdash \langle \ell_1(y.p_1), \ldots, \ell_m(y.p_m) \rangle : \langle \ell_1(T_1), \ldots, \ell_m(T_m) \rangle} \qquad\qquad (\mathrm{Cut})\ \frac{\Gamma \vdash s : S \quad \Gamma, x : S \vdash p}{\Gamma \vdash p[s/x]}$$

$$(\mathrm{Sym})\ \frac{\Gamma \vdash p}{\sigma(\Gamma) \vdash p} \quad \frac{\Gamma \vdash t : T}{\sigma(\Gamma) \vdash t : T} \qquad\qquad (\mathrm{Weak})\ \frac{\Gamma \vdash p}{\Gamma, x : S \vdash p} \quad \frac{\Gamma \vdash t : T}{\Gamma, x : S \vdash t : T}$$

$$(\mathrm{Contr})\ \frac{\Gamma, x : S, y : S \vdash p}{\Gamma, x : S \vdash p[x/y]} \quad \frac{\Gamma, x : S, y : S \vdash t : T}{\Gamma, x : S \vdash t[x/y] : T}$$
### 3.1 Classical realizability triposes
We now show how to do classical realizability in the CPS target language by instantiating a simple (call-by-value) negative translation. To start we fix the shorthands
$$\top \equiv \langle \rangle$$
$$\neg A \equiv \langle k(A) \rangle$$
$$\neg (A, B) \equiv \langle l(A), r(B) \rangle$$
for nullary, unary, and binary type constructors, and using these we encode classical conjunction as
$$A \land B \equiv \neg (\neg A, \neg B). \qquad (4)$$
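Unfolding the shorthands, the encoded conjunction is the record type
$$A \land B \;=\; \langle l(\langle k(A) \rangle),\ r(\langle k(B) \rangle) \rangle,$$
i.e. a realizer of $A \land B$ is a record that accepts arguments of type $\neg A$ at label $l$ and of type $\neg B$ at label $r$.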
The negative translation maps classical sequents
$$A_1, \ldots, A_n \vdash B_1, \ldots, B_m$$
consisting of formulas built up from propositional variables and the connectives, $\top$, $\neg$ and $\land$, to intuitionistic sequents
$$A_1^*, \ldots, A_n^*, \neg B_1^*, \ldots, \neg B_m^* \vdash$$
where the formulas $A_i^*$ and $B_j^*$ are obtained by expanding the classical connectives according to the above shorthands and encoding.
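As a small illustration, the classical sequent $\vdash \neg A, A$ with $A$ a propositional variable (so $A^* = A$) is sent to the intuitionistic sequent $\neg\neg A, \neg A \vdash$; a realizer for this sequent has the same shape $x_k\, y$ as the program used for condition (iii) in the proof of Theorem 3.4 below.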
We could now define classical realization judgments by mimicking the negative translation on the level of realizability, but we will not spell this out explicitly, and
rather develop the remainder of the section in categorical language, by laying out
the construction of classical realizability triposes analogous to the treatment in [8].
Broadly speaking, realizability triposes [14] capture the model theoretic essence
of realizability interpretations as a collection of order relations on sets of semantic
predicates, which together are required to form an indexed preorder, i.e. a contravariant functor $\mathcal{P} : \text{Set}^{\text{op}} \to \text{Ord}$ from sets to preorders, subject to certain
conditions. The precise definition of strict Boolean tripos (which is the version of
triposes that we use) is given in Definition A.3.
In our setting, semantic predicates on a set $J$ are functions
$$
\varphi, \psi : J \to P(\mathbb{T})
$$
into the set of truth values, and the order on predicates is defined by
$$
\varphi \leq \psi \quad :\iff \quad \exists p \in \mathbb{P}_0[x,y]\ \forall j \in J . \big( x : \varphi(j),\ y : \neg \psi(j) \vdash p \big), \qquad (5)
$$
i.e. $\varphi \leq \psi$ if there exists a pure program $p[x,y]$ which realizes the negative translation of $\varphi(j) \vdash \psi(j)$ uniformly in $j$.
The first step in establishing that semantic predicates form a tripos is to show
that the predicates on a fixed set form a Boolean prealgebra, i.e. a preorder whose
poset reflection is a Boolean algebra (Definition A.1).
**Theorem 3.4** For every set $J$, the set $P(\mathbb{T})^J$ of semantic predicates on $J$
equipped with the order relation (5) is a Boolean prealgebra.
**Proof.** We show first that $\leq$ is actually a preorder. Reflexivity follows from the
fact that $(x : S, y : \neg S \vdash y_k x)$ for arbitrary truth values $S$.
For transitivity, assume that $\varphi \leq \psi$ and $\psi \leq \theta$, i.e. that there exist $p \in \mathbb{P}[v,w]$ and
$q \in \mathbb{P}[x,y]$ such that $(v : \varphi(j), w : \neg \psi(j) \vdash p)$ and $(x : \psi(j), y : \neg \theta(j) \vdash q)$.
The claim $\varphi \leq \theta$ follows from Lemma 3.3 via the derivation
$$
\frac{\dfrac{x : \psi(j),\ y : \neg \theta(j) \vdash q}{y : \neg \theta(j) \vdash \langle k(x.q) \rangle : \neg \psi(j)} \qquad v : \varphi(j),\ w : \neg \psi(j) \vdash p}{v : \varphi(j),\ y : \neg \theta(j) \vdash p[\langle k(x.q) \rangle/w]}
$$
Next we show that the order has finite meets. The predicate with constant value
$\top$ is a greatest element, since $(x : S,\ y : \neg \top \vdash y_k \langle \rangle)$ for arbitrary truth values $S$.
We claim that a binary meet of $\varphi$ and $\psi$ is given by pointwise application of (the
semantic version of) the type constructor defined in (4), i.e. $(\varphi \land \psi)(j) = \varphi(j) \land \psi(j)$. The
predicate $\varphi \land \psi$ defined in this way is smaller than $\varphi$ since $(x : \neg (\neg \varphi(j), \neg \psi(j)),\ y : \neg \varphi(j) \vdash x_l\, y)$, and similarly for $\psi$. To see that it is a greatest lower bound, assume that
$\theta \leq \varphi$ and $\theta \leq \psi$, i.e. there exist programs $p \in \mathbb{P}[w,x]$ and $q \in \mathbb{P}[w,y]$ such that
$(w : \theta(j), x : \neg \varphi(j) \vdash p)$ and $(w : \theta(j), y : \neg \psi(j) \vdash q)$. Then we have $\theta \leq \varphi \land \psi$ by
the following derivation.
\[
\begin{align*}
w : \theta(j),\ x : \neg \varphi(j) &\vdash p \\
w : \theta(j),\ y : \neg \psi(j) &\vdash q \\
w : \theta(j) &\vdash \langle l(x.p), r(y.q) \rangle : \neg(\neg \varphi(j), \neg \psi(j)) \\
w : \theta(j),\ z : \neg\neg(\neg \varphi(j), \neg \psi(j)) &\vdash z_k\, \langle l(x.p), r(y.q) \rangle
\end{align*}
\]
To finish the proof that \((P(\mathbb{T})^J, \leq)\) is a Boolean prealgebra, it now suffices to verify the conditions (i)–(iii) of Lemma A.2, with the negation operation given by \((\neg \varphi)(j) = \neg \varphi(j)\).
For (i) assume that \(\varphi \land \psi \leq \neg\top\), i.e. that there exists \(p[x,y] \in \mathbb{P}_0[x,y]\) with \((x : \neg(\neg \varphi(j), \neg \psi(j)),\ y : \neg \neg \top \vdash p)\). Then we have
\[
w : \varphi(j),\ z : \neg \neg \psi(j) \vdash z_k\langle k(y.\, p[\langle l(v.v_k w),\, r(w.w_k y)\rangle/x,\ \langle k(v.v_k\langle\rangle)\rangle/y])\rangle
\]
(in the following we do not spell out the derivation of realization judgments any more, and leave the type checking to the reader) and hence \(\varphi \leq \neg \psi\).
For (ii) we have
\[
x : \neg(\neg \varphi(j), \neg\neg \varphi(j)),\ y : \neg \neg \top \vdash x_r\langle k(z.\, x_l\, z)\rangle
\]
and for (iii) we have
\[
x : \neg \neg \varphi(j),\ y : \neg \varphi(j) \vdash x_k\, y.
\]
Every function \(f : J \to I\) induces a function \(f^* : P(\mathbb{T})^I \to P(\mathbb{T})^J\) on predicates by precomposition, and it is easy to see that \(f^*\) is monotone and preserves all logical structure (since all propositional operations on predicates are defined pointwise in a uniform way). Since the operation \((f \mapsto f^*)\) clearly preserves composition and identities, it is the morphism part of a contravariant functor
\[
\mathcal{K}_\perp : \text{Set}^{op} \to \text{BA}.
\]
from sets to Boolean prealgebras with object part \(J \mapsto (P(\mathbb{T})^J, \leq)\). We can now prove the main theorem.
**Theorem 3.5** \(\mathcal{K}_\perp\) is a strict Boolean tripos (Definition A.3).
**Proof.** It remains to show that the reindexing maps \(f^*\) admit left adjoints subject to the Beck-Chevalley condition, and that there is a generic predicate.
Let \(f : J \to I\). We claim that a left adjoint \(\exists_f\) to \(f^*\) can be defined by fiberwise union, i.e.
\[
\exists_f(\varphi)(i) = \bigcup_{f_j = i} \varphi(j) \quad \text{for} \quad \varphi \in P(\mathbb{T})^J,
\]
and to prove this we have to show that for any \(\psi \in P(\mathbb{T})^I\) we have \(\varphi \leq f^* \psi\) if and only if \(\exists_f \varphi \leq \psi\). Unfolding definitions yields
\[
\exists p \in \mathbb{P}_0[x,y] \quad \forall j \in J \quad \forall s \in \varphi(j) \quad \forall t \in \neg \psi(f_j). p[s,t] \in \bot
\]
for the first inequality, and
\[ \exists p \in \mathbb{P}_0[x, y] \ \forall i \in I \ \forall s \in \bigcup_{f_j = i} \varphi(j) \ \forall t \in \neg \psi(i) \ . \ p[s, t] \in \bot \]
for the second one. The two statements are equivalent since in both cases the arguments of \( \varphi \) and \( \psi \) range over all pairs \((i, j)\) with \( f_j = i \).
It is easy to see (and well known e.g. from the effective tripos) that fiberwise unions strictly satisfy the Beck-Chevalley condition.
Finally, a generic predicate is given by the identity function on \( P(\mathbb{T}) \). \( \square \)
To conclude the section, we reprove [8, Lemma 26] in the new syntax.
**Lemma 3.6** The tripos \( \mathcal{K}_\bot \) induced by a pole \( \bot \) is non-degenerate (not equivalent to the terminal tripos) if and only if \( \mathbb{P}_0 \cap \bot = \emptyset \).
**Proof.** A tripos is degenerate if and only if all truth values are equivalent, which is easily seen to be equivalent to the existence of a pure program \( p[x] \in \mathbb{P}_0[x] \) such that the realization judgment \( (x : \mathbb{T} \Vdash p[x]) \) holds. If this is the case, then \( p[\langle \rangle] \in \mathbb{P}_0 \cap \bot \). Conversely, if there exists \( q \in \mathbb{P}_0 \cap \bot \) then we have \( (x : \mathbb{T} \Vdash q) \). \( \square \)
## 4 Conjunction as intersection type
In the previous section we have seen that relative to a fixed pole \( \bot \) the semantic predicates give rise to a tripos \( \mathcal{K}_\bot \), and this tripos in turn gives rise to a *topos* \( \text{Set}[\bot] \) whose construction relies only on the *regular* fragment of first order logic, i.e. the fragment of logic consisting of existential quantification and conjunction. To facilitate computation in classical realizability toposes, it is good to have easy representations of the basic connectives, and in the proof of Theorem 3.5 we saw that existential quantification in the tripos is given by set theoretic union, which is easy enough. However, for conjunction we only have the representation (4), and the double negation involved entails a high logical complexity and obscures things considerably: it is difficult to know what the elements of \( S \wedge T \) look like, even if we know the elements of \( S \) and \( T \) very well.
In this section, we show that under certain conditions on the pole we can identify a class of ‘nice’ representatives of predicates in the tripos which admits an implementation of conjunction as intersection type, while being closed under the other logical operations. The idea to represent conjunction as intersection is inspired by ludics [12].
Given a record
\[ t = \langle \ell(x.p) \mid \ell \in F \rangle \]
and a set \( M \subseteq \mathcal{L} \) of labels, define the *restriction of* \( t \) *to* \( M \) to be the record
\[ t|_M = \langle \ell(x.p) \mid \ell \in F \cap M \rangle. \]
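For example, for the two-field record $t = \langle l(x.p), r(y.q) \rangle$ we have $t|_{\{l\}} = \langle l(x.p) \rangle$, while restricting $t$ to any set of labels disjoint from $\{l, r\}$ yields the empty record $\langle\rangle$.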
The *syntactic order* \( \sqsubseteq \) on terms and programs is the reflexive-transitive and compatible (i.e. closed under term and program constructors) closure of the set of all
pairs \((t|_M, t)\) for records \(t\) and sets \(M\) of labels. Observe that the empty record \(\langle\rangle\) is smaller than any other record in the syntactic order, but not smaller than a variable.
**Definition 4.1** A pole \(\bot\) is called strongly closed, if it satisfies the conditions
\[
p \to_\beta q,\ q \in \bot \ \Rightarrow\ p \in \bot \quad \text{and} \\
p \sqsubseteq q,\ p \in \bot \ \Rightarrow\ q \in \bot,
\]
i.e. it is closed under inverse \(\beta\)-reduction and upward w.r.t. the syntactic order.
A truth value \(S \subseteq \mathbb{T}\) is called strongly closed, if it satisfies the analogous conditions
\[
t \to_\beta u,\ u \in S \ \Rightarrow\ t \in S \quad \text{and} \\
t \sqsubseteq u,\ t \in S \ \Rightarrow\ u \in S.
\]
Although strong closure is a much stronger condition on a pole than mere closure under inverse head reduction, it is satisfied for many ‘reasonable’ poles, in particular for the pole of terminating programs, and more generally for poles constructed from specifications as in [8].
For a fixed strongly closed \(\bot\), there is an easy way to strongly close any given truth value, via a well-known double duality construction. Concretely, for \(S \subseteq \mathbb{T}\) define
\[
S^{\uparrow} = \{ p[x] \in \mathbb{P}[x] \mid \forall s \in S. p[s] \in \bot \},
\]
and dually for \(E \subseteq \mathbb{P}[x]\) define
\[
E^{\downarrow} = \{ s \in \mathbb{T} \mid \forall p[x] \in E.\ p[s] \in \bot \}.
\]
If \(\bot\) is strongly closed, it is obvious that so is \(S^{\uparrow \downarrow}\) for any truth value \(S\).
A truth value \(S\) is said to be supported by a set \(M \subseteq \mathcal{L}\) of labels, if we have \(s|_M \in S\) for every \(s \in S\). More generally, a predicate \(\varphi \in P(\mathbb{T})^J\) is said to be supported by \(M\), if \(\varphi(j)\) is supported by \(M\) for all \(j \in J\).
The main result of the section is the following.
**Theorem 4.2** Let \(\varphi, \psi \in P(T)^J\) be predicates that are both pointwise strongly closed, and supported by disjoint finite sets \(F = \{\ell_1, \ldots, \ell_n\}\) and \(G = \{k_1, \ldots, k_m\}\) of labels, respectively. Then the predicate \(\varphi \cap \psi\), which is defined by \((\varphi \cap \psi)(j) = \varphi(j) \cap \psi(j)\), is a meet of \(\varphi\) and \(\psi\) and is supported by \(F \cup G\).
**Proof.** We claim that the realization judgments
\[
x : \varphi(j) \cap \psi(j) \vdash x : \varphi(j) \quad \text{and} \\
x : \varphi(j) \cap \psi(j) \vdash x : \psi(j)
\]
and
\[
x : \varphi(j), y : \psi(j) \vdash u[x, y] : \varphi(j) \cap \psi(j)
\]
with \(u[x, y] = \langle \ell_1(z.x_{\ell_1}z), \ldots, \ell_n(z.x_{\ell_n}z), k_1(z.y_{k_1}z), \ldots, k_m(z.y_{k_m}z) \rangle\)
hold for all \( j \). The first two are obvious. For the third one assume that \( s \in \varphi(j) \) and \( t \in \psi(j) \). Then for each \( \ell_i \in \text{dom}(s) \) the redex \( s_{\ell_i} z \) in \( u[s, t] \) can be reduced, and the result \( u'[s, t] \) satisfies \( u'[s, t] \sqsupseteq s|_F \). We have \( s|_F \in \varphi(j) \) since \( \varphi(j) \) is supported by \( F \), and hence \( u'[s, t] \in \varphi(j) \) and \( u[s, t] \in \varphi(j) \) by strong closure. An analogous argument shows that \( u[s, t] \) is in \( \psi(j) \), and therefore in \( \varphi(j) \cap \psi(j) \). The claim that \( \varphi \cap \psi \) is a meet of \( \varphi \) and \( \psi \) now follows from the next lemma.
To see that \( \varphi \cap \psi \) is supported by \( F \cup G \), assume that \( t \in \varphi(j) \cap \psi(j) \) for some \( j \in J \). Then \( t|_{F \cup G} \sqsupseteq t|_F \in \varphi(j) \), so by strong closure we have \( t|_{F \cup G} \in \varphi(j) \), and analogously \( t|_{F \cup G} \in \psi(j) \). □
**Lemma 4.3** If \( \varphi, \psi, \theta \in P(\mathbb{T})^J \) are predicates and \( s[z], t[z] \in \mathbb{T}_0[z] \) and \( u[x, y] \in \mathbb{T}_0[x, y] \) are pure terms such that the realization judgments
\[
z : \theta(j) \vdash s[z] : \varphi(j) \qquad z : \theta(j) \vdash t[z] : \psi(j) \qquad x : \varphi(j),\ y : \psi(j) \vdash u[x, y] : \theta(j)
\]
hold for all \( j \in J \), then \( \theta \) is a meet of \( \varphi \) and \( \psi \).
**Proof.** From the first two judgments we can deduce \( (z : \theta(j), v : \neg \varphi(j) \vdash v_k\, s[z]) \) and \( (z : \theta(j), v : \neg \psi(j) \vdash v_k\, t[z]) \), which means that \( \theta \leq \varphi \) and \( \theta \leq \psi \), and thus \( \theta \leq \varphi \wedge \psi \). From the third judgment we can derive
\[
w : \neg(\neg \varphi(j), \neg \psi(j)),\ z : \neg \theta(j) \vdash w_{l}\langle k(x.\, w_{r}\langle k(y.\, z_k\, u[x, y])\rangle)\rangle
\]
which means that \( \varphi \wedge \psi \leq \theta \). □
Thus we have a nice representation of conjunction for pointwise strongly closed predicates which are finitely supported by disjoint sets.
Disjointness can always be achieved by renaming, i.e. ‘relocating’, as long as supports are finite. Moreover, strong closure and finite support are preserved by existential quantification, and by the semantic type constructors (Definition 3.1) provided the pole is strongly closed. A finitely supported and strongly closed generic predicate can also be obtained, by negating the canonical one given by the identity on \( P(\mathbb{T}) \).
**Acknowledgement**
The ideas presented in this article were developed over a long period of time, and I profited from discussions on related issues with many people, including – but not limited to – Pierre Clairambault, Pierre-Louis Curien, Nicolas Guenot, Paul Blain Levy, Paul-André Mellies, Guillaume Munch-Maccagnoni, Jakob Grue Simonsen, Thomas Streicher, Noam Zeilberger, and Stéphane “El Zím” Zimmermann.
Thanks to the referees for their careful rereading and helpful comments.
**References**
## A Boolean (pre)algebras and Boolean triposes
This appendix recalls the definitions of Boolean (pre)algebra and strict Boolean tripos, and states an auxiliary lemma to characterize Boolean prealgebras.
**Definition A.1** A Boolean algebra is a complemented distributive lattice, i.e. a distributive lattice \((B, \leq, \top, \wedge, \bot, \vee)\) such that for every \(a \in B\) there exists a \(b \in B\) with \(a \wedge b = \bot\) and \(a \vee b = \top\).
A Boolean prealgebra is a preorder whose poset-reflection is a Boolean algebra.
The term ‘Boolean prealgebra’ does not seem to be very prevalent in the literature, but it appears e.g. in [6].
**Lemma A.2** A preorder \((B, \leq)\) is a Boolean prealgebra if and only if it has finite meets (denoted by \(\wedge, \top\)) and there exists a function \(\neg : B \to B\) such that
\[
\begin{align*}
(i) \quad a \wedge b &\leq \neg \top \Rightarrow a \leq \neg b \\
(ii) \quad a \wedge \neg a &\leq \neg \top \\
(iii) \quad \neg \neg a &\leq a
\end{align*}
\]
for all \(a, b \in B\).
**Proof.** The following derivation shows that \(\neg(-)\) is antimonotone.
\[
\begin{align*}
a &\leq b \\
\neg b \wedge a &\leq \neg b \wedge b \\
\neg b \wedge b &\leq \neg \top \\
\neg b \wedge a &\leq \neg \top \\
\neg b &\leq \neg a
\end{align*}
\]
The converse implication of \((i)\) is shown as follows.
\[
\begin{align*}
a &\leq \neg b \\
a \wedge b &\leq \neg b \wedge b \\
\neg b \wedge b &\leq \neg \top \\
a \wedge b &\leq \neg \top
\end{align*}
\]
The following shows that \(\neg(-)\) is an involution,
\[
\begin{align*}
a \wedge \neg a &\leq \neg \top \\
a &\leq \neg \neg a
\end{align*}
\]
which implies that \((B, \leq)\) is auto-dual and hence a lattice. The non-trivial direction
of distributivity is shown as follows.
\[
\begin{align*}
\neg(a \land b) \land a \land b & \leq \neg\top \\
\neg(a \land b) \land a & \leq \neg b \\
\neg(a \land b) \land a \land \neg c & \leq \neg b \land \neg c \\
\neg(a \land b) \land a \land \neg c & \leq \neg(\neg b \land \neg c) \\
\neg(\neg b \land \neg c) \land \neg(a \land b) \land a \land \neg c & \leq \neg a \\
\neg(\neg b \land \neg c) \land \neg(a \land b) \land \neg c & \leq \neg(\neg a \land \neg c) \\
\neg(\neg a \land \neg c) \land \neg(a \land b) \land \neg c & \leq \neg(\neg b \land \neg c) \\
\neg(\neg a \land \neg c) \land \neg(b \land \neg c) & \leq \neg(a \land b) \land \neg c \\
(a \lor c) \land (b \lor c) & \leq (a \land b) \lor c
\end{align*}
\]
It remains to check that for \( a \in B \), \( \neg a \) is a complement of \( a \) in the sense of the previous definition. This follows from (ii) and the fact that \( \neg(-) \) is an involution. □
The following definition of strict Boolean tripos is a special case of the concept of tripos as introduced in [14].
**Definition A.3** A strict Boolean tripos is a contravariant functor
\[ \mathcal{P} : \text{Set}^{\text{op}} \rightarrow \text{BA} \]
from the category of sets to the category of Boolean prealgebras and structure preserving maps such that
- for any \( f : J \rightarrow I \), the map \( \mathcal{P}(f) \) has a left\(^5\) adjoint \( \exists_f \) (which is not required to preserve Boolean prealgebra structure), such that for any pullback square
\[
\begin{array}{ccc}
L & \xrightarrow{q} & K \\
\downarrow{p} & & \downarrow{g} \\
J & \xrightarrow{f} & I
\end{array}
\]
we have \( \mathcal{P}(g) \circ \exists_f = \exists_q \circ \mathcal{P}(p) \) (this is the Beck-Chevalley condition), and
- there exists a generic predicate, i.e. a set \( \text{Prop} \) and an element \( \text{tr} \in \mathcal{P}(\text{Prop}) \) such that for every set \( I \) and \( \varphi \in \mathcal{P}(I) \) there exists a unique \( f : I \rightarrow \text{Prop} \) with \( \mathcal{P}(f)(\text{tr}) = \varphi \).
\(^5\) Note that the right adjoint \( \forall_f \) comes for free in the Boolean case; it is given by \( \forall_f \varphi = \neg\exists_f \neg\varphi \).
|
{"Source-Url": "https://static-curis.ku.dk/portal/files/172100189/Frey_2016_Classical_realizability.pdf", "len_cl100k_base": 12405, "olmocr-version": "0.1.53", "pdf-total-pages": 17, "total-fallback-pages": 0, "total-input-tokens": 70866, "total-output-tokens": 14588, "length": "2e13", "weborganizer": {"__label__adult": 0.0005364418029785156, "__label__art_design": 0.0008406639099121094, "__label__crime_law": 0.0006070137023925781, "__label__education_jobs": 0.0024814605712890625, "__label__entertainment": 0.0002856254577636719, "__label__fashion_beauty": 0.0002970695495605469, "__label__finance_business": 0.0005316734313964844, "__label__food_dining": 0.0008797645568847656, "__label__games": 0.001190185546875, "__label__hardware": 0.0012989044189453125, "__label__health": 0.0013456344604492188, "__label__history": 0.0006456375122070312, "__label__home_hobbies": 0.00025391578674316406, "__label__industrial": 0.0012378692626953125, "__label__literature": 0.002826690673828125, "__label__politics": 0.0006632804870605469, "__label__religion": 0.0013866424560546875, "__label__science_tech": 0.451416015625, "__label__social_life": 0.00024580955505371094, "__label__software": 0.0076904296875, "__label__software_dev": 0.521484375, "__label__sports_fitness": 0.0003566741943359375, "__label__transportation": 0.001201629638671875, "__label__travel": 0.0002434253692626953}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 42884, 0.0179]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 42884, 0.28742]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 42884, 0.79471]], "google_gemma-3-12b-it_contains_pii": [[0, 500, false], [500, 2675, null], [2675, 6592, null], [6592, 9829, null], [9829, 12645, null], [12645, 15966, null], [15966, 17679, null], [17679, 20086, null], [20086, 21975, null], [21975, 25146, null], [25146, 27842, null], [27842, 30910, null], [30910, 33584, null], [33584, 36852, null], [36852, 39113, null], [39113, 40847, null], [40847, 42884, null]], "google_gemma-3-12b-it_is_public_document": [[0, 500, true], [500, 2675, null], [2675, 6592, null], [6592, 9829, null], [9829, 12645, null], [12645, 15966, null], [15966, 17679, null], [17679, 20086, null], [20086, 21975, null], [21975, 25146, null], [25146, 27842, null], [27842, 30910, null], [30910, 33584, null], [33584, 36852, null], [36852, 39113, null], [39113, 40847, null], [40847, 42884, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 42884, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 42884, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 42884, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 42884, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 42884, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 42884, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 42884, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 42884, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 42884, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 42884, null]], "pdf_page_numbers": [[0, 500, 1], [500, 2675, 2], [2675, 6592, 3], [6592, 9829, 4], [9829, 12645, 5], [12645, 15966, 6], [15966, 17679, 7], 
[17679, 20086, 8], [20086, 21975, 9], [21975, 25146, 10], [25146, 27842, 11], [27842, 30910, 12], [30910, 33584, 13], [33584, 36852, 14], [36852, 39113, 15], [39113, 40847, 16], [40847, 42884, 17]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 42884, 0.064]]}
|
olmocr_science_pdfs
|
2024-12-07
|
2024-12-07
|
7bf0ec25ba47b272137a5d3816ab19cdce5fe365
|
Partial Program Admission by Path Enumeration
Authors: Michael Wilson, Ron Cytron, and Jon Turner
Real-time systems on non-preemptive platforms require a means of bounding the execution time of programs for admission purposes. Worst-Case Execution Time (WCET) is most commonly used to bound program execution time. While bounding a program's WCET statically is possible, computing its true WCET is difficult without significant semantic knowledge. We present an algorithm for partial program admission, suited for non-preemptive platforms, using dynamic programming to perform explicit enumeration of program paths. Paths - possible or not - are bounded by the available execution time and admitted on a path-by-path basis without requiring semantic knowledge of the program beyond its Control Flow Graph (CFG).
Partial Program Admission by Path Enumeration
Michael Wilson
Department of Computer Science and Engineering
Washington University in St. Louis
St. Louis, Missouri 63130
Email: mlw2@arl.wustl.edu
Ron Cytron
Department of Computer Science and Engineering
Washington University in St. Louis
St. Louis, Missouri 63130
Email: cytron@cse.wustl.edu
Jonathan Turner
Department of Computer Science and Engineering
Washington University in St. Louis
St. Louis, Missouri 63130
Email: jon.turner@arl.wustl.edu
Abstract—Real-time systems on non-preemptive platforms require a means of bounding the execution time of programs for admission purposes. Worst-Case Execution Time (WCET) is most commonly used to bound program execution time. While bounding a program’s WCET statically is possible, computing its true WCET is difficult without significant semantic knowledge. We present an algorithm for partial program admission, suited for non-preemptive platforms, using dynamic programming to perform explicit enumeration of program paths. Paths – possible or not – are bounded by the available execution time and admitted on a path-by-path basis without requiring semantic knowledge of the program beyond its Control Flow Graph (CFG).
I. INTRODUCTION
Admission control in real-time systems running on non-preemptive platforms requires the ability to bound the execution time of applications. In a trusted environment, a single administrator can make an out-of-band determination of execution boundedness. Untrusted, shared environments are more difficult. As an example of such an environment, consider network virtualization, which has been advanced as a way to foster innovation in the Internet [1].
In network virtualization, core router platforms host 3rd-party application code, running at Internet core speeds, allowing the creation of high-speed overlay services [2]. These platforms, of which the IXP 28XX is a representative example, usually have no preemption mechanism suitable for use at high speeds. Internet core speeds necessitate extremely tight cycle budgets for packet processing. To share this type of system among untrusted parties requires stringent admission control.
In other domains, instrumentation with runtime checks to enforce proper behavior is a practical solution. Unfortunately, Internet core speeds render runtime checks impractical. At 5 Gbps, an IXP 2800-based system with 1.4 GHz microengines and 8 hardware thread contexts has a compute budget of 170 cycles. With such tight budgets, even a few runtime checks can quickly push otherwise admissible program paths over budget. A practical solution must therefore impose as little runtime overhead as possible.
Worst-Case Execution Time (WCET) analysis is the currently accepted approach. A WCET bound can be established statically, assuming that all program paths are viable. However, some well behaved programs might be rejected. For example, a program may have mutually exclusive code paths that, taken together, exceed the cycle budget. Demonstrating that these paths are mutually exclusive takes semantic knowledge, either provided by the developer or deduced by analysis at admission time. In most domains, this information is provided by the developer as branch constraints. For our virtualization application, we cannot trust the developer; any semantic knowledge must come from the analysis.
We propose partial program admission as a practical solution to this problem. By explicitly examining all paths, we can perform static analysis to re-write 3rd-party applications to achieve the following goals:
1) all “safe” paths (paths that complete under budget) are admitted,
2) no “unsafe” paths (paths that complete over budget, or that do not complete) are admitted,
3) no runtime penalty is imposed on any safe path, and
4) no semantic knowledge is required.
To re-write the program, we actually duplicate some code paths. While this causes some code expansion, or “bloat”, in practical cases the bloat proves to be within acceptable limits.
Partial program admission seems at first glance to be a useless process. It is uncommon for a developer to wish to run only some fragment of a program. However, our construction for partial program admission is not intended for running only portions of a program, but for generating a new program where the proof of execution time correctness is trivial.
WCET analysis depends upon developer knowledge of branch constraints to eliminate paths that, while present in the program, could never be taken. If the developer has met the desired budget goals, all paths that can actually be taken will be under budget. Only “impossible” paths are excluded. In this way, we allow some code duplication to substitute for detailed understanding of the program.
We also note that, during development, the program may not be under budget. The same partial admission can also serve to inform the developer of program paths that have unexpectedly run over budget. We view this algorithm as a development tool as well as an admission tool.
We present a theoretical construction that mirrors our algorithm in Section II. In section III, we present our actual algorithm, followed by proofs of correctness in Section IV. We follow up with some preliminary performance data in
Section V, related work in Section VI, and our plans for future work in Section VII.
II. ALGORITHM FOUNDATIONS
In this section we define the theoretical constructions on which our algorithm is based. First, we describe the computational model in which our solution works. Next, we describe a series of graph transformations culminating in a construction which meets our goals at the cost of significant code duplication. Finally, we describe a means of reducing the code duplication.
These constructions form the basis of an algorithm which is functionally identical to, but intractably slower than, our algorithm.
A. Computational Model
Our algorithm should be considered in the context of a simplified processor. Our idealized processor has instructions taking exactly one cycle to complete. All memory accesses complete in one cycle. There is no pipeline. There is no preemption.
Our computational model is event-driven: code is executed only in response to events. For the network virtualization application, the event is packet arrival.
Each block of code must complete within some number of cycles, known at admission time. Cycles may not be “saved” from one call to the next. The guarantee we must enforce is that, from the time the code is called to the time the code returns control, it consumes no more than the cycle count, called the budget.
Finally, we require the developer to add a “time-exceeded” exception handler to her code. The exception handler is required to adhere to strict coding guidelines which make static analysis simple and easy.
The requirements of our model are sufficient, but not strictly necessary. Our algorithm continues to work so long as at every node we examine, we carry all of the information necessary to determine the total execution time of every path beginning at that node. For example, suppose we have a memory cache. The execution time of subsequent instructions will depend upon the contents of the cache, which can be derived from prior instructions, memory layout, and the behavior of the cache. Our model is chosen to simplify this information as much as possible.
B. Path Enumeration
Our input to the algorithm consists of an assembly level representation of the program. From this, we can develop a Control Flow Graph (CFG) of the program, in which outgoing edges are labeled by the execution time required for the corresponding program segments. Our objective is to derive a new CFG that executes the same sequence of instructions for program executions that complete within a specified time bound \( B \), while terminating in an exception handler for program executions that exceed the budget \( B \).
The conceptual starting point for this construction is the creation of a Control Flow Tree (CFT) from the CFG. The CFT duplicates nodes in the CFG as necessary, in order to convert the graph into a tree.
See Figure 1 for an example. Nodes \( S \) and \( T \) are dummy nodes used to delineate entry and exit points, and contain no actual code. Similarly, in the CFT, \( T_1, \ldots, T_4 \) are copies of the dummy node \( T \) and contain no code.
Code generated from the CFT is functionally identical to the original CFG. If the length of the path from the root node to a node \( u \) in the tree exceeds \( B \), then we can replace the subtree rooted at \( u \) with an exception node, representing a jump to the exception handling routine. As an additional step, if after applying this step, the CFT contains a subtree whose leaves are all exception nodes, we can replace the entire subtree with an exception node.
This pruning procedure is illustrated on Figure 1. Let us consider a budget of 10 cycles. While it would be valid to execute the path \( A \rightarrow C \rightarrow D_2 \rightarrow F_2 \rightarrow G_4 \) before aborting to the exception handler, it is clear that any execution path reaching \( F_2 \) will go over budget. Our earliest chance to raise the exception is by intercepting the branch instruction at \( D_2 \), with the result shown in Figure 2.
We refer to the tree constructed in this way as the \( B \)-bounded execution tree of the original control flow graph. We note that such a tree can be defined relative to any node \( u \) in the CFG and we let \( bxt_B(u) \) (or generally, BXT) denote this execution tree.
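To make the construction concrete, the following is a minimal Python sketch of computing \( bxt_B(u) \) by explicit path enumeration with pruning. The CFG encoding (a dictionary of weighted successor lists), the node names, and all function names are illustrative assumptions for this sketch, not the paper's implementation.

```python
def bxt(cfg, node, budget, sink="T"):
    """Sketch: build the budget-bounded execution tree rooted at `node`.

    cfg maps each node to a list of (successor, edge_weight) pairs; edge
    weights are the cycle costs of the corresponding program segments.
    A node labelled "X" stands for a jump to the exception handler.
    Terminates for CFGs whose cycles all have strictly positive weight,
    because the budget strictly decreases along such cycles.
    """
    if budget < 0:                  # path cost exceeded B: prune to an exception node
        return ("X", [])
    if node == sink:                # reached the sink within budget
        return (sink, [])
    children = [bxt(cfg, succ, budget - w, sink) for succ, w in cfg[node]]
    if all(is_closed(child) for child in children):
        return ("X", [])            # every continuation is over budget: collapse
    return (node, children)


def is_closed(tree):
    """A subtree is closed if none of its leaves is the sink (all are exceptions)."""
    label, children = tree
    if not children:
        return label == "X"
    return all(is_closed(child) for child in children)
```

Calling, say, `bxt(cfg, "S", 10)` enumerates every path of cost at most 10 from the source and replaces each over-budget continuation with a single exception node.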
While one could generate a version of the original program directly from the BXT, this typically results in an excessive amount of code duplication. We can dramatically reduce the amount of code duplication by merging equivalent subtrees of the BXT in a systematic way.
C. Code Duplication Reduction
The BXT typically contains many subtrees that are identical to one another and can be merged. To make this precise, we define two nodes \( u_1 \) and \( u_2 \) in the BXT to be equivalent if they were derived from the same node \( u \) in the original CFG.
(that is, they represent copies of the same original program segment). Two subtrees of the BXT are equivalent if they are structurally identical and all of the corresponding node pairs are equivalent. We can merge any pair of equivalent subtrees without changing the set of executions, yielding a bounded execution graph (BXG) equivalent to the BXT. Conceptually, the merging is performed in a top down fashion. That is, if \( u_1 \) and \( u_2 \) are roots of equivalent subtrees, we merge them so long as there are no ancestors \( v_1 \) of \( u_1 \) and \( v_2 \) of \( u_2 \) that are also roots of equivalent subtrees. The merging process continues as long as there are equivalent subtrees that can be merged.
Returning to our example, nodes \( D_1 \) and \( D_2 \) cannot be merged because their child execution trees are different. \( D_1 \) has children \( E_1 \) and \( F_2 \); \( D_2 \) has children \( E_2 \) and \( X \). However, the subtrees rooted at \( E_1 \) and \( E_2 \) are identical. There is no need to retain both trees. Instead, we can merge them into a single subtree. Even further, the tree rooted at \( G_2 \) is identical to the subtrees rooted at \( G_1 \) and \( G_3 \). We can also merge the \( G_2 \) node with the \( G_1/G_3 \) node from the \( E_1/E_2 \) execution tree. See Figure 3.
In contrast to the massive code duplication in the BXT, in the BXG only one node (\( D \)) needed to be duplicated.
D. Intervals
While one can derive the BXG by explicitly constructing the BXT and then merging nodes, there is a more efficient dynamic programming procedure that can be used to construct the BXG directly. This procedure is based on the observation that the structure of a BXT subtree with root node \( u_1 \) is a function of just two things – the node \( u \) in the original CFG from which \( u_1 \) was derived and the amount of available execution time that remains after execution has reached \( u_1 \). If the cost of the path from the root to \( u_1 \) is \( p \), then the remaining execution time is \( B - p \) where \( B \) is the overall bound. We note that the BXT subtree with root \( u_1 \) is \( bxt_{B - p}(u) \). So two nodes \( u_1 \) and \( u_2 \) derived from the same CFG node \( u \) will have identical subtrees if the costs of their paths from the root are identical.
We can extend this notion to path costs that are “close.” Given nodes \( u_1 \) and \( u_2 \) derived from \( u \), with path costs from the root of \( p \) and \( q \) respectively, they will have identical subtrees if \( bxt_{B - p}(u) = bxt_{B - q}(u) \). This will be true for values of \( B - p \) and \( B - q \) that are “close enough” in a certain sense.
For each node \( u \) in the original CFG, the dynamic programming procedure produces a partition on the integers corresponding to a partition on subtrees. Two values \( i \) and \( j \) fall in the same equivalence class of the partition if and only if \( bxt_i(u) = bxt_j(u) \). Using these partitions, we can construct the BXG directly from the CFG, without having to explicitly construct the BXT.
As we prove in section IV, this partition of the integers falls into contiguous ranges from a minimum value to a maximum value, and including all values between. For our algorithm, we refer to these partitions of the integers as intervals, and use these as the basis for a memoization scheme.
III. THE ALGORITHM
Before we formally present the algorithm there are several preliminary details to define.
First, we assume that the code has already been read into a CFG with \( S \) and \( T \) nodes. \( \nu(u) \) represents the cycle cost to traverse node \( u \), represented as an outgoing edge weight in our CFGs.
We also assume that we have an INTERVAL data type. We represent each INTERVAL as a pair \([a, b]\) where \( a < b \). Each INTERVAL is treated as the set \( \{x|a \leq x \leq b\} \), with the usual definitions for intersection, subset, overlapping INTERVALS, disjoint INTERVALS, and element predicate (\( \in \)). We define scalar addition on an INTERVAL as \([a, b] + x = [a + x, b + x] \). Finally, we define the null INTERVAL as the empty set.
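A minimal Python sketch of such an INTERVAL type, assuming closed integer endpoints with infinities standing for unbounded ends (the names and representation are illustrative assumptions, not the paper's code):

```python
NEG_INF, POS_INF = float("-inf"), float("inf")

class Interval:
    """Closed interval [a, b]; treated as the set {x | a <= x <= b}."""

    def __init__(self, a, b):
        self.a, self.b = a, b

    def __contains__(self, x):          # element predicate
        return self.a <= x <= self.b

    def shift(self, w):
        """Scalar addition: [a, b] + w = [a + w, b + w]."""
        return Interval(self.a + w, self.b + w)

    def intersect(self, other):
        """Intersection; None represents the null (empty) INTERVAL."""
        lo, hi = max(self.a, other.a), min(self.b, other.b)
        return Interval(lo, hi) if lo <= hi else None
```

For example, `Interval(0, 3).shift(2)` represents \([2, 5]\), and `4 in Interval(0, 3)` evaluates to `False`.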
Given the INTERVAL type, we define an INTERVAL search object with two functions.
- **INTERVAL function insert(vertex v, INTERVAL i)**
Adds a tuple \(<v, i>\) to the search object; returns the INTERVAL.
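The excerpt above lists only the insert operation of the search object. The sketch below, which builds on the Interval class above, shows one plausible shape for the search object together with the memoized recursion over the CFG described in Sections II and IV; the `find` method, the `bxg` function, and the precomputed `shortest` argument are assumptions made for illustration and are not the paper's listing.

```python
class IntervalSearch:
    """Per-vertex table mapping intervals of remaining budget to analysed results."""

    def __init__(self):
        self.table = {}                                  # vertex -> [(Interval, result)]

    def insert(self, v, interval, result):
        self.table.setdefault(v, []).append((interval, result))
        return interval

    def find(self, v, remaining):
        for interval, result in self.table.get(v, []):
            if remaining in interval:
                return interval, result
        return None


def bxg(cfg, vertex, remaining, memo, shortest, sink="T"):
    """Sketch: analyse `vertex` with `remaining` cycles, memoising by interval.

    cfg      : vertex -> list of (successor, edge_weight) pairs
    shortest : vertex -> cost of the cheapest path from the vertex to the sink
    Assumes every cycle in the CFG has strictly positive total weight.
    Returns (interval, emitted), where `emitted` is ("X", []) for a jump to the
    exception handler or (vertex, [(child_emitted, weight), ...]) otherwise.
    """
    hit = memo.find(vertex, remaining)
    if hit is not None:
        return hit                                       # already analysed this interval

    if remaining < shortest[vertex]:
        # Every path to the sink exceeds the budget: emit the exception handler.
        interval, emitted = Interval(NEG_INF, shortest[vertex] - 1), ("X", [])
    elif vertex == sink:
        interval, emitted = Interval(0, POS_INF), (sink, [])
    else:
        interval, children = Interval(NEG_INF, POS_INF), []
        for succ, w in cfg[vertex]:
            child_iv, child_emitted = bxg(cfg, succ, remaining - w, memo, shortest, sink)
            # Nonempty here, since each shifted child interval contains `remaining`
            # (cf. Theorem 6 in Section IV).
            interval = interval.intersect(child_iv.shift(w))
            children.append((child_emitted, w))
        emitted = (vertex, children)

    memo.insert(vertex, interval, emitted)
    return interval, emitted
```

A driver would precompute `shortest` with any single-sink shortest-path pass and then call `bxg(cfg, "S", B, IntervalSearch(), shortest)`.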
There are two types of complexity that matter for this algorithm. First, we have the computational complexity of the algorithm. Second, we have the spatial complexity of the generated code.
This algorithm is intended for static analysis of program code submitted for admission. The algorithm will run once at admission time and then (if admitted) never again. Thus, while we need the computational complexity to be feasible, we consider spatial complexity to be the more important factor.
1) Computational Complexity: We can associate each recursive call with an edge in the CFG. Let us examine the algorithm in terms of the number of recursive calls per edge.
For us to insert a vertex-interval pair, we must reach the vertex by a series of bxg calls. Since the remaining cycles $R$ is monotonically non-increasing from $B$, and we have at most one negatively-lower-bounded interval at each vertex, there are at most $O(B)$ intervals associated with each vertex. We only make recursive bxg calls along outgoing edges on the first failure to find an interval in the interval search object. (Thereafter, the interval will be present.) Therefore, we can make at most $O(B)$ recursive calls along each outgoing edge.
Next, let us examine the number of operations per recursive call. We have two non-constant operations per call—a single search of the interval search object, and a possible single insert into the object. Both can easily be implemented as $O(\log B)$ operations using a standard interval search tree associated with each vertex [3].
By an aggregate analysis over edges, we have $O(mB)$ recursive calls, using $m$ for the number of edges. Each takes $O(\log B)$ time, for a total computational complexity of $O(mB \log B)$.
2) Spatial Complexity: Spatial complexity of the emitted code for a vertex in the CFG depends upon three factors: the number of paths from $S$ to the vertex, the number of paths from the vertex to $T$, and the budget $B$.
At each vertex, we emit duplicated code corresponding to each interval that is both present and reachable from the source $S$ by paths of cost no more than $B$.
Individual budget values at vertex $u$ are divided into equivalence classes by the weight of each path from $u$ to $T$. More precisely, we have exactly one interval present for each path from $u$ to $T$ of distinct cost, plus one for exceptions. Therefore, the number of paths of distinct cost from $u$ to $T$ forms an upper bound on the number of intervals that may be present at $u$.
Each interval corresponds to some number of cycles remaining at this point in the CFG. For an interval to be emitted, it must be reachable: there must be a path $\rho$ from $S$ to that vertex such that $B - w(\rho)$ is within the interval. Therefore, the number of paths of distinct cost from $S$ to each vertex is an upper bound on the number of emitted intervals.
As a direct consequence of this and the monotonically non-increasing budget, the number of intervals we emit is upper-bounded by $B$.
Thus, our spatial complexity is upper bounded by the minimum of three factors: the number of paths to $T$, the number of paths from $S$, and the budget $B$.
C. Natural Extensions
There are two natural extensions of this algorithm that bear mention.
1) Variable Budgets: Our context of network virtualization is event-driven by packet arrival. Performance guarantees are missed in the networking context when packets have arrived at the inputs but are unable to be processed fast enough to forward them to the output at line rate, resulting in output
underflow and queuing. If the problem is persistent, packets will be lost. In the case of a shared processor, there is no way to guarantee that discarded packets belong to the offending code.
However, not all packets are the same size. Since a larger packet will take more time at the output, we have more time for processing. Fortunately, our model can be easily extended to cope with this situation without changing the algorithm.
Let us take our example of Figure 1 and extend it to handle packets of two sizes, with cycle budgets of 6 and 10 respectively. We can do this by adding code at the beginning of the CFG to check the length of the packet and jump to the appropriate starting point for this length.
See Figure 5. Here we have the modified CFG. Our new start node, $S$, contains the code to check the packet length and branch to $S1$ for short packets and $S2$ for the long packets. $S1$ and $S2$ do not actually generate code, but are entered into our CFG as if they cost 4 and 0 cycles, respectively. To analyze the CFG, we simply call $bxt(S, 10)$ as usual, resulting in Figure 6. The algorithm is unaware that no long packets will reach $S1$, but the semantic knowledge is unnecessary. It could serve to reduce the code duplication, of course.
Variable budgets are not free. We do have a small constant cost in the test-and-branch for budget selection. Since we increase the number of early branches, this also serves to drive up duplication of nodes. Nevertheless, for packet processing this is a worthwhile investment.
2) Notify and Continue: We consider it worthwhile in our problem context to consider a modification to the paradigm of partial admission. We currently view the exception handler as an abortion of the code block. However, we could use the exception handler to register a notification that we went over our budget, then continue execution.
This requires a modification to our algorithm. Presently, our algorithm prunes away all subtrees that go over budget. To notify and continue, we would need to return to the flow of execution. To incorporate this notion, we would need to modify the algorithm to add an outgoing edge from the exception handler back to the node we pruned, with an unbounded budget.
Using completely unbounded budgets also requires that our algorithm be adjusted to deal with loops as a special case. Because nodes that sit along paths containing cycles may have an unbounded number of intervals, we would need to explicitly recognize that an unbounded budget forms a special interval, and to handle this separately.
IV. PROOFS OF CORRECTNESS
Our proofs of correctness proceed as follows. First, we present a rigorous treatment of the constructions from CFG to CFT, BXT, and BXG. Next, we prove the key properties of the constructions. Finally, we demonstrate that our dynamic programming algorithm creates our BXG and therefore has all of our required properties.
A. Bounded Execution Subtrees
While it is conceptually clear to proceed from the CFG to the CFT and thence to the BXT, this is mathematically inconvenient. In the case of a cyclic CFG, the depth of the corresponding CFT is unbounded. We prefer to work within the domain of finite graphs. Therefore, we proceed directly from the CFG to the BXT.
Given a CFG $G = (V, E, s, t, w)$ where $V$ is the set of vertices, $E$ is the set of directed edges connecting these vertices, $s$ is our source vertex, $t$ is our sink vertex, and $w$ is a weight function over edges, we construct a BXT $T$ from $G$ as follows.
Initialize $T$ to have a single vertex $r$ and assign a label $\lambda(r) = s$. For any node $u$ of $T$, let $p(u)$ be the path from $r$ to $u$, and extend the weight function $w$ to paths in the natural way. Repeat the following step as long as possible.
Select a leaf $u$ of $T$ with $\lambda(u) \neq t$ and $w(p(u)) \leq B$. Let $v = \lambda(u)$ and let $v_1, \ldots, v_k$ be the successors of $v$ in $G$. Add nodes $u_1, \ldots, u_k$
to $T$ with edges $(u, u_i)$ and let $\lambda(u_i) = v_i$ and $w(u, u_i) = w(v, v_i)$.
This intermediate construction is the CFT up to and just over the budget frontier. That is, we continue to build on our paths until all leaves $u$ (and only leaves) are either over budget or correspond to $t$. We convert this to our BXT by the following pruning steps.
For all nodes $u$ in $T$ with path $\rho = r \leadsto u$ and $w(\rho) > B$, let $\lambda(u) = X$, where $X$ is a new label denoting “exception.”
Call a subtree of $T$ open if it contains a node $u$ with $\lambda(u) = t$. Otherwise call it closed. For every node that is the root of a closed subtree and whose parent is not, prune the subtree and let $\lambda(u) = X$.
The tree $T$ obtained in this way is called $bxt_B(s)$. Since the construction can be applied equally well to any node $u$ in $G$ with any non-negative budget $B$, we can always use $bxt_B(u)$ to refer to any subtree of the BXT rooted at $u$, so long as we adjust the budget $B$ appropriately.
The $bxt_B(s)$ has four important characteristics: completeness, boundedness, termination, and equivalent functionality.
**Theorem 1** (Completeness). If \( G = (V, E, s, t, w) \) is a CFG and \( T = bxt_B(s) \) is the corresponding BXT, then all paths \( s \leadsto t \) with cost at most \( B \) have corresponding paths in \( T \).
**Proof:** Suppose there were a path $\rho$ in the CFG with $w(\rho) \leq B$ but no corresponding path in $T$. Without loss of generality, let $\rho$ be the shortest such path, and let $\rho = s \leadsto u \leadsto v$. Then $\sigma = s \leadsto u$ must be in $T$. However, we have an available construction step from node $u$, so our construction was incomplete. Thus, $T$ cannot be $bxt_B(s)$.
**Theorem 2** (Boundedness). If $G = (V, E, s, t, w)$ is a CFG and $T = bxt_B(s)$ is the corresponding BXT, then no path in $T$ has cost greater than $B$.
**Proof:** Suppose there were a path $\rho$ in $T$ with $w(\rho) > B$. Without loss of generality, let $\rho$ be the shortest such path, and let $\rho = s \leadsto u$. If $u$ has no descendants $v$ with $\lambda(v) = t$, then the subtree rooted at $u$ is closed and should have been pruned during the pruning phase. Alternatively, suppose $u$ does have descendant $v$ with $\lambda(v) = t$. Because path costs are monotonically non-decreasing, we know that the cost from $r$ to $v$ also exceeds $B$. Then it should have been relabeled to $\lambda(v) = X$ during the relabeling phase of the pruning step. In either case, $T$ could not have been $bxt_B(s)$.
**Theorem 3** (Termination). If $G = (V, E, s, t, w)$ is a CFG and $T = bxt_B(s)$ is the corresponding BXT, then all paths in $G$ that exceed the budget $B$ have a corresponding truncated subpath in $T$ terminating at exception node $X$.
**Proof:** From the construction, we know that construction continues until, for every leaf \( u \), either \( \lambda(u) = t \) or \( w(p(u)) > B \). Thus, for each path in \( G \) that exceeds the budget \( B \), there is a corresponding subpath in \( T \) that runs beyond the budget frontier.
During the relabeling step of the pruning stage, all nodes $u$ over the budget are relabeled to $\lambda(u) = X$. Since these are all leaf nodes, they represent the roots of closed subtrees. Since the pruning stage can never open a subtree once closed, the corresponding truncated subpath will always terminate in an exception, although it may be further truncated.
**Theorem 4** (Equivalent Functionality). If $G = (V, E, s, t, w)$ is a CFG and $T = bxt_B(s)$ is the corresponding BXT, then all paths $r \leadsto u$ in $T$ with $\lambda(u) \neq X$ have labels that correspond directly to paths in $G$.
**Proof:** By our construction, no path enters $T$ without coming from a corresponding path in $G$, and labels are retained pointing back to the original nodes in $G$. Since the pruning phase only relabels to $X$, and completes with only leaf nodes relabeled, all labels on safe subpaths in $T$ are retained.
We can use $T$ to create a program that is functionally equivalent to the parts of $G$ that stay under budget but which is guaranteed to finish within budget (either at $t$ or at $X$). For each vertex $u$ in $T$, we generate code equivalent to $\lambda(u)$ from the original CFG. E.g., see figure 2.
**B. Bounded Execution Flow Graphs**
The BXT construction repeats many code segments unnecessarily. We can generate a more compact program by merging identical subtrees in \( T \) to produce a new CFG, the Bounded Execution Flow Graph, \( bxg_B(s) \), or BXG.
We first define our notion of equivalent subtrees. Let $T$ be a $bxt_B(s)$ of some CFG. Let there be two nodes $u$ and $v$ in $T$ with children $u_1, \ldots, u_i$ and $v_1, \ldots, v_j$, respectively. We consider the subtrees rooted at $u$ and $v$ to be identical when $\lambda(u) = \lambda(v)$ and all subtrees rooted at corresponding children are also identical.
We begin creating our BXG $G$ from $T$ by copying $T$ completely. Next, we repeat the following step as long as possible.
Select nodes $u$ and $v$ from $G$ where the subtrees rooted at $u$ and $v$ are equivalent and their parent nodes are not. Merge these subtrees as follows.
Prune $v$ and all descendant nodes from $G$. For each node we prune, if there is a parent not in the subtree, replace the incoming edge with an edge to the corresponding node in $u$.
This construction retains all four properties of the BXT: Completeness, Boundedness, Termination, and Equivalent Functionality. Since each property relies upon the (downward) structure of the subtree rooted at each node, and these structures have not changed, no properties have been lost.
In the case of Completeness, no subtrees have been pruned without re-pointing the incoming edges at an equivalent subtree. This applies to all paths, not just paths under budget.
In the case of Boundedness, no paths have been lengthened (or shortened). Thus, if $T$ were properly bounded, so is $G$.
In the case of Termination, no paths have been lengthened and no nodes have been relabeled.
For Equivalent Functionality, the equivalence property of subtrees depends upon identical labels.
C. Correspondence to Algorithm
Proving that our algorithm corresponds to this construction requires demonstrating several properties of our intervals.
1) Intervals: We prove two properties of intervals to assist in proving that the algorithm corresponds to the BXG construction.
**Theorem 5.** Given a BXT $T$ generated from CFG $G$, consider two nodes $u$ and $v$ in $T$ with $\lambda(u) = \lambda(v)$ and identical subtrees. Let $p(u)$ be the path from the root $r$ to $u$ and $p(v)$ be the path from root $r$ to $v$. Let $i = B - w(p(u))$ and $j = B - w(p(v))$ be the remaining cycles at $u$ and $v$, and assume without loss of generality that $i \leq j$. If there is a third node $z$ with $\lambda(z) = \lambda(u)$, path $p(z)$ from root $r$ to $z$, and $i \leq B - w(p(z)) \leq j$ then the subtree rooted at $z$ is also identical.
**Proof:** Consider the (unbounded) CFT we could generate from any node $u$ in $T$, consisting of the collective enumerations of the (possibly infinite number of) paths from $\lambda(u)$ to $t$ in $G$. For each path $\rho_i$ there is a corresponding weight $w(\rho_i)$. This weight does not depend on the incoming budget to $u$ of $B_u = B - w(p(u))$.
We can order these paths as $\rho_1, \ldots, \rho_k$ where $w(\rho_1) \leq \cdots \leq w(\rho_k)$. If we reduce the incoming budget $B_u$ of $\text{bxt}_{B_u}(u)$, we will be forced to relabel and prune those leaves where $B_u - w(\rho_i) < 0$. Since our path weights and the ordering are independent of the incoming budget $B_u$, if we relabel and prune $\rho_i$ then we will also relabel and prune all paths $\rho_j$ with weight $w(\rho_i) \leq w(\rho_j)$.
Now, given $u, v$ known to be the roots of identical subtrees with corresponding cycles remaining $i = B - w(p(u))$ and $j = B - w(p(v))$, $i \leq j$, we know that the leaves of these subtrees have identical labels. Suppose there were some subtree rooted at $z$ with $i \leq B - w(p(z)) \leq j$ and subtree differing from the one rooted at $u$. Since we know that decreasing the available cycles can never admit additional paths, and $w(p(u)) \geq w(p(z))$, we know that the subtree rooted at $u$ has fewer paths to the sink under budget.
Let us consider these subtrees after relabeling, but before pruning of closed subtrees. To differ from the subtree rooted at \( u \), the subtree rooted at \( z \) must have some leaf at the end of a path \( \rho_k \) with a label differing from the corresponding leaf in the subtree rooted at \( u \), i.e. with \( B - w(p(z)) - w(\rho_k) \geq 0 \) but \( i - w(\rho_k) < 0 \). But because \( B - w(p(z)) \leq j \), if \( B - w(p(z)) - w(\rho_k) \geq 0 \) then \( j - w(\rho_k) \geq 0 \) as well, so the path \( \rho_k \) also survives at \( v \). This implies that the subtrees rooted at \( u \) and \( v \) differ, contradicting our original assumption.
Consequently, for each node $u$ the integers from 0 to $B$ can be divided into subranges such that $i$ and $j$ are in the same subrange if and only if $\text{bxt}_i(u) = \text{bxt}_j(u)$. We can represent these subranges as intervals $[i, j]$ where $i \leq j$.
For each budget $B$ and node $u$, there exists a maximal interval $[i, j]$ such that there is no value $k$ not within this interval where $\text{bxt}_i(u) = \text{bxt}_k(u) = \text{bxt}_j(u)$.
**Theorem 6.** Given a budget $B_u = B - w(p(u))$, a node $u$ with $\lambda(u) \neq t$ and $k$ children $u_1, \ldots, u_k$, and known maximal intervals for each child $[i_1, j_1], \ldots, [i_k, j_k]$ such that for each child $u_x$ we have $B_u - w(u_{x, u}) \in [i_x, j_x]$, we can compute the corresponding maximal interval for the parent node $u$ as the intersection of the child intervals, each shifted upward by $w(u_{x, u})$. That is:
$$[i_u, j_u] = \bigcap_{x=1}^{k} ( [i_x, j_x] + w(u_{x, u}) )$$
is the maximal interval at $u$ containing $B_u$.
**Proof:** We use the same construction as in Theorem 5. There are paths from $\lambda(u)$ to $t$ in $G$. However, these paths consist of the union of all paths from $\lambda(u_x)$ to $t$ in $G$ with $\lambda(u)$ prepended. Let us denote the paths from $u_x$ to our sink as $\rho_{xy}$. Thus, given the weight of paths $\rho_{xy}$ for $u_x$ as $w(\rho_{xy})$, the weight of the corresponding paths from $u$ are $w(u_{x, u}) + w(\rho_{xy})$. This accounts for the upward shift by $w(u_{x, u})$.
Also as in Theorem 5, these paths can be ordered independently of $B_u$. Given interval $[i_x, j_x]$ for child $u_x$, we know that these limits represent the budget points where, for some $y$, $B - w(p(u_x)) - w(\rho_{xy})$ changes sign. (Increasing beyond $j_x$ would cause a negative value $B - w(p(u_x)) - w(\rho_{xy})$ to become non-negative; decreasing below $i_x$ would cause a non-negative value $B - w(p(u_x)) - w(\rho_{xy})$ to become negative.)
Since we are given that the upwardly shifted intervals are overlapping, we know that there is some value $B_u$ contained within each shifted interval. That is, $B_u \in [i_x, j_x] + w(u_{x, u})$ for all $x$. If we sort the paths $\rho_{xy}$ in weight order, there will be some greatest value not exceeding $B_u$ among the $i_x + w(u_{x, u})$ values, and some smallest value not less than $B_u$ among the $j_x + w(u_{x, u})$ values. These are the endpoints where the most sensitive path changes state, and they are precisely the endpoints of the interval intersection.
Finally, by definition, the intervals for node $u$ with $\lambda(u) = t$ are $[-\infty, -1]$ and $[0, \infty]$. That is, we completed under budget if we reached $t$ without going negative on cycles.
Using these maximal intervals, we can merge all subtrees with remaining budgets $B$ within the same interval.
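The intersection of upward-shifted child intervals in Theorem 6 translates directly into code. The following C sketch is illustrative only (the type and function names are ours, not the paper's); it represents $-\infty$ and $\infty$ with `INT_MIN` and `INT_MAX`.

```c
#include <limits.h>

/* A budget interval [lo, hi]; INT_MIN/INT_MAX stand in for -inf/+inf. */
typedef struct { int lo, hi; } interval_t;

/* Maximal interval of a parent node, per Theorem 6: intersect each child's
 * maximal interval after shifting it up by the weight of the edge to that
 * child.  child[x] is the x-th child's interval, edge_w[x] the edge weight. */
static interval_t parent_interval(const interval_t *child, const int *edge_w, int k)
{
    interval_t r = { INT_MIN, INT_MAX };
    for (int x = 0; x < k; x++) {
        int lo = (child[x].lo == INT_MIN) ? INT_MIN : child[x].lo + edge_w[x];
        int hi = (child[x].hi == INT_MAX) ? INT_MAX : child[x].hi + edge_w[x];
        if (lo > r.lo) r.lo = lo;   /* intersection: max of lower bounds */
        if (hi < r.hi) r.hi = hi;   /* intersection: min of upper bounds */
    }
    return r;
}
```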
2) Algorithmic Correctness:
**Theorem 7 (Algorithm computes maximal intervals).** Given a CFG $G = (V, E, s, t, w)$ and budget $B$, our algorithm generates only maximal intervals.
**Proof:** Given node $u$ and remaining budget $R$, our algorithm first looks to see if we already have node $u[i, j]$ such that $R \in [i, j]$. This is our dynamic programming step and exists only for optimization. We may ignore it in our proof.
Next, we check to see if we’re over budget (pruning step). Suppose we will exceed our budget. The pre-computed shortest path values allow us to immediately compute the maximal interval without examining the children. First, we know that the subtree rooted at $u$ is closed, as all paths to the sink are longer than our remaining budget $R$. A closed subtree has an interval unbounded on the left. Finally, we have the shortest
path weight $\delta(u, t)$ from $u$ to the sink, which tells us at what value of $R$ the first path becomes admissible. This provides the upper bound on the maximal interval.
Suppose we’re not over budget. We also check the basis step of our recursive definition in Equation 1. If we match the sink ($\lambda(u) = t$), we can compute the result directly from the basis.
Finally, if we don’t have a shortcut, we follow Equation 1. Since our pruning step computes maximal intervals directly, and our basis step does the same, all we have left is our recursive step. By Theorem 6, this also computes the maximal interval for $u$.
Therefore, all intervals computed will be maximal.
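For concreteness, a minimal C sketch of the interval computation as described above is given below, reusing the `interval_t` representation from the previous sketch. It assumes positive edge weights (so the remaining budget strictly decreases and the recursion terminates), uses a small fixed-size memo table purely for brevity, and omits node emission entirely; all names are hypothetical and this is not the authors' implementation.

```c
#include <limits.h>

typedef struct { int lo, hi; } interval_t;   /* budget interval [lo, hi] */

typedef struct node node_t;
struct node {
    int is_sink;            /* 1 if this vertex is the sink t                 */
    int shortest_to_sink;   /* delta(u, t): pre-computed shortest path weight */
    int nchild;
    node_t **child;         /* successors in the CFG                          */
    int *edge_w;            /* weight of the edge to each successor           */
    int nmemo;              /* memoized maximal intervals for this vertex     */
    interval_t memo[64];    /* fixed capacity, for the sketch only            */
};

/* Maximal interval containing the remaining budget R at node u. */
static interval_t bxg_interval(node_t *u, int R)
{
    /* dynamic-programming step: reuse a previously computed interval */
    for (int i = 0; i < u->nmemo; i++)
        if (u->memo[i].lo <= R && R <= u->memo[i].hi)
            return u->memo[i];

    interval_t r;
    if (R < u->shortest_to_sink) {
        /* pruning step: every path overruns the budget, subtree is closed */
        r.lo = INT_MIN;
        r.hi = u->shortest_to_sink - 1;
    } else if (u->is_sink) {
        /* basis step: reached t without going negative on cycles */
        r.lo = 0;
        r.hi = INT_MAX;
    } else {
        /* recursive step (Theorem 6): intersect shifted child intervals */
        r.lo = INT_MIN; r.hi = INT_MAX;
        for (int x = 0; x < u->nchild; x++) {
            interval_t c = bxg_interval(u->child[x], R - u->edge_w[x]);
            int lo = (c.lo == INT_MIN) ? INT_MIN : c.lo + u->edge_w[x];
            int hi = (c.hi == INT_MAX) ? INT_MAX : c.hi + u->edge_w[x];
            if (lo > r.lo) r.lo = lo;
            if (hi < r.hi) r.hi = hi;
        }
    }
    if (u->nmemo < 64) u->memo[u->nmemo++] = r;
    return r;
}
```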
**Theorem 8 (Algorithm computes only necessary intervals).** Given a CFG $G = (V, E, s, t, w)$ and budget $B$, our algorithm only computes intervals reachable from the source within our budget.
**Proof:** Our algorithm proceeds in depth-first search from $s$, and therefore only visits those vertices reachable from $s$. Since the cycles remaining is decremented appropriately at each recursive call, we also only investigate those intervals we can actually reach.
**Theorem 9 (Algorithm Correctness).** Given a CFG $G = (V, E, s, t, w)$ and budget $B$, our algorithm generates $bxg_B(s)$.
**Proof:** Follows automatically from Theorems 7 and 8.
V. PERFORMANCE
We have implemented this algorithm and tested it on a variety of CFGs and budgets.
A. Synthetic CFGs
Our synthetic CFGs were generated by a series of vertex substitutions that parallel grammar production rules in a C-like language. For our acyclic CFGs, we include simple statements, if, if-then-else, and switch/case statements. For our cyclic CFGs, we added while, do/while, and for loops. In both cases, the typical size of the synthetic input CFG was roughly double the size of the largest packet processing code block we have seen in our router virtualization efforts, and quadruple the target size for a typical code block.
Examine Figure 7. This represents the results of running the algorithm on 1000 different acyclic synthetic CFGs. We show the resulting distribution of the maximum code duplication factor required for each synthetic CFG over all possible budgets. The vast majority (82%) require a maximum duplication factor from 1–2, with an average maximum of 1.6. Large duplication factors are actually very rare; one pathological case required a duplication factor of 23.5. Subsequent analysis of this example showed that it was composed almost exclusively of a series of nested switch/case statements.
The results on cyclic CFGs are uninteresting and omitted. While the algorithm works on cyclic CFGs, it works by implicitly unrolling the loop to the limit of the budget. Thus, the code duplication factor is bounded only by the budget. As expected, in simulation the code duplication factor for cyclic graphs is linear in the budget.
B. Real CFG: IPv4 Header Rewriting
For a real CFG, we used the code that rewrites the IPv4 header for next-hop forwarding. This consists of 180 instructions, designed to run at over 5 Gbps on our virtualized router.
See Figure 8. The real CFG necessitated some minor modification to the algorithm to deal with pipeline stalls due to unfilled deferral slots.
At very small budgets, the algorithm actually generates less code than the original CFG. This is due to pruning when the budget is too low for this code block. That is, so many paths are pruned that many vertices are never emitted at all. For most application code, this represents a serious developer error and would be reported as such. It is simple for our algorithm to report when certain paths are never admitted, and we implemented this in our experimental version.
Above 108 cycles, we reach the maximum length path of the CFG. At this point, all paths are admissible and no duplication is necessary. The original CFG is accepted with no modification.
A suitable budget for 5 Gbps would be 170 cycles. Clearly, we are under 170. For 10 Gbps we need 85 cycles. The IPv4 header format code is not currently able to achieve 10 Gbps, as the chart makes obvious. Even worse, 85 cycles is the peak of our code duplication, at 296 instructions. This still yields a duplication factor of only 1.64, well in line with our synthetic cases.
VI. RELATED WORK
The major competing technology is WCET analysis using mixed integer programming [4]. This differs from our work in that it makes no effort to solve the code emission problem, and requires that we trust the developer to provide semantic information on branch constraints.
Our problem is different. We need to accept and handle untrusted code in a shared environment. Thus, we must derive any semantic information from the program, not the developer. In the absence of programmer-supplied semantic information, we can rewrite programs to create provably safe CFGs via code duplication.
We also note that the decision to use integer programming to solve the WCET problem was because the developers considered explicit path enumeration infeasible. This fails to consider the possibilities of dynamic programming.
for (i = 0; i < 100; i++) {
    if (rand() > 0.5) j++;
    else k++;
}
Fig. 9. “Difficult” WCET analysis for explicit path enumeration
Consider the code snippet in Figure 9. The argument is that this snippet contains $2^{100}$ possible paths, and that to enumerate them all is simply impractical. However, using a dynamic programming approach with loop bounds, we can determine WCET for this snippet in linear time.
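To make the dynamic-programming point concrete, a hedged C sketch for the snippet of Figure 9 follows: the worst case of one iteration is the more expensive of the two branch bodies, so the loop's WCET is linear in the loop bound rather than exponential in the number of paths. The cycle costs below are illustrative placeholders, not measured IXP values.

```c
/* WCET of the Figure 9 loop by dynamic programming: the worst cost of one
 * iteration is the max over the two branch bodies, so the loop contributes
 * bound * worst_iteration cycles, not 2^bound enumerated paths.  All cycle
 * counts are illustrative placeholders. */
static int wcet_figure9(void)
{
    const int bound     = 100;  /* loop iterations                   */
    const int cost_test = 2;    /* evaluate the condition and branch */
    const int cost_then = 1;    /* j++                               */
    const int cost_else = 1;    /* k++                               */
    const int cost_loop = 2;    /* increment/compare/jump of the for */

    int worst_branch = cost_then > cost_else ? cost_then : cost_else;
    int worst_iter   = cost_test + worst_branch + cost_loop;
    return bound * worst_iter;  /* linear in the loop bound          */
}
```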
Another approach which bears discussion is Proof Carrying Code [5]. In this approach, the developer generates a proof of the correctness of the block of code which can be validated automatically at load time. This approach could be very promising for our problem context. However, it places the burden of generating this proof squarely on the shoulders of the developer. We prefer to allow the developer as much freedom as possible, and generate our own proofs of correctness.
VII. FUTURE WORK
Our current implementation of the algorithm does not yet perform emission, nor does it incorporate a parser to accept real-world code. This is our current developmental priority, and requires addressing a number of “real-world” issues we neglect in our theoretical version.
A. Real World Details
First, we have neglected the problem of control flow fallthrough. When a branch is reached, we can re-write the target address. The other side of the branch will simply fall straight through. In consequence, only one block can fall through to another block. To have multiple blocks fall through to the same target requires additional changes.
For cases where paths are not close to budget values, we can simply insert a jump instruction. When this is not practical, we can continue to duplicate code until we reach a point where we can merge the paths. For the IXP architecture, this is likely to be soon—a vacant pipeline deferral slot provides the single slack cycle we need.
Next, the IXP architecture supports asynchronous memory access to allow developers to hide memory latencies. In practical development, both a compute budget and a memory latency budget must be maintained and respected. Adding this functionality to the algorithm appears to be straight-forward, but the impact on code duplication must be examined.
The IXP is a heavily multithreaded environment. In our studies, we have only considered applications with no inter-thread dependencies in packet processing. Higher, trusted layers ensure in-order packet forwarding, but processing code has never required inter-thread dependencies. This assumption is naive and needs to be examined. It may be possible to construct a multi-threaded model of our CFG, analogous to the work with WCET analysis in [6].
Finally, this algorithm only applies to CFGs. A function call has no place in a CFG. Most heavily optimized, high-speed networking code inlines all functions for speed. In these cases, the code represents a CFG. However, for code that does not inline, we have control flow that cannot adequately be represented in a CFG. One approach would be to implicitly inline the function calls and analyze normally; then use a new merge rule to combine inlined function code when possible.
B. Improvements
We have also identified additional ways to reduce duplication. One immediate gain can be made by noting duplicated paths that contain no safe paths “close” to the budget. We can merge these paths by adding runtime checks that lengthen safe paths but do not actually push them over the budget. One possible way to reduce the expense of the runtime check is inspired by Ball and Larus [7], who developed single-counter methods for tracking execution paths through a CFG and applied those to optimize the “hot” paths. In our work, we are interested in using the same techniques to differentiate safe vs. unsafe paths.
Much greater gains can be made by extracting semantic information from the code itself. If we have complete semantic information, we can avoid path enumeration for impossible paths in the CFG. The problem becomes a limited, finite form of the Halting Problem: does this code, when started with any of the possible inputs, halt within $B$ cycles? Any finite form of the Halting Problem is decidable.
We believe that a data flow framework solution is appropriate. With explicit path enumeration, we can solve the constant propagation problem to completion over branch conditions. This would allow us to deduce loop iteration bounds, mutually exclusive paths, and even unreachable code.
We consider this the most important area for additional study. The current state of the algorithm allows duplication to stand in lieu of semantic knowledge. Code that is semantically safe but unsafe in the CFG can be admitted by rewriting the code to guarantee that the unsafe but semantically impossible paths are never taken. With a complete semantic analysis, we would never need to strip those paths, and our code duplication would be reserved for those cases where a genuinely unsafe path is included.
In our application of event-driven, tight budget real-time guarantees, this line of research is very promising. The number of input values to examine is limited by the paucity of available cycles for reading data from memory. We know that our constant propagation will never need to deal with more than a few dozen values, because any code that examines more than this will be over budget due to memory latencies.
VIII. CONCLUSION
In this paper, we have introduced a new technique for partial program admission. We have demonstrated that dynamic programming can be used to render explicit path enumeration eminently feasible. The same construction can be used to emit a modified CFG that meets event-driven real-time guarantees.
This method shows great promise in the realm of network virtualization. Other applications in similar fields may be equally promising.
REFERENCES
Chhabra, Tamanna; Faro, Simone; Külekci, M. Oğuzhan; Tarhio, Jorma
Engineering order-preserving pattern matching with SIMD parallelism
Published in:
SOFTWARE-PRACTICE AND EXPERIENCE
DOI:
10.1002/spe.2433
Published: 01/05/2017
Please cite the original version:
This material is protected by copyright and other intellectual property rights, and duplication or sale of all or part of any of the repository collections is not permitted, except that material may be duplicated by you for your research use or educational purposes in electronic or print form. You must obtain permission for any other use. Electronic or print copies may not be offered, whether for sale or otherwise to anyone who is not an authorised user.
Engineering order-preserving pattern matching with SIMD parallelism
Tamanna Chhabra¹,*,†, Simone Faro², M. Oğuzhan Külekci³ and Jorma Tarhio¹
¹Department of Computer Science, Aalto University, Espoo, Finland
²Department of Mathematics and Computer Science, Università di Catania, Catania, Italy
³Informatics Institute, Istanbul Technical University, Istanbul, Turkey
SUMMARY
The order-preserving pattern matching problem has gained attention in recent years. It consists in finding all substrings in the text, which have the same length and relative order as the input pattern. Typically, the text and the pattern consist of numbers. Since recent times, there has been a tendency to utilize the ability of the word RAM model to increase the efficiency of string matching algorithms. This model works on computer words, reading and processing blocks of characters at once, so that usual arithmetic and logic operations on words can be performed in one unit of time. In this paper, we present a fast order-preserving pattern matching algorithm, which uses specialized word-size packed string matching instructions, grounded on the single instruction multiple data instruction set architecture. We show with experimental results that the new proposed algorithm is more efficient than the previous solutions. © 2016 The Authors. Software: Practice and Experience Published by John Wiley & Sons Ltd.
Received 12 December 2015; Revised 5 July 2016; Accepted 9 July 2016
KEY WORDS: SIMD; SSE; AVX/AVX2; order-preserving pattern matching
1. INTRODUCTION
Let x be a pattern of length m and y be a text of length n, over the alphabet Σ of size σ, and then, the exact pattern matching problem consists of finding all substrings in y, of length m, which are same as x. Such a problem is one of the most important subjects in the domain of text processing. There are many variations of the exact pattern matching problem. One of them is the order-preserving pattern matching (OPPM) problem [1–6]. Some solutions have been devoted to such a problem in recent years. Specifically, given a pattern x and a text y, whose characters are drawn from an alphabet Σ with a total order relation defined on it, OPPM consists in finding all the substrings of y with the same length and the same relative order as the pattern x. Typically, the text and the pattern consist of numbers.
For instance, given the pattern \(x = (34, 45, 30, 26, 33, 40)\), value 26 is the smallest number of the string, while the value 33 is the second smallest, and so on. Therefore, the relative order of the pattern is given by \((3, 5, 1, 0, 2, 4)\). Thus, \(x\) occurs in the text \(y = (12, 08, 14, 30, 40, 16, 13, 21, 33, 26, 23)\) at position 3, because \(x\) and the substring \(u = (30, 40, 16, 13, 21, 33)\) share the same relative order (Figure 1).
The OPPM problem finds applications in all those fields where someone is interested in finding patterns with the same relative order and not in finding patterns with identical values. For example, it can be applied to musical information retrieval and, in particular, matching melodies of two different musical scores [2]. It also finds applications in matching time series, like prices in stock markets [2] and weather data.
The first solution for solving the OPPM problem was presented in 2013 by Kubica et al. [1]. They presented a solution based on the Knuth–Morris–Pratt algorithm [7] and working in $O(n + m \log m)$ time. In the same year, Kim et al. [2] announced another solution to the problem also based on the Knuth–Morris–Pratt approach running in $O(n + m \log m)$ time. In 2013, Cho et al. [3] proposed a solution to the OPPM problem based on the Boyer–Moore approach [8] showing a sublinear behavior on average. In the same year, Belazzougui et al. [6] proposed an optimal sublinear algorithm with $O\left(\frac{n \log m}{m \log \log m}\right)$ average-case time complexity.
Another sublinear solution to the OPPM problem was presented in 2014 by Chhabra and Tarhio [4], based on a filtering approach. Specifically, their algorithm performs a conversion of the input strings to binary strings; later, the converted pattern is searched in the converted version of the text by using any standard algorithm for exact string matching. A verification procedure is then applied when a candidate occurrence of the pattern is found.
More recently, some solutions to the OPPM problem, based on the word RAM model of computation (see for instance [9, 10]), were presented. Such a model has been used in the last two decades to speed-up string matching algorithms. It consists in operating on computer words, reading and processing blocks of characters at once, so that usual arithmetic and logic operations on words can be performed in one unit of time.
Specifically, two online solutions to the OPPM problem were proposed by Chhabra et al. [5]. The online solutions use the single instruction multiple data (SIMD) architecture [11]. Two different SIMD instruction sets streaming SIMD extensions (SSE) and advanced vector extensions (AVX) are used to implement the solutions. Other solutions were also proposed in [12] and [13]. Faro and Külekci [13] presented two filtering approaches in which the original string is translated into a new string over large alphabets. This in turn increases the performance of the solutions as the number of match candidates decrease significantly. Later, Cantone et al. [12] proposed another efficient solution based on the Skip Search algorithm [14]. It computes the fingerprint of all substrings of a pattern of a given length. Thereafter, the fingerprints are indexed to obtain the match candidates, which are then located in the text. Cantone et al. used the SSE instruction set architecture for the computation of the fingerprint. The solutions were faster than the previous algorithms in many cases.
In this paper, we introduce an efficient and practical algorithm for the OPPM utilizing the SIMD extensions (SSE) technology [11, 15], and the algorithm is shown to be faster than the best algorithms known in the literature. The algorithm, named SIMD-OPPM, uses specialized packed instructions with a low latency.
The paper is organized as follows. In Section 2, we give preliminary notions and definitions in relation to the order-preserving matching problem. Then, we present our new solution in Section 3 and evaluate its performance against the previous algorithms in Section 4. Conclusions are drawn in Section 5.
2. NOTIONS AND BASIC DEFINITIONS
Following notations and terminology are used throughout the paper. A string $x$, of length $m > 0$, is represented as a finite array $x[0..m-1]$ of characters from a finite alphabet $\Sigma$ of size $\sigma$, and $x[i..j]$ will denote a factor (or substring) of $x$, for $0 \leq i \leq j < m$. We suppose that a total order relation ‘$\leq$’ is defined on the alphabet, so that we could establish if $a \leq b$ for each $a, b \in \Sigma$ and we denote by $|x|$ the length of $x$. We indicate with the symbol $w$ the length of the SIMD registers (= 128).
We say that two strings $x, y \in \Sigma^*$ are order isomorphic if the relative order of their elements is the same. In formal words, we give the following definition.
**Definition 1 (Order isomorphism.)**
Let $\Sigma$ be the alphabet, and let $x, y$ be two strings of the same length over the alphabet, and then, we say that $x$ and $y$ are order isomorphic and write $x \approx y$, if the following conditions hold
1. $|x| = |y|$
2. $x[i] \leq x[j]$ if and only if $y[i] \leq y[j]$, for $0 \leq i, j < |x|$
Similarly relative order can be more formally defined as follows:
**Definition 2 (Rank function.)**
Let $x$ be a string of length $m$ over an alphabet $\Sigma$. Then, the rank function of $x$ is a mapping $r : \{0, 1, \ldots, m-1\} \rightarrow \{0, 1, \ldots, m-1\}$ such that $x[r(i)] \leq x[r(j)]$ holds for each pair $0 \leq i < j < m$. If $x[r(i)] = x[r(i+1)]$ holds, then $r(i) < r(i+1)$.
In addition, we define the equality function of $x$, which indicates which elements of the string are equal (if any). More formally, we have the following definition.
**Definition 3 (Equality function.)**
Consider $x$ to be a string of length $m$ over an alphabet $\Sigma$, and let $r$ be the rank function of $x$. Then, the equality function of $x$ is a mapping $eq : \{0, 1, \ldots, m-2\} \rightarrow \{0, 1\}$ such that for each $0 \leq i < m - 1$
$$
eq(i) = \begin{cases} 1 & \text{if } x[r(i)] = x[r(i + 1)] \\ 0 & \text{otherwise} \end{cases}$$
Let $r$ be the rank function of a string $x$, such that $m = |x|$, and let $eq$ be its equality function. It is easy to prove that $x$ and $y$ are order isomorphic if and only if they share the same rank and equality function, that is, if and only if the following two conditions hold
1. $y[r(i)] \leq y[r(i + 1)]$, for $0 \leq i < m - 1$
2. $y[r(i)] = y[r(i + 1)]$ if and only if $eq(i) = 1$, for $0 \leq i < m - 1$
**Example 1**
Let $x = (4, 6, 5, 1, 3, 6)$ and $y = (3, 7, 5, 1, 2, 7)$ be two strings of length six. The rank $r$ of $x$ is $(3, 4, 0, 2, 1, 5)$ while its equality function is $eq = (0, 0, 0, 0, 1)$. The two strings are order isomorphic according to the conditions given earlier, that is, $x \approx y$.
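For illustration, the rank and equality functions of a pattern can be computed with any stable sort of the pattern positions; the C sketch below (our own, not taken from the paper) uses a stable insertion sort, which suffices for the short patterns considered here.

```c
/* Compute the rank function r (Definition 2) and the equality function eq
 * (Definition 3) of a pattern x[0..m-1].  The sort is stable, so equal
 * values keep their left-to-right order, as the definition requires. */
static void rank_eq(const int *x, int m, int *r, int *eq)
{
    for (int i = 0; i < m; i++) {          /* stable insertion sort of indices */
        int j = i;
        while (j > 0 && x[r[j - 1]] > x[i]) {
            r[j] = r[j - 1];
            j--;
        }
        r[j] = i;
    }
    for (int i = 0; i + 1 < m; i++)        /* eq[i] = 1 iff x[r[i]] == x[r[i+1]] */
        eq[i] = (x[r[i]] == x[r[i + 1]]) ? 1 : 0;
}
```

For the pattern of Example 1 this yields $r = (3, 4, 0, 2, 1, 5)$ and $eq = (0, 0, 0, 0, 1)$.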
The problem of OPPM is to find all substrings in the text, which have the same length and relative order as the pattern. Specifically, we have the following formal definition.
**Definition 4 (Order-preserving matching function.)**
Let $x$ and $y$ be two strings of length $m$ and $n$, respectively (and $n > m$), both over an alphabet $\Sigma$. The OPPM consists in finding all indexes $i$, with $0 \leq i \leq n - m$, such that $y[i..i+m-1] \approx x$.
We also make use of bitwise infix operators, like the bitwise and ‘$\&$’ and the left shift ‘$\ll$’ operator.
3. NEW METHOD FOR ORDER-PRESERVING MATCHING
This section presents a new algorithm for the OPPM. The algorithm utilizes the Intel SSE instruction set [11, 15], hence the name SIMD-OPPM.
In packed string matching [9, 10], sets of adjacent characters are packed into one single word, according to the size of the word in the target machine. Input is standard text, and packing is carried out on line with SIMD instructions. This allows us to compare set of characters in a bulk rather than individually, by comparing the corresponding words. Therefore, when the characters are taken from an alphabet of size \( \sigma \), \( \gamma = \lceil \log \sigma \rceil \) bits are used to encode a single character and \( \lceil w/\gamma \rceil \) characters fit in a register. In this case, we will use the symbol \( \alpha = \lceil w/\gamma \rceil \) to indicate the packing factor. In the following section, we will discuss the details of our model.
Several values of \( \alpha \) and \( \gamma \) are possible, but we assume that \( \alpha = 16 \) and \( \gamma = 8 \), which is the most common case when we deal with a word RAM model with 128-bit registers. In our experimental evaluation (Section 4), we have \( \sigma = 256 \).
3.1. The model
In the design and implementation of our solution, we make use of specialized word-size packed string matching instructions, based on the SSE instruction set architecture [11, 15]. SIMD instructions allow the processor to execute multiple data on a single instruction using a set of special instructions working on special registers. SSE [11, 15] is a family of SIMD instruction sets supported by Pentium III processors since 1999. It makes use of sixteen 128-bit registers known as XMM0 through XMM15. Because the registers are 128 bits long, 16 integer numbers could be handled at the same time (an integer is considered 8 bits long), thereby providing important speedups in algorithms. In SSE4.2, we have the following data types:
- \_\_m128: four 32-bit floating point values
- \_\_m128d: two 64-bit floating point values
- \_\_m128i: 16/8/4/2 integer values, depending on the size of the integers
The SIMD-OPPM algorithm makes use of the word-size parallel comparison (wspc) and word-size equality checker (wsec) specialized word-size packed instructions. These two instructions are described below.
The instruction \texttt{wspc}(A, B) handles two $w$-bit registers $A$ and $B$ as blocks of $\alpha$ small integer values and computes an $\alpha$-bit fingerprint from them. It compares in parallel all the $\alpha$ values contained in $A$ against the $\alpha$ values in $B$. More formally, assuming $A[0..\alpha - 1]$ and $B[0..\alpha - 1]$ are $w$-bit integer parameters, \texttt{wspc}(A, B) returns an $\alpha$-bit value $r[0..\alpha - 1]$, where $r[j] = 1$ if and only if $A[j] < B[j]$, and $r[j] = 0$ otherwise.
The \texttt{wspc}(A, B) instruction uses the following sequence of specialized SIMD instructions and can be completed in constant time:
\[
\begin{align*}
\texttt{wspc}(A, B) & \\
B & \leftarrow \texttt{\_mm\_cmpgt\_epi8}(B, A) \\
r & \leftarrow \texttt{\_mm\_movemask\_epi8}(B) \\
& \text{return } r
\end{align*}
\]
The instruction \_mm\_cmpgt\_epi8(B, A) computes a 128-bit vector by comparing the 16 signed 8-bit integers in \( B \) with the 16 signed 8-bit integers in \( A \) for greater than. If a data element of \( B \) is greater than the corresponding data element of \( A \) (that is, \( A[j] < B[j] \)), the corresponding element of the result (stored back into \( B \)) is set to all ones; otherwise, it is set to all zeros. The 128-bit vector \( B \) is then handled by the \_mm\_movemask\_epi8(B) instruction as sixteen 8-bit integers, and as a result, a 16-bit mask is formed from the most significant bits of the 16 elements of \( B \).
The instruction \texttt{wsec}(A, B) handles two \( w \)-bit registers \( A \) and \( B \) as a block of \( \alpha \) small integers values and computes an \( \alpha \)-bit fingerprint from it. Assuming \( A[0..\alpha - 1] \) and \( B[0..\alpha - 1] \) are the \( w \)-bit integer parameters, \texttt{wsec}(A, B) returns an \( \alpha \)-bit value \( r[0..\alpha - 1] \), where \( r[j] = 1 \) if and only if \( A[j] = B[j] \), and \( r[j] = 0 \) otherwise.
The wsec\((A, B)\) instruction uses the following sequence of specialized SIMD instructions and can also be completed in constant time:
\[
\begin{align*}
\texttt{wsec}(A, B) & \\
B & \leftarrow \texttt{\_mm\_cmpeq\_epi8}(A, B) \\
r & \leftarrow \texttt{\_mm\_movemask\_epi8}(B) \\
& \text{return } r
\end{align*}
\]
The \_mm\_cmpeq\_epi8(A, B) instruction compares the 16 signed 8-bit integers in \(A\) and the 16 signed 8-bit integers in \(B\) for equality. If a pair of corresponding data elements in \(A\) and \(B\) is equal, the corresponding element of the result (stored back into \(B\)) is set to all ones; otherwise, it is set to all zeros. The \_mm\_movemask\_epi8 instruction works as described earlier.
We will also make use of the popcount\((C)\) instruction when we are interested in counting the number of bits set in an \(\alpha\)-bit register \(C\). This can be carried out in \(\log(\alpha)\) operations by using a population count function. In our implementation, we make use of a constant time ad hoc procedure \[16\] designed to work with 16-bit registers.
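For reference, a possible C rendering of wspc, wsec and the 16-bit popcount using SSE intrinsics is sketched below; `__builtin_popcount` merely stands in for the constant-time routine of [16], and the function names are ours rather than the paper's.

```c
#include <emmintrin.h>   /* SSE2: _mm_cmpgt_epi8, _mm_cmpeq_epi8, _mm_movemask_epi8 */

/* wspc(A, B): bit j of the result is 1 iff A[j] < B[j] (signed 8-bit lanes). */
static inline int wspc(__m128i A, __m128i B)
{
    return _mm_movemask_epi8(_mm_cmpgt_epi8(B, A));   /* B[j] > A[j] <=> A[j] < B[j] */
}

/* wsec(A, B): bit j of the result is 1 iff A[j] == B[j]. */
static inline int wsec(__m128i A, __m128i B)
{
    return _mm_movemask_epi8(_mm_cmpeq_epi8(A, B));
}

/* Popcount of a 16-bit mask; the compiler builtin stands in for the
 * ad hoc constant-time procedure of [16]. */
static inline int popcount16(int mask)
{
    return __builtin_popcount(mask & 0xFFFF);
}
```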
The performance of SIMD instructions depends on the architecture of the processor. The performance of a single instruction is measured by latency and throughput. Latency is the number of cycles taken by the processor to give the desired outcome from the given input. Throughput \[17\] refers to the number of cycles between subsequent calls of the same instruction. The processor used in our experiments is i7-3820 QM, and its micro-architecture is Sandy Bridge. The latency and throughput of the SIMD instructions used in our algorithms for this processor are given in Table I.
### 3.2. The algorithm
The SIMD-OPPM algorithm is designed to search order-preserving occurrences of sequences. Before execution, the arrays corresponding to functions \(r\) and eq are computed based on the pattern.
Let \(x\) be the pattern of length \(m\) over the alphabet \(\Sigma\). If \(Y\) is a block of \(w\) bits (\(\alpha\) elements) of the text \(y\), we can find all the occurrences of \(x\) having their leftmost position in \(Y\). Let \(y = Y_0 Y_1 \ldots Y_{k-1}\), where \(k = \lfloor n/\alpha \rfloor + 1\). The idea behind the algorithm is to check in parallel for groups of occurrences of \(x\) in \(y\) while scanning each block \(Y_i\) of the text. In particular, during each iteration of the algorithm, we check a group of \(\alpha\) candidate occurrences of \(x\).
Formally, let \(Y_i = y[i\alpha \ldots i\alpha + \alpha - 1]\) be the current block of the text. The substring \(y[j \ldots j + m - 1]\) is an order preserving occurrence of \(x\) if and only if
1. \(y[j + r(h)] \leq y[j + r(h + 1)]\), for \(0 \leq h < m - 1\)
2. \(y[j + r(h)] = y[j + r(h + 1)]\) if and only if \(eq(h) = 1\), for \(0 \leq h < m - 1\)
The pseudocode of the SIMD-OPPM algorithm is shown in Figure 2. During each iteration, the algorithm checks the match candidates whose first position is in the block \(Y = y[i \ldots i + \alpha - 1]\). At the end of the iteration, the value of \(i\) is advanced \(\alpha\) positions to the right. Thus, the total number of iterations of the algorithm is \(n/\alpha\).
During each iteration, the algorithm maintains a bit mask \(C\) of \(\alpha\) bits, which records the occurrences of the pattern starting in the current block \(Y\). Specifically, at the end of the iteration, the bit \(C[j]\) is set if and only if \(x \approx y[i + j \ldots i + j + m - 1]\), for \(j = 0 \ldots \alpha - 1\), while \(C[j] = 0\) otherwise. At the beginning of each iteration, \(C\) is initialized as \(1^\alpha\) (line 3).
In order to understand how such a value is computed, let \(A_j = B_{j-1} = y[i + r(j) \ldots i + r(j) + \alpha - 1]\) (line 6) and \(B_j = y[i + r(j + 1) \ldots i + r(j + 1) + \alpha - 1]\) (line 7).
Table I. Latency and throughput of the SIMD instructions used in our algorithms (Sandy Bridge).
<table>
<thead>
<tr>
<th>Architecture</th>
<th>SIMD instruction</th>
<th>Latency</th>
<th>Throughput</th>
</tr>
</thead>
<tbody>
<tr>
<td>Sandy Bridge</td>
<td>_mm_cmpgt_epi8</td>
<td>1</td>
<td>0.5</td>
</tr>
<tr>
<td></td>
<td>_mm_cmpeq_epi8</td>
<td>1</td>
<td>0.5</td>
</tr>
<tr>
<td></td>
<td>_mm_movemask_epi8</td>
<td>2</td>
<td>1</td>
</tr>
</tbody>
</table>
Figure 2. Order-preserving pattern matching algorithm.
SIMD-OPPM$(x, m, y, n, r, eq)$
1. $k \leftarrow 0$
2. for $i \leftarrow 0$ to $n - m$ step $\alpha$ do
3.     $C \leftarrow 1^\alpha$
4.     $B \leftarrow y[i + r(0) \ldots i + r(0) + \alpha - 1]$
5.     for $j \leftarrow 0$ to $m - 2$ do
6.         $A \leftarrow B$
7.         $B \leftarrow y[i + r(j + 1) \ldots i + r(j + 1) + \alpha - 1]$
8.         if $eq(j)$ then
9.             $C \leftarrow C \ \& \ \text{wsec}(A, B)$
10.        else $C \leftarrow C \ \& \ \text{wspc}(A, B)$
11.        if $C = 0$ then goto out
12.    $k \leftarrow k + \text{popcount}(C)$
13.    out:
14. return $k$
For simplicity, let us assume that all values of the pattern are distinct. Let $C_j = \text{wspc}(A_j, B_j)$ (line 10). According to the definition of the wspc instruction, we have $C_j[h] = 1$ if and only if $A_j[h] < B_j[h]$ (i.e. $y[i + h + r(j)] < y[i + h + r(j + 1)]$) and $C_j[h] = 0$ otherwise, for $h = 0 \ldots \alpha - 1$. The value of the bit mask $C$ is computed as $C = C_0 \& C_1 \& \cdots \& C_{m-2}$. It is easy to prove that $C[h]$ is set if and only if $y[i + h + r(j)] < y[i + h + r(j + 1)]$ for $j = 0 \ldots m - 2$, which implies that $x \approx y[i + h \ldots i + h + m - 1]$.
At the end of each iteration, we count the number of bits set in the bit mask $C$. This is the number of occurrences the algorithm found in the current block. Such a value is accumulated in a counter $k$ (line 12), which will contain the total number of occurrences of $x$ in $y$.
If $(n - m + 1) \bmod \alpha$ is not zero, the popcount of the last block may contain extra matches. So the popcount of the $\alpha - [(n - m + 1) \bmod \alpha]$ extra bits obtained from the last block must be subtracted from $k$ after the outer loop.
In our practical experiments, we used a slightly modified version of the SIMD-OPPM algorithm. Because $\alpha$ match candidates are checked at the same time, the variable $C$ is not zero on average during the first iterations of the innermost loop. Therefore, testing the variable $C$ is rather useless during the first iterations, and peeling this loop is beneficial. In the beginning of the innermost loop, $C$ holds $\alpha$ set bits. Each iteration roughly halves the number of set bits in $C$ in the case of random data. For $\alpha = 16$, we moved four iterations in front of the loop, and the value of $C$ was tested for the first time after these iterations. In the best cases, this optimization doubles the speed of the algorithm on modern processors.
The total time complexity of the algorithm is $O(nm/\alpha)$. In practice, the algorithm shows a linear behavior on average, as we can observe in the subsequent section. If one is interested in retrieving the position of each occurrence, additional $O(s)$ work is needed in order to locate $s$ occurrences. More specifically, if the $h$-th bit of $C$ is set, then an occurrence at position $i + h$ must be reported.
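Putting the pieces together, the following simplified C sketch mirrors the pseudocode of Figure 2 (no loop peeling, unaligned loads, occurrences counted rather than reported). It is illustrative rather than the authors' implementation, and it assumes the text buffer is padded with at least $\alpha + m$ extra bytes so that the last unaligned loads stay within allocated memory.

```c
#include <emmintrin.h>   /* SSE2 intrinsics */

/* Count the order-preserving occurrences of a pattern with rank function
 * r[0..m-1] and equality function eq[0..m-2] in a text y[0..n-1] of signed
 * 8-bit values.  alpha = 16 candidate positions are checked per iteration.
 * Assumption (for the sketch): y has at least 16 + m padding bytes. */
static int simd_oppm(const signed char *y, int n, int m,
                     const int *r, const int *eq)
{
    int k = 0;
    for (int i = 0; i <= n - m; i += 16) {
        int C = 0xFFFF;                                    /* line 3: C <- 1^alpha */
        __m128i B = _mm_loadu_si128((const __m128i *)(y + i + r[0]));
        for (int j = 0; j <= m - 2 && C != 0; j++) {       /* lines 5-11 */
            __m128i A = B;
            B = _mm_loadu_si128((const __m128i *)(y + i + r[j + 1]));
            __m128i cmp = eq[j] ? _mm_cmpeq_epi8(A, B)     /* wsec: equal lanes   */
                                : _mm_cmpgt_epi8(B, A);    /* wspc: A[h] < B[h]   */
            C &= _mm_movemask_epi8(cmp);
        }
        /* line 12: count occurrences found in this block, discarding
         * candidate positions that start beyond n - m. */
        for (int h = 0; h < 16; h++)
            if (((C >> h) & 1) && i + h <= n - m)
                k++;                                       /* occurrence at i + h */
    }
    return k;
}
```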
Example 2
We illustrate the algorithm using an example. Let $x = (8, 5, 13, 10)$ be a pattern of length 4 and $y = (7, 9, 5, 14, 13, 22, 16, 10, 3, 13, 11, 10, 11, 8, 9, 2)$ be a text of length 16. The rank values and equality values for the pattern $x$ are $(1, 0, 3, 2)$ and $(0, 0, 0)$. The result is computed in $m - 1 = 3$ steps. We know that $x[r(i)] \leq x[r(i + 1)]$ holds for $0 \leq i < m - 1$. In order to have an occurrence beginning at position $j$ of $Y = y$, we must have $Y[j + r(i)] \leq Y[j + r(i + 1)]$, for $0 \leq i < m - 1$. Then, $C_i$ is a 16-bit register where $C_i[j]$ is set to 1 if $Y[j + r(i)] \leq Y[j + r(i + 1)]$ and $C_i[j]$ is set to 0 otherwise.
Now, $C$ is a 16-bit register where $C = C_0 \& C_1 \& \cdots \& C_{m-2}$ and $C[j]$ is set if we have an occurrence of $x$ at position $j$ of $Y$ and $C[j] = 0$ otherwise.
Step 1 is as follows:
\[
\begin{array}{l*{16}{r}}
y[j + r(0)] & 9 & 5 & 14 & 13 & 22 & 16 & 10 & 3 & 13 & 11 & 10 & 11 & 8 & 9 & 2 & 0 \\
y[j + r(1)] & 7 & 9 & 5 & 14 & 13 & 22 & 16 & 10 & 3 & 13 & 11 & 10 & 11 & 8 & 9 & 2 \\
C_0 & 0 & 1 & 0 & 1 & 0 & 1 & 1 & 1 & 0 & 1 & 1 & 0 & 1 & 0 & 1 & 1 \\
\end{array}
\]
Step 2:
\[
\begin{array}{l*{16}{r}}
y[j + r(1)] & 7 & 9 & 5 & 14 & 13 & 22 & 16 & 10 & 3 & 13 & 11 & 10 & 11 & 8 & 9 & 2 \\
y[j + r(2)] & 14 & 13 & 22 & 16 & 10 & 3 & 13 & 11 & 10 & 11 & 8 & 9 & 2 & 0 & 0 & 0 \\
C_1 & 1 & 1 & 1 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
\end{array}
\]
Step 3:
\[
\begin{array}{l*{16}{r}}
y[j + r(2)] & 14 & 13 & 22 & 16 & 10 & 3 & 13 & 11 & 10 & 11 & 8 & 9 & 2 & 0 & 0 & 0 \\
y[j + r(3)] & 5 & 14 & 13 & 22 & 16 & 10 & 3 & 13 & 11 & 10 & 11 & 8 & 9 & 2 & 0 & 0 \\
C_2 & 0 & 1 & 0 & 1 & 1 & 1 & 0 & 1 & 1 & 0 & 1 & 0 & 1 & 1 & 0 & 0 \\
\end{array}
\]
(Positions past the end of $y$ are shown as 0; they lie beyond the valid candidate positions and do not affect the result.)
Then we can compute the value of $C$ as follows:
\[
\begin{array}{l*{16}{r}}
C_0 & 0 & 1 & 0 & 1 & 0 & 1 & 1 & 1 & 0 & 1 & 1 & 0 & 1 & 0 & 1 & 1 \\
C_1 & 1 & 1 & 1 & 1 & 0 & 0 & 0 & 1 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
C_2 & 0 & 1 & 0 & 1 & 1 & 1 & 0 & 1 & 1 & 0 & 1 & 0 & 1 & 1 & 0 & 0 \\
C & 0 & 1 & 0 & 1 & 0 & 0 & 0 & 1 & 0 & 0 & 0 & 0 & 0 & 0 & 0 & 0 \\
\end{array}
\]
Then we found three occurrences of \( x \) in \( y \). The first is at position 1, the second at position 3, and the last at position 7.
4. EXPERIMENTAL RESULTS
This section presents experimental results in order to compare the behavior of the SIMD-OPPM algorithm against the best known solutions in the literature for the OPPM problem.
The tests were run on an Intel 2.70 GHz i7 processor running Ubuntu 12.10 with 16 GB of memory. All the algorithms were implemented using the C programming language and run in the modified testing framework of Hume and Sunday [18].
We tested our algorithm SIMD-OPPM against the most effective previous solutions, which include S2OPPM and S4OPPM [4], SSEOPPM and AVXOPPM [5], FFK-OPPM [13], and SKIP-OPPM [12]. S2OPPM and S4OPPM [4] solutions are based on SBNDM2 and SBNDM4 [19]. SSEOPPM and AVXOPPM [5] represent the online solution grounded on SSE4.2 and AVX instruction set, respectively. FFK-OPPM [13] presents the filtration approach by Faro and Külekci. SKIP-OPPM [12] represents the solution based on Skip Search algorithm.
We tested our algorithms against a random and a real data set. The real data comprises a time series of the relative humidity of the UK. The data contains 33,510 integers representing the relative humidity of the UK in percentage in the years 1961–1990. The random text of 4 MB contains integers between −128 and 127. We randomly selected 300 and 200 patterns of length 5, 10, 15, 20, 25, 30, and 50 from the random and real data, respectively. Tables II and III show the average execution times
Table II. Execution times of algorithms in seconds for random data.
<table>
<thead>
<tr>
<th>m</th>
<th>S2OPPM</th>
<th>S4OPPM</th>
<th>SSEOPPM</th>
<th>AVXOPPM</th>
<th>FFK-OPPM</th>
<th>SKIP-OPPM</th>
<th>SIMD-OPPM</th>
</tr>
</thead>
<tbody>
<tr>
<td>5</td>
<td>1.186</td>
<td>1.549</td>
<td>1.587</td>
<td>—</td>
<td>0.922</td>
<td>0.836</td>
<td>0.125</td>
</tr>
<tr>
<td>10</td>
<td>0.544</td>
<td>0.587</td>
<td>0.483</td>
<td>0.412</td>
<td>0.691</td>
<td>0.601</td>
<td>0.123</td>
</tr>
<tr>
<td>15</td>
<td>0.354</td>
<td>0.389</td>
<td>0.312</td>
<td>0.211</td>
<td>0.413</td>
<td>0.357</td>
<td>0.116</td>
</tr>
<tr>
<td>20</td>
<td>0.257</td>
<td>0.291</td>
<td>0.292</td>
<td>0.188</td>
<td>0.309</td>
<td>0.269</td>
<td>0.111</td>
</tr>
<tr>
<td>25</td>
<td>0.205</td>
<td>0.242</td>
<td>0.281</td>
<td>0.151</td>
<td>0.254</td>
<td>0.229</td>
<td>0.106</td>
</tr>
<tr>
<td>30</td>
<td>0.176</td>
<td>0.186</td>
<td>0.256</td>
<td>0.144</td>
<td>0.234</td>
<td>0.204</td>
<td>0.102</td>
</tr>
<tr>
<td>50</td>
<td>0.172</td>
<td>0.179</td>
<td>0.246</td>
<td>0.131</td>
<td>0.206</td>
<td>0.199</td>
<td>0.089</td>
</tr>
</tbody>
</table>
Figure 3. Execution times of algorithms for random data (left) in seconds and for relative humidity data (right) in milliseconds.
Table III. Execution times of algorithms in milliseconds for relative humidity data.
<table>
<thead>
<tr>
<th>m</th>
<th>S2OPPM</th>
<th>S4OPPM</th>
<th>SSEOPPM</th>
<th>AVXOPPM</th>
<th>FFK-OPPM</th>
<th>SKIP-OPPM</th>
<th>SIMD-OPPM</th>
</tr>
</thead>
<tbody>
<tr>
<td>5</td>
<td>19.24</td>
<td>31.84</td>
<td>28.55</td>
<td>—</td>
<td>21.36</td>
<td>20.72</td>
<td>2.43</td>
</tr>
<tr>
<td>10</td>
<td>10.35</td>
<td>10.95</td>
<td>11.26</td>
<td>10.68</td>
<td>18.88</td>
<td>18.91</td>
<td>2.15</td>
</tr>
<tr>
<td>15</td>
<td>6.72</td>
<td>6.86</td>
<td>6.94</td>
<td>5.77</td>
<td>13.54</td>
<td>14.23</td>
<td>1.99</td>
</tr>
<tr>
<td>20</td>
<td>4.91</td>
<td>4.81</td>
<td>6.38</td>
<td>4.56</td>
<td>9.92</td>
<td>12.76</td>
<td>1.87</td>
</tr>
<tr>
<td>25</td>
<td>3.83</td>
<td>3.73</td>
<td>5.39</td>
<td>3.85</td>
<td>6.93</td>
<td>11.59</td>
<td>1.63</td>
</tr>
<tr>
<td>30</td>
<td>3.15</td>
<td>2.95</td>
<td>4.96</td>
<td>3.41</td>
<td>5.36</td>
<td>10.17</td>
<td>1.54</td>
</tr>
<tr>
<td>50</td>
<td>2.86</td>
<td>2.67</td>
<td>3.87</td>
<td>1.83</td>
<td>3.88</td>
<td>9.56</td>
<td>1.49</td>
</tr>
</tbody>
</table>
per pattern set of all the algorithms for random data in seconds and humidity data in milliseconds, respectively. A graph on times for both the data sets is also shown in Figure 3.
The tests were made with 180 repeated runs. In Tables II and III, S2OPPM represents the algorithm based on SBNDM2 filtration, S4OPPM represents the algorithm based on SBNDM4 filtration, SSEOPPM represents the algorithm based on SSE4.2 instruction set architecture, AVXOPPM represents the algorithm based on AVX instruction set architecture, and FFK-OPPM represents the best execution times for both the filtration approaches in [13]. SKIP-OPPM represents the solution based on the Skip Search algorithm, and SIMD-OPPM represents our new algorithm.
From Tables II and III, it can be seen that SIMD-OPPM is the fastest for all tested values of m. For both data sets, the difference between the execution times of SIMD-OPPM and the other solutions is largest when \( m = 5 \), and the difference drops thereafter. Moreover, our algorithm is faster on the real data than on the random data. We noticed that our algorithm becomes slightly faster when \( m \) increases, likely because of the reduced number of popcount operations.
5. CONCLUDING REMARKS
We proposed an efficient solution for OPPM. Our solution employs the SSE4.2 instruction set architecture. SIMD instructions were originally developed for multimedia but have recently been employed for pattern matching. Our results show that SIMD instructions can be very efficient in order-preserving matching as well. We place special emphasis on the practical efficiency of the algorithm. Therefore, we show with practical experiments that our solution is faster than the previous solutions.
REFERENCES
Memory Bounds for the Distributed Execution of a Hierarchical Synchronous Data-Flow Graph
Karol Desnos, Maxime Pelcat, Jean François Nezan, Slaheddine Aridhi
To cite this version:
HAL Id: hal-00721335
https://hal.archives-ouvertes.fr/hal-00721335
Submitted on 27 Jul 2012
Karol Desnos, Maxime Pelcat, Jean-Francois Nezan
IETR, INSA Rennes, CNRS UMR 6164, UEB
20, Av. des Buttes de Coésmes, 35708 Rennes
e-mail: kdesnos, mpelcat, jnezan@insa-rennes.fr
Slaheddine Aridhi
Texas Instruments France
06271 Villeneuve Loubet, France
e-mail: saridhi@ti.com
Abstract—This paper presents an application analysis technique to define the boundary of shared memory requirements of Multiprocessor System-on-Chip (MPSoC) in early stages of development. This technique is part of a rapid prototyping process and is based on the analysis of a hierarchical Synchronous Data-Flow (SDF) graph description of the system application. The analysis does not require any knowledge of the system architecture, the mapping or the scheduling of the system application tasks.
The initial step of the method consists of applying a set of transformations to the SDF graph so as to reveal its memory characteristics. These transformations produce a weighted graph that represents the different memory objects of the application as well as the memory allocation constraints due to their relationships. The memory boundaries are then derived from this weighted graph using analogous graph theory problems, in particular the Maximum-Weight Clique (MWC) problem. State-of-the-art algorithms to solve these problems are presented and a heuristic approach is proposed to provide a near-optimal solution of the MWC problem. A performance evaluation of the heuristic approach is presented, and is based on hierarchical SDF graphs of realistic applications. This evaluation shows the efficiency of proposed heuristic approach in finding near optimal solutions.
I. INTRODUCTION
During the design of an embedded system, the treatment of memory issues strongly impacts the final system quality and performance, as the area occupied by the memory can be as large as 80% of the chip and may be responsible for a major part of its power consumption [1]. Prior work on memory issues for Multiprocessor System-on-Chip (MPSoC) mostly consists of optimization techniques that minimize the amount of memory allocated to run an application, thus reducing the capacity and area of memory of the developed system [2], [3], [4]. These techniques rely on a precise knowledge of system behavior, particularly the scheduling and mapping of the application tasks on the system processors, and so may only be applied during late stages of the system design process.
The purpose of the method presented in this paper is to derive the memory bound requirements of a system (Figure 1) in the early stages of its development when there is a complete abstraction of the system architecture. This method is based on an analysis of the system application, and allows the developer of a multi-core shared-memory system to adequately size the chip memory.
This paper focuses on memory characterization of applications described by a Dataflow Process Network (DPN) Model of Computation (MoC) [5]. A MoC defines the semantics of an algorithm model: which components the model can contain, how they are interconnected and how they interact. A DPN MoC divides the application into computational entities named actors that exchange data via First-In First-Out (FIFO) channels. The algorithm model is specified as a directed application graph in which nodes represent actors and edges represent FIFO queues. Each actor is associated to firing rules specifying its behavior in terms of token consumption and production. Tokens are abstract representations of a data quantum, independent of its size. The actors themselves are “black boxes” of the model and may be implemented in any programming language. Firing an actor consists of starting its preemption free execution.
The Synchronous Data-Flow (SDF) MoC is certainly the most widely used DPN model. It consists of a static model in which all production and consumption token rates are fixed and known at compile time. This property makes the model analysis possible at compile time. Interface Based SDF (IBSDF) is a hierarchical DPN MoC based on SDF that can be analyzed hierarchically [6].
The rapid prototyping context and the IBSDF model, which serves as an input to our method, are introduced in section II and the successive transformations applied to the application graph to reveal its memory characteristics are developed in section III. Then, section IV presents existing algorithms and a new heuristic approach to derive the memory bounds of an application. An overview of previous research on memory issues for multi-core systems is given in section V. Finally, after an evaluation of the performance of our method in section VI, we conclude this paper and propose possible directions for future work in section VII.
II. CONTEXT
A. Interface-Based Synchronous Data-Flow Graph
IBSDF [6] is a static hierarchical dataflow MoC defined as an extension of SDF. Figure 2 shows an example of an IBSDF graph, where the top level comprises 3 actors A, B and h that respect the SDF semantics. Edges are labeled with their token production and consumption rates. An edge with a black dot signifies that initial tokens are present in the FIFO queue when the system starts to execute. The number of initial tokens is specified by the \( x/100 \) label. Initial tokens are a semantic element of the SDF MoC that makes communication possible between successive iterations of the graph execution. \( h \) is an IBSDF hierarchical actor. Its behavior is given by a subgraph containing source and sink interfaces. These interfaces insulate the behavior of the top graph and subgraph. This property makes the algorithm description process simpler and less error-prone [6].
Like SDF, IBSDF is an untimed MoC. It specifies actor dependencies but does not take into account the time needed to execute the actors. The memory bounds presented in this paper are thus computed without actor timing considerations and, in that sense, characterize an application IBSDF graph independently of any implementation. This is in contrast with the related work from the literature, which tends to focus on post-scheduling and execution timing analysis (see Section V).
B. Hardware/Software Exploration Workflow
Rapid prototyping consists of extracting information from a system in the early stages of its development. It enables hardware/software co-design and favors early decisions that improve system architecture efficiency. The work presented in this paper aims to extract memory information from an application graph at an early stage of system design and independent of architecture details. It allows the system designer to discard architectures with insufficient memory and to evaluate the degree of memory optimization required to produce the final system with an optimal amount of memory.
Figure 3 illustrates the position of the memory bounds computation in the rapid prototyping process as proposed in [7]. Rapid prototyping inputs consist of an algorithm model respecting the IBSDF MoC, an architecture model respecting the System-Level Architecture Model (S-LAM) semantics [8] and a scenario providing constraints and prototyping parameters. The scenario ensures the complete separation of algorithm and architecture models. Algorithm and architecture models undergo transformations in preparation for the rapid prototyping steps. Static multi-core scheduling is then applied to dispatch the algorithm actors to the architecture processing elements and to schedule their executions [9][10]. Finally, the scheduling information is used to simulate the system behavior and to generate compilable code for the targeted architecture. It can also be exported to an external SystemC based simulator.
In this process, the memory bounds computation is executed on the transformed algorithm graph and has no dependency on the architecture graph. The next section explains the IBSDF graph properties and Section III-A details the algorithm transformations applied to IBSDF prior to the memory bounds computation.
III. PREPROCESSING TOWARD MEMORY ANALYSIS
A. Algorithm Transformations
The first step in characterizing the memory bounds of an application consists of applying a set of transformations to its IBSDF graph. The IBSDF graph of the application is successively flattened into an SDF graph, converted into a single-rate SDF graph, and finally turned into a Directed Acyclic Graph (DAG). As presented in [6], these transformations are used to reveal the parallelism embedded in the IBSDF model, thus enabling a better mapping and scheduling of its actors on a multicore architecture.
To illustrate these transformations, we successively apply them to the IBSDF graph of Figure 2. The first transformation consists of flattening the hierarchy of the graph by replacing all hierarchical nodes with their content. The IBSDF graph is thus transformed into a SDF graph presented in Figure 4.
The second transformation is the conversion into a single-rate SDF graph: an SDF graph where the production and consumption rates on each edge are equal. In this model, each vertex corresponds to a single actor firing from the SDF graph. This conversion is performed by computing the topology matrix [11], duplicating each actor according to its number of firings and reorganizing edges, as shown in Figure 5, where actor B is split into two instances. An algorithm to perform this conversion can be found in [12].
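The single-rate conversion is driven by the repetition vector obtained from the topology matrix: the number of firings of each actor per graph iteration such that token production and consumption balance on every edge. The following sketch is a hypothetical helper (not PREESM code) that computes this vector for a connected, consistent SDF graph by propagating rational firing ratios and scaling them to the smallest integer solution; the edge encoding is an assumption made for the example.

```python
from collections import defaultdict
from fractions import Fraction
from functools import reduce
from math import gcd

def repetition_vector(edges):
    """Firing count of each actor for one iteration of a connected SDF graph.

    `edges` is a list of (src, prod_rate, dst, cons_rate) tuples. The counts q
    must satisfy the balance equation q[src] * prod_rate == q[dst] * cons_rate
    for every edge.
    """
    adj = defaultdict(list)
    for src, prod, dst, cons in edges:
        adj[src].append((dst, Fraction(prod, cons)))  # q[dst] = q[src] * prod/cons
        adj[dst].append((src, Fraction(cons, prod)))  # q[src] = q[dst] * cons/prod

    start = edges[0][0]
    q = {start: Fraction(1)}
    stack = [start]
    while stack:  # propagate rational firing ratios along the edges
        a = stack.pop()
        for b, ratio in adj[a]:
            if b not in q:
                q[b] = q[a] * ratio
                stack.append(b)
            elif q[b] != q[a] * ratio:
                raise ValueError("inconsistent SDF graph")

    # scale to the smallest integer solution
    lcm_den = reduce(lambda x, y: x * y // gcd(x, y), (f.denominator for f in q.values()))
    counts = {a: int(f * lcm_den) for a, f in q.items()}
    common = reduce(gcd, counts.values())
    return {a: n // common for a, n in counts.items()}

# For a simple chain A -(3,2)-> B -(1,1)-> C (not the graph of Figure 4),
# the repetition vector is {A: 2, B: 3, C: 3}.
print(repetition_vector([("A", 3, "B", 2), ("B", 1, "C", 1)]))
```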
The last conversion consists of generating an acyclic precedence graph by isolating one iteration of the algorithm. This is achieved by ignoring the edges with initial tokens in the single-rate SDF. In our example, this transformation results in ignoring the feedback edge $F \rightarrow A$ which stores 100 initial tokens.

In the context of memory analysis, these transformations are applied to fulfill the following objectives:
- **Expose data parallelism:** Concurrent analysis of data parallelism and data precedence gives information on the lifetime of memory objects prior to any scheduling step. Indeed, two FIFO queues belonging to parallel data-paths may contain data tokens simultaneously and consequently are forbidden from sharing a memory space. Conversely, two FIFOs linked with a precedence constraint, such as $A \rightarrow B$ and $C \rightarrow F$ FIFOs in Figure 4, will never store data tokens simultaneously, thus can share the same memory space.
- **Break FIFOs into buffers shared between actors:** In the SDF model, the channels carrying data tokens between actors behave like FIFO queues. The memory needed to allocate each FIFO corresponds to the maximum number of tokens it stores during an execution of the graph. As this number of tokens depends on the schedule of the actors, methods exist to derive a schedule that minimizes the memory needed to allocate the FIFOs [13]. In our method, however, the memory analysis does not depend on scheduling considerations. For this reason, the FIFOs, whose sizes are undefined before the scheduling step, are replaced with buffers of fixed sizes during the transformation of the graph into a single-rate SDF. In Figure 5, each buffer linking two actors will be written and read only once with a data token of fixed size, which simplifies the memory analysis.
- **Derive an acyclic model:** Cyclic data-paths in an IBSDF graph are an efficient way to model iterative or recursive calls to a subset of actors. In order to use efficient static scheduling algorithms [14], SDF models are often converted into DAGs before being scheduled. Besides revealing data-parallelism, this transformation makes it easier to schedule an application, as each actor is fired only once per execution of the resulting DAG. Similarly, in the absence of a schedule, deriving a DAG permits the use of memory objects (communication buffers) that will be written and read only once per execution of the DAG. Consequently, before a memory object is written and after it is read, its memory space will be reusable to store another object.
**B. Memory objects**
The DAG resulting from the transformations of an IBSDF graph contains three types of memory objects:
- **Communication buffers:** The first type of memory object corresponds to the directed edges of the DAG: the buffers used to transfer data between consecutive actors. In our approach, we consider that the memory allocated to these buffers is reserved from the execution start of the edge producer actor until the completion of the edge consumer actor. This choice is made to enable custom token consumption throughout actor firing time. As a consequence, the memory used to store an input buffer of an actor should not be reused to store an output buffer of the same actor. In Figure 5, the 150 units of memory used to carry data between actors C and D cannot be reused, even partially, to transfer data from D to F.
- **Working memory of actors:** This second type of memory object is the maximum amount of memory allocated by an actor during its execution. This working memory represents the memory needed to store the data used during the computations of the actor but does not include the input nor the output buffer memory. In our method, we assume that an actor keeps exclusive access to its working memory during its execution. In Figures 2 to 5, the size of the working memory associated with each actor is given by the number below the actor name. This memory is equivalent to a task stack space in an operating system.
- **Feedback FIFOs:** The final type of memory object corresponds to the memory needed to store the edges ignored as a result of the transformation of a single-rate SDF into a DAG. These edges, which are ignored to break cycles, can still carry data between successive executions of the DAG and behave like FIFO queues. These feedback edges may not share memory space with any other memory object of the application.
**C. Memory Exclusion Graph**
Once an application has been transformed into a DAG, and all its memory objects have been identified, the last preprocessing step of our method consists of deriving the memory exclusion graph which will serve as a basis to our analysis. A memory exclusion graph is an undirected weighted graph denoted by $G = \langle V, E, w \rangle$ where:
• $V$ is the set of vertices. Each vertex represents an indivisible memory object.
• $E$ is the set of edges representing the memory exclusions.
• $w : V \rightarrow \mathbb{N}$ is a function with $w(v)$ the weight of a vertex $v$. The weight of a vertex corresponds to the size of the associated memory object.
We also denote:
• $N(v)$ the neighborhood of $v$, i.e. the set of vertices linked to $v$ by an edge. Vertices of this set are said to be adjacent to $v$.
• $|S|$ the cardinality of a set $S$. $|V|$ and $|E|$ are thus respectively the number of vertices and edges of the graph.
• $\delta(G) = \frac{2|E|}{|V|(|V|-1)}$ the edge density of the graph corresponding to the ratio of existing edges to all possible edges.
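To make these definitions concrete, a minimal container for a memory exclusion graph could look as follows. This is an assumed Python sketch, not the actual implementation of the method:

```python
class MemoryExclusionGraph:
    """Minimal container for G = <V, E, w>: weighted vertices (memory objects)
    linked by undirected exclusion edges."""

    def __init__(self):
        self.w = {}    # vertex -> weight, i.e. size of the memory object
        self.adj = {}  # vertex -> set of adjacent vertices N(v)

    def add_object(self, name, size):
        self.w[name] = size
        self.adj.setdefault(name, set())

    def add_exclusion(self, u, v):
        self.adj[u].add(v)
        self.adj[v].add(u)

    def nb_edges(self):
        return sum(len(neighbors) for neighbors in self.adj.values()) // 2

    def density(self):
        """Edge density delta(G) = 2|E| / (|V|(|V| - 1))."""
        n = len(self.w)
        return 2 * self.nb_edges() / (n * (n - 1)) if n > 1 else 1.0
```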
Two memory objects of any type exclude each other (i.e. they cannot share the same memory space) if a schedule can be derived from the DAG in which both memory objects store data simultaneously. Some exclusions are directly caused by the properties of the memory objects, such as exclusions between the input and output buffers of an actor. Other exclusions result from the parallelism of an application, as is the case with the working memory of actors from parallel data-paths that might be executed concurrently.
The memory exclusion graph presented in Figure 6 is derived from the IBSDF graph of Figure 2. The complete graph contains 17 memory objects and 66 exclusions but, for clarity, only the vertices corresponding to the buffers between actors (type 1) are presented.
Building a memory exclusion graph based on a DAG consists of scanning its actors and data-transfers in order of precedence, so as to identify its parallel branches. As part of this scan, the memory objects and the exclusions caused by a precedence relationship are added to the memory exclusion graph. Then, exclusions are inserted between all memory objects which have been identified in the DAG as belonging to parallel branches. An alternative way of building an exclusion graph is to first build its complement graph, within which two vertices are linked if the corresponding memory objects can share a memory space. The exclusion graph is then obtained by linking two vertices if and only if they are not connected by an edge in the complement graph.
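A possible sketch of this construction, restricted to communication buffers (type 1) and reusing the MemoryExclusionGraph container above, is given below. The encoding of the DAG as (producer, consumer, buffer) triples is an assumption made for the example; two buffers are considered free of exclusion only when the consumer of one necessarily completes before the producer of the other starts.

```python
from collections import defaultdict

def build_exclusion_graph(dag_edges, sizes):
    """dag_edges: list of (producer, consumer, buffer_name) triples.
    sizes: buffer_name -> size of the corresponding memory object."""
    succ = defaultdict(set)
    actors = set()
    for producer, consumer, _ in dag_edges:
        succ[producer].add(consumer)
        actors.update((producer, consumer))

    def descendants(actor):
        """Actors that can only execute after `actor` has completed."""
        seen, stack = set(), [actor]
        while stack:
            for nxt in succ[stack.pop()]:
                if nxt not in seen:
                    seen.add(nxt)
                    stack.append(nxt)
        return seen

    reach = {a: descendants(a) for a in actors}

    g = MemoryExclusionGraph()
    for _, _, name in dag_edges:
        g.add_object(name, sizes[name])

    for i, (p1, c1, n1) in enumerate(dag_edges):
        for p2, c2, n2 in dag_edges[i + 1:]:
            ordered = p2 in reach[c1] or p1 in reach[c2]
            if not ordered:  # lifetimes may overlap in some schedule
                g.add_exclusion(n1, n2)
    return g
```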
In our method, the memory exclusion graph is built from a non-scheduled DAG, which allows the application to be characterized independently of any architecture constraint. However, a subsequent update of this graph to incorporate the changes resulting from a schedule is possible. Indeed, a static schedule introduces an execution order of the graph actors, which can be seen as a new precedence relationship between actors. Adding this new precedence link to a DAG may remove a certain amount of parallelism and the resulting exclusions. For example, in Figure 5, if actor D is scheduled before actor E, then the exclusion between the working memory of D and the $E \rightarrow F$ communication buffer disappears.
IV. MEMORY ALLOCATION BOUNDS
The upper and lower bounds of the static memory allocation of an application are respectively a maximum and a minimum limit to the amount of memory needed to run the application, as presented in Figure 1. These bounds are crucial information in the co-design process, as they allow the developer to size the available memory according to the application requirements. Furthermore, as these bounds can be computed during the early development of an MPSoC, they may prevent the developer from selecting an insufficient or an unnecessarily large memory chip.
A. Least upper bound
The least upper memory allocation bound of an application corresponds to the size of the memory needed to allocate each memory object in a dedicated memory space. Such an allocation would be a waste of memory, as a memory space used to store an object would never be reused to store another. However, this allocation scheme must be used for exclusion graphs where an exclusion exists for every pair of vertices.
Given an exclusion graph $G$, its upper memory allocation bound is thus the sum of the weight of its vertices:
$$\text{Bound}_{\text{Max}}(G) = \sum_{v \in V} w(v) \quad (1)$$
The upper bound for the exclusion graph of Figure 6 is 850 units, and the upper bound for the complete exclusion graph derived from the IBSDF of Figure 2 is 1020 units.
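Using the MemoryExclusionGraph sketch of Section III-C, this bound is a one-line sum over the vertex weights (illustrative only):

```python
def bound_max(g):
    """Least upper bound of Equation (1): one dedicated memory space per object."""
    return sum(g.w.values())
```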
B. Greatest lower bound
The greatest lower memory allocation bound of an application is the least amount of memory required to execute it. Finding this optimal allocation based on an exclusion graph can be done by solving the equivalent Interval Coloring Problem [15], [3].
A k-coloring of an exclusion graph is the association of each vertex $v_i$ of the graph with an interval $I_i = \{a, a + 1, \ldots, b - 1\}$ of consecutive integers, called colors, such that $b - a = w(v_i)$. Two distinct vertices $v_i$ and $v_j$ linked by an edge must be associated with non-overlapping intervals. Assigning an interval to a weighted vertex is equivalent to allocating a range of memory addresses to a memory object. Consequently, a k-coloring of an exclusion graph corresponds to an allocation of its memory objects.
The Interval Coloring Problem consists of finding a k-coloring of the exclusion graph with the fewest integers used in the $I_i$ intervals. This objective is equivalent to finding the allocation of memory objects that uses the least memory possible, thus giving the greatest lower bound of the memory allocation.
Unfortunately, as presented in [15], this problem is known to be NP-Hard, therefore it would be prohibitively long to solve in the rapid prototyping context which involves applications with hundreds or thousands of buffers. Moreover, a sub-optimal solution to the Interval Coloring problem corresponds to an allocation that uses more memory than the minimum possible: more memory than the greatest lower bound. Consequently, a sub-optimal solution fails to achieve our objective which is to find a lower bound to the size of the memory allocated for a given application.
C. Lower bound using exact solution to the Maximum-Weight Clique Problem
Since the greatest lower bound cannot be found in a reasonable time, we focus our attention on finding a lower bound close to the size of the optimal allocation. In [3], Fabri introduces another lower bound derived from an exclusion graph: the weight of the Maximum-Weight Clique (MWC).
A clique is a subset of vertices that forms a subgraph within which each pair of vertices is linked with an edge. As the memory objects of a clique cannot share memory space, their allocation requires a memory as large as the sum of the weights of the clique elements, also called the clique weight. The subsets \{CD,CE,CF,DF,EF\} and \{AB_1,AB_2,B_2C\} are examples of cliques in the exclusion graph of Figure 8. Their respective weights are 550 and 250. By definition, a single vertex can also be considered as a clique, and a clique is called maximal if no vertex can be added to it to form a larger clique. In Figure 8, the clique \{CD,CE,CF,DF,EF\} is maximal, but \{AB_1,AB_2,B_2C\} is not, as B_1C is linked to all the clique vertices and can therefore be added to the clique.
The Maximum-Weight Clique (MWC) of a graph is the clique whose weight is the largest of all cliques in the graph. Although this problem is also known to be NP-Hard, several branch-and-bound algorithms have been proposed to solve it efficiently. In [16], Östergård proposes an exact algorithm which is, to our knowledge, the fastest algorithm for exclusion graphs with an edge density under 0.80. For graphs with an edge density above 0.80, a more efficient algorithm was proposed by Yamaguchi et al in [17]. Both algorithms are recursive and use a similar approach: beginning with a single vertex, they search for the MWC $C_i$ in a subset of the graph, then add a vertex to the considered subset and use the weight of $C_i$ to bound the search for a larger clique $C_{i+1}$ in the new subgraph. The two algorithms were implemented in order to compare their performances on exclusion graphs derived from different applications (cf. section VI).
In the exclusion graph of Figure 8, the MWC is \{CD,CE,CF,DF,EF\} with a weight of 550 units.
The weight of the MWC corresponds to the amount of memory needed to allocate the memory objects belonging to this subset of the graph. By extension, the allocation of the whole graph will never use less memory than the weight of its MWC. Therefore, this weight is a lower bound to the memory allocation and is less than or equal to the greatest lower bound, as shown in Figure 7.
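To make this bound concrete on a toy exclusion graph, the MWC can be found by exhaustive enumeration. The sketch below is not the branch-and-bound algorithms of [16], [17]; it is only practical for very small graphs and takes the weight and adjacency maps of the earlier sketch (g.w and g.adj) as plain dictionaries.

```python
from itertools import combinations

def mwc_bruteforce(weights, adj):
    """Exact maximum-weight clique by enumerating every subset of vertices."""
    best, best_weight = set(), 0
    vertices = list(weights)
    for k in range(1, len(vertices) + 1):
        for subset in combinations(vertices, k):
            # a subset is a clique if every pair of its vertices is adjacent
            if all(v in adj[u] for u, v in combinations(subset, 2)):
                w = sum(weights[v] for v in subset)
                if w > best_weight:
                    best, best_weight = set(subset), w
    return best, best_weight
```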
D. Lower bound using approximate solution to the Maximum-Weight Clique Problem
Östergård’s and Yamaguchi’s algorithms are exact algorithms, not heuristics. As the MWC problem is NP-Hard, even these fast algorithms cannot guarantee an exact solution in polynomial time. In the rapid prototyping context, all methods and algorithms used must have a short and predictable runtime; that is why we developed a heuristic algorithm for the MWC problem.
The proposed heuristic approach, presented in Figure 9, is an iterative algorithm whose basic principle is to remove a judiciously selected vertex at each iteration, until the remaining vertices form a clique.
Our algorithm can be divided into 3 parts:
- **Initializations (lines 1-5):** For each vertex of the graph, the cost function is initialized with the weight of the vertex summed with the weights of its neighbors. In order to keep the input graph unaltered through the algorithm execution, its set of vertices $V$ and its number of edges $|E|$ are copied in local variables $C$ and $nb\_edges$.
- **Algorithm core loop (lines 6-13):** During each iteration of this loop, the vertex with the minimum cost $v^*$ is removed from $C$ (line 8). In the few cases where several vertices have the same cost, the lowest number of neighbors $|N(v)|$ and then the smallest weight $w(v)$ are used to determine the vertex to remove. By doing so, fewer edges are removed from the graph and the edge density of the remaining vertices will be
Input: a memory exclusion graph \( G = < V, E, w > \)
Output: a maximal clique \( C \)
1: \( C \leftarrow V \)
2: \( nb_{edges} \leftarrow |E| \)
3: for each \( v \in C \) do
4: \( \text{cost}(v) \leftarrow w(v) + \sum_{v' \in N(v)} w(v') \)
5: end for
6: while \( |C| > 1 \) and \( \frac{2 \cdot nb_{edges}}{|C| \cdot (|C| - 1)} < 1.0 \) do
7: Select \( v^* \) from \( C \) that minimizes \( \text{cost}(\cdot) \)
8: \( C \leftarrow C \setminus \{ v^* \} \)
9: \( nb_{edges} \leftarrow nb_{edges} - |N(v^*) \cap C| \)
10: for each \( v \in N(v^*) \cap C \) do
11: \( \text{cost}(v) \leftarrow \text{cost}(v) - w(v^*) \)
12: end for
13: end while
14: Select a vertex \( v_{random} \in C \)
15: for each \( v \in N(v_{random}) \setminus C \) do
16: if \( C \subset N(v) \) then
17: \( C \leftarrow C \cup \{ v \} \)
18: end if
19: end for
Fig. 9. Maximum-Weight Clique Heuristic Algorithm
higher, which is desirable when looking for a clique. If there are still multiple vertices with equal properties, a random vertex is selected among them. This loop is iterated until the vertices in subset \( C \) form a clique. This condition is checked on line 6 by comparing the edge density of the subgraph of \( G \) formed by the remaining vertices in \( C \) with 1.0, the edge density of a clique. To this purpose, \( nb_{edges} \), the number of edges of this subgraph, is decremented on line 9 by the number of edges in \( E \) linking the removed vertex \( v^* \) to vertices in \( C \). On lines 10 to 12, the costs of the remaining vertices are updated for the next iteration.
- **Clique maximization (lines 14-19):** This last part of the algorithm ensures that the clique \( C \) is maximal by adding neighbor vertices to it. To become a member of the clique, a vertex must be adjacent to all its members. Consequently, the candidates to join the clique are the neighbors of a vertex randomly selected in \( C \). If a vertex among these candidates is linked to all vertices in \( C \), it is added to the clique.
The complexity of this heuristic algorithm is of the order of \( O(|V|^2) \), where \( |V| \) is the number of vertices of the exclusion graph.
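The following Python transcription of Figure 9 is a sketch intended to clarify the algorithm; as in line 8 of the pseudocode, the vertex with the minimum cost is selected among the remaining vertices of \( C \). The (weights, adj) dictionaries follow the same convention as the earlier sketches.

```python
def mwc_heuristic(weights, adj):
    """Heuristic of Figure 9: remove the vertex with the smallest cost (its
    weight plus the weights of its neighbors) until the remaining vertices
    form a clique, then try to grow the clique."""
    C = set(weights)
    nb_edges = sum(len(adj[v]) for v in C) // 2
    cost = {v: weights[v] + sum(weights[n] for n in adj[v]) for v in C}

    while len(C) > 1 and 2.0 * nb_edges / (len(C) * (len(C) - 1)) < 1.0:
        # tie-breaking: smallest cost, then fewest remaining neighbors,
        # then smallest weight (remaining ties broken arbitrarily)
        v_star = min(C, key=lambda v: (cost[v], len(adj[v] & C), weights[v]))
        C.remove(v_star)
        nb_edges -= len(adj[v_star] & C)
        for v in adj[v_star] & C:
            cost[v] -= weights[v_star]

    # clique maximization (lines 14-19): add every candidate adjacent to all
    # current members of the clique
    v_any = next(iter(C))
    for v in adj[v_any] - C:
        if C <= adj[v]:
            C.add(v)

    return C, sum(weights[v] for v in C)
```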
For example, in Table I, the algorithm is applied to the exclusion graph of Figure 6. For each iteration, the costs of the remaining vertices are given, and the vertex removed during the iteration is crossed out. The column Density corresponds to the edge density of the subgraph formed by the remaining vertices.
In this simple example, the clique found by the heuristic algorithm and the exact algorithm are the same, and their weight also corresponds to the size of the optimal allocation. This example proves that, as shown in Figure 7, the result of the heuristic can be equal to the exact solution of the MWC problem, whose size can also equal the size of the optimal allocation.
V. RELATED WORK
To the best of our knowledge, memory optimization for multi-core systems has generally been studied in the literature as a post-scheduling process. Using the scheduling information, the lifetimes of the different memory objects of an application are derived. Minimization is then achieved by allocating several memory objects whose lifetimes do not overlap in the same memory space.
Once the lifetimes of the memory objects are obtained, the memory allocation is performed using one of these different approaches:
- Running static (or offline) allocation algorithms inspired by dynamic allocators, such as those proposed in [1], [4], [2]. As opposed to dynamic allocators, which allocate memory objects in the order they are submitted to them, static allocators have a global knowledge of all memory objects at compile time, thus making further optimizations possible.
- Coloring an exclusion graph that models the conflicting memory objects [15], [4]. An equivalent approach is to use the complement graph, where memory objects are linked if they have non-overlapping lifetime, and perform a clique partitioning of its vertices [18].
- Using constraint programming, as is the case in [19] where memory constraints are used together with cost, resource usage and execution time constraints.
Besides the post-scheduling techniques, a few publications also consider the memory optimizations during the scheduling process. In [2], [13], algorithms are presented to derive a schedule from a SDF graph so that the size of the FIFOs between actors is minimized. Another technique proposed in [20], consists of iterating the scheduling and the memory allocation steps and keeping only the schedule whose corresponding memory allocation uses the least memory.
Finally, certain optimization techniques can be applied before the scheduling step. These techniques mostly consist of modifying the description of the application behavior so as to maximize the impact of later optimizations. Variable renaming, instruction re-ordering, loop merging and splitting
are examples of modifications for imperative languages that can reduce the memory needs of an application [3]. Similar modifications can be applied to SDF graphs, as was done in [6] where a technique used to extract parallelism from nested loops in imperative languages is adapted to reveal data parallelism embedded in an IBSDF graph.
As explained in [21], the aforementioned optimization techniques often require a partial system synthesis and the execution of time-consuming algorithms. Although these techniques provide an exact or highly optimized memory requirement, they may be too slow to be used in the rapid prototyping context. In [21], Balasa et al. survey existing estimation techniques that provide a reliable memory size approximation in a reasonable computation time. The main difference between these estimation techniques and our bounding method is the abstraction level considered. Indeed, these techniques are mostly based on the analysis of imperative code while our method deals with applications modeled with SDF graphs.
VI. RESULTS
The memory bounds are computed in the PREESM\(^1\) rapid prototyping framework [7]. PREESM is an open-source Eclipse-based tool providing graph transformation algorithms, multi-core scheduling and C code generation from IBSDF graphs. Algorithms for memory bound computation have been implemented in Java in this framework. All results presented in this section are obtained by running the algorithms on a 3.1GHz CPU.
| Vertices | Edge density δ(G) | Mean runtime |
|---|---|---|
| 60 | 0.80 | 0.05 s |
| 80 | 0.80 | 0.43 s |
| 100 | 0.80 | 3.4 s |
| 120 | 0.80 | 17.93 s |
| 60 | 0.90 | 0.35 s |
| 80 | 0.90 | 9.34 s |
| 100 | 0.90 | 188.00 s |

**TABLE II**
**PERFORMANCE OF MAXIMUM-WEIGHT CLIQUE ALGORITHMS ON RANDOM EXCLUSION GRAPHS**
Table II shows the performance of different algorithms for the MWC problem. Each entry presents the mean performance obtained from 400 randomly generated exclusion graphs with a fixed number of vertices \(|V|\) and a fixed edge density \(\delta(G)\). For each exclusion graph, the weights of its vertices are uniformly distributed in a predefined range. The 400 graphs are generated with ranges varying from \([1000; 1010]\) to \([1000; 11000]\). The runtime columns give the mean runtime of each of the three algorithms, and the Efficiency column gives the average ratio of the clique size found by the heuristic algorithm over the size of the MWC.
It should be noted that the clique maximization part of the heuristic algorithm was deactivated in all tests of this section. Indeed, several tests showed that this maximization improved the mean efficiency by only 2\%, while multiplying the runtime of the heuristic by a factor 1.6.
Table II shows that the runtime of the exact algorithms grows exponentially with the number of nodes of the exclusion graphs, and is highly dependent on the edge density of the graphs. Conversely, the runtime of the heuristic algorithm is roughly proportional to \(|V|^2\) and is not strongly influenced by the edge density of the graphs. The results in Table II also reveal that the mean efficiency of the heuristic algorithm for random exclusion graphs is of the order of 90\%, and decreases slightly as the number of vertices increases. Finally, these results additionally confirm that Yamaguchi’s algorithm has better performance than Østergård’s algorithm for graphs with more than 80 vertices and an edge density higher than 0.80.
| Graph | Actors | FIFOs |
|---|---|---|
| RACH | 233 | 468 |
| LTE1 | 667 | 907 |
| LTE2 | 56 | 84 |
| MP4P2 | 143 | 146 |
| Diff | 19 | 27 |

**TABLE III**
**PROPERTIES OF THE TEST GRAPHS**
The performance of each of the three algorithms was also tested using exclusion graphs derived from IBSDF graphs of functional applications. Table III shows the characteristics of the tested graphs. The first three entries of this table, namely RACH, LTE1 and LTE2, correspond to application graphs describing parts of the Long Term Evolution (LTE) wireless communication standard. The last two entries, MP4P2 and Diff, are respectively a description of the MPEG-4 (Moving Picture Experts Group) Part 2 video encoder and a dummy application that computes the difference between successive video frames. The values given for Actors and FIFOs are those of the flattened IBSDF graph, before its conversion into a DAG. It may also be noted that the lower memory bound indicated in Table III corresponds to the size of the MWC. In this section, only the memory objects corresponding to the communication buffers (type 1) were considered to generate the exclusion graphs.
To take advantage of a multi-core architecture, an application modeled with an SDF graph must present a high degree of parallelism. Exclusion graphs derived from such applications will therefore have a high edge density, as is the case with the graphs of Table III. The performance of each of the three algorithms on these graphs is reported in Table IV.
As shown in Table IV, the efficiency of the heuristic algorithm for exclusion graphs derived from real applications is much higher than for randomly generated exclusion graphs.
TABLE IV
PERFORMANCE OF MAXIMUM-WEIGHT CLIQUE ALGORITHMS ON EXCLUSION GRAPHS DERIVED FROM THE TEST GRAPHS

| Graph | Exact algorithms | Heuristic |
|---|---|---|
| RACH | ∞ | 207.00 s |
| LTE1 | ∞ | 1.200 s |
| LTE2 | 996.70 s | 869.320 s |
| MP4P2 | 1.12 s | 0.052 s |
| Diff | 0.42 s | 0.120 s |
Indeed, the heuristic algorithm always finds a clique with a weight almost equal to the weight of the MWC and has a runtime at least 4 times shorter. Moreover, contrary to the exact algorithms, which sometimes fail to find a solution within 12 hours (as denoted by ∞), the runtime of the heuristic algorithm is highly predictable as it depends solely on the number of memory objects |V|. In the case of LTE1, because of the large number of vertices in the exclusion graph, the exact algorithms never ran to completion; we are thus unable to give the exact size of the MWC or the efficiency of our heuristic algorithm for this graph. However, this example shows that our heuristic algorithm may succeed in finding a lower bound to memory requirements in cases where exact approaches fail. Additionally, it can also be noted that Yamaguchi’s algorithm presents a slightly better performance than Østergård’s algorithm for exclusion graphs derived from SDF graphs.
Finally, the algorithms were tested against 120 exclusion graphs derived from randomly generated SDF graphs. The resulting exclusion graphs presented edge densities from 0.49 to 0.93 and possessed between 50 and 500 vertices. These tests confirmed that Yamaguchi’s algorithm is faster than Østergård’s for exclusion graphs derived from SDF graphs. These tests also showed that our heuristic approach finds the optimal solution 81% of the time, and that when the optimal solution is not found, the average efficiency is 96.5%.
VII. CONCLUSION AND FUTURE WORKS
This paper outlines a complete method for deriving the memory allocation bounds (Figure 1) of an application modeled with a hierarchical SDF graph. The bounds are derived as a part of a rapid prototyping process for MPSoC and are independent of mapping/scheduling considerations. Our method is based on a weighted graph, derived from an application graph, which models the ability of memory objects to share a memory space. In addition to presenting existing algorithms to derive optimal bounds, we propose a new heuristic approach for determining a lower bound to the memory requirement. Our experiments indicate good performance for this approach, both in terms of speed and efficiency. Future work on this subject is likely to include further testing of our method with exclusion graphs incorporating the working memory of actors. Other potential directions for future research are the design of an allocation algorithm using an exclusion graph as input and the iterative computation of memory bounds to influence a scheduling process.
ACKNOWLEDGMENTS
The authors would like to thank Renaud Keller for creating LTE applications that were used to evaluate the heuristic approach performance.
Abstract—Model checking techniques for software product lines (SPL) are actively researched. A major limitation they currently have is the inability to deal efficiently with non-Boolean features and multi-features. An example of a non-Boolean feature is a numeric attribute such as the maximum number of users, which can take different numeric values across the range of SPL products. Multi-features are features that can appear several times in the same product, such as processing units whose number varies from one product to another and which can be configured independently. Both constructs are extensively used in practice but currently not supported by existing SPL model checking techniques. To overcome this limitation, we formally define a language that integrates these constructs with SPL behavioural specifications. We generalize SPL model checking algorithms correspondingly and evaluate their applicability. Our results show that the algorithms remain efficient despite the generalization.
Index Terms—Software Product Lines; Numeric Features; Feature Cardinalities; Model Checking; Semantics; Tools
I. INTRODUCTION
Software Product Line (SPL) engineering is an increasingly widespread software development paradigm in which similar software products are designed and developed as a family to make economies of scale [1]. A key challenge in SPL engineering is the management of the differences between the products, aka variability. Features are first-class abstractions to model and reason on variability. They specify characteristics that may be present or absent in a product. Relations between features, like parent-child, implication and exclusion, are usually captured in a feature model (FM). Since they were first introduced by Kang et al. in 1990 [2], FMs became more sophisticated. For detailed surveys of FM languages see, e.g., [3], [4]. In this paper we use TVL [4], one of the latest incarnations of FMs, due to some of its advantages: high expressiveness, formal semantics and tool support. A TVL excerpt is given in Figure 1. TVL and the example are properly introduced in Section II.
Many SPLs are complex critical systems and variability is known to be the source of additional complexity. Therefore, efficient quality assurance is paramount. Model checking is an established automated technique for verifying system behaviour. As for single systems, model checking techniques for SPLs are actively researched [5]–[10]. Our past work [7]–[9] was concerned with one of the major challenges in SPL model checking: as the number of features grows, the number of products increases exponentially. We thus proposed various techniques for efficient SPL model checking. We introduced Featured Transition Systems (FTS), a mathematical formalism to model the behaviour of SPL products in a concise manner [7]. We also designed efficient algorithms that identify which products exhibit undesired behaviour. However, FTS are a fundamental formalism, not meant to be used directly by engineers. We thus proposed high-level specification languages to be used on top of FTS, notably fPromela [11], an SPL-specific dialect of Promela [12]. Given the large number of different products in an SPL, it is unrealistic to describe their behaviour separately. Instead, our languages associate optional behaviour with features that must (not) be present to enable it.
A fundamental limitation of existing SPL model checking approaches, including ours, is that they do not deal with numeric features, nor with features appearing several times in a product, viz. multi-features\(^1\). Numeric (as opposed to purely Boolean) features occur in FMs in the form of attributes associated with features. An example could be Maximum number of users, which can take different numeric values across the range of products. The multi-feature construct could be used to represent, e.g., processing units whose number varies from one product to another and which can be configured independently. A recent survey, confirmed by our own experience, showed that engineers commonly need these constructs [15]. To transfer our model checking techniques to industry, we thus have to support them. Despite evidence of their usefulness in practice, no SPL modelling tool currently supports multi-features [4], and SPL model checking tools support neither numeric nor multi-features.
In this paper, we propose a combined formalism that integrates TVL with fPromela to model the behaviour of SPLs with numeric attributes and multi-features. With the addition of attributes, optional behaviour can be made dependent not only on the presence or absence of features, but also on the satisfaction of arithmetic constraints over attributes. This implies a generalization of the underlying formalism, viz. FTS, and its associated model checking algorithms. We implemented the complete method on top of SNIP, an FTS-based model checker [11]. The addition of arithmetic constraint solving
---
1 Sometimes misleadingly referred to as clones in the literature [13], [14].
naturally led us to use Satisfiability Modulo Theory (SMT) solvers. Still, those are more computationally expensive than solvers for purely Boolean satisfiability. Our experiments show that while multi-feature support does not constitute a threat to performance, the use of SMT solvers to support attributes drastically increases verification time. We thus also propose an alternative solution where attributes are converted into sets of Boolean variables. This latter approach turns out to be more efficient.
**Structure of the paper.** Section II recalls the necessary background. In Section III we expose the challenges related to multi-features and define an extension of TVL that supports them through an array-based semantics. In Section IV, we define multi-features and attributes in fPromela. The implementation and evaluation are described in Section V. Related work is discussed in Section VI.
## II. BACKGROUND
The necessary concepts and background are now recalled, most of them based on our running example which is first introduced.
### A. Running Example
CFDP is a highly-configurable deep-space file transfer protocol [16]. In the past, our team helped Spacebel, a Belgian company, to develop an implementation of CFDP as an SPL [17]. The original CFDP FM has 98 features. Here, we consider a small subset of the protocol, i.e., the ack modes it offers. The corresponding sub-FM has 14 features and yields 1,058 different valid products. We had to limit ourselves to this subset because in addition to the variability, we had to model the behaviour of the protocol’s features, which is far more complex. We did that based on the protocol specification [16] and experimented with various SPL behavioural modelling languages. This turned out to be a difficult and time-consuming activity. The resulting models describe a communication scenario where an entity (e.g., a spacecraft) has to transfer a message to another one. Depending on the features of the protocol’s instance in each entity, properties like successful transmission may or may not be satisfied during the transaction. The final model has 1,812,652 states.
### B. TVL
Figure 1 shows an excerpt of the TVL model of CFDP. The equivalent graphical representation appears on its right. In both representations, the FM’s fundamental structure remains a tree that reflects the parent-child hierarchy between the features. At the top of the tree lies the root feature (CFDP). The root is always part of a product, regardless of its other features. A feature may have child features; for instance, CFDP has three: Entity, Message and Channel. Group cardinalities define how many children a feature may have in any given product. This construct is common in FM languages, including TVL. Here, it specifies that CFDP must have exactly three children, whereas Entity must have one or two.
Group cardinalities are not to be confused with feature cardinalities [13], [14] which specify how many instances of a feature may exist in any given product. When no feature cardinality is given, as is the case for all features in the model except Entity, the feature is implicitly assumed to occur at most once in each product. But whenever there is a need to allow a feature to have multiple instances, an explicit cardinality is added to the feature itself (as opposed to the group); we call such a feature a multi-feature. In the excerpt, the only multi-feature is Entity. It has a feature cardinality of exactly 2; hence two instances of this feature exist in each product. In our scenario, each instance corresponds to one of the two communicating spacecrafts. Defining Entity as a multi-feature allows the spacecrafts to bear a different configuration. If Entity was a normal feature, the spacecrafts should necessarily be identical. In reality, it is more likely that they are not.
Each instance of Entity must have at least one of the following child features: Snd_min (sending capabilities) and Rcv_min (receiving capabilities). Under Snd_min and Rcv_min lie additional features, which have been omitted in the figure. Features Rcv_min and Message both have an integer attribute. The attribute timeout determines the number of communication flaws that are allowed before aborting a communication while size models the number of data packets that must be sent for the transmission to end. Feature Channel has an optional child feature Reliable, which specifies whether or not the communication channel is reliable.
In addition to the specification of the tree structure, TVL allows the definition of additional constraints (i.e., Boolean formulae) over both the features and their attributes (omitted in the figure). The semantics of an FM is usually defined as the set of valid products, i.e. products whose features satisfy all the constraints defined by the FM [3].
### C. Featured Transition Systems (FTS)
FTS [7] is the formalism at the core of our model checking approach. Its main advantage over competing approaches is that it uses an explicit notion of feature, which brings
performance improvements and allows one to relate errors and undesired behaviours to the exact set of products where they occur. FTS are directed graphs whose transitions are annotated with feature expressions, i.e. Boolean formulae defined over the set of features. For instance, the feature expression $\text{Message} \land \text{Channel}$ represents the set of products that have the feature $\text{Message}$ and the feature $\text{Channel}$.
$\lnot\text{Reliable}$ models the set of products that do not have the feature $\text{Reliable}$. A product is thus able to execute a transition iff its set of features satisfies the associated feature expression. A model-checking algorithm takes that information into account while looking for error states. It can thus keep track of which products are able to execute the currently analysed behaviour. Feature expressions constitute an intuitive and flexible way to represent variability in behavioural models. In their current form, FTS do not support expressions over multi-features and attributes. Formally, they are defined as follows.
**Definition 1** An FTS is a tuple $(S, A, t, \alpha, \rho, \gamma)$, where $S$ is a set of states, $A$ is a set of actions, $t \subseteq S \times A \times S$ is the transition relation, $\alpha$ is the set of initial states, $\rho$ is a set of atomic propositions, and $\gamma$ associates each transition with a feature expression.
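As a minimal illustration of this definition (an assumed sketch, not the FTS machinery of SNIP), transitions can be annotated with predicates over a product's feature set, and the behaviour of one product is obtained by keeping only the transitions whose expression it satisfies:

```python
from dataclasses import dataclass, field
from typing import Callable, Dict, FrozenSet, Set, Tuple

Transition = Tuple[str, str, str]               # (source, action, target)
FeatureExpr = Callable[[FrozenSet[str]], bool]  # evaluated on a product's feature set

@dataclass
class FTS:
    states: Set[str]
    initial: Set[str]
    transitions: Set[Transition] = field(default_factory=set)
    guard: Dict[Transition, FeatureExpr] = field(default_factory=dict)

    def project(self, product: FrozenSet[str]) -> Set[Transition]:
        """Transition system of a single product: keep only the transitions
        whose feature expression is satisfied by the product."""
        return {t for t in self.transitions if self.guard[t](product)}

# Example: a transition present only in products with Message and Channel but
# without Reliable (feature names taken from the CFDP excerpt).
fts = FTS(states={"s0", "s1"}, initial={"s0"})
t = ("s0", "send", "s1")
fts.transitions.add(t)
fts.guard[t] = lambda p: "Message" in p and "Channel" in p and "Reliable" not in p
print(fts.project(frozenset({"Message", "Channel"})))                # {('s0', 'send', 's1')}
print(fts.project(frozenset({"Message", "Channel", "Reliable"})))    # set()
```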
### D. SPL Behavioural Specification in fPromela
There are two kinds of approaches to implement (or model) SPLs [18]. Compositional methods capture the effects of features in isolated modules. A desired product is then obtained by composing the right set of modules. On the contrary, annotative approaches directly adorn code/models with constraints over the features, e.g., feature expressions. These annotations express that parts of the code/model are exclusive to the set of products satisfying the formulae.
fPromela [11] falls into the latter category. It is an executable language based on SPIN’s input syntax [12]. In an fPromela model, the behaviour of each process is described in a `proctype` structure. Within a process, executable statements are expressed using constructs inspired by imperative programming. Each of these can be annotated with feature expressions, such that only the products satisfying a formula are able to execute the associated statement. Let us consider the excerpt shown in Figure 2. Features are declared as Boolean fields of a user-defined structure called `features`. In this model, two processes of type `cfdp_entity` are specified. At some point, the specification of a `cfdp_entity` splits into two parts: one for the products satisfying the feature expression $\text{Snd\_min}$ and one for the others.
The semantics of an fPromela model is an FTS. Each process is first translated into an FTS. A state corresponds to a variable valuation and a node of execution. Transitions between states are determined according to the executable statements. Feature expressions labelling transitions are directly derived from the fPromela model. Once all the processes have been translated, the final FTS is obtained by computing their parallel composition [11].


written between brackets next to the name of the feature (see Figure 1). Additional constraints over multi-features are described further in this section. For clarity, we name TVL* our new version of TVL. Before we give it a formal semantics, we have to deal with a number of issues.
A. Challenges in the Definition of Feature Cardinalities
As explained by Michel et al. [14], feature cardinalities introduce semantic ambiguities. When a non-terminal feature has a maximum cardinality greater than one, two ambiguities exist regarding its group cardinalities. For instance, consider the FM in Figure 3a and the two instances shown in Figure 3b. According to the feature diagram shown in the former figure, feature A has two instances and a group cardinality of [1..2] (only one or two child features of A may exist). However, it is unclear whether the group cardinalities apply locally under each instance of A or globally for all instances of A. In other words, either each instance can have one or two child features (left diagram in Figure 3b), or there must be one or two child features of A altogether (right diagram). Since our approach is centred on multi-features, we consider the local interpretation like Michel et al. [14].
The second ambiguity lies in the scope of group cardinalities; see Figure 3c. In the left diagram, cardinalities restrict the number of instances of A’s child features. On the contrary, in the right diagram, the two instances of B under the leftmost instance of A are counted once; in this case, cardinalities constrain the number of distinct child features. Like Michel et al. [14], we believe that the original intent of group cardinalities is to restrict the number of features in a product; we thus consider the second option.
Multiple interpretations are also possible for feature cardinalities. Let us consider Figure 3d. As for group cardinalities, either feature cardinalities apply globally and count the total number of instances of a feature (left diagram), or they apply locally and count the number of instances of a feature under a specific instance of its parent feature (right diagram). As before, we adopt the most local option, i.e. the latter.
Another issue concerns the identification of a feature by means of its name. In FMs without multi-features, the (relative) name of a feature works as a unique reference to that feature. This is not the case for multi-features. Let us consider again the TVL* model of CFDP (Figure 1). The relative name $Snd_{min}$ can refer to a child feature of either the first instance of Entity or the second one. We have to identify this instance using an “absolute” name (called fully qualified). In TVL, however, no construct exists for referring to a precise instance. Moreover, the semantics proposed by Michel et al. defines the children of a feature as a multiset of instances [14]. One cannot refer to a precise element of a multiset in natural language. Our SPL behaviour specification language requires this capability; the definition of Michel et al. is thus inappropriate for our purpose.
We propose to represent the children of a feature by a set of arrays of instances. In a given array, all the instances are from the same feature. Each of them is identified by an index, the first index being zero. For example, the $Snd_{min}$ child feature of the second instance of Entity is identified by the fully qualified name $CFDP[0].Entity[1].Snd_{min}[0]$. When the maximum number of instances of a given feature is 1, the index can be omitted; $CFDP.Entity[1].Snd_{min}$ is thus equivalent to the above name. An attribute of a given instance must be referred to using its fully qualified name as well, e.g., $CFDP.Message.size$.
Since we introduce multi-features in the syntax of TVL*, we must provide means to specify constraints over them, their attributes, and their number. Fully qualified names are already suitable to refer to precise instances or attributes. However, it is currently impossible to reason over a whole array. For example, one cannot express that the number of instances of a feature must not exceed the value of another feature’s attribute, or that every instance of a feature must satisfy a given constraint. To address this limitation, we define the operator $\text{card}$ which, given a fully qualified multi-feature name, returns its number of instances. E.g., $\text{card}(CFDP.Entity)$ always returns 2. We also define two new types of constraints: $\forall (m) \{ \phi \}$ and $\exists (m) \{ \phi \}$. Intuitively, they specify that for each (resp. at least one) instance of a multi-feature $m$, the sub-tree of this instance satisfies the constraint $\phi$. For example, the constraint $\forall (CFDP.Entity) \{ Snd_{min} \lor Rcv_{min} \}$ is satisfied if and only if every instance of Entity has at least one child. As we will see, a notion of context is required for defining the semantics of such formulae.
The last challenge is related to constraints over attributes. If a feature is not part of a product, references to its attributes point to an unknown value. In this case, it is undetermined how the constraint must be evaluated. Our new types of constraints already provide a solution to that problem by specifying constraints within the context of an instance. The evaluation of a constraint is thus performed under the assumption that the instance exists within its context. Still, this issue makes such descriptions error-prone.
B. Abstract Syntax of TVL*
Now that the new TVL* constructs have been informally introduced, we give them an abstract syntax and a formal semantics. Note that this abstract syntax remains valid for most feature modelling languages that support multi-features and attributes, and whose diagrams follow a tree structure.
**Definition 2** A TVL* model is a tuple $(F, r, DE, \omega, \lambda, A, \rho, \tau, \Phi)$ where $F$ is a non-empty set of features, $r \in F$ is the root, $A$ is the set of attributes, and:
- $DE \subseteq F \times F$ is the decomposition (hierarchy) relation between features. For any $(f, f') \in DE$, $f$ is the parent and $f'$ is the child feature. By $\text{children}(f)$ we denote the set of child features of $f$.
- $\omega : F \to \mathbb{N} \times (\mathbb{N} \cup \{\ast\})$ gives the cardinality of each feature. If $\omega(f) = (n, m)$ then $n$ is the minimum cardinality of $f$ and $m$ its maximum cardinality. If $m = \ast$, the maximum number of instances is unbounded (any finite value is allowed).
- $\lambda : F \rightarrow \mathbb{N} \times (\mathbb{N} \cup \{\ast\})$ indicates the decomposition operator of a feature, i.e., its group cardinalities. It follows the same pattern as $\omega$.
- $\rho : A \rightarrow F$ is a total function that returns the feature declaring a given attribute.
- $\tau : A \rightarrow \{\text{int, bool}\}$ assigns a type to each attribute.
- $\Phi$ is a Boolean-valued expression over the features $F$ and the attributes $A$.
Furthermore, each FM must satisfy the following well-formedness rules: (1) $r$ exists and is unique: $\omega(r) = (1,1)$; (2) any terminal feature $f$ has no child: $\lambda(f) = (0,0)$; (3) $DE$ is acyclic, that is, there exist no $n_{1}, \ldots, n_{k} \in F$ such that $(n_{k}, n_{1}) \in DE \land \forall i \in \{1..k-1\} \cdot (n_{i}, n_{i+1}) \in DE$.
The main difference between this new syntax and that of TVL [4] is the definition of \( \omega \). In the latter, \( \omega \) was meant to identify optional features. Here, we make it more general since it defines the cardinality of a feature. Although this difference might seem thin at first sight, we already showed in Section III-A that it raises a number of important issues. The definition of \( \Phi \) is also different here, as we have introduced a new operator and two new types of constraints. For convenience, we suppose that attributes are either integer or Boolean. Enumeration types can be mapped to integer and are thus implicitly supported too. The introduction of real attributes in the syntax is straightforward, but requires additional type checking. The current version of our tools (presented further in Section V) does not support them.
As explained in Section III-A, only a fully qualified name can be used to identify a specific instance or an attribute. Formally, it is a tuple of the form $(f_{0}, i_{0}), (f_{1}, i_{1}), \ldots, (f_{k}, i_{k}) \in (F \times \mathbb{N})^{k+1}$ or $(f_{0}, i_{0}), (f_{1}, i_{1}), \ldots, (f_{k}, i_{k}), a \in (F \times \mathbb{N})^{k+1} \times A$, respectively for an instance or an attribute. A fully qualified instance name must satisfy the following: (1) the first feature is the root indexed by 0: $(f_{0}, i_{0}) = (r, 0)$; (2) the decomposition hierarchy is respected: $\forall j \cdot 0 < j \leq k \Rightarrow f_{j} \in \text{children}(f_{j-1})$; (3) each index satisfies the cardinalities of its associated feature: $\forall j \cdot 0 < j \leq k \land \omega(f_{j}) = (n, m) \Rightarrow 0 \leq i_{j} < m$. A fully qualified attribute name $(f_{0}, i_{0}), (f_{1}, i_{1}), \ldots, (f_{k}, i_{k}), a$ is valid if and only if $(f_{0}, i_{0}), (f_{1}, i_{1}), \ldots, (f_{k}, i_{k})$ is valid and $a$ is an actual attribute of $f_{k}$. From now on, we assume the validity of every fully qualified name.
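To make these three rules concrete, here is a minimal Python sketch, not taken from the paper or its tools, that checks them for a name represented as a list of (feature, index) pairs. The `children` and `omega` dictionaries and the CFDP fragment at the bottom are hypothetical stand-ins for the model of Figure 1.

```python
# Illustrative sketch only -- not the authors' implementation.
# children[f] is the set of child features of f; omega[f] = (min, max)
# gives the feature cardinality, with "*" standing for "unbounded".

def is_valid_instance_name(name, root, children, omega):
    """name is a list of (feature, index) pairs, e.g.
    [("CFDP", 0), ("Entity", 1), ("Snd_min", 0)]."""
    # (1) the first pair must be the root indexed by 0
    if name[0] != (root, 0):
        return False
    for j in range(1, len(name)):
        f_prev, _ = name[j - 1]
        f, i = name[j]
        # (2) the decomposition hierarchy must be respected
        if f not in children.get(f_prev, set()):
            return False
        # (3) the index must respect the feature's maximum cardinality
        _, max_card = omega[f]
        if max_card != "*" and not (0 <= i < max_card):
            return False
    return True

# Hypothetical fragment of the CFDP model from Figure 1:
children = {"CFDP": {"Entity"}, "Entity": {"Snd_min", "Rcv_min"}}
omega = {"CFDP": (1, 1), "Entity": (2, 2), "Snd_min": (0, 1), "Rcv_min": (0, 1)}
print(is_valid_instance_name(
    [("CFDP", 0), ("Entity", 1), ("Snd_min", 0)], "CFDP", children, omega))  # True
```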
C. Formal Semantics
The purpose of TVL is to define the set of valid products in an SPL, i.e., the valid combinations of features and attribute values. We mentioned before that in SPLs without attributes and multi-features, a product is uniquely identified by a set of features. For reasons explained above, this representation is not appropriate when multi-features or attributes occur in the FM. Therefore we redefine a product as a couple $(\mathcal{F}, \mathcal{A})$ where $\mathcal{F} \subseteq (F \times \mathbb{N})^{+}$ is a set of instances and $\mathcal{A} : ((F \times \mathbb{N})^{+} \times A) \rightarrow \mathbb{Z} \cup \{\top, \bot\}$ is a partial function that associates attributes with values.
We require that all the attributes of any instance in $\mathcal{F}$ have a value defined by $\mathcal{A}$:
\[
\forall (f_{0}, i_{0}) \ldots (f_{k}, i_{k}) \in \mathcal{F}, \forall a \in A \cdot f_{k} = \rho(a) \Rightarrow ((f_{0}, i_{0}) \ldots (f_{k}, i_{k}), a) \in \text{dom}(\mathcal{A}).
\]
Conversely, for any attribute whose value is defined, $\mathcal{F}$ must include the corresponding instance:
\[
\forall ((f_{0}, i_{0}) \ldots (f_{k}, i_{k}), a) \in \text{dom}(\mathcal{A}) \cdot (f_{0}, i_{0}) \ldots (f_{k}, i_{k}) \in \mathcal{F}.
\]
Although we could ignore these two assumptions, they simplify the definition of the semantics.
**Definition 3** Let \( M = (F, r, DE, \omega, \lambda, A, \rho, \tau, \Phi) \) be a TVL* model and \( p = (\mathcal{F}, \mathcal{A}) \) be a product. Then \( p \) is valid according to \( M \), noted \( p \models M \), if and only if, for all \( (f_{0}, i_{0}) \ldots (f_{k}, i_{k}) \in \mathcal{F} \):
- \( \mathcal{F} \) contains the root: \( (r, 0) \in \mathcal{F} \);
- \( \mathcal{F} \) respects the decomposition hierarchy: \( k > 0 \Rightarrow (f_{k-1}, f_{k}) \in DE \);
- every instance in \( \mathcal{F} \) has its parent in \( \mathcal{F} \) as well: \( k > 0 \Rightarrow (f_{0}, i_{0}) \ldots (f_{k-1}, i_{k-1}) \in \mathcal{F} \);
- \( \mathcal{F} \) satisfies the group cardinalities: \( \lambda(f_{k}) = (n, m) \Rightarrow n \leq \lvert \{ f' \in \text{children}(f_{k}) \mid (f_{0}, i_{0}) \ldots (f_{k}, i_{k})(f', 0) \in \mathcal{F} \} \rvert \leq m \);
- the \( i \)-th instance of a feature exists only if the \( (i{-}1) \)-th does: \( i_{k} > 0 \Rightarrow (f_{0}, i_{0}) \ldots (f_{k}, i_{k} - 1) \in \mathcal{F} \);
- \( \mathcal{F} \) satisfies the feature cardinalities: \( \omega(f_{k}) = (n, m) \Rightarrow (i_{k} < m \land (n > 0 \Rightarrow (f_{0}, i_{0}) \ldots (f_{k}, n - 1) \in \mathcal{F})) \);
- \( p \) satisfies the additional constraints \( \Phi \).
We consider that instances are contiguously placed in arrays, which simplifies the verification of multi-features and constraints over them.
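The structural conditions of Definition 3 can be checked mechanically. The following Python sketch is illustrative only and is not the tool's implementation; it relies on the same array-contiguity convention, assumes that `parent`, `children`, `omega` and `lam` are lookup tables derived from the model, and ignores attribute values and the additional constraints $\Phi$.

```python
# Illustrative sketch of the structural part of Definition 3.
# A product is given as a set of instance names, each a tuple of
# (feature, index) pairs.

def is_valid_product(instances, root, parent, children, omega, lam):
    if ((root, 0),) not in instances:               # the root must be present
        return False
    for name in instances:
        *prefix, (f, i) = name
        prefix = tuple(prefix)
        if prefix:
            # decomposition hierarchy and presence of the parent instance
            if parent[f] != prefix[-1][0] or prefix not in instances:
                return False
        # contiguity: the i-th instance exists only if the (i-1)-th does
        if i > 0 and prefix + ((f, i - 1),) not in instances:
            return False
        # feature cardinality (n, m): index below m, at least n instances
        n, m = omega[f]
        if m != "*" and i >= m:
            return False
        if n > 0 and prefix + ((f, n - 1),) not in instances:
            return False
        # group cardinality: number of distinct child features present
        gn, gm = lam[f]
        present = {c for c in children.get(f, set()) if name + ((c, 0),) in instances}
        if len(present) < gn or (gm != "*" and len(present) > gm):
            return False
    return True
```

Representing a product as a set of tuples keeps the membership tests of the definition one-to-one with set lookups; a real implementation would index instances per parent to avoid the repeated scans.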
Due to lack of space, we do not provide all the satisfaction rules for the additional constraints. However, we detail them for the constructs we have added. As explained above, the constraints of the form \( \text{forall}(m) \{\phi\} \) and \( \text{exists}(m) \{\phi\} \) introduce the notion of context, i.e., a sub-tree of the model. For the context to be well-defined, it is sufficient to know the instance it refers to.
**Definition 4** Let \( M = (F, r, DE, \omega, \lambda, A, \rho, \tau, \Phi) \) be a TVL* model. Then \( (f_{0}, i_{0}) \ldots (f_{k}, i_{k}) \in (F \times \mathbb{N})^{+} \) is a context of \( M \) if it is a valid fully qualified feature name.
The initial context is the root feature. A product \( p = (\mathcal{F}, \mathcal{A}) \) must thus satisfy the additional constraints \( \Phi \) in the context \( (r, 0) \): \( p \models_{(r, 0)} \Phi \). The context changes only when one of the above two constructs is encountered:
- \( p \models_{c} \text{forall}(m) \{\phi\} \Leftrightarrow \forall i \cdot (c, m, i) \in \mathcal{F} \Rightarrow p \models_{(c, m, i)} \phi \);
- \( p \models_{c} \text{exists}(m) \{\phi\} \Leftrightarrow \exists i \cdot (c, m, i) \in \mathcal{F} \land p \models_{(c, m, i)} \phi \).
Here, \( c \) merely acts as a prefix for feature names. References to multi-features can occur in constraints, and express that at least one instance of the feature exists. Given our convention about array contiguity, this boils down to requiring the presence of the instance at index 0: \( p \models_{c} m \Leftrightarrow (c, m, 0) \in \mathcal{F} \). As in
TVL [4], the semantics of some constraints depends on the evaluation of arithmetic expressions, variables, or operators. For our new \( \text{card} \) operator, the evaluation in a context \( c \) is given by: \( \text{card}(m) = \lvert \{ i \mid (c, m, i) \in \mathcal{F} \} \rvert \). If the \( \text{card} \) operator is compared to a constant value \( v \) then the constraint can be reduced to a Boolean form:
\[
p \models_{c} \text{card}(m) \geq v \iff (c, m, v - 1) \in \mathcal{F}
\] (1)
We will also make use of this property to reduce the cost of verifying these constraints in behavioural specifications.
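These context-dependent rules can be read operationally. Below is a small Python sketch, illustrative only; it reuses the tuple-of-pairs representation of instance names from the earlier sketches and relies on the array-contiguity convention.

```python
# Illustrative sketch only. A context c is an instance name (a tuple of
# (feature, index) pairs); extending it with the i-th instance of a
# multi-feature m gives the name c + ((m, i),).

def card(instances, c, m):
    """Number of instances of m under context c (the card operator)."""
    i = 0
    while c + ((m, i),) in instances:   # contiguity: indices 0..k-1 exist
        i += 1
    return i

def holds_forall(instances, c, m, phi):
    """p |=_c forall(m){phi}: phi must hold under every instance of m."""
    return all(phi(c + ((m, i),)) for i in range(card(instances, c, m)))

def holds_exists(instances, c, m, phi):
    """p |=_c exists(m){phi}: phi must hold under at least one instance."""
    return any(phi(c + ((m, i),)) for i in range(card(instances, c, m)))

def card_at_least(instances, c, m, v):
    """Equation (1): card(m) >= v reduces to the presence of index v-1."""
    return v <= 0 or c + ((m, v - 1),) in instances
```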
IV. A BEHAVIOURAL SPECIFICATION LANGUAGE FOR MULTI-FEATURES AND ATTRIBUTES
The semantics of TVL* allows us to determine which combinations of features constitute valid products. Still, we need a way to express the behaviour of these products in a concise (i.e., not individual) manner. Based on the principles of TVL*, we generalize fPromela with feature expressions supporting multi-features and attributes. Then we can verify models of this language against properties using a model checking tool (described in Section V). We name our extension fPromela*.
fPromela* extends fPromela’s syntax with more general data types for representing features. For example, the data structure defined for the \( \text{Entity} \) feature would be:
```c
typedef _Entity {
    bool is_in;
    bool Snd_min;
    bool Rcv_min;
};
```
The first field of a structure is a Boolean called \( \text{is\_in} \). Its truth value defines the presence or the absence of a feature. The structures can be nested. Hence if, say, \( \text{Snd\_min} \) is not a terminal feature, we can declare it as an instance of another structure. Attributes are declared as integer fields. More generally, there exists a transformation from a TVL* model to a set of fPromela* data structures. Each data structure represents a non-terminal feature, and contains one field per attribute and child feature. The type of the latter is either Boolean or another data structure, respectively if the feature is terminal or not. When the maximum cardinality \( m \) of a feature satisfies \( 1 < m < \infty \), the feature must be declared as an \( m \)-sized array of its corresponding type:
```c
typedef _features {
    _Entity Entity[2];
    ...
};
```
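The transformation just described can be sketched as a simple tree walk. The following Python fragment is illustrative only: `children`, `attributes` and `omega` are assumed lookup tables derived from the TVL* model, and the emitted text merely mimics the typedefs shown above (in particular, it names the root structure after the root feature rather than `_features`).

```python
# Illustrative sketch of the TVL* -> fPromela* data-structure generation.
def emit_typedefs(feature, children, attributes, omega, out):
    kids = children.get(feature, set())
    if not kids:                      # terminal features need no typedef
        return
    for child in kids:                # emit children first so their types exist
        emit_typedefs(child, children, attributes, omega, out)
    lines = ["typedef _%s {" % feature, "    bool is_in;"]
    for a in sorted(attributes.get(feature, set())):
        lines.append("    int %s;" % a)           # attributes become int fields
    for child in sorted(kids):
        ctype = "bool" if not children.get(child) else "_%s" % child
        _, m = omega[child]
        suffix = "[%d]" % m if isinstance(m, int) and m > 1 else ""
        lines.append("    %s %s%s;" % (ctype, child, suffix))
    lines.append("};")
    out.append("\n".join(lines))

out = []
emit_typedefs("CFDP", {"CFDP": {"Entity"}, "Entity": {"Snd_min", "Rcv_min"}},
              {}, {"Entity": (2, 2), "Snd_min": (0, 1), "Rcv_min": (0, 1)}, out)
print("\n\n".join(out))
```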
Because fPromela does not allow arrays of an unknown size, the transformation does not support infinite maximum cardinality (represented as \( \ast \) in TVL*). However, we may assume that in real cases, an upper bound can be defined.
Unlike in fPromela, the use of nested user-defined structures is compulsory in fPromela*. Indeed, a fully qualified name is required to refer to an instance (see Section III-A). fPromela* offers a simple way to represent valid fully qualified names:
references to fields of the corresponding user-defined structures, starting from that of the root feature. For example, the \( \text{Snd\_min} \) child feature of the second instance of \( \text{Entity} \) is uniquely referred to by \( \text{CFDP.Entity[1].Snd\_min} \).
fPromela* generalizes feature expressions as well. We distinguish between static and dynamic formulae. The former specify one of the following requirements: (1) the presence or absence of a given instance (e.g., \( \text{CFDP.Entity[1].Snd\_min} \)); (2) constraints over attributes (e.g., \( \text{CFDP.Message.size}>0 \)); and (3) constraints over number of instances (e.g., \( \text{card(CFDP.Entity)}==2 \)). As in TVL*, the third type of constraints is subsumed by the first one when \( \text{card} \) is compared to integers (see Equation 1). We can determine the products able to execute a transition associated with a static formula without executing the fPromela* model.
On the contrary, dynamic formulae are evaluated at runtime. Typically, they define constraints over attributes and number of instances in terms of variable values. For example, one could analyse the content of a CFDP message by iterating on its size:
```c
i = 0;
do
:: CFDP.Message.size > i ->
    ...
    i++;
:: else -> break;
od;
```
Note that for a given product, the actual size of the above model (in terms of states) depends on the value of the attribute in the product. Attributes in fPromela* are thus the key for specifying variable-size models. Instead of attributes, we could also use the number of instances of a feature. In Section V, we show that it is more efficient to verify a variable-size model rather than a set of fixed-size models, i.e. one per possible size.
Let us illustrate the last addition of fPromela* by means of the CFDP model. As mentioned before, two processes are declared and model the behaviour of two spacecraft. These may have different configurations. In the partial model shown in Figure 2, however, we find but an ambiguous reference to feature \( \text{Snd\_min} \). We have seen that the introduction of multi-features permits the distinction between the features of the two entities. Still, we need a way to associate each process with the corresponding instance of \( \text{Entity} \). To that aim, we define the feature context of a process as an instance of a feature. We propose two constructs to associate a context to a process. The first specifies it explicitly in the `run` statement of fPromela, which is used to dynamically instantiate a new process:
```c
run {CFDP.Entity[1]} cfdp_entity(...)
```
The second method consists in declaring that the number of instances of a process is equal to the number of instances of a specific feature:
```c
active {card(CFDP.Entity)} cfdp_entity(...)
```
In this case, the context of each instance of `cfdp_entity` is a distinct instance of Entity. A process cannot exist outside its associated context. Accordingly, the first transition of a process is implicitly constrained by the formula representing its context. Once defined, contexts can act as a prefix in feature expressions. For this purpose, one may use the keyword `this`, which points to the context of the process:
```c
if
:: this.Snd_min -> ...
:: else -> ...
fi;
```
References to features outside the context are still possible by using fully qualified names.
More generally, one can regard contexts as an association between an fPromela* process and an FM. Thanks to them, one can describe the behaviour of parallel processes built from different SPLs. Analysing the behaviour of compositions of SPLs rather than individual SPLs is thus made possible. However, this remains outside the scope of this paper and is left for future work.
In [11], we defined the semantics of an fPromela model as an FTS. Since apart from feature expressions, fPromela* does not differ from fPromela, its semantics can be defined in terms of FTS where feature expressions have been generalized. We call the resulting formalism FTS*.
V. IMPLEMENTATION AND EVALUATION
Since the semantics of an fPromela* model is an FTS, we may reuse algorithms based on this formalism [7]–[9]. We describe how we implemented the approach on top of SNIP [11], which we further used to carry out experiments.
A. Tool Description
SNIP is a model checker based on the FTS formalism. Figure 4 gives an overview of its architecture. Given an fPromela model, the parser builds a program graph representing it. The semantic engine generates, on the fly, the FTS equivalent to the program graph. The verifier module checks the FTS using dedicated algorithms [7], [9]. The feature handler abstracts feature formulae from their representations, whereas the feature solver checks their satisfiability. The TVL library is used to compute the feature formula representing the valid products of a TVL model.
No change to the verifier module is required since it is independent of how feature formulae are represented. On the contrary, we extended or built new variants of the other modules. We updated the parser so that feature formulae are declared and referenced as user-defined data structures. We also relaxed syntactic constraints for feature formulae and modified the construction of program graphs accordingly. Dynamic formulae are handled by the semantic engine, which builds them at runtime.
Given that more general feature formulae are allowed, the previous feature handler and checker are not appropriate anymore. Both the representation and satisfiability checking of features were based on Binary Decision Diagrams (BDDs) [19] and their implementation in the CUDD library (vlsi.colorado.edu/~fabio/CUDD). While BDDs have proven their efficiency in SNIP [11], they are unable to represent arithmetic constraints. Multi-features alone are not a problem in this regard but feature attributes are not necessarily Boolean. Satisfiability Modulo Theories (SMT) solvers appear as natural candidates for checking arithmetic constraints. Accordingly, our new feature solver is linked to Microsoft’s Z3 [20]. Z3 implements many advanced techniques and structures for SMT solving. We thus implemented a new feature handler that relies on them. Equipped with Z3, our tool is able to check the satisfiability of formulae over both the features and their attributes. However, this comes at the price of a significant reduction in efficiency, as further experiments reveal. Consequently, we developed two variants of the tool: one equipped with CUDD and the other with Z3; the former supports only multi-features whereas the latter handles attributes as well. They are now included in the ProVeLines tool suite (http://info.fundp.ac.be/fts/provelines).
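To give a flavour of why an SMT solver is needed once attributes enter feature formulae, here is a small example using Z3's Python bindings (the tool itself links against Z3 directly; the snippet, the variable names and the bound on the attribute are merely illustrative):

```python
from z3 import Bool, Int, And, Solver, sat

# A feature formula mixing a Boolean feature and an integer attribute:
# the Snd_min feature is present and the Message.size attribute is bounded.
snd_min = Bool("CFDP.Entity[1].Snd_min")
size = Int("CFDP.Message.size")

solver = Solver()
solver.add(And(snd_min, size > 0, size <= 64))
if solver.check() == sat:
    print(solver.model())   # a witness assignment, including a concrete size
```

A BDD can encode the Boolean part of such a formula, but not the arithmetic constraint over `size`, which is why the Z3-based variant is required as soon as attributes appear.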
B. Overhead Measurement
We carried out experiments to evaluate the benefits of our approach, and to quantify the overhead that results from the additional verifications (see above) and/or the use of Z3 in place of BDD-based satisfiability solvers. The following tools are involved in the experiments: SNIP (as described in [11]), our BDD-based variant that supports multi-features (SNIP-MF), and our second variant equipped with Z3 (SNIP-Z3).
The addition of multi-features to fPromela* generates additional computations in the semantic engine of SNIP. Moreover, feature attributes require more general satisfiability-checking techniques, i.e., SMT solvers. These two generalizations are likely to negatively impact the performance of our tool. To quantify the resulting loss, we compare the time needed to model check three fPromela models against five properties each. The models include neither multi-features nor attributes. They are: (1) a minepump system [7], [21] (250,561 states to explore); (2) a restricted version of CFDP where attributes have received a fixed value and the two instances of Entity are regarded as a single feature (1,801,581 states to explore); (3) an elevator model inspired by Plath and Ryan [22] (58,945,690 states to explore). We checked different kinds of properties including deadlock freedom, safety properties, and liveness properties. All benchmarks were run on a MacBook Pro with a 2.8 GHz Intel Core i7 processor and 8 GB of DDR3 RAM. We coded an automated script to execute them. To avoid random variations, we repeated each experiment several times and computed the average.

Fig. 4: Architecture of SNIP.
Results are shown in Table I. We observe that multi-feature support in our BDD-based tool generates but a small increase in verification time (between 2% and 18% for Minepump; between 0% and 5% for CFDP; between 1% and 12% for Elevator). This increment does not depend on the size of the model. In every model, the increase is less than 6% for the longest-to-check properties. This indicates that the size of the property has no impact on the loss of performance either.
On the contrary, the Z3 variant suffers from a large performance drop. In Minepump and CFDP, the checking time ranges from 21 to 31 times more than for the other two tools. The tool performs even worse on Elevator; the verification time is multiplied by a factor between 468 and 962. For the first and fifth properties of that model, we could not even get accurate results; according to our estimations, verifying those properties would take 24 and 15 hours, respectively.
For SNIP and SNIP-MF, the absolute times needed for satisfiability checking (Sat) and formula manipulation (Man) are identical. Indeed, given that all the models are free from multi-features and attributes, the only difference between the two tools is the detection of dynamic formulae, which does not depend on Sat or Man. For these tools, it turns out that Sat constitutes but a small part of the total time (between 6% and 15%). Although this share tends to slightly increase with the size of the model, it is not a major threat to scalability. On the contrary, Man is an increasingly important part of the total time. While the time share does not grow significantly between Minepump and CFDP, it nearly doubles in the Elevator case, ranging from 38% to 53%. This indicates that managing a large number of BDDs is the main challenge for efficient BDD-based tools.
In the third tool, Sat is the most costly computation. Its share is already between 73% and 82% for Minepump and CFDP, reaching astonishing proportions in the Elevator case (about 95%). This clearly shows that expensive satisfiability checking threatens scalability. We measured the average time for a Sat computation. It amounts to $6.59 \times 10^{-4}$ seconds for SNIP-Z3 as opposed to $3.36 \times 10^{-7}$ in BDD-based tools. In every model and property, the remaining computation time is due to feature manipulations for the most part. The average time of these is $1.62 \times 10^{-5}$ seconds as opposed to $3.68 \times 10^{-7}$ in the other tools. Together, Sat and Man always make up more than 92% of the overall verification time, and even over 95.5% in Elevator.
In our context, SMT-based solutions thus appear less efficient than BDDs. This has to be confirmed through the replacement of Z3 by other SMT solvers. Indeed, Z3 offers many facilities that we do not need. Using another solver specifically designed for our purpose might yield better results. We leave that for future work. Nevertheless, the poor performance raises the need for alternatives to SMT.
C. Explicit Attributes versus Boolean Conversion
As an alternative to Z3, we propose to transform integer attributes into a set of Boolean variables, *i.e.,* one per attribute value. This results in a model without attributes, which can thus be checked by SNIP-MF. Each time a constraint over attributes is encountered in an fPromela model, it is first re-written in a form where only Boolean negation and greater-than operators occur. For instance, the constraint $CFDP.Message.size \leq 3$ is converted into $\neg (CFDP.Message.size > 3)$. Then, a Boolean variable representing that constraint is created and
TABLE I: Verification time of each tool in seconds (increase relative to SNIP in parentheses).

| Model & Property | SNIP | SNIP-MF | SNIP-Z3 |
|---|---|---|---|
| Minepump #1 | 2.72 | 3.21 (+18%) | 78.22 (+2,776%) |
| Minepump #2 | 2.88 | 2.95 (+2%) | 67.86 (+2,256%) |
| Minepump #3 | 4.90 | 5.47 (+12%) | 88.65 (+1,709%) |
| Minepump #4 | 2.96 | 3.42 (+16%) | 65.72 (+2,120%) |
| Minepump #5 | 9.22 | 9.76 (+6%) | 197.43 (+2,041%) |
| CFDP #1 | 10.96 | 11.00 (+1%) | 282.47 (+2,477%) |
| CFDP #2 | 2.23 | 2.41 (+2%) | 63.37 (+2,274%) |
| CFDP #3 | 2.88 | 2.95 (+2%) | 25.62 (+2,208%) |
| CFDP #4 | 3.82 | 4.00 (+5%) | 113.36 (+2,867%) |
| CFDP #5 | 17.58 | 18.52 (+5%) | 548.98 (+3,023%) |
| Elevator #1 | 280.50 | 301.10 (+5%) | TIMEOUT |
| Elevator #2 | 9.53 | 10.47 (+10%) | 9,170.05 (+9,144%) |
| Elevator #3 | 7.07 | 7.90 (+12%) | 3,316 (+46,799%) |
| Elevator #4 | 3.5 | 3.55 (+1%) | 1,858.65 (+53,004%) |
| Elevator #5 | 235.67 | 252.94 (+6%) | 9,172.05 (+96,144%) |
TABLE II: Share of verification time due to satisfiability checking and formula manipulation (in percentage).

| Model & Property | Sat (SNIP / SNIP-MF) | Man (SNIP / SNIP-MF) | Sat (SNIP-Z3) |
|---|---|---|---|
| Minepump #1 | 8.50 | 27.72 | 72.28 |
| Minepump #2 | 6.07 | 18.26 | 71.73 |
| Minepump #3 | 5.94 | 17.87 | 52.12 |
| Minepump #4 | 6.02 | 18.18 | 52.17 |
| Minepump #5 | 5.98 | 17.36 | 52.14 |
| CFDP #1 | 10.14 | 28.79 | 73.07 |
| CFDP #2 | 11.79 | 27.24 | 77.85 |
| CFDP #3 | 8.44 | 20.77 | 77.14 |
| CFDP #4 | 9.25 | 22.70 | 81.87 |
| CFDP #5 | 9.38 | 24.32 | 78.57 |
| Elevator #1 | 16.70 | 52.82 | 78.57 |
| Elevator #2 | 12.47 | 37.97 | 95.45 |
| Elevator #3 | 12.09 | 38.23 | 95.48 |
| Elevator #4 | 13.17 | 39.94 | 95.48 |
| Elevator #5 | 11.31 | 37.57 | 75.38 |
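A minimal Python sketch of the rewriting step described in this section (the triple-based constraint representation and the generated variable names are hypothetical assumptions, not the tool's actual encoding):

```python
# Illustrative sketch of the attribute-to-Boolean rewriting step.
def to_boolean_form(attr, op, const):
    """Rewrite an atomic constraint so that only negation and '>' occur,
    then return the (possibly negated) Boolean variable standing for it."""
    if op == ">":
        rewritten, negated = (attr, ">", const), False
    elif op == "<=":                      # attr <= c  ==  not (attr > c)
        rewritten, negated = (attr, ">", const), True
    elif op == "<":                       # attr < c   ==  not (attr > c - 1)
        rewritten, negated = (attr, ">", const - 1), True
    elif op == ">=":                      # attr >= c  ==  attr > c - 1
        rewritten, negated = (attr, ">", const - 1), False
    else:
        raise ValueError("unsupported operator: " + op)
    var = "%s_gt_%d" % (rewritten[0].replace(".", "_"), rewritten[2])
    return ("!" + var) if negated else var

print(to_boolean_form("CFDP.Message.size", "<=", 3))  # !CFDP_Message_size_gt_3
```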
D. Variable-Size Models
In Section IV, we mentioned that multi-features and attributes can represent variable-size models. Our new tools thus have the capability to verify, in a single execution, a set of models varying only by their size. They avoid redundant checking of the common parts of these models, and are thus potentially more efficient. However, multi-feature support has an impact on performance. We must therefore evaluate whether the avoidance of redundancy offsets the loss in efficiency.
We consider the sieve of Eratosthenes modelled in fPromela. The size of the model is determined by the number of primes to compute. We compare the performance of (1) SNIP applied on models where the number of primes is fixed, and (2) SNIP-MF executed on a model where this number is equal to the number of instances of a feature. For a maximum number $n$, we compute the time needed by SNIP to successively check the models with 1 to $n$ primes to compute. Then, we execute SNIP-MF on the variable-size model.
Figure 5a shows the resulting verification times for $n$ ranging from 30 to 48, whereas Figure 5b presents the number of explored states. Although the checking time always grows exponentially, it turns out that SNIP-MF performs increasingly better than SNIP. As illustrated in Figure 5b, this is because the former explores fewer and fewer states than the latter. However, we observe that for SNIP-MF, the verification time rises more rapidly than the number of explored states; the overhead due to multi-features again has a visible impact on the overall performance. Moreover, the benefit might not always exist. When the largest model has significantly more states than the smaller ones, the time needed to check these is negligible. In such cases, SNIP-MF might not be more efficient than SNIP applied on each model size.
VI. RELATED WORK
Other SPL model checkers equipped with specification languages exist. Plath and Ryan [22] designed fSMV in the context of feature interaction detection. This is a compositional language where features are described in separate modules. We previously developed an fSMV model checker [8]. Gruler et al. [10] extended the CCS process algebra with a variability operator that models alternative choices. Their language is however less human-readable than fPromela. Apel et al. [5] developed SPLVerifier, a tool chain for product-line model checking. Features are specified in separate modules written in C or Java. The framework of Asirelli et al. [6] centred on modal transition systems does not include high-level specification languages. None of the above considers attributes and multi-features.
A few attempts to reason on FMs with attributes and multi-features exist. Czarnecki et al. [13] define the semantics of an FM with feature cardinalities as the semantics of a context-free grammar derived from the FM. Michel et al. [14] define another semantics based on multisets. Czarnecki and Kim [23] argue that OCL is suitable for expressing additional constraints over multi-features. Mazo et al. [24] use constraint logic programming to reason on FMs with multi-features and attributes. However, their procedure relies on a heavyweight transformation from visual FMs to XML, then to Gnu-Prolog’s input syntax. Zhang et al. [25] propose a BDD-based approach for reasoning over multi-features. However, it does not support attributes and does not rely on an array-based semantics. A broader survey of feature cardinality analyses is found in [14].
SMT solvers are increasingly used in model checking. They have proven their usefulness in bounded model checking and verification of infinite-state and array-based systems [26]. We showed that SPL model checking is another interesting application domain for SMT solvers. However, they have to be combined with heuristics in order to provide an acceptable level of performance in our context.
VII. CONCLUSION
We deal with feature cardinalities and numeric features in SPL model checking. Multi-features and feature attributes have received little support, be it in FMs or behavioural specification languages. These constructs pose many problems of both theoretical and technical nature. Nonetheless, the definition of languages supporting them is essential. Both constructs are useful in terms of expressiveness, conciseness, and readability. In a behaviour specification, they allow the definition of variable-size models. Our experiments showed that it is easier to verify such models than to successively check the same model with different sizes. Multi-features are also useful for specifying parallel processes that differ by their configuration. To deal with them, we introduced the notion of feature context for parallel processes. More generally, contexts can relate parallel processes with different FMs. Combined with feature-model composition [27], this result opens the way for behavioural verification of SPL compositions. This is an interesting direction for our future work.
Handling attributes in SPL model checking is difficult. SMT solvers appeared as natural candidates to represent and reason over numeric feature formulae. However, our experiments with Microsoft’s Z3 revealed that the overhead of SMT satisfiability checking is too high, which forced us to consider alternatives like converting attributes into Boolean variables. Yet, we need but a small subset of Z3’s capabilities. Another solver optimised for our purpose could perform better. A complementary solution is not to check feature formulae each time they are needed. This leads to exploring more states than necessary but reduces the number of calls to the solver.
Moreover, Boolean conversion is not always practical. The behavioural models we considered do not include quantitative aspects like real-time or cost/rewards. Feature attributes in these contexts likely represent more complex forms of variability, e.g., data throughput, processor speed, or energy consumption. Model checking such SPLs requires the combination of this work with quantitative formalisms like Featured Timed Automata [28]. This is non-trivial; several theoretical issues like decidability are expected. On the technical side, the use of SMT solvers might be a mandatory step. Still, it constitutes an interesting problem and we will most likely pursue our work in that direction.
ACKNOWLEDGEMENTS
We thank Philippe Warnon and Raphael Michel for their suggestions regarding TVL’s syntax, as well as Andreas Classen for proofreading. This work was funded by the Fund for Scientific Research – FNRS in Belgium (project FC 91490).
REFERENCES
Chapter 1. Introduction to Programming
In This Chapter
In this chapter we will take a look at the basic programming terminology and we will write our first C# program. We will familiarize ourselves with programming – what it means and its connection to computers and programming languages.
Briefly, we will review the different stages of software development.
We will introduce the C# language, the .NET platform and the different Microsoft technologies used in software development. We will examine what tools we need to program in C#. We will use the C# language to write our first computer program, compile and run it from the command line as well as from Microsoft Visual Studio integrated development environment. We will review the MSDN Library – the documentation of the .NET Framework. It will help us with our exploration of the features of the platform and the language.
What Does It Mean "To Program"?
Nowadays computers have become irreplaceable. We use them to solve complex problems at the workplace, look for driving directions, have fun and communicate. They have countless applications in the business world, the entertainment industry, telecommunications and finance. It’s not an overstatement to say that computers build the neural system of our contemporary society and it is difficult to imagine its existence without them.
Despite the fact that computers are so wide-spread, few people know how they really work. In reality, it is not the computers, but the programs (the software), which run on them, that matter. It is the software that makes computers valuable to the end-user, allowing for many different types of services that change our lives.
How Do Computers Process Information?
In order to understand what it means to program, we can roughly compare a computer and its operating system to a large factory with all its workshops, warehouses and transportation. This rough comparison makes it easier to imagine the level of complexity present in a contemporary computer. There are many processes running on a computer, and they represent the workshops and production lines in a factory. The hard drive, along with the files on it, and the operating memory (RAM) represent the warehouses, and the different protocols are the transportation systems, which provide the input and output of information.
The different types of products made in a factory come from different workshops. They use raw materials from the warehouses and store the completed goods back in them. The raw materials are transported to the warehouses by the suppliers and the completed product is transported from the warehouses to the outlets. To accomplish this, different types of transportation are used. Raw materials enter the factory, go through different stages of processing and leave the factory transformed into products. Each factory converts the raw materials into a product ready for consumption.
**The computer is a machine for information processing.** Unlike the factory in our comparison, for the computer, the raw material and the product are the same thing – information. In most cases, the input information is taken from any of the warehouses (files or RAM), to where it has been previously transported. Afterwards, it is processed by one or more processes and it comes out modified as a new product. Web based applications serve as a prime example. They use HTTP to transfer raw materials and products, and information processing usually has to do with extracting content from a database and preparing it for visualization in the form of HTML.
**Managing the Computer**
The whole process of manufacturing products in a factory has many levels of management. The separate machines and assembly lines have operators, the workshops have managers and the factory as a whole is run by general executives. Every one of them controls processes on a different level. The machine operators serve on the lowest level – they control the machines with buttons and levers. The next level is reserved for the workshop managers. And on the highest level, the general executives manage the different aspects of the manufacturing processes in the factory. They do that by issuing orders.
It is the same with computers and software – they have many levels of management and control. The lowest level is managed by the **processor** and its registers (this is accomplished by using machine programs at a low level) – we can compare it to controlling the machines in the workshops. The different responsibilities of the **operating system** (Windows 7 for example), like the file system, peripheral devices, users and communication protocols, are controlled at a higher level – we can compare it to the management of the different workshops and departments in the factory. At the highest level, we can find the **application software**. It runs a whole ensemble of processes, which require a huge amount of processor operations. This is the level of the general executives, who run the whole factory in order to maximize the utilization of the resources and to produce quality results.
The Essence of Programming
The essence of programming is to control the work of the computer on all levels. This is done with the help of "orders" and "commands" from the programmer, also known as programming instructions. To "program" means to organize the work of the computer through sequences of instructions. These commands (instructions) are given in written form and are implicitly followed by the computer (respectively by the operating system, the CPU and the peripheral devices).
A sequence of steps to achieve some goal, complete some work or obtain some result is called an algorithm. This is how programming is related to algorithms. Programming involves describing what you want the computer to do by a sequence of steps, by algorithms.
Programmers are the people who create these instructions, which control computers. These instructions are called programs. Numerous programs exist, and they are created using different kinds of programming languages. Each language is oriented towards controlling the computer on a different level. There are languages oriented towards the machine level (the lowest) – Assembler for example. Others are most useful at the system level (interacting with the operating system), like C. There are also high level languages used to create application programs. Such languages include C#, Java, C++, PHP, Visual Basic, Python, Ruby, Perl, JavaScript and others.
In this book we will take a look at the C# programming language – a modern high level language. When a programmer uses C#, he gives commands at a high level, like a general executive in a factory. The instructions given in the form of programs written in C# can access and control almost all computer resources directly or via the operating system. Before we learn how to write simple C# programs, let’s take a good look at the different stages of software development, because programming, despite being the most important stage, is not the only one.
Stages in Software Development
Writing software can be a very complex and time-consuming task, involving a whole team of software engineers and other specialists. As a result, many methods and practices, which make the life of programmers easier, have emerged. All they have in common is that the development of each software product goes through several different stages:
- Gathering the requirements for the product and creating a task;
- Planning and preparing the architecture and design;
- **Implementation** (includes the writing of program code);
- Product trials (**testing**);
- **Deployment** and exploitation;
- **Support**.
Implementation, testing, deployment and support are mostly accomplished using programming.
### Gathering the Requirements
In the beginning, only the idea for a certain product exists. It includes a list of **requirements**, which define actions by the user and the computer. In the general case, these actions make already existing activities easier – calculating salaries, calculating ballistic trajectories or searching for the shortest route on Google maps are some examples. In many cases the software implements a previously nonexistent functionality such as automation of a certain activity.
The **requirements** for the product are usually defined in the form of documentation, written in English or any other language. There is no programming done at this stage. The requirements are defined by experts, who are familiar with the problems in a certain field. They can also write them up in such a way that they are easy to understand by the programmers. In the general case, these experts are not programming specialists, and they are called **business analysts**.
### Planning and Preparing the Architecture and Design
After all the requirements have been gathered comes the **planning stage**. At this stage, a technical plan for the implementation of the project is created, describing the platforms, technologies and the initial architecture (design) of the program. This step includes a fair amount of creative work, which is done by software engineers with a lot of experience. They are sometimes called **software architects**. According to the requirements, the following parts are chosen:
- The **type of the application** – for example console application, desktop application (GUI, Graphical User Interface application), client-server application, Web application, Rich Internet Application (RIA), mobile application, peer-to-peer application or other;
- The **architecture** of the software – for example single layer, double layer, triple layer, multi-layer or SOA architecture;
- The **programming language** most suitable for the implementation – for example C#, Java, PHP, Python, Ruby, JavaScript or C++, or a combination of different languages;
- The **technologies** that will be used: platform (Microsoft .NET, Java EE, LAMP or another), database server (Oracle, SQL Server, MySQL, NoSQL database or another), technologies for the user interface (Flash, JavaServer Faces, Eclipse RCP, ASP.NET, Windows Forms, Silverlight, WPF or another), technologies for data access (for example Hibernate, JPA or ADO.NET Entity Framework), reporting technologies (SQL Server Reporting Services, Jasper Reports or another) and many other combinations of technologies that will be used for the implementation of the various parts of the software system.
- The **development frameworks** that will simplify the development, e.g. ASP.NET MVC (for .NET), Knockout.js (for JavaScript), Rails (for Ruby), Django (for Python) and many others.
- The number and skills of the **people** who will be part of the development team (big and serious projects are done by large and experienced teams of developers);
- The **development plan** – separating the functionality in stages, resources and deadlines for each stage.
- Others (size of the team, locality of the team, methods of communication etc.).
Although there are many rules facilitating the correct analysis and planning, a fair amount of intuition and insight is required at this stage. This step predetermines the further advancement of the development process. There is no programming done at this stage, only preparation.
**Implementation**
The stage most closely connected with programming is the implementation stage. At this phase, the program (application) is implemented (written) according to the given task, design and architecture. **Programmers** participate by **writing the program** (source) code. The other stages can either be short or completely skipped when creating a small project, but the implementation is always present; otherwise the process is not software development. This book is dedicated mainly to describing the skills used during implementation – creating a **programmer’s mindset** and building the knowledge to use all the resources provided by the C# language and the .NET platform, in order to create software applications.
**Product Testing**
Product testing is a very important stage of software development. Its purpose is to make sure that all the requirements are strictly followed and covered. This process can be implemented manually, but the preferred way to do it is by **automated tests**. These tests are small programs, which automate the trials as much as possible. There are parts of the functionality that are very hard to automate, which is why product trials include automated as well as manual procedures to ensure the quality of the code.
The testing (trials) process is implemented by quality assurance engineers (QAs). They work closely with the programmers to find and correct errors (bugs) in the software. At this stage, it is a priority to find defects in the code and almost no new code is written.
Many defects and errors are usually found during the testing stage and the program is sent back to the implementation stage. These two stages are very closely tied and it is common for a software product to switch between them many times before it covers all the requirements and is ready for the deployment and usage stages.
**Deployment and Operation**
Deployment is the process which puts a given software product into exploitation. If the product is complex and serves many people, this process can be the slowest and most expensive one. For smaller programs this is a relatively quick and painless process. In the most common case, a special program, called installer, is developed. It ensures the quick and easy installation of the product. If the product is to be deployed at a large corporation with tens of thousands of copies, additional supporting software is developed just for the deployment. After the deployment is successfully completed, the product is ready for operation. The next step is to train employees to use it.
An example would be the deployment of a new version of Microsoft Windows in the state administration. This includes installation and configuration of the software as well as training employees how to use it.
The deployment is usually done by the team who has worked on the software or by trained deployment specialists. They can be system administrators, database administrators (DBA), system engineers, specialized consultants and others. At this stage, almost no new code is written but the existing code is tweaked and configured until it covers all the specific requirements for a successful deployment.
**Technical Support**
During the exploitation process, it is inevitable that problems will appear. They may be caused by many factors – errors in the software, incorrect usage or faulty configuration, but most problems occur when the users change their requirements. As a result of these problems, the software loses its abilities to solve the business task it was created for. This requires additional involvement by the developers and the support experts. The support process usually continues throughout the whole life-cycle of the software product, regardless of how good it is.
The support is carried out by the development team and by specially trained support experts. Depending on the changes made, many different people may be involved in the process – business analysts, architects, programmers, QA engineers, administrators and others.
For example, if we take a look at a software program that calculates salaries, it will need to be updated every time the tax legislation, which concerns the serviced accounting process, is changed. The support team’s intervention will be needed if, for example, the hardware of the end user is changed because the software will have to be installed and configured again.
Documentation
The documentation stage is not a separate stage but accompanies all the other stages. Documentation is an important part of software development and aims to pass knowledge between the different participants in the development and support of a software product. Information is passed along between different stages as well as within a single stage. The development documentation is usually created by the developers (architects, programmers, QA engineers and others) and represents a combination of documents.
Software Development Is More than Just Coding
As we saw, software development is much more than just coding (writing code), and it includes a number of other processes such as: requirements analysis, design, planning, testing and support, which require a wide variety of specialists called software engineers. Programming is just a small, but very essential part of software development.
In this book we will focus solely on programming, because it is the only one of the above processes without which we cannot develop software.
Our First C# Program
Before we continue with an in depth description of the C# language and the .NET platform, let’s take a look at a simple example, illustrating how a program written in C# looks like:
```csharp
class HelloCSharp
{
    static void Main(string[] args)
    {
        System.Console.WriteLine("Hello C#!");
    }
}
```
The only thing this program does is to print the message "Hello, C#!" on the default output. It is still early to execute it, which is why we will only take a look at its structure. Later we will describe in full how to compile and run a given program from the command prompt as well as from a development environment.
How Does Our First C# Program Work?
Our first program consists of three logical parts:
- Definition of a class `HelloCSharp`;
- Definition of a method `Main()`;
- Contents of the method `Main()`.
Defining a Class
On the first line of our program we define a class called `HelloCSharp`. The simplest definition of a class consists of the keyword `class`, followed by its name. In our case the name of the class is `HelloCSharp`. The content of the class is located in a block of program lines, surrounded by curly brackets: `{}`.
Defining the Main() Method
On the third line we define a method with the name `Main()`, which is the starting point for our program. Every program written in C# starts from a `Main()` method with the following title (signature):
```csharp
static void Main(string[] args)
```
The method must be declared as shown above, it must be `static` and `void`, it must have a name `Main` and as a list of parameters it must have only one parameter of type `array of string`. In our example the parameter is called `args` but that is not mandatory. This parameter is not used in most cases so it can be omitted (it is optional). In that case the entry point of the program can be simplified and will look like this:
```csharp
static void Main()
```
If any of the aforementioned requirements is not met, the program will compile but it will not start because the starting point is not defined correctly.
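As a small illustration of the `args` parameter described above (this sketch is our own and is not part of the book’s example), a program can print every parameter passed to it on the command line:

```csharp
class PrintArguments
{
    static void Main(string[] args)
    {
        // args holds the command-line parameters, e.g. running
        // "PrintArguments.exe first second" fills it with two elements.
        foreach (string argument in args)
        {
            System.Console.WriteLine(argument);
        }
    }
}
```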
Contents of the Main() Method
The content of every method is found after its signature, surrounded by opening and closing curly brackets. On the next line of our sample program we use the system object `System.Console` and its method `WriteLine()` to print a message on the default output (the console), in this case "Hello C#!".
In the `Main()` method we can write an arbitrary sequence of statements, and they will be executed in the order in which we wrote them.
More information about expressions can be found in chapter "Operators and Expressions", working with the console is described in chapter "Console Input and Output", classes and methods can be found in chapter "Defining Classes".
C# Distinguishes between Uppercase and Lowercase!
The C# language distinguishes between uppercase and lowercase letters so we should use the correct casing when we write C# code. In the example above we used some keywords like `class`, `static`, `void` and the names of some of the system classes and objects, such as `System.Console`.
Be careful when writing! The same thing, written in uppercase, lower-case or a mix of both, means different things in C#. Writing `Class` is different from `class` and `System.Console` is different from `SYSTEM.CONSOLE`.
This rule applies to all elements of your program: keywords, names of variables, class names etc.
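A tiny sketch of what this means in practice (the commented-out lines would not compile, because the casing is wrong):

```csharp
class CaseSensitivityDemo
{
    static void Main()
    {
        System.Console.WriteLine("Correct casing");    // compiles
        // system.console.writeline("Wrong casing");   // compile-time error
        // SYSTEM.CONSOLE.WRITELINE("Wrong casing");   // compile-time error
    }
}
```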
The Program Code Must Be Correctly Formatted
Formatting means adding characters such as spaces, tabs and new lines, which are insignificant to the compiler but give the code a logical structure and make it easier to read. Let’s, for example, take a look at our first program (the short version, with the parameterless Main() method):
```csharp
class HelloCSharp
{
    static void Main()
    {
        System.Console.WriteLine("Hello C#!");
    }
}
```
The program contains seven lines of code and some of them are indented more than others. All of that can be written without tabs as well, like so:
```csharp
class HelloCSharp
{
static void Main()
{
System.Console.WriteLine("Hello C#!");
}
}
```
Or on the same line:
```csharp
class HelloCSharp{static void Main(){System.Console.WriteLine("Hello C#!");}}
```
Or even like this:
```csharp
   class HelloCSharp
 {
         static void Main()
   {
 System.Console.WriteLine("Hello C#!");
          }
     }
```
The examples above will compile and run exactly like the formatted code but they are more difficult to read and understand, and therefore difficult to modify and maintain.
Never let your programs contain unformatted code! That severely reduces program readability and leads to difficulties for later modifications of the code.
Main Formatting Rules
If we want our code to be correctly formatted, we must follow several important rules regarding indentation:
- Methods are indented inside the definition of the class (move to the right by one or more [Tab] characters);
- Method contents are indented inside the definition of the method;
- The opening curly bracket { must be on its own line and placed exactly under the method or class it refers to;
- The closing curly bracket } must be on its own line, placed exactly vertically under the respective opening bracket (with the same indentation);
- All class names must start with a capital letter;
- Variable names must begin with a lower-case letter;
- Method names must start with a capital letter;
Code indentation follows a very simple rule: when some piece of code is logically inside another piece of code, it is indented (moved) on the right with a single [Tab]. For example if a method is defined inside a class, it is indented (moved to the right). In the same way if a method body is inside a method, it is indented. To simplify this, we can assume that when we have the character “{“, all the code after it until its closing “}” should be indented on the right.
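The following sketch (our own illustration; the `if` statement and the `int` variable are explained in later chapters) shows how every nesting level adds one more indentation step:

```csharp
class IndentationDemo
{
    static void Main()                        // indented once: inside the class
    {
        int number = 5;                       // indented twice: inside the method
        if (number > 0)
        {
            System.Console.WriteLine(number); // indented three times: inside the if
        }
    }
}
```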
File Names Correspond to Class Names
Every C# program consists of one or several class definitions. By convention, each class is defined in a separate file with a name corresponding to the class name and a .cs extension. When these requirements are not met, the program will still work, but navigating the code will be difficult. In our example, the class is named HelloCSharp, so we should save its source code in a file called HelloCSharp.cs.
The C# Language and the .NET Platform
The first version of C# was developed by Microsoft between 1999 and 2002 and was officially released to the public in 2002 as part of the .NET platform. The .NET platform aims to make software development for Windows easier by providing a new, quality approach to programming based on the concepts of the "virtual machine" and "managed code". At that time the Java language and platform enjoyed enormous success in all fields of software development; C# and .NET were Microsoft’s natural response to the Java technology.
The C# Language
C# is a modern, general-purpose, object-oriented, high-level programming language. Its syntax is similar to that of C and C++ but many features of those languages are not supported in C# in order to simplify the language, which makes programming easier.
C# programs consist of one or several files with a .cs extension, which contain definitions of classes and other types. These files are compiled by the C# compiler (csc) to executable code and, as a result, assemblies are created – files with the same name but a different extension (.exe or .dll). For example, if we compile HelloCSharp.cs, we will get a file named HelloCSharp.exe (some additional files will be created as well, but we will not discuss them at the moment).
We can run the compiled code like any other program on our computer (by double clicking it). If we try to execute the compiled C# code (for example HelloCSharp.exe) on a computer that does not have the .NET Framework, we will receive an error message.
Keywords
C# uses the following keywords to build its programming constructs (the list is taken from MSDN in March 2013 and may not be complete):
| abstract | as | base | bool | break | byte |
|---|---|---|---|---|---|
| case | catch | char | checked | class | const |
| continue | decimal | default | delegate | do | double |
| else | enum | event | explicit | extern | false |
| finally | fixed | float | for | foreach | goto |
| if | implicit | in | int | interface | internal |
| is | lock | long | namespace | new | null |
| object | operator | out | override | params | private |
| protected | public | readonly | ref | return | sbyte |
| sealed | short | sizeof | stackalloc | static | string |
| struct | switch | this | throw | true | try |
| typeof | uint | ulong | unchecked | unsafe | ushort |
| using | virtual | void | volatile | while | |
Since the creation of the first version of the C# language, not all keywords have been in use – some of them were added in later versions. The main program elements in C# (which are defined and used with the help of keywords) are classes, methods, operators, expressions, conditional statements, loops, data types, exceptions and a few others. In the next few chapters of this book, we will review all of these programming constructs in detail, along with the use of most of the keywords from the table above.
### Automatic Memory Management
One of the biggest advantages of the .NET Framework is the built-in automatic memory management. It protects the programmers from the complex task of manually allocating memory for objects and then waiting for a suitable moment to release it. This significantly increases the developer productivity and the quality of the programs written in C#.
In the .NET Framework, there is a special component of the CLR that takes care of memory management. It is called the "garbage collector" (an automated memory-cleaning system). The garbage collector’s main tasks are to detect when the memory allocated for objects is no longer in use, to release it, and to make it available for the allocation of new objects.
It is important to note that it is not exactly clear at what moment the memory gets cleaned of unused objects (local variables for example). According to the C# language specifications, it happens at some moment after a given variable gets out of scope but it is not specified, whether this happens instantly, after some time or when the available memory becomes insufficient for the normal program operation.
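A minimal sketch of what this means for us as programmers (our own example): we allocate objects freely and never release them ourselves; the garbage collector reclaims them at some unspecified later moment after they become unreachable:

```csharp
class GarbageCollectionDemo
{
    static void Main()
    {
        for (int i = 0; i < 1000; i++)
        {
            // Each buffer becomes unreachable at the end of its iteration.
            // We never free it explicitly - the garbage collector will
            // reclaim its memory at some point after it goes out of scope.
            byte[] buffer = new byte[100000];
            buffer[0] = 1;
        }
        System.Console.WriteLine("Done - no explicit memory management needed.");
    }
}
```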
### Independence from the Environment and the Programming Language
One of the advantages of .NET is that programmers using different .NET languages can easily exchange code. For example, a C# programmer can use code written by another programmer in VB.NET, Managed C++ or F#. This is possible because programs written in different .NET languages share a common system of data types, a common execution infrastructure and a unified format of the compiled code (assemblies).
A big advantage of the .NET technology is the ability to run code, which is written and compiled only once, on different operating systems and hardware devices. We can compile a C# program in a Windows environment and then execute it under Windows, Windows Mobile, Windows RT or Linux. Officially Microsoft only supports the .NET Framework on Windows, Windows Mobile and Windows Phone, but there are third party vendors that offer .NET implementation on other operating systems.
**Mono (.NET for Linux)**
One example of a .NET implementation for non-Windows environments is the open-source project Mono ([www.mono-project.com](http://www.mono-project.com)). It implements the .NET Framework and most of its accompanying libraries for Linux, FreeBSD, iPhone and Android. Mono is an unofficial .NET implementation, so some features may not work exactly as expected. It implements the core .NET standards well (such as the C# compiler and the CLR) but does not fully support the latest .NET technologies and frameworks like WPF and ASP.NET MVC.
**Microsoft Intermediate Language (MSIL)**
The idea of independence from the environment was set in the earliest stages of the creation of the .NET platform and is implemented with the help of a little trick. The output code is not compiled to instructions for a specific microprocessor and does not use the features of a specific operating system; instead, it is compiled to the so-called Microsoft Intermediate Language (MSIL). This MSIL is not executed directly by the microprocessor but by a virtual environment called the Common Language Runtime (CLR).
**Common Language Runtime (CLR) – the Heart of .NET**
In the very center of the .NET platform beats its heart – the Common Language Runtime (CLR) – the environment that controls the execution of the managed code (MSIL code). It ensures the execution of .NET programs on different hardware platforms and operating systems.
The CLR is an abstract computing machine (virtual machine). Similarly to physical computers, it supports a set of instructions, registers, memory access and input-output operations. The CLR ensures a controlled execution of .NET programs using the full capabilities of the processor and the operating system. It also carries out managed access to the memory and the other resources of the computer, while adhering to the access rules set when the program is executed.
The .NET Platform
The .NET platform contains the C# language, CLR and many auxiliary instruments and libraries ready for use. There are a few versions of .NET according to the targeted user group:
- **.NET Framework** is the most common version of the .NET environment because of its general purpose. It is used in the development of console applications, Windows applications with a graphical user interface, web applications and many more.
- **.NET Compact Framework** (CF) is a "light" version of the standard .NET Framework and is used in the development of applications for mobile phones and other PDA devices using Windows Mobile Edition.
- **Silverlight** is also a "light" version of the .NET Framework, intended to be executed on web browsers in order to implement multimedia and Rich Internet Applications.
- **.NET for Windows Store apps** is a subset of .NET Framework designed for development and execution of .NET applications in Windows 8 and Windows RT environment (the so called Windows Store Apps).
.NET Framework
The standard version of the .NET platform is intended for development and use of console applications, desktop applications, Web applications, Web services, Rich Internet Applications, mobile applications for tablets and smart phones and many more. Almost all .NET developers use the standard version.
.NET Technologies
Although the .NET platform is big and comprehensive, it does not provide all the tools required to solve every problem in software development. There are many independent software developers, who expand and add to the standard functionality offered by the .NET Framework. For example, companies like the Bulgarian software corporation Telerik develop subsidiary sets of components. These components are used to create graphical user interfaces, Web content management systems, to prepare reports and they make application development easier.
The .NET Framework extensions are software components, which can be reused when developing .NET programs. Reusing code significantly facilitates and simplifies software development, because it provides solutions for common problems, offers implementations of complex algorithms and technology standards. The contemporary programmer uses libraries and components every day, and saves a lot of effort by doing so.
Let’s look at the following example – software that visualizes data in the form of charts and diagrams. We can use a library, written in .NET, which draws the charts. All that we need to do is pass it the correct data, and the library will draw the charts for us. It is very convenient and efficient. It also reduces production costs, because the programmers will not need to spend time working on additional functionality (in our case drawing the charts, which involves complex mathematical calculations and controlling the graphics card). The application itself will be of higher quality, because the extension it uses is developed and supported by specialists with more experience in that specific field.
**Software technologies** are sets of classes, modules, libraries, programming models, tools, patterns and best practices addressing some specific problem in software development. There are general software technologies, such as Web technologies, mobile technologies, technologies for computer graphics and technologies related to some platform such as .NET or Java.
There are many .NET technologies serving different areas of .NET development. Typical examples are the Web technologies (like ASP.NET and ASP.NET MVC), which allow fast and easy creation of dynamic Web applications, and the .NET mobile technologies (like WinJS), which make it possible to create multimedia applications with rich user interfaces that work on the Internet.
The .NET Framework by default includes many technologies and class libraries with standard functionality, which developers can use. For example, there are ready-to-use classes in the system library for working with mathematical functions, calculating logarithms and trigonometric functions (the System.Math class). Another example is the networking library (System.Net), which has built-in functionality to send e-mails (using the System.Net.Mail.MailMessage class) and to download files from the Internet (using System.Net.WebClient).
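A short sketch of how these standard classes are used (our own example; the URL is just a placeholder and the download requires Internet access):

```csharp
using System;
using System.Net;

class StandardLibrariesDemo
{
    static void Main()
    {
        // System.Math - ready-to-use mathematical functions
        Console.WriteLine(Math.Sqrt(2));      // square root
        Console.WriteLine(Math.Log10(1000));  // logarithm, prints 3

        // System.Net.WebClient - downloading data from the Internet
        using (WebClient client = new WebClient())
        {
            string page = client.DownloadString("http://example.com");
            Console.WriteLine("Downloaded {0} characters.", page.Length);
        }
    }
}
```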
A .NET technology is a collection of .NET classes, libraries, tools, standards and other programming means and established development models, which determine the technological framework for creating a certain type of application. A .NET library is a collection of .NET classes, which offer certain ready-to-use functionality. For example, ADO.NET is a technology offering a standardized approach to accessing relational databases (like Microsoft SQL Server and MySQL). The classes in the package (namespace) System.Data.SqlClient are an example of a .NET library, which provides functionality to connect to an SQL Server through the ADO.NET technology.
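A rough sketch of how the System.Data.SqlClient classes are typically used (our own example: the connection string, the Customers table and the database are hypothetical, and the program assumes a reference to the System.Data assembly and a reachable SQL Server):

```csharp
using System;
using System.Data.SqlClient;

class AdoNetDemo
{
    static void Main()
    {
        // Hypothetical connection string - adjust the server, database and credentials.
        string connectionString =
            "Server=localhost;Database=TestDb;Integrated Security=true";

        using (SqlConnection connection = new SqlConnection(connectionString))
        {
            connection.Open();
            SqlCommand command = new SqlCommand(
                "SELECT COUNT(*) FROM Customers", connection);
            int customersCount = (int)command.ExecuteScalar();
            Console.WriteLine("Customers in the database: {0}", customersCount);
        }
    }
}
```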
Some of the technologies developed by software developers outside of Microsoft become wide-spread and establish themselves as technology standards. Some of them are noticed by Microsoft and are later added to the next iteration of the .NET Framework. That way, the .NET platform is constantly evolving and expanding with new libraries and technologies. For instance, the object-relational mapping technologies were initially developed as independent projects and products (like the open-source project NHibernate and Telerik’s OpenAccess ORM). After they gained enormous popularity, their inclusion in the .NET Framework became a necessity. And this is how the LINQ-to-SQL and ADO.NET Entity Framework technologies were born, in .NET 3.5 and .NET 4.0 respectively.
Application Programming Interface (API)
Each .NET library or technology is utilized by creating objects and calling their methods. The set of public classes and methods in a programming library is called an Application Programming Interface, or just API. As an example we can look at the .NET API itself: it is a set of .NET class libraries that expand the capabilities of the language and add high-level functionality. All .NET technologies offer a public API. A technology is often referred to simply as an API that adds certain functionality, for example: an API for working with files, an API for working with charts, an API for working with printers, an API for reading and creating Word and Excel documents, an API for creating PDF documents, an API for Web development, etc.
.NET Documentation
Very often it is necessary to document an API, because it contains many namespaces and classes. Classes contain methods and parameters. Their purpose is not always obvious and needs to be explained. There are also inner dependencies between the separate classes, which need to be explained in order to be used correctly. These explanations and technical instructions on how to use a given technology, library or API, are called documentation. The documentation consists of a collection of documents with technical content.
The .NET Framework also has a documentation officially developed and supported by Microsoft. It is publicly available on the Internet and is also distributed with the .NET platform as a collection of documents and tools for browsing and searching.
The **MSDN Library** is Microsoft’s official documentation for all their products for developers and software technologies. The .NET Framework’s technical documentation is part of the MSDN Library and can be found here: [http://msdn.microsoft.com/en-us/library/vstudio/gg145045.aspx](http://msdn.microsoft.com/en-us/library/vstudio/gg145045.aspx).
**What Do We Need to Program in C#?**
After we made ourselves familiar with the **.NET platform**, **.NET libraries** and **.NET technologies**, we can move on to writing, compiling and executing C# programs.
In order to program in C#, we need two basic things – an installed **.NET Framework** and a **text editor**. We need the text editor to write and edit the C# code and the .NET Framework to compile and execute it.
**.NET Framework**
By default, the **.NET Framework** is installed along with Windows, but in old Windows versions it could be missing. To install the .NET Framework, we must download it from Microsoft’s website ([http://download.microsoft.com](http://download.microsoft.com)). It is best if we download and install the latest version.
Do not forget that we need to install the .NET Framework before we begin! Otherwise, we will not be able to compile and execute the program.
If we run Windows 8 or Windows 7, the .NET Framework will be already installed as part of Windows.
**Text Editor**
The **text editor** is used to write the **source code** of the program and to save it in a file. After that, the code is compiled and executed. There are many text editing programs. We can use Windows’ built-in Notepad (it is very basic and inconvenient) or a better free text editor like Notepad++ ([notepad-plus.sourceforge.net](http://notepad-plus.sourceforge.net)) or PSPad ([www.pspad.com](http://www.pspad.com)).
**Compilation and Execution of C# Programs**
The time has come to **compile and execute** the simple example program written in C# we already discussed. To accomplish that, we need to do the following:
- Create a file named **HelloCSharp.cs**;
- Write the sample program in the file;
- Compile **HelloCSharp.cs** to an executable file **HelloCSharp.exe** using the console-based C# compiler (**csc.exe**);
- Execute the **HelloCSharp.exe** file.
Now, let’s do it on the computer!
The instructions above vary depending on the operating system. Since programming on Linux is not the focus of this book, we will take a thorough look at what we need to write and execute the sample program on Windows. Those of you who want to program in C# in a Linux environment can download the Mono project, which we already described, and experiment with it.
Here is the code of our first C# program:
**HelloCSharp.cs**
```csharp
class HelloCSharp
{
    static void Main()
    {
        System.Console.WriteLine("Hello C#!");
    }
}
```
Creating C# Programs in the Windows Console
First we start the Windows command console, also known as Command Prompt. In Windows 7 this is done from the Start menu: Start -> Programs -> Accessories -> Command Prompt.
It is advised that we run the console as administrator (right click on the Command Prompt icon and choose “Run as administrator”). Otherwise some operations we want to use may be restricted.
In **Windows 8** the “Run as administrator” command is directly available when you right click the command prompt icon from the Win8 Start Screen.
After opening the console, let’s create a directory in which to experiment. We use the `md` command to create a directory and the `cd` command to navigate to it (enter inside it):
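The commands might look like this (`C:\>` is the console prompt, not something we type; the drive and directory name are just the ones used in this example):

```
C:\>md IntroCSharp
C:\>cd IntroCSharp
C:\IntroCSharp>
```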
The directory will be named IntroCSharp and will be located in C:\. We change the current directory to C:\IntroCSharp and create a new file HelloCSharp.cs, by using the built-in Windows text editor – Notepad.
To create the text file “HelloCSharp.cs”, we execute the following command on the console:
```
notepad HelloCSharp.cs
```
This will start Notepad with the following dialog window, confirming the creation of a new file:
Notepad will warn us that no such file exists and will ask us if we want to create it. We click [Yes]. The next step is to rewrite or simply Copy / Paste the program’s source code.
We save it by pressing [Ctrl+S] and close the Notepad editor with [Alt+F4]. Now we have the initial code of our sample C# program, written in the file C:\IntroCSharp\HelloCSharp.cs.
**Compiling C# Programs in Windows**
The only thing left to do is to compile and execute it. **Compiling** is done by the *csc.exe* compiler.

We got our first **error** – Windows cannot find an executable file or command with the name "csc". This is a very common problem and it is normal to appear if it is our first time using C#. Several reasons might have caused it:
- The .NET Framework is not installed;
- The .NET Framework is installed correctly, but its directory Microsoft.NET\Framework\v4.0.xxx is not added to the system path for executable files and Windows cannot find *csc.exe*.
The first problem is easily solved by installing the .NET Framework (in our case – version 4.5). The other problem can be solved by changing the system path (we will do this later) or by using the full path to *csc.exe*, as it is shown on the figure below. In our case, the full file path to the C# compiler is C:\Windows\Microsoft.NET\Framework\v4.0.30319\csc.exe (note that this path could vary depending on the .NET framework version installed). Strange or not, **.NET 4.5** coming with Visual Studio 2012 and C# 5 installs in a directory named “v4.0.30319” – this is not a mistake.
**Compiling and Running C# Programs in Windows**
Now let’s invoke the *csc* compiler through its full path and pass to it, as a parameter, the file we want to compile (*HelloCSharp.cs*):

The execution of `csc` completes without any errors, and we get the following file as a result: `C:\IntroCSharp\HelloCSharp.exe`. To run it, we simply need to type its name. The result of the execution of our program is the message "Hello C#!" printed on the console. It is not much, but it is a good start:

**Changing the System Paths in Windows**
If we want to use the command-line C# compiler (`csc.exe`) without entering the full path to it, we can add its folder to the **Windows system path**.
1. We open **Control Panel** and select "System". As a result this well-known window appears (the screenshot is taken from **Windows 7**):

In **Windows 8** it might look a bit different, but it is almost the same.
2. We select "Advanced system settings". The dialog window "System Properties" appears.
3. We click the button "Environment Variables" and a window with all the environment variables shows up:
![Environment Variables Window]
4. We choose "Path" from the list of System variables, as shown on the figure, and press the "Edit" button. A small window appears, in which we enter the path to the directory where the .NET Framework is installed:
![Edit System Variable Window]
Of course, first we need to find where our .NET Framework is installed. By default it is located somewhere inside the Windows system directory C:\Windows\Microsoft.NET, for example in C:\Windows\Microsoft.NET\Framework\v4.0.30319.
Adding the additional path to the already existing ones in the **Path** environment variable is done by appending the path name to the others, using a semicolon (;) as a separator.
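Schematically, after the change the **Path** variable might look like this (the existing paths differ from machine to machine, and the .NET directory depends on the installed version):

```
<existing paths>;C:\Windows\Microsoft.NET\Framework\v4.0.30319
```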
---
We must be careful because if we delete any of the existing system paths, some of Windows’ functions or part of the installed software might fail to operate properly!
---
5. When we are done with **setting the path**, we can try running **csc.exe**, without entering its full path. To do so, we open a new **cmd.exe (Command Prompt)** window (it is important to **restart the Command Prompt**) and type in the "**csc**" command. We should see the C# compiler version and a message that no input file has been specified:
![Command Prompt Window]
---
**Visual Studio IDE**
So far we have examined how to compile and run C# programs using the **Windows console** (Command Prompt). Of course, there is an easier way to do it – by using an integrated development environment, which will execute all the commands we have used so far. Let’s take a look at how to work with **development environments (IDE)** and how they will make our job easier.
**Integrated Development Environments**
In the previous examples, we examined how to compile and run a program consisting of a single file. Usually programs are made of many files, sometimes even tens of thousands. Writing in a text editor, compiling and executing a single-file program from the command prompt are simple, but doing all this for a big project can prove to be a very complex and time-consuming endeavor. There is a single tool that reduces the complexity and makes writing, compiling and executing software applications easier – the so-called **Integrated Development Environment (IDE)**. Development environments usually offer many additions to the main development functions, such as debugging, unit testing, checking for common errors, access to a source code repository and others.
**What Is Visual Studio?**
Visual Studio is a powerful integrated development environment (IDE) for developing software applications for Windows and the .NET Framework platform. Visual Studio (VS) supports different programming languages (for example C#, VB.NET and C++) and different software development technologies (Win32, COM, ASP.NET, ADO.NET Entity Framework, Windows Forms, WPF, Silverlight, Windows Store apps and many more Windows and .NET technologies). It offers a powerful integrated environment for writing code, compiling, executing, debugging and testing applications, designing user interfaces (forms, dialogs, web pages, visual controls and others), data and class modeling, running tests and hundreds of other functions.
IDE means “integrated development environment” – a tool where you write code, compile it, run it, test it, debug it, etc., and everything is integrated into a single place. Visual Studio is a typical example of a development IDE.
.NET Framework 4.5 comes with Visual Studio 2012 (VS 2012). This is the latest version of Visual Studio as of March 2013. It is designed for C# 5, .NET 4.5 and Windows 8 development.
VS 2012 is a commercial product but has a free version called Visual Studio Express 2012, which can be downloaded for free from the Microsoft website. Visual Studio 2012 Express has several editions (for Desktop, for Web, for Windows 8 and others). If you want to write C# code following the content of this book, you may use Visual Studio 2012 Express for Desktop or check whether you have a free license for the full Visual Studio from your university or organization. Many academic institutions (like Sofia University and Telerik Software Academy) provide free Microsoft DreamSpark accounts to their students to get licensed Windows, Visual Studio, SQL Server and other development tools. If you are a student, ask your university administration about the DreamSpark program. Most universities worldwide are members of this program.
In this book we will take a look at only the most important functions of VS Express 2012 – the ones related to coding. These are the functions for creating, editing, compiling, executing and debugging programs.
Note that older Visual Studio versions such as VS 2010 and VS 2008 can also be used for the examples in this book, but their user interface might look slightly different. Our examples are based on VS 2012 on Windows 8.
Before we continue with an example, let’s take a more detailed look at the structure of Visual Studio 2012’s visual interface. Windows are the main part of it. Each of them has a different function tied to the development of applications. Let’s see how Visual Studio 2012 looks after the default installation and configuration.
Visual Studio has several windows that we will explore:
- **Start Page** – from the start page we can easily open any of our latest projects or start a new one, to create our first C# program or to get help how to use C#.
- **Code Editor** – keeps the program’s source code and allows opening and editing multiple files.
- **Error List** – it shows the errors in the program we develop (if any). We learn how to use this window later when we compile C# programs in Visual Studio.
- **Solution Explorer** – when no project is loaded, this window is empty, but it will become a part of our lives as C# programmers. It will show the structure of our project – all the files it contains, regardless if they are C# code, images or some other type of code or resources.
- **Properties** – holds a list of the current object’s properties. Properties are used mainly in the component-based programming, e.g. when we develop WPF, Windows Store or ASP.NET Web Forms application.
There are many other windows with auxiliary functionality in Visual Studio but we will not review them at this time.
**Creating a New C# Project**
Before doing anything else in Visual Studio, we must **create a new project** or load an existing one. The project groups many files, designed to implement a software application or system, in a logical manner. It is recommended that we create a separate project for each new program.
We can **create a project in Visual Studio** by following these steps:
- **File -> New Project ...**
- The “New Project” dialog appears and lists all the different types of projects we can create. We can choose a **project type** (e.g. Console Application or WPF Application), **programming language** (e.g. C# or VB.NET) and **.NET Framework version** (e.g. .NET Framework 4.5) and give a name to our project (in our case “IntroToCSharp”):
- We choose **Console Application**. Console applications are programs, which use the console as a default input and output. Data is entered with the keyboard and when a result needs to be printed it appears on the console (as text on the screen in the program window). Aside from console applications, we can create applications with a graphical user interface (e.g. Windows Forms or WPF), Web applications, web services, mobile applications, Windows Store apps, database projects and others.
- In the field "Name" we enter the name of the project. In our case we choose the name **IntroToCSharp**.
- We press the **[OK]** button.
The newly created project is now shown in the **Solution Explorer**. Also, our first file, containing the program code, is automatically added. It is named **Program.cs**. It is very important to **give meaningful names** to our files, classes, methods and other elements of the program, so that we can easily find them and navigate the code. A **meaningful name** is a name that answers the question “what is the intent of this file / class / method / variable?” and helps developers understand how the code works. Don’t use **Problem3** for a name, even if you are solving problem 3 from the exercises. Name your project / class by its **purpose**. If your project is well named, after a few months or a year you will be able to explain what it is intended to do without opening it and looking inside. **Problem3** says nothing about what the project actually does.
In order to rename the **Program.cs** file, we right click on it in the Solution Explorer and select "Rename". We can name the main file of our C# program **HelloCSharp.cs**. Renaming a file can also be done with the [F2] key when the file is selected in the Solution Explorer:
A dialog window appears asking us whether we want to rename the class name as well as the file name. We select "Yes".
After we complete all these steps we have our first console application named IntroToCSharp and containing a single class HelloCSharp (stored in the file HelloCSharp.cs):
All we have to do is add code to the Main() method. By default, the HelloCSharp.cs code should be loaded and ready for editing. If it is not, we double click on the HelloCSharp.cs file in the Solution Explorer to load it. Then we enter the source code of our sample HelloCSharp program shown earlier.
Compiling the Source Code
The compiling process in Visual Studio includes several steps:
- Syntax error check;
- A check for other errors, like missing libraries;
- Converting the C# code into an executable file (a .NET assembly). For console applications it is an .exe file.
To compile a file in Visual Studio, we press the [F6] key or [Shift+Ctrl+B]. Usually, errors are underlined in red to attract the programmer’s attention while we are still writing, or, at the latest, when compiling. They are listed in the "Error List" window if it is visible (if it is not, we can show it from the "View" menu of Visual Studio).
If our project has at least one error, it will be marked with a small red "x" in the "Error List" window. Short information about the problem is displayed for each error – filename, line number and project name. If we double click any of the errors in the "Error List", Visual Studio will automatically take us to the file and line of code where the error has occurred. A typical example of such a problem is writing "using Systema;" instead of "using System;".
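In code, such an error could look like this (a sketch; the first directive contains the typo and the second shows the correct form):

```csharp
// using Systema;  // wrong - no such namespace exists, so the compiler reports an error
using System;      // correct - now we can write Console.WriteLine(...) without the
                   // "System." prefix

class HelloCSharp
{
    static void Main()
    {
        Console.WriteLine("Hello C#!");
    }
}
```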
Starting the Project
To start the project, we press [Ctrl+F5] (holding the [Ctrl] key pressed and at the same time pressing the [F5] key).
The program will start and the result will be displayed on the console, followed by the "Press any key to continue . . ." message:
The last message is not part of the result produced by the program. It is a reminder by Visual Studio that **our program has finished its execution** and it gives us time to see the result. If we run the program by only pressing [F5], that message will not appear and the result will vanish instantly after appearing because the program will have finished its execution, and the window will be closed. That is why we should **always start our console applications by pressing [Ctrl+F5]**.
Not all project types can be executed. In order to execute a C# project, it needs to have one class with a `Main()` method declared in the way described earlier in this chapter.
**Debugging the Program**
When our program contains errors, also known as **bugs**, we must find and remove them, i.e. we need to **debug** the program. The debugging process includes:
- **Noticing the problems** (bugs);
- **Finding the code causing** the problems;
- **Fixing** the code so that the program works correctly;
- **Testing** to make sure the program works as expected after the changes are made.
The process can be repeated several times until the program starts working correctly. After we have noticed the problem, we need to find the code causing it. Visual Studio can help by allowing us to check **step by step** whether everything is working as planned.
To stop the execution of the program at designated positions we can place **breakpoints**. The breakpoint is associated with a line of the program. The program **stops its execution** on the lines with breakpoints, allowing for the rest of the code to be executed step by step. On each step we can check and even change the values of the current variables.
Debugging is a sort of **step by step** slow motion execution of the program. It gives us the opportunity to easily understand the details of the code and see where exactly and why the errors have occurred.
Let’s create an **intentional error in our program**, to illustrate how to use breakpoints. We will add a line to the program, which will create an exception during the execution (we will take a detailed look at exceptions in the "Exception Handling" chapter).
For now let’s edit our program in the following way:
**HelloCSharp.cs**
```csharp
class HelloCSharp
{
    static void Main()
    {
        throw new System.NotImplementedException(
            "Intended exception.");
        System.Console.WriteLine("Hello C#!");
    }
}
```
When we start the program again with [Ctrl+F5], we will get an error, and it will be printed on the console.

Let’s see how **breakpoints will help us** find the problem. We move the cursor to the line with the opening bracket of the `Main()` method and press [F9] (by doing so we place a breakpoint on that line). A red dot appears, indicating that the program will stop there if it is executed in debug mode:

Now we must start the program in debug mode. We select **Debug -> Start Debugging** or press [F5]. The program will start and immediately stop at the first breakpoint it encounters. The line will be colored in yellow and we can execute the program step by step. With the [F10] key we move to the next line.
When we are on a given line and it is colored in **yellow**, the code on that line has **not been executed yet**. It executes once we have passed that line. In this case we have not yet received the error, despite the fact that we are standing on the line we added, which should cause it.
We press [F10] one more time to execute the current line. This time Visual Studio displays a window specifying the line where the error occurred, as well as some additional details about it.
Once we know where exactly the problem in the program is, we can easily correct it. To do so, first we need to stop the execution of the program before it has finished. We select **Debug -> Stop Debugging** or press [Shift+F5]. After that we delete the problem line and start the program in normal mode (without debugging) by pressing [Ctrl+F5].
### Alternatives to Visual Studio
As we have seen, in theory we can do without Visual Studio, but in practice that is not a good idea. The work required to compile a big project, find all the errors in the code and perform numerous other actions would simply take too much time without Visual Studio.
On the other hand, the full version of **Visual Studio is not free**. Many people cannot afford to buy the professional version (this is also true for small companies and some people engaged in programming).
This is why there are some alternatives to Visual Studio (besides the VS Express Edition), which are free and can handle the same tasks relatively well.
### SharpDevelop
One alternative is **SharpDevelop (#Develop)**. We can find it at the following Internet address: [http://www.icsharpcode.NET/OpenSource/SD/](http://www.icsharpcode.NET/OpenSource/SD/). #Develop is an open-source IDE for C#. It supports the majority of the functionality offered in Visual Studio 2012 but also works on Linux and other operating systems. We will not review it in detail, but you should keep it in mind in case you need a C# development environment and Visual Studio is not available.
### MonoDevelop
**MonoDevelop** is an integrated software development environment for the .NET platform. It is completely free (open source) and can be downloaded at: [http://monodevelop.com](http://monodevelop.com). With MonoDevelop, we can quickly and easily write fully functional desktop and ASP.NET applications for Linux, Mac OS X and Windows. It also enables programmers to easily transfer projects created in Visual Studio to the Mono platform and make them functional in other platforms.
### Decompiling Code
Sometimes programmers need to see the code of a given module or program that was not written by them and whose source code is not available. The process that **generates source code from an existing executable binary file** (a .NET assembly – an .exe or .dll file) is called **decompiling**.
We might need to decompile code in the following cases:
- We want to check how a given algorithm is implemented but we do not have the source code, e.g. to check how `Array.Sort()` internally works.
- There are several options for using some .NET library and we want to find the optimal choice, or we want to see how to use a certain API by digging into compiled code that uses it.
- We have no information how a given library works, but we have the compiled code (.NET assembly), which uses it, and we want to find out how exactly the library works.
- We have lost our source code and we want to recover it. **Code recovery** through decompilation will result in lost variable names, comments, formatting, and others, but is better than nothing.
Decompiling is done with the help of tools, which are not standard part of Visual Studio. The first popular .NET decompiler was Red Gate’s **Reflector** (before it became commercial in early 2011).
Telerik offers a good and completely free .NET decompiler called **JustDecompile**. It can be downloaded from the company’s website: [http://www.telerik.com/products/decompiler.aspx](http://www.telerik.com/products/decompiler.aspx). JustDecompile allows code decompilation directly in Visual Studio and also has an external stand-alone GUI application for browsing assemblies and decompiling their code.
Another good decompilation tool for .NET is **ILSpy**, which is developed around the SharpDevelop project. ILSpy can be downloaded at: [http://ilspy.net](http://ilspy.net). The program does not require installation. After we start it, ILSpy loads some of the standard .NET Framework libraries. Via the menu File -> Open, we can open a certain .NET assembly. We can also load an assembly from the GAC (Global Assembly Cache).

In ILSpy there are two ways to find out how a given method is implemented. For example, if we want to see how the static method `System.Currency.ToDecimal` works, we can use the tree on the left to find the `Currency` class in the `System` namespace and then select its `ToDecimal` method. If we click on any method, we will be able to see its source code in C#. Another way to find a given class is to use ILSpy’s built-in search, which searches through the names of all classes, interfaces, methods, properties, etc. from the loaded assemblies. Unfortunately, the version available at the time of writing this book (ILSpy 2.1) can decompile only to the languages C#, VB.NET and IL.
JustDecompile and ILSpy are **extremely useful tools**, which can help almost every day when developing .NET software and we should definitely download at least one and play with it. When we are wondering how a certain method works or how something is implemented in a given assembly, we can always rely on the decompiler to find out.
C# in Linux, iOS and Android
C# programming on Linux is less developed than on Windows. We do not want to skip it completely, so we will give some guidelines on how to start programming in C# on Linux, iOS and Android.
The most important thing that we need in order to write C# code in Linux is a .NET Framework implementation. Microsoft .NET Framework is not available for Linux but there is an open-source .NET implementation called “Mono”. We can download Mono at its official website: http://www.mono-project.com. Mono allows us to compile and execute C# programs in a Linux environment and on other operating systems. It contains a C# compiler, a CLR, a garbage collector, the standard .NET libraries and many of the libraries available for .NET Framework in Windows like Windows Forms and ASP.NET.
Mono supports compiling and running C# code not only in Linux but also in Solaris, Mac OS X, iOS (iPhone / iPad) and Android. The iOS version (MonoTouch) and the Android version of Mono (Mono for Android) are commercial projects, while Mono for Linux is open-source free software.
Of course, Visual Studio does not work in Linux environment but we can use the #Develop or MonoDevelop as C# IDE in Linux.
Other .NET Languages
C# is the most popular .NET language but there are few other languages that may be used to write .NET programs:
- **VB.NET** – Visual Basic .NET (VB) is the Basic language adapted to run on the .NET Framework. It is considered a successor of Microsoft Visual Basic 6 (a legacy development environment for Windows 3.1 and Windows 95). Its syntax looks strange to C# developers, but generally it does the same things as C#, just with different syntax. The only reason VB.NET exists is historical: it is the successor of VB6 and keeps most of its syntax. **Not recommended** unless you are a VB6 programmer.
- **Managed C++** – an adaptation of the C++ programming language to the .NET Framework. It can be useful if you need to quickly convert existing C++ code to be used from .NET. Not recommended for new projects. **Not recommended** for the readers of this book, even those with some C++ experience, because it makes .NET programming unnecessarily complicated.
- **F#** – an experiment to bring the purely functional programming paradigm to the .NET Framework. **Not recommended** at all (unless you are a functional programming guru).
- **JavaScript** – it may be used to develop Windows 8 (Windows Store) applications through the WinJS technology. It might be a good choice for skillful HTML5 developers with good JavaScript skills. **Not recommended** for the readers of this book, because it does not support console applications.
Exercises
1. Install and make yourself familiar with Microsoft Visual Studio and Microsoft Developer Network (MSDN) Library Documentation.
2. Find the description of the System.Console class in the standard .NET API documentation (MSDN Library).
4. **Compile and execute** the sample program from this chapter using the command prompt (the console) and Visual Studio.
5. **Modify** the sample program to print a different greeting, for example "Good Day!".
6. Write a console application that **prints your first and last name** on the console.
7. Write a program that **prints the following numbers** on the console: 1, 101, 1001, each on a new line.
8. Write a program that prints on the console the **current date and time**.
9. Write a program that prints the **square root of 12345**.
10. Write a program that prints the first 100 members of the **sequence** 2, -3, 4, -5, 6, -7, 8.
11. Write a program that reads your age from the console and prints your age after **10 years**.
12. Describe the difference between **C#** and the .NET Framework.
13. Make a list of the **most popular programming** languages. How are they different from C#?
14. **Decompile** the example program from exercise 5.
Solutions and Guidelines
1. If you have a **DreamSpark account** ([www.dreamspark.com](http://www.dreamspark.com)), or your school or university offers free access to Microsoft products, install the full version of Microsoft Visual Studio. If you do not have the opportunity to work with the full version of Microsoft Visual Studio, you can download Visual Studio Express for free from the Microsoft web site; it is completely free and works well for educational purposes.
2. Use the address given in the "**.NET Documentation**" section of this chapter. Open it and search in the tree on the left side. A **Google search** will work just as well and is often the fastest way to find documentation for a given .NET class.
3. Use the **same approach** as in the previous exercise.
4. Follow the instructions from the "Compilation and Execution of C# Programs" section of this chapter.
5. Use the code from the sample C# program from this chapter and change the printed message.
6. Find out how to use the `System.Console.Write()` method.
8. Find out what features are offered by the `System.DateTime` class.
9. Find out what features are offered by the `System.Math` class.
10. Try to learn on your own how to use **loops** in C#. You may read about `for`-loops in the chapter “Loops”.
12. **Research them** on the Internet (e.g. in Wikipedia) and take a closer look at the differences between them. You will find that C# is a programming language, while the .NET Framework is a development platform and runtime for executing .NET code. Be sure to read the section "The C# Language and the .NET Platform" from this chapter.
13. Find out which are the most popular languages and examine some sample programs written in them. Compare them to C#. You might take a look at C, C++, Java, C#, VB.NET, PHP, JavaScript, Perl, Python and Ruby.
14. First download and **install** JustDecompile or ILSpy (more information about them can be found in the "Decompiling Code" section). After you run one of them, open your program’s compiled file. It can be found in the **bin\Debug** subdirectory of your C# project. For example, if your project is named TestCSharp and is located in **C:\Projects**, then the compiled assembly (executable file) of your program will be the file **C:\Projects\TestCSharp\bin\Debug\TestCSharp.exe**.
FLICK: Developing and Running Application-Specific Network Services
Abdul Alim, Richard G. Clegg, Luo Mai, Lukas Rupprecht, and Eric Seckler, Imperial College London; Paolo Costa, Microsoft Research and Imperial College London; Peter Pietzuch and Alexander L. Wolf, Imperial College London; Nik Sultana, Jon Crowcroft, Anil Madhavapeddy, Andrew W. Moore, and Richard Mortier, University of Cambridge; Masoud Koleini, Luis Oviedo, and Derek McAuley, University of Nottingham; Matteo Migliavacca, University of Kent
https://www.usenix.org/conference/atc16/technical-sessions/presentation/alim
This paper is included in the Proceedings of the 2016 USENIX Annual Technical Conference (USENIX ATC ’16).
June 22–24, 2016 • Denver, CO, USA
978-1-931971-30-0
Open access to the Proceedings of the 2016 USENIX Annual Technical Conference (USENIX ATC ’16) is sponsored by USENIX.
FLICK: Developing and Running Application-Specific Network Services
Abdul Alim†, Richard G. Clegg†, Luo Mai†, Lukas Rupprecht†, Eric Seckler†,
Paolo Costa‡†, Peter Pietzuch†, Alexander L. Wolf†, Nik Sultana∗,
Jon Crowcroft∗, Anil Madhavapeddy∗, Andrew W. Moore∗, Richard Mortier∗,
Masoud Koleini♭, Luis Oviedo♭, Derek McAuley♭, Matteo Migliavacca§
†Imperial College London, ‡Microsoft Research, ∗University of Cambridge,
♭University of Nottingham, §University of Kent
Abstract
Data centre networks are increasingly programmable, with application-specific network services proliferating, from custom load-balancers to middleboxes providing caching and aggregation. Developers must currently implement these services using traditional low-level APIs, which neither support natural operations on application data nor provide efficient performance isolation.
We describe FLICK, a framework for the programming and execution of application-specific network services on multi-core CPUs. Developers write network services in the FLICK language, which offers high-level processing constructs and application-relevant data types. FLICK programs are translated automatically to efficient, parallel task graphs, implemented in C++ on top of a user-space TCP stack. Task graphs have bounded resource usage at runtime, which means that the graphs of multiple services can execute concurrently without interference using cooperative scheduling. We evaluate FLICK with several services (an HTTP load-balancer, a Memcached router and a Hadoop data aggregator), showing that it achieves good performance while reducing development effort.
## 1 Introduction
Distributed applications in data centres increasingly want to adapt networks to their requirements. Application-specific network services, such as application load-balancers [23, 40], request data caches [36], and in-network data aggregators [29], therefore blur the boundary between the network fabric at the core and applications at the edge. For example, a Memcached request router can transparently scale deployments by routing requests using knowledge of the Memcached protocol [36]. In this paper, we explore how application developers, not network engineers, can be supported when implementing new application-specific network services.
Existing software middlebox platforms, such as Click-OS [30], xOMB [3] and SmartSwitch [53], support only application-independent network services, i.e. IP routers, firewalls or transport-layer gateways. Using them to interact with payload data in network flows leads to an impedance mismatch due to their byte-oriented, per-packet APIs. Instead, application developers would prefer high-level constructs and data types when expressing processing logic. For example, when defining the dispatching logic of a Memcached request router, a developer would ideally treat key/value pairs as a first-class data type in their program.
Today’s middlebox platforms also force developers to optimise their code carefully to achieve high throughput—implementing a new Click module [24, 30] in C++ that can process data at 10 Gbps line rate is challenging. As a result, many new application-specific network services [40, 29] are built from scratch rather than leveraging the above platforms.
Considerable work went into developing new high-level languages for network control within software-defined networking (SDN) [16, 33, 8, 48]. While these simplify the specification of network management policies, they typically operate on a per-packet basis and support a limited set of per-packet actions once matched, e.g. forwarding, cloning or dropping. In contrast, application-specific network services must refer to payload data, e.g. messages, key/value pairs or deserialised objects, and carry out richer computations, e.g. arbitrary payload transformations, caching or data aggregation.
Our goal is to enable developers to express application-specific network services in a natural high-level programming model, while executing such programs in an efficient and scalable manner. This is challenging for several reasons: (i) in many cases, the cost of data deserialisation and dynamic memory management reduces
achievable processing throughput. While high-level programming languages such as Java or Python can manipulate complex application objects, they struggle to provide predictable processing throughput for line-rate processing of network data; (ii) a typical data centre may host hundreds of applications, with each potentially requiring its own network service. Services must thus share resources, e.g. CPU and memory, without interference. Existing middlebox platforms use coarse-grained virtualisation [30], which carries a context-switching overhead of hundreds of microseconds. This is too high for fine-grained resource sharing between many application-specific network services; and (iii) most of the applications use TCP for transport, and an application-specific middlebox needs to terminate TCP connections to access data. Performance and scalability of such middleboxes are often bounded by the high cost of connection termination and frequent socket reads/writes.
We describe **FLICK**, a framework for developers to program and execute application-specific network services. It consists of the **FLICK language** for defining network services, and the **FLICK platform** for executing compiled programs efficiently on multi-core CPUs.
Programs in the **FLICK language** have **bounded resource usage** and are **guaranteed to terminate**. This is possible because most application-specific network services follow a similar pattern: they deserialise and access application data types, iterate over these data types to perform computation, and output the results as network flows. The language is therefore statically typed, and all built-in types (e.g. integer, string, and array) must have a maximum size to avoid dynamic memory allocation. Programs can refer to complex application-defined data types, such as messages or key/value pairs, for which efficient parsers are synthesised from the type definitions in the program. Since functions can only perform finite iteration over fixed-length data types, **FLICK programs with finite input must terminate**.
A compiler translates **FLICK programs into task graphs** implemented in C++. Task graphs are designed to permit the efficient and safe execution of many concurrent network services on a shared platform. A task graph consists of parallel **tasks** that define the computation of the **FLICK** program, and **channels** that propagate data between concurrently executing tasks. **Input/output tasks** perform the serialisation/deserialisation of data to and from application objects. Since **FLICK** programs explicitly specify accesses to application data fields, the compiler can generate custom parsing code, eliminating the overheads of general-purpose parsers.
The **FLICK platform** executes multiple task graphs belonging to different services. To reduce the overhead of frequent connection termination and socket operation, task graphs use a modified version of a highly-scalable user-space TCP stack (mTCP [21]) with Intel’s Data Plane Development Kit (DPDK) [20]. Task graphs are also scheduled **cooperatively**, avoiding context-switching overhead. They cannot interfere with each other, both in terms of performance and resources, due to their safe construction from **FLICK** programs.
We evaluate a prototype implementation of **FLICK** using both micro-benchmarks and three application-specific network services: an HTTP load balancer, a Memcached proxy and a Hadoop data aggregator. Our results show that **FLICK** can execute these services with throughput and latency that match those of specialised middlebox implementations. In addition, it scales with a larger number of compute tasks. This paper focuses on the design, implementation and performance of a single **FLICK** middlebox. However, the wider vision is of a number of such boxes within a data centre [10].
## 2 Application-Specific Network Services
**FLICK** focuses on a specific context: data centres in which multiple, complex, distributed applications run concurrently. In this case, to achieve higher performance, flexibility or efficiency, it is advantageous to execute portions of these applications, e.g. related to load-balancing, caching or aggregation, as **application-specific network services** directly on network elements.
To do this, application developers must add code to network elements such as **software middleboxes**. Today this typically means that they must implement complicated features of the underlying network protocols (e.g. TCP flow construction, HTTP parsing and application data deserialisation). For performance reasons, network services must be highly parallel, which requires considerable developer expertise to achieve. Network resources are also inherently shared: even if hosts can be assigned to single applications, network elements must host many services for different applications.
The goal of **FLICK** is to allow developers to easily and efficiently introduce application-specific processing into network elements. Present approaches are unsatisfactory for three key reasons: (i) they provide only low-level APIs that focus on the manipulation of individual packets, or at best, individual flows; (ii) they do not permit developers to implement services in high-level languages, but typically rely on the use of low-level languages such as C; and (iii) they provide little support for the high degrees of concurrency that are required to make network service implementations perform well.
Next we elaborate on some of these challenges as encountered in our example applications (§2.1), and then contrast our approach with existing solutions (§2.2).
### 2.1 Use cases
We consider three sample uses for application-specific services: HTTP load balancing, Memcached request routing, and Hadoop data aggregation.
HTTP load balancer. To cope with a large number of concurrent requests, server farms employ load balancers as front ends. These are implemented by special-purpose hardware or highly-optimised software stacks and both sacrifice flexibility for performance. As a result, load balancers must often be reimplemented for each application to tailor them to specific needs. For example, this may be necessary to ensure consistency when multiple TCP connections are served by the same server; to improve the efficiency of clusters running Java code, a load balancer may avoid dispatching requests to servers that are currently performing garbage collection [27]; finally, there is increasing interest from Internet companies to monitor application-specific request statistics—a task that load balancers are ideally placed to carry out [13].
Memcached proxy. Memcached [15] is a popular distributed in-memory key/value store for reducing the number of client reads from external data sources by caching read results in memory. In production environments, a proxy such as twemproxy [52] or mcrouter [36] is usually situated between clients and servers to handle key/value mappings and instance configurations. This decouples clients and servers and allows the servers to scale horizontally, both out and in.
Past attempts to implement Memcached routers have involved user-space solutions [36], incurring high overheads due to expensive memory copies between kernel- and user-space. More recent proposals, such as MemSwitch [53], have shown that a dedicated single-purpose software switch that intercepts and processes Memcached traffic can be more efficient. To customise MemSwitch, however, developers must write complex in-network programs that process raw packet payloads. This not only compromises the safety and performance of the network stack, but also complicates development: it requires knowledge about low-level details of networking as well as skills for writing high-performance, parallelisable packet-processing code.
Hadoop data aggregator. Hadoop [54] is a popular map/reduce framework for data analysis. In many deployments, job completion times are network-bound due to the shuffle phase [9]. This means that performance can be improved through an application-specific network service for in-network data aggregation [29], which executes an intermediate in-network reduction within the network topology before data reaches the reducers, thus reducing traffic crossing the network.
Providing an in-network data aggregation for Hadoop serves as a good example of an application-specific service that must carry out complex data serialisation and deserialisation. A developer wishing to implement in-network reduce logic must therefore re-implement the logic necessary to reconstruct Hadoop key/value pairs from TCP flows—a difficult and error-prone task.
### 2.2 Existing solution space
There are several proposals for addressing the challenges identified in the use cases above. We observe that existing solutions typically fit into one of four classes:
(i) Specialised, hand-crafted implementations. Systems such as netmap [43, 44] provide for efficient user-space implementations of packet-processing applications. Unfortunately, they offer only low-level abstractions, forcing developers to process individual packets rather than high-level business logic.
(ii) Packet-oriented middleboxes. Frameworks for implementing software middleboxes, such as ClickOS [30] and SmartSwitch [53], enable high-performance processing of network data and can be used to build higher-level abstractions. However, they fail to support useful high-level language features such as strong and static typing, or simple support for data-parallel processing.
(iii) Network programmability. More recently, we see increasing deployment of software-defined networking techniques, usually OpenFlow [31]. More advanced technologies have been proposed such as P4 [8] and Protocol Oblivious Forwarding [47]. These enable efficient in-network processing of traffic, selectively forwarding, rewriting and processing packets. However, they suffer from many of the same issues as (ii) due to their narrow focus on packet-level abstractions.
(iv) Flow-oriented servers. For in-network processing concerned with higher-level flow abstractions, it is common to leverage existing server implementations, such as Nginx [35] or Apache [51], and customise them either at the source level or through extensibility mechanisms such as modules. Another example is Netflix ribbon [34], which provides a number of highly configurable middlebox services along with a Java library to build custom services. While this raises the level of abstraction somewhat, the overheads of using such large, complex pieces of software to perform application-specific network services are substantial.
## 3 FLICK Framework
We motivate our design by outlining requirements (§3.1), and providing a high-level overview (§3.2).
### 3.1 Requirements
Based on the shortcomings of the approaches highlighted in §2.2, we identify the following three design requirements for our framework:
**R1: Application-level abstractions:** developers should be able to express their network services using familiar constructs and abstractions without worrying about the low-level details of per-packet (or per-flow) processing;
**R2: High parallelism:** to achieve line-rate performance, programs for application-specific network services must exploit both data and task parallelism without requiring significant effort from the developers;
**R3: Safe and efficient resource sharing:** middleboxes are shared by multiple applications/users, therefore, we need to ensure that programs do not interfere with one another, both in terms of CPU and memory resources.
To meet these requirements, FLICK follows the scheme shown in Figure 1. For the desired level of abstraction (R1), it provides a novel high-level language (§4). The language allows developers to focus on the business logic of their network services, ignoring low-level details (e.g. serialisation or TCP reassembly).
Compared to general-purpose languages such as C or Java, the FLICK language offers a constrained programming environment. This makes it easier to compile FLICK programs to parallel FLICK task graphs (§5). The division of programs into tasks allows the platform to take advantage of both data and task parallelism, thus exploiting multi-core CPUs (R2).
Finally, the FLICK language bounds the resource usage for each invocation of a network service. This allows task graphs to be executed by the FLICK platform according to a cooperative scheduling discipline (§5), permitting a large number of concurrent task graphs to share the same hardware resources with little interference (R3). A pool of worker threads executes tasks cooperatively, while channels move data between tasks.
### 3.2 Overview
We now give a more detailed overview of how a developer uses the FLICK framework (see Figure 1). First they write the logic of their application-specific network services in the FLICK language. After compilation by the FLICK compiler, the FLICK platform runs a program as an *instance*, consisting of a set of *task graphs*. Each task graph comprises a directed acyclic graph of *tasks* connected by *task channels*. Depending on the program semantics, multiple instances of the task graph can be instantiated for each network request, or a single graph can be used by multiple requests.
A task is a schedulable unit of computation. Each task processes a stream of input values and generates a stream of output values. Initial input to the task graph is handled by one or more *input tasks*, which consume data from a single *input channel*, i.e., the byte stream of a TCP connection. An input task then deserialises bytes to values using deserialisation/parsing code generated by the FLICK compiler from the types specified in the FLICK program. Deserialisation splits data into the smallest units appropriate for the task being considered. For example, if the input is from a web client, the byte stream would be deserialised into individual complete HTTP requests; for Hadoop, a key/value pair is more appropriate.
Received data is then processed by one or more compute tasks and, finally, output from the task graph is emitted to the outside world via an *output task*, representing a single outgoing TCP connection. The output task also executes efficient serialisation code generated from the FLICK program, converting values into a byte stream that is placed onto an *output channel* for transmission.
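To make this structure concrete, the following C++ sketch shows an input task, a compute task and an output task exchanging values over bounded channels. The channel class, the capacities and the trivial computation are illustrative assumptions, not the FLICK runtime API.

```cpp
// Minimal sketch (not the actual FLICK runtime API): an input task deserialises
// raw bytes into typed values, a compute task transforms them, and an output
// task serialises the result again.
#include <cstddef>
#include <deque>
#include <iostream>
#include <optional>
#include <string>

template <typename T>
class Channel {                 // bounded, single-threaded channel for illustration
public:
    explicit Channel(std::size_t cap) : cap_(cap) {}
    bool push(T v) {
        if (q_.size() >= cap_) return false;   // bounded: no growth beyond cap
        q_.push_back(std::move(v));
        return true;
    }
    std::optional<T> pop() {
        if (q_.empty()) return std::nullopt;
        T v = std::move(q_.front());
        q_.pop_front();
        return v;
    }
private:
    std::deque<T> q_;
    std::size_t cap_;
};

int main() {
    Channel<std::string> wire(16);   // byte stream, one "record" per entry
    Channel<int> parsed(16);         // typed values produced by the input task
    Channel<std::string> out(16);    // serialised output

    wire.push("41"); wire.push("1");

    // Input task: deserialise strings into integers.
    while (auto raw = wire.pop()) parsed.push(std::stoi(*raw));

    // Compute task: a trivial transformation (sum all values).
    int sum = 0;
    while (auto v = parsed.pop()) sum += *v;

    // Output task: serialise the result back to a byte representation.
    out.push(std::to_string(sum));
    while (auto s = out.pop()) std::cout << *s << "\n";   // prints 42
}
```

The bounded capacity per channel mirrors the platform's static allocation of buffers: when a channel is full, a producer must yield rather than allocate more memory.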
## 4 FLICK Programming Model
FLICK provides a domain-specific language (DSL) targeting application-specific middlebox programming. Although designing a new language is a difficult task, we decided to do so because we found existing general-purpose languages inappropriate for middlebox programming due to their excessive expressive power. Even safe redesigns of widely-used languages, such as *Cyclone* [22], are too powerful for our needs because, by design, they do not restrict programs to terminate or bound their resource usage. Existing specialised languages for network services, such as *PLAN* [18], are typically packet-centric, which makes it hard to implement application-specific traffic logic that is flow-centric. A new domain-specific language presents us with the opportunity to incorporate primitive abstractions that better fit the middlebox domain.
We also considered restricting an existing language to suit our needs (for example, OCaml restricted to exclude unbounded loops and garbage collection). This, however, presented two difficulties: (i) exposing programmers to a familiar language but with altered semantics would be confusing; and (ii) it would prevent us from including language features for improved safety, such as static type-checking.
Numerous systems programming tasks have been simplified by providing DSLs to replace general-purpose programming languages [26, 28, 6, 39, 11]. The FLICK language is designed (i) to provide convenient, familiar high-level language abstractions targeted specifically at middlebox development, e.g. application-level types, processes and channels alongside traditional functions and primitive types; (ii) to take advantage of execution
parallelism for high throughput; and (iii) to enable efficient and safe handling of multiple programs and many requests on shared hardware resources by making it impossible to express programs with undesirable behaviour, such as unbounded resource consumption.
In the FLICK language, developers describe application-specific network services as a collection of interconnected processes. Each process manipulates values of the application’s data types, in contrast to earlier work which described network services as simple packet processors [24, 7, 30]. Application data is carried over channels, which interconnect processes with one another and with network flows. Processes interact with channels by consuming and processing input data read from them, and by transmitting output over them. Processes, channels and network interactions are handled by the FLICK platform.
The FLICK language is designed to achieve efficient parallel execution on multi-core CPUs using high-level parallel primitives. By default, the language offers parallelism across multiple requests, handling them concurrently. It supports the safe sharing of resources by bounding the resource use of an individual program. Processing of continuous network flows belonging to an application is subdivided into discrete units of work so that each process consumes only a bounded amount of resource. To achieve this, FLICK control structures are restricted to finite iteration only. This is not a significant limitation, however, as application-specific network services typically carry out deterministic transformations of network requests to generate responses. User-defined functions are written in FLICK itself, rather than in a general-purpose language (as in Click [24] or Pig [37]), which preserves the safety of network services expressed in FLICK.
After presenting the FLICK language by example (§4.1), we describe its application data types (§4.2), primitives and compilation (§4.3).
### 4.1 Overview
Listing 1 shows a sample FLICK program that implements a Memcached proxy. Programs are composed of three types of declarations: data types (lines 1–2), processes (lines 4–6) and functions (lines 8–10).
Processes have signatures that specify how they connect to the outside world. In this case, a process called Memcached declares a signature containing two channels (line 4); the client channel produces and accepts values of type cmd, while backends is an array of channels, each of which produces and accepts values of type cmd.
Processes are instantiated by the FLICK platform, which binds channels to underlying network flows (§5). In this example, when a client sends a request, the FLICK platform creates a new Memcached task graph and assigns the client connection to this graph. Giving each client connection a new task graph ensures that responses are routed back to the correct client.
A process body describes how data is transformed and routed between channels connected to a process. The language design ensures that only a finite amount of input from each channel is consumed. The body of the Memcached process describes the application-specific network service: data received from any channel in backends is sent to the client channel (line 5); data received from the client is processed by the target_backend function (line 6), which in turn writes to a suitable channel in the backends array (line 10).
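Listing 1 itself is not reproduced here, but the dispatch decision that target_backend makes can be illustrated with a short C++ sketch: hash the request key and use the result to index the backends channel array. The hash function, names and backend labels below are illustrative assumptions, not FLICK source code.

```cpp
// Hypothetical sketch of the dispatching logic behind target_backend: hash the
// request key and select one of the backend channels. Illustration only.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// FNV-1a hash over the key bytes (any stable hash would do).
uint64_t fnv1a(const std::string& key) {
    uint64_t h = 1469598103934665603ULL;
    for (unsigned char c : key) { h ^= c; h *= 1099511628211ULL; }
    return h;
}

std::size_t target_backend(const std::string& key, std::size_t num_backends) {
    return fnv1a(key) % num_backends;   // index into the backends channel array
}

int main() {
    std::vector<std::string> backends = {"mc-0", "mc-1", "mc-2"};
    std::vector<std::string> keys = {"user:42", "session:abc", "page:7"};
    for (const std::string& key : keys)
        std::cout << key << " -> "
                  << backends[target_backend(key, backends.size())] << "\n";
}
```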
### 4.2 Supporting application data types
FLICK programs operate on application data types representing the exchanged messages. After an input task reads such messages from the network, they are parsed into FLICK data types. Similarly, before processed data values are transmitted by an output task, they are serialised into the appropriate wire format representation.
The transformation of messages between wire format and FLICK data types is defined as a message grammar. During compilation, FLICK generates the corresponding parsing and serialisation code from the grammar, which is then used in the input and output tasks of the task graph, respectively. The generated code is optimised for efficiency in three ways: (i) it does not dynamically allocate memory; (ii) it supports the incremental parsing of messages as new data arrives; and (iii) it is adapted automatically to specific use cases.
The syntax to define message grammars is based on that of the Spicy (formerly Binpac++) [46] parser generator. The language provides constructs to define messages and their serialised representation through units, fields, and variables, and their composition: units are used to modularise grammars; fields describe the structure of a unit; and variables can compute the value of expressions during parsing or serialisation, e.g. to determine the size of a field. FLICK grammars can express any LL(1)-parsable grammar as well as grammars with dependent fields, in a manner similar to Spicy. The FLICK framework provides reusable grammars for common protocols, such as the HTTP [14] and Memcached [50] protocols. Developers can also specify additional message grammars for custom formats, such as application-specific Hadoop data types.
Listing 2 shows a simplified grammar for Memcached. The cmd unit for the corresponding FLICK data type is a sequence of fixed-size fields (lines 4–12), a variable (lines 14–18), and variable-size fields (lines 19–21). Each field is declared with its wire-format data type, e.g. the opcode field is an 8-bit integer (line 5). The sizes of the extras, key, and value fields are determined by the parsed value of the extras_len and key_len fields as well as the value_len variable, which is computed during parsing according to the expression in lines 15 and 16. During serialisation, the values of extras_len, key_len, and value_len are updated according to the sizes of the values stored in the extras, key, and value fields. Subsequently, the value of total_len is updated according to the variable’s serialisation expression in lines 17 and 18. The %byteorder property declaration in line 2 specifies the wire format encoding of number values—the generated code transforms such values between the specified big-endian encoding and the host byte-order. More advanced features of the grammar language include choices between alternative field sequences, field repetitions (i.e. lists), and transformations into custom FLICK field types (e.g. enumerations).
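As a rough illustration of the parsing arithmetic this grammar describes, the following C++ sketch reads the fixed-size header fields of the public Memcached binary protocol in big-endian order and derives the value length from total_len, key_len and extras_len. It is a hand-written approximation, not the parser code that FLICK generates.

```cpp
// Sketch of the parsing arithmetic in the grammar: fixed-size header fields are
// read in network (big-endian) byte order, and the variable-size field length is
// derived as value_len = total_len - key_len - extras_len. Based on the public
// Memcached binary protocol, not FLICK's generated parser.
#include <cstdint>
#include <iostream>
#include <vector>

struct MemcachedHeader {
    uint8_t  magic;
    uint8_t  opcode;
    uint16_t key_len;
    uint8_t  extras_len;
    uint8_t  data_type;
    uint16_t vbucket;
    uint32_t total_len;   // length of extras + key + value
    uint32_t opaque;
    uint64_t cas;
};

static uint16_t be16(const uint8_t* p) { return uint16_t((p[0] << 8) | p[1]); }
static uint32_t be32(const uint8_t* p) {
    return (uint32_t(p[0]) << 24) | (uint32_t(p[1]) << 16) |
           (uint32_t(p[2]) << 8)  |  uint32_t(p[3]);
}

// Returns false if fewer than 24 header bytes are available; an incremental
// parser would simply wait for more data in that case.
bool parse_header(const std::vector<uint8_t>& buf, MemcachedHeader& h, uint32_t& value_len) {
    if (buf.size() < 24) return false;
    h.magic      = buf[0];
    h.opcode     = buf[1];
    h.key_len    = be16(&buf[2]);
    h.extras_len = buf[4];
    h.data_type  = buf[5];
    h.vbucket    = be16(&buf[6]);
    h.total_len  = be32(&buf[8]);
    h.opaque     = be32(&buf[12]);
    h.cas        = (uint64_t(be32(&buf[16])) << 32) | be32(&buf[20]);
    value_len    = h.total_len - h.key_len - h.extras_len;
    return true;
}

int main() {
    // 24-byte GET request header with a 3-byte key and no extras or value.
    std::vector<uint8_t> raw = {0x80, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
                                0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00,
                                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    MemcachedHeader h{}; uint32_t value_len = 0;
    if (parse_header(raw, h, value_len))
        std::cout << "opcode=" << int(h.opcode) << " key_len=" << h.key_len
                  << " value_len=" << value_len << "\n";
}
```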
FLICK grammars aim to be reusable and thus include all fields of a given message format, even though application-specific network services often only require a subset of the information encoded in a message. To avoid generated parsers and serialisers handling unnecessary data, FLICK programs make accesses to message fields explicit by declaring a FLICK data type corresponding to the message (Listing 1, lines 1–2). This enables the FLICK compiler to generate input and output tasks that only parse and serialise the required fields for these data types and their dependencies. Other fields are aggregated into either simplified or composite fields, and then skipped or simply copied in their wire format representation. Developers can thus reuse complete message grammars to generate parsers and serialisers, while benefiting from efficient execution for their application-specific network service.
The current FLICK implementation does not support exceptions, but data type grammars could provide a default behaviour when a message is incomplete or not in an expected form.
### 4.3 Primitives and compilation
The FLICK language is strongly-typed for safety. To facilitate middlebox programming, it includes channels, processes, explicit parallelism, and exception handling as native features. For example, events such as broken connections can be caught and handled by FLICK functions, which can notify a backend or record to a log. State handling is essential for describing many middleboxes, and the language supports both session-level and long-term state, whose scope extends across sessions. The latter is provided through a key/value abstraction to task graph instances by the FLICK platform. To access it, the programmer declares a dictionary and labels it with a global qualifier. Multiple instances of the service share the key/value store.
The language is restricted to allow only computations that are guaranteed to terminate, thus avoiding expensive isolation mechanisms while supporting multiple processes competing for shared resources. This restriction allows static allocation of memory and cooperative task scheduling (see §5).
The FLICK language offers primitives to support common datatypes such as bytes, lists and records. Iteration may only be carried out on finite structures (e.g. lists). It also provides primitives such as fold, map and filter but it does not offer higher-order functions: functions such as fold are translated into finite for-loops. Datatypes may be annotated with cardinalities to determine statically the required memory. Loops and branching are compiled to their native counterparts in C++.
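The following C++ sketch illustrates, under assumed names and a made-up cardinality, how a fold over a list with a statically declared maximum size can be lowered to a finite for-loop over a fixed-size array with no dynamic allocation; it is not the compiler's actual output.

```cpp
// Illustrative lowering (not the compiler's actual output) of a fold over a
// list whose cardinality is declared statically: a plain finite for-loop over
// a fixed-size array, with no dynamic allocation.
#include <array>
#include <cstddef>
#include <iostream>

constexpr std::size_t kMaxItems = 8;     // cardinality annotation from the program

struct BoundedList {
    std::array<int, kMaxItems> items{};  // statically allocated storage
    std::size_t len = 0;                 // number of valid entries, len <= kMaxItems
};

// fold (+) 0 xs  ==>  a loop with a statically known upper bound.
int fold_sum(const BoundedList& xs) {
    int acc = 0;
    for (std::size_t i = 0; i < xs.len && i < kMaxItems; ++i)
        acc += xs.items[i];
    return acc;
}

int main() {
    BoundedList xs;
    xs.items = {3, 1, 4, 1, 5};          // remaining slots stay zero
    xs.len = 5;
    std::cout << fold_sum(xs) << "\n";   // prints 14
}
```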
Channel- and process-related code is translated to API calls exposed by the platform (see §5). The language relies on the C++ compiler to optimise the target code.
Channels are typed, and at compile time the platform determines that FLICK programs only send valid data into channels. Due to the language’s static memory restrictions, additional channels cannot be declared at runtime, though channels may be rebound, e.g. to connect to a different backend server.
The language also provides foldt, a parallel version of fold that operates over a set of channels. This allows the efficient expression of typical data processing operations, such as a k-way merge sort in which sorted streams of keys from k channels are combined by selecting elements with the smallest key. The expression foldt f o cs aggregates elements from an array of channels cs, selecting elements according to a function o and aggregating according to a function f. As f must be commutative and associative, the aggregation can be performed in parallel, combining elements in a pair-wise manner until only the result remains.
As shown in Listing 3, the foldt primitive can be used to implement an application-level network service for parallel data aggregation in Hadoop. Whenever key/value pairs become available from the mappers (lines 5–6), foldt is invoked (lines 7–10). Elements elem are ordered based on elem.key (line 8), and values of elements with the same key (elem.key) are merged using a combine function (line 9) to create a new key/value pair (line 10). While foldt could be expressed using core language primitives, the FLICK platform has a custom implementation for performance reasons.
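To illustrate the pair-wise aggregation that foldt performs, the C++ sketch below merges two sorted streams of (key, count) pairs and combines the counts of equal keys, which is the core step of the wordcount combiner; applying it repeatedly over k streams yields the k-way merge described above. It is an illustration, not FLICK's custom foldt implementation.

```cpp
// Pair-wise merge-combine of two sorted (key, count) streams: elements are
// selected in key order and counts of equal keys are combined. Illustration
// of the foldt step, not FLICK's implementation.
#include <cstddef>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

using KV = std::pair<std::string, long>;

std::vector<KV> merge_combine(const std::vector<KV>& a, const std::vector<KV>& b) {
    std::vector<KV> out;
    std::size_t i = 0, j = 0;
    while (i < a.size() || j < b.size()) {
        if (j == b.size() || (i < a.size() && a[i].first < b[j].first)) {
            out.push_back(a[i++]);                       // a has the smaller key
        } else if (i == a.size() || b[j].first < a[i].first) {
            out.push_back(b[j++]);                       // b has the smaller key
        } else {                                         // equal keys: combine counts
            out.push_back({a[i].first, a[i].second + b[j].second});
            ++i; ++j;
        }
    }
    return out;
}

int main() {
    std::vector<KV> mapper1 = {{"flick", 2}, {"network", 1}};
    std::vector<KV> mapper2 = {{"flick", 3}, {"service", 4}};
    for (const KV& kv : merge_combine(mapper1, mapper2))
        std::cout << kv.first << " " << kv.second << "\n";
    // flick 5, network 1, service 4
}
```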
While designed to achieve higher safety and performance, the constraints introduced in the design of the FLICK language, e.g. the lack of support for unbounded computation or dynamic memory allocation, imply that not all possible computations can be expressed in FLICK. For instance, algorithms requiring loops with unbounded iterations (e.g. while-like loops) cannot be encoded. In a general purpose programming language, this would be a severe constraint but for the middlebox functionality that FLICK targets we have not found this to cause major limitations.
## 5 FLICK Platform
The FLICK platform is designed around a task graph abstraction, composed of tasks that deserialise input data to typed values, compute over those values, and serialise results for onward transmission. The FLICK compiler translates an input FLICK program to C++, which is in turn compiled and linked against the platform runtime for execution. Figure 2 shows an overview of the FLICK platform, which handles network connections, the task graph life-cycle, the communication between tasks and the assignment of tasks to worker threads. Task graphs exploit task and data parallelism at runtime as tasks are assigned to worker threads. Even with only one large network flow, serialisation, processing and deserialisation tasks can be scheduled to run on different CPU cores.
(i) The application dispatcher manages the life-cycle of TCP connections: first it maps new incoming connections to a specific program instance, typically based on the destination port number of the incoming connection. The application dispatcher manages the listening sockets that handle incoming connections, creating a new input channel for each incoming connection and handing off data from that connection to the correct instance. When a client closes an input TCP connection, the application dispatcher indicates this to the instance; when a task graph has no more active input channels, it is shut down. New connections are directly connected to existing task graphs.
(ii) The graph dispatcher assigns incoming connections to task graphs, instantiating a new one if none suitable exists. The platform maintains a pre-allocated pool of task graphs to avoid the overhead of construction. The graph dispatcher also creates new output channel connections to forward processed traffic.
(iii) Tasks are cooperatively scheduled by the scheduler, which allocates work among a fixed number of worker threads. The number of worker threads is determined by the number of CPU cores available, and worker threads are pinned to CPU cores.
Tasks in a task graph become runnable after receiving data in their input queues (either from the network or from another task). A task that is not currently executing or scheduled is added to a worker queue when it becomes runnable. All buffers are drawn from a pre-allocated pool to avoid dynamic memory allocation. Input tasks use non-blocking sockets and epoll event handlers to process socket events. When a socket becomes readable, the input task attached to the relevant socket is scheduled to handle the event.
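A minimal sketch of such a pre-allocated buffer pool is shown below: all storage is allocated once at start-up and buffers are recycled through a free list, so the data path never allocates. The class name, sizes and single-threaded free list are illustrative assumptions, not the platform's data structures.

```cpp
// Minimal sketch of a pre-allocated buffer pool: one contiguous allocation at
// start-up, buffers handed out and returned via a free list, no allocation on
// the data path. Illustration only.
#include <cstddef>
#include <iostream>
#include <vector>

class BufferPool {
public:
    BufferPool(std::size_t count, std::size_t buf_size)
        : storage_(count * buf_size), buf_size_(buf_size) {
        for (std::size_t i = 0; i < count; ++i)
            free_.push_back(storage_.data() + i * buf_size);
    }
    char* acquire() {                       // returns nullptr when exhausted
        if (free_.empty()) return nullptr;
        char* buf = free_.back();
        free_.pop_back();
        return buf;
    }
    void release(char* buf) { free_.push_back(buf); }
    std::size_t buf_size() const { return buf_size_; }
private:
    std::vector<char> storage_;   // single allocation made at start-up
    std::vector<char*> free_;
    std::size_t buf_size_;
};

int main() {
    BufferPool pool(4, 2048);
    char* b = pool.acquire();
    std::cout << "acquired=" << (b != nullptr) << " buf_size=" << pool.buf_size() << "\n";
    pool.release(b);
}
```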
For scheduling, each worker thread is associated with its own FIFO task queue. Each task within a task graph has a unique identifier, and a hash over this identifier determines which worker's task queue the task should be assigned to. When a task is to be scheduled, it is always added to the same queue to reduce cache misses.
Figure 3: Task graphs for different application-specific network services: (a) HTTP load balancer, (b) Memcached proxy, (c) Hadoop data aggregator.
Each worker thread picks a task from its own queue. If its queue is empty, the worker attempts to scavenge work from other queues and, if none is found, it sleeps until new work arrives. A worker thread runs a task until either all its input data is consumed, or it exceeds a system-defined time quantum, the timeslice threshold (typically, 10–100 µs; see §6). If the timeslice threshold is exceeded, the code generated by the FLICK compiler guarantees that the task re-enters the scheduler, placing itself at the back of the queue if it has remaining work to do. A task with no work is not added to the task queue, but when new items arrive in its input channels, it is scheduled again.
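The scheduling discipline can be sketched in a few lines of C++: a task runs until its input is drained or the timeslice threshold is exceeded, and in the latter case it places itself at the back of the FIFO queue. The sketch below is single-threaded and omits worker pinning and work scavenging; it is an assumption-laden illustration, not the platform's code.

```cpp
// Simplified, single-worker sketch of cooperative scheduling: a task runs until
// its input is drained or the timeslice threshold is exceeded, then re-queues
// itself at the back of the FIFO queue if work remains.
#include <chrono>
#include <deque>
#include <iostream>

using Clock = std::chrono::steady_clock;

struct Task {
    int id;
    int remaining_items;                       // pending input items
    // Returns true if the task still has work left after this run.
    bool run(std::chrono::microseconds timeslice) {
        auto start = Clock::now();
        while (remaining_items > 0) {
            --remaining_items;                 // "process" one input item
            if (Clock::now() - start > timeslice) break;   // cooperative yield
        }
        return remaining_items > 0;
    }
};

int main() {
    std::deque<Task> queue = {{1, 1000000}, {2, 10}};
    const auto timeslice = std::chrono::microseconds(100);   // 10-100 us in the paper
    while (!queue.empty()) {
        Task t = queue.front();
        queue.pop_front();
        if (t.run(timeslice))
            queue.push_back(t);                // unfinished: back of the queue
        else
            std::cout << "task " << t.id << " done\n";
    }
}
```

Because the short task yields back quickly, it completes well before the long-running task, which is the behaviour the cooperative policy is designed to preserve.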
A disadvantage of allocating tasks belonging to the same task graphs onto different CPU cores is that this would incur several cache invalidations as data move from one core to another. On the other hand, our design enables higher parallelism as different tasks can execute concurrently in a pipelined fashion, leading to higher throughput.
Some middlebox services must handle many concurrent connections, and they frequently write and read small amounts of data. The kernel TCP stack has a high overhead for creating and destroying sockets to support the Linux Virtual File System (VFS) interface [17]. Socket APIs also require switching between user- and kernel-mode, which adds further overhead. As a result, the FLICK platform uses mTCP [21], a highly scalable user-space TCP stack, combined with Intel's DPDK [20] to reduce these overheads. The original mTCP implementation did not support multi-threaded applications, and we modified mTCP so that FLICK I/O tasks can access sockets independently. To utilise the efficient DPDK runtime environment, mTCP executes as a DPDK task. All of these optimisations significantly improve performance for network-bound services (see §6.3).
## 6 Evaluation
The goals of our evaluation are to investigate whether the high-level programming abstraction that FLICK provides carries a performance and scalability cost, and whether DPDK and mTCP improve performance. We implement FLICK programs for the use cases introduced in §2.1, i.e. an HTTP load balancer, a Memcached proxy and a Hadoop data aggregator, and compare their performance against baselines from existing implementations.
After describing the implementation of our use cases (§6.1) and the experimental set-up (§6.2), we explore the performance and scalability of FLICK (§6.3). After that, we examine how well the FLICK platform isolates resource consumption of multiple FLICK programs using cooperative scheduling (§6.4).
### 6.1 Use case implementation
For our three use cases, Figure 3 shows the task graph obtained from the corresponding FLICK program.
HTTP load balancer. This FLICK program implements an HTTP load balancer that forwards each incoming HTTP request to one of a number of backend web servers. Forwarding is based on a naive hash of the source IP and port and destination IP and port. Figure 3a shows the corresponding task graph. The application dispatcher forwards each new TCP connection received on port 80 to the graph dispatcher. The graph dispatcher creates a new task graph, which is later destroyed when the connection closes. The input task deserialises the incoming data into HTTP requests. For the first request, the compute task calculates a hash value selecting a backend server for the request. Subsequent requests on the same connection are forwarded to the same backend server. On their return path no computation or parsing is needed, and the data is forwarded without change. We also implement a variant of the HTTP load balancer that does not use backend servers but which returns a fixed response to a given request. This is effectively a static web server, which we use to test the system without backends.
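A hedged C++ sketch of the backend choice follows: the connection 4-tuple is hashed and the result indexes the list of backends, so every request on a given connection reaches the same server. The field types, hash combination and addresses are illustrative assumptions.

```cpp
// Sketch of the load balancer's backend choice: hash the connection 4-tuple
// (source IP/port, destination IP/port) and pick a backend, so all requests on
// one connection go to the same server. Illustration only.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <iostream>
#include <string>
#include <vector>

struct FourTuple {
    uint32_t src_ip, dst_ip;
    uint16_t src_port, dst_port;
};

std::size_t pick_backend(const FourTuple& t, std::size_t num_backends) {
    // Combine the fields into one hash value (boost-style hash_combine).
    std::size_t h = std::hash<uint32_t>{}(t.src_ip);
    auto mix = [&h](std::size_t v) {
        h ^= v + 0x9e3779b97f4a7c15ULL + (h << 6) + (h >> 2);
    };
    mix(std::hash<uint32_t>{}(t.dst_ip));
    mix(std::hash<uint16_t>{}(t.src_port));
    mix(std::hash<uint16_t>{}(t.dst_port));
    return h % num_backends;
}

int main() {
    std::vector<std::string> backends = {"10.0.0.1", "10.0.0.2", "10.0.0.3"};
    FourTuple conn{0x0a000101, 0x0a000064, 52344, 80};
    std::cout << "backend: " << backends[pick_backend(conn, backends.size())] << "\n";
}
```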
Memcached proxy. In this use case, the FLICK program (Listing 1) receives Memcached look-up requests for keys. Requests are forwarded based on hash partitioning to a set of Memcached servers, each storing a disjoint section of the key space. Responses received from the Memcached servers are returned to clients.
Figure 3b shows the corresponding task graph. As before, a new task graph is created for each new TCP connection. Unlike the HTTP load balancer, requests
from the same client can be dispatched to different Memcached servers, which means that the compute task must have a fan-out greater than one.
When a request is received on the input channel, it is deserialised by the input task. The deserialisation code is automatically generated from the type specification in Listing 2. The deserialiser task outputs the Memcached request object, containing the request keys and body, which are passed on to the compute task. The compute task implements the dispatching logic. It identifies the Memcached server responsible for that key and forwards the request to it through the serialiser task. When the response is received from the Memcached server, the deserialiser task deserialises it and passes the response object to the compute task, which returns it to the client through the serialiser task.
**Hadoop data aggregator.** The Hadoop data aggregator implements the combiner function of a map/reduce job to perform early data aggregation in the network, as described in §2.1. It is implemented in FLICK according to Listing 3. We focus on a wordcount job in which the combiner function aggregates word counters produced by mappers over a set of documents.
For each Hadoop job, the platform creates a separate task graph per reducer (Figure 3c). The input tasks deserialise the stream of intermediate results (i.e. key/value pairs) from the mappers. Compute tasks combine the data with each compute task taking two input streams and producing one output. The output task converts the data to the byte stream, as per the Hadoop wire format.
### 6.2 Experimental set-up
We deploy the prototype implementation of the FLICK platform on servers with two 8-core Xeon E5-2690 CPUs running at 2.9 GHz with 32 GB of memory. Clients and back-end machines are deployed on a cluster of 16 machines with 4-core Xeon E3-1240 CPUs running at 3.3 GHz. All machines use Ubuntu Linux version 12.04. The clients and backend machines have 1 Gbps NICs, and the servers executing the FLICK platform have 10 Gbps NICs. The client and backend machines connect to a 1 Gbps switch, and the FLICK platform connects to a 10 Gbps switch. The switches have a 20 Gbps connection between them. We examine the performance of FLICK with and without mTCP/DPDK.
To evaluate the performance of the HTTP load balancer, we use multiple instances of ApacheBench (ab) [4], a standard tool for measuring web server performance, together with 10 backend servers that run the Apache web server [51]. Throughput is measured in terms of connections per second as well as requests per second for HTTP keep-alive connections. We compare against the standard Apache (mod_proxy_balancer) and the Nginx [35] load balancers.
For the Memcached proxy, we deploy 128 clients running libmemcached [1], a standard client library for interacting with Memcached servers. We use 10 Memcached servers as backends and compare the performance against a production Memcached proxy, Moxi [32]. We measure performance in terms of throughput (i.e. requests per second) and request latency. Clients send a single request and wait for a response before sending the next request.
For the Hadoop data aggregator, the workload is a wordcount job. It uses a sum as the aggregation computation and an input dataset with a high data reduction ratio. The datasets used in experiments are 8 GB, 12 GB and 16 GB (larger data sets were also used for validation). Here we measure performance in terms of the absolute network throughput.
In all graphs, the plotted points are the mean of five runs with identical parameters. Error bars correspond to a 95% confidence interval.
### 6.3 Performance
**HTTP load balancer.** We begin by measuring the performance of the static web server with an increasing load. This exercises the following components of the FLICK platform: HTTP parsing, internal and external channel operation and task scheduling. The results are for 100 to 1,600 concurrent connections (above these loads, Apache and Nginx begin to suffer timeouts). Across the entire workload, FLICK achieves superior performance. It achieves a peak throughput of 306,000 requests/sec for the kernel version and 380,000 requests/sec with mTCP. The maximum throughput achieved by Apache is 159,000 requests/sec and by Nginx is 217,000 requests/sec. FLICK also shows lower latency, particularly at high concurrency when Apache and Nginx use large numbers of threads. This confirms that, while FLICK provides a general-purpose platform for creating application-specific network functions, it can outperform purpose-written services.
To investigate the per-flow overhead due to TCP setup/tear-down, we also repeat the same experiment but with each web request establishing a separate TCP connection (i.e. non-persistent HTTP). This reduces the throughput in all deployments: 35,000 requests/sec for Apache; 44,000 requests/sec for Nginx; and 45,000 requests/sec for FLICK, which maintains the lowest latency. Here the kernel TCP performance for connection set-up and tear-down is a bottleneck: the mTCP version of FLICK handles up to 193,000 requests/sec.
Next, we repeat the experiment using our HTTP load balancer implementation to explore the impact of both receiving and forwarding requests. The set-up is as described in §6.2. We use small HTTP payloads (137 bytes each) to ensure that the network and the backends are
never the bottleneck. As for the web server experiment, we first consider persistent connections. Figures 4a and 4b confirm the previous results: FLICK achieves up to 1.4× higher throughput than Nginx and 2.2× higher than Apache. Using mTCP, the performance is even better with higher throughput and lower latency: FLICK achieves a maximum throughput 2.7× higher than Nginx and 4.2× higher than Apache. In all cases, FLICK has lower latency.
With non-persistent connections, the kernel version of FLICK exhibits a lower throughput than Apache and Nginx (see Figure 4c). Both Apache and Nginx keep persistent TCP connections to the backends, but FLICK does not, which increases its connection set-up/tear-down cost. When mTCP is used with its lower per connection cost, FLICK shows better performance than either with a maximum throughput 2.5× higher than that of Nginx and 2.1× higher than that of Apache. In addition, both the kernel and mTCP versions of FLICK maintain the lowest latency of the systems, as shown in Figure 4d.
**Memcached proxy.** For the Memcached proxy use case, we compare the performance of FLICK against Moxi [32], as we increase the number of CPU cores. We chose Moxi because it supports the binary Memcached protocol and is multi-threaded. In our set-up, 128 clients make concurrent requests using the Memcached binary protocol over persistent connections, which are then multiplexed to the backends.
Figures 5a and 5b show the throughput, in terms of the number of requests/sec, and the latency, respectively. With more CPU cores, the throughput initially increases for both systems. The kernel version achieves a maximum throughput of 126,000 requests/sec with 8 CPU cores and the mTCP version achieves 198,000 requests/sec with 16 CPU cores. Moxi peaks at 82,000 requests/sec with 4 CPU cores. FLICK’s latency decreases with more CPU cores due to the larger processing capacity available in the system. The latency of Moxi beyond 4 CPU cores and FLICK’s beyond 8 CPU cores increases as threads compete over common data structures.
**Hadoop data aggregator.** The previous use cases had relatively simple task graphs (see Figure 3) and considerable overhead comes from the connection set-up and tear-down, with many network requests processed in parallel. In contrast, the Hadoop data aggregator use case has a more complex task graph, and we use it to assess the overhead of FLICK’s communication channels and intra-graph scheduling. Here the tasks are compute bound, and the impact of the network overhead is limited. We only present the kernel results because the mTCP results are similar.
We deploy 8 mapper clients, each with a 1 Gbps connection, to connect to the FLICK server. The task graph therefore has 16 tasks (8 input, 7 processing and 1 output). The FLICK Hadoop data aggregator runs on a server with 16 CPU cores without hyper-threading.
Figure 6 shows that FLICK scales well with the number of CPU cores, achieving a maximum throughput of 7,513 Mbps with 16 CPU cores. This is the maximum capacity of the 8 network links (once TCP overhead is accounted for), and matches measurements from iperf. We conclude that the FLICK platform can exploit the high level of parallelism of multi-core servers and efficiently schedule multiple tasks concurrently to maximise network throughput.
The results in Figure 6 represent three data sets of 8 GB, 12 GB and 16 GB mentioned in §6.2, consisting of words of 8, 12 and 16 characters, respectively. The FLICK platform can more efficiently process the longer words because they comprise fewer key value pairs.
### 6.4 Resource sharing
We finish our experiments by examining the ability of the FLICK platform to ensure efficient resource sharing, as described in §3.1. For this, we use a micro-benchmark running 200 tasks. Each task consumes a finite number of data items, computing a simple addition for each input byte. The tasks are equally split between two classes:

**light** tasks, which operate on 1 KB data items, and **heavy** tasks, which operate on 16 KB data items. We consider three scheduling policies: (i) **cooperative** is the policy used by FLICK, in which each task is given a fixed amount of CPU time before it yields control to another task; (ii) **non-cooperative** runs a scheduled task to completion, potentially letting the OS scheduler preempt it; and (iii) **round robin** schedules each task for one data item only.
Figure 7 shows the total completion time for light and heavy tasks. Since the light tasks handle less data, they should, given a fair share of resources, finish before the heavy tasks. With the round robin policy, this does not happen: the heavy tasks take longer to process one data item. Each time they are scheduled, they occupy the worker thread for longer than a light task. Conversely, with the non-cooperative policy, each task runs to completion. The total completion time for the light and heavy tasks is determined by their scheduling order. However, with FLICK’s cooperative policy, the light tasks are allowed to complete ahead of the heavy tasks without increasing the overall runtime—each task is given a fair share of the CPU time.
## 7 Related Work
Network programming languages are essential to the usability and scalability of software-defined networking (SDN), allowing high-level configuration logic to be translated to low-level network operations. Inspired by Frenetic [16], NetKAT [2] is a high-level network programming language based on Kleene algebra [25], in which network policies are compiled into a low-level programming abstraction such as OpenFlow [31] flow tables. Similarly, the Policy Graph Abstraction (PGA) [41] expresses network policies as a coherent, conflict-free policy set and supports automated, correct and independent composition of middlebox policies. These systems focus on network management and configuration and not on the more expressive programs for application-specific network services that are targeted by FLICK.
FLICK ensures that programs execute in a timely manner by restricting the expressiveness of the programming language. Another possible approach is to explicitly verify that a specific program meets requirements. This verification approach has been used to check simple, stateless Click pipelines [12] but might be harder for more complex middlebox programs.
There are proposed extensions to the packet processing done by OpenFlow. P4 [8] is a platform- and protocol-independent language for packet processors, which allows the definition of new header fields and protocols for use in match/action tables. Protocol Oblivious Forwarding (POF) [47] also provides a flexible means to match against and rewrite packet header fields. Packet Language for Active Networks (PLAN) [18] is a stateless and strongly-typed functional language for active networking in which packets carry programs to network nodes for execution. In general, these approaches are limited to expressing control-plane processing of packets in contrast to FLICK, which deals with application layer data.
**Software middlebox platforms.** Recently network services have been deployed on commodity hardware to reduce costs and increase flexibility. **Click** [24] processes packets through a chain of installed elements, and it supports a wide variety of predefined elements. Programmers, however, must write new elements in C++, which can be error-prone. **ClickOS** [30] combines Click with MiniOS and focuses on the consolidation of multiple software middlebox VMs onto a single server. It overcomes current hypervisor limitations through a redesigned I/O system and by replacing Open vSwitch [38] with a new software switch based on VALE [45]. ClickOS targets packet level processing, e.g. manipulating header fields or filtering packets; FLICK, by contrast, operates at the application level, and the approaches can be seen as orthogonal. It would be challenging for ClickOS to parse and process HTTP data when a single data item may span multiple packets or Memcached data when a packet may contain multiple data items.
**Merlin** [48] is a language that safely translates policies, expressed as regular expressions for encoding paths, into Click scripts. Similarly, **IN-NET** [49] is an architecture for the deployment of custom in-network processing on ClickOS with an emphasis on static checking for policy safety. In a similar vein, **xOMB** [3] provides a modular processing pipeline with user-defined logic for flow-oriented packet processing. **FlowOS** [7] is a flow-oriented programmable platform for middleboxes using a C API similar to the traditional socket interface. It uses kernel threads to execute flow-processing modules without terminating TCP connections. Similar to ClickOS, these platforms focus on packet processing rather than the application level. **SmartSwitch** [53] is a platform for high-performance middlebox applications built on top of NetVM [19], but it only supports UDP applications, and it does not offer a high-level programming model.
**Eden** is a platform to execute application-aware network services at the end hosts [5]. It uses a domain-specific language, similar to F#, and enables users to implement different services ranging from load balancing to flow prioritisation. By operating at the end hosts, it limits the set of network services that can be supported. For example, it would be impossible to implement in-network aggregation or in-network caching.
**Split/Merge** [42] is a hypervisor-level mechanism that allows balanced, stateful elasticity and migration of flow state for virtual middleboxes. Per-flow migration is accomplished by identifying the external state of network flows, which has to be split among replicas. Similar elasticity support could be integrated with FLICK.
## 8 Conclusions
Existing platforms for in-network processing typically provide a low-level, packet-based API. This makes it hard to implement application-specific network services. In addition, they lack support for low-overhead performance isolation, thus preventing efficient consolidation.
To address these challenges, we have developed FLICK, a domain-specific language and supporting platform that provides developers with high-level primitives to write generic application-specific network services. We described FLICK’s programming model and runtime platform. FLICK realises processing logic as restricted cooperatively schedulable tasks, allowing it to exploit the available parallelism of multi-core CPUs. We evaluated FLICK through three representative use cases, an HTTP load balancer, a Memcached proxy and a Hadoop data aggregator. Our results showed that FLICK greatly reduces the development effort, while achieving better performance than specialised middlebox implementations.
Rule-driven service coordination middleware for scientific applications
Héctor Fernández, Cédric Tedeschi, Thierry Priol
To cite this version:
HAL Id: hal-01326930
https://hal.inria.fr/hal-01326930
Submitted on 6 Jun 2016
Rule-Driven Service Coordination Middleware for Scientific Applications
Héctor Fernández¹, Cédric Tedeschi², Thierry Priol³
¹IRISA. University of Rennes 1 / INRIA
²INRIA
³Vrije Universiteit Amsterdam
Abstract
With the proliferation of Web services, scientific applications are more and more designed as temporal compositions of services, commonly referred to as workflows. To address this paradigm shift, different workflow management systems have been proposed. While their efficiency has been established over centralized static systems, it is questionable over decentralized failure-prone platforms.
Scientific applications recently started to be deployed over large distributed computing platforms, leading to new issues, like elasticity, i.e., the possibility to dynamically refine, at runtime, the amount of resources dedicated to an application. This raised again the demand for new programming models, able to express autonomic self-coordination of services in a dynamic platform.
Nature-inspired, rule-based computing models recently gained a lot of attention in this context. They are able to naturally express parallelism, distribution, and autonomic adaptation. While their high expressiveness and adequacy for this context have been established, such models severely suffer from a lack of proofs of concept. In this paper, we concretely show how to leverage such models in this context. We focus on the design, the implementation, and the experimental validation of a chemistry-inspired scientific workflow management system.
Keywords: Service coordination; Workflow execution; Nature-inspired computing; Rule-based programming; Decentralization
Preprint submitted to Future Generation Computer Systems
1. Introduction
Until recently, scientific applications were commonly written as hard-to-maintain, unreadable scripts, leading to poor reusability and high maintenance costs. With the proliferation of Web services and the increasing adoption of service-oriented computing, whose primary goal is to make a collection of software services accessible through the network, scientists started to develop their applications as compositions of Web services, today commonly referred to as workflows. This shift of paradigm recently led to more reuse and experiment sharing in the community. The specification and execution of such workflows are managed by workflow management systems, responsible for the coordination of the involved services. Addressing the limitations of initial workflow languages such as the BPEL business standard [1], different systems, for example Taverna [2], Kepler [3], Triana [4], Pegasus [5], or Askalon [6], provide features such as implicit parallelism and data-driven coordination, increasing the level of abstraction regarding execution management as well as improving the manageability of science workflows, as formulated by Zhao, Raicu and Foster in 2008 [7].
Scientific workflows need to be deployed over more and more distributed environments to obtain the computing power they require. Let us for instance cite the Magellan project, which aims at providing a large and elastic distributed infrastructure for science [8]. Such platforms are the new target for scientists needing to run their applications, and appear as a major way to meet their computing requirements. Thus, future scientific workflow systems and languages should provide a natural way to express both workflows and platform characteristics. We identify several critical features that future WMSs must address: (i) the high degree of parallelism and distribution of the deployed services, (ii) the potential issues brought by a centralized coordinator, such as single points of failure and limited scalability, and (iii) the dynamicity and distribution of the next generation of distributed infrastructures. These features call for new specification tools able to express them easily.
Lately, nature metaphors, and in particular chemistry-inspired analogies have been identified as a promising source of inspiration for developing new approaches for autonomous service coordination [9]. Among them, the chemical programming paradigm is a rule-based programming model built atop a high level execution model within which a computation is basically seen as a set of reactions consuming some molecules of data interacting freely
within a chemical solution, producing new ones (resulting data). Reactions take place in an implicitly parallel, autonomous, and decentralized manner. More recently, the Higher-Order Chemical Language (HOCL) [10] raised the chemical model to the higher order, providing a highly expressive paradigm: every entity in the system (in our case data, services and their dependencies, and the platform itself) is seen as a molecule. Moreover, rules can apply to other reaction rules, programs dynamically modifying programs, opening the door to dynamic adaptation. This model is now envisioned as an alternative to naturally express autonomous coordination [11]. However, while its expressiveness and adequacy for service coordination have been established, actual experimentation with the chemical model has remained quite limited until now. There is a strong need for a proof of concept to show its viability, in particular compared to current WMSs.
Contribution. In this paper, we present a workflow management system able to solve a wide variety of workflow patterns, both in a centralized and in a decentralized way, following the chemical model. Its implementation and its performance evaluation on classic scientific workflows presenting different characteristics are discussed. For the sake of comparison and discussion, the tested workflows were also executed on top of the Taverna and Kepler WMSs, validating our software prototype and establishing the viability of the concept.
Outline. Section 2 introduces the preliminaries of our work, namely workflow management systems and the Higher-Order Chemical Language (HOCL). Section 3 describes the architecture and workflow engine we have built. We show how a workflow is described in our model, and how chemical rules are defined and combined so that they can solve a wide variety of workflow patterns. Section 4 focuses on the implementations of both centralized and decentralized versions of the system defined. Section 5 details the experimental campaign and its results. Section 6 discusses related works. Section 7 draws a conclusion.
2. Background
In this section, we introduce the two background areas of this work: workflow management systems and the chemical computing model.
2.1. Workflow Management for e-Science
The increasing reliance on service-oriented architectures (SOA) in e-science resulted in applications being more and more defined as workflows of services. As a natural consequence, workflow management systems have recently gained considerable attention. The BPEL standard [1] and its followers [12] were at first briefly adopted by the scientific community. Then, science-oriented workflow languages and systems were designed to cope with the specific characteristics of scientific applications, such as a high degree of parallelism and the need for scheduling. In this way, a number of systems were designed for the expression and execution of scientific workflows. Taverna [2], Kepler [3], Triana [4], Pegasus [5] or Askalon [6] provide features such as implicit parallelism and data-driven coordination, increasing the abstraction regarding execution management while improving efficiency and manageability. All typically provide a visual notation for service composition.
The remainder of this section reviews the two open-source workflow systems used for the validation of our work: Taverna and Kepler. We chose Kepler and Taverna because they are among the most used and mature open-source scientific WMSs. Although Pegasus is equally mature, we chose not to use it because its resource management is integrated into the workflow manager, which is not our primary concern here.
Taverna [2] builds upon service-oriented architectures, and the web service standards. Interactions between services (referred to as processors) are defined using the XML-based Scufl language or a GUI. Taverna is data-driven. Data-dependencies specify links among different services, so parallelism is implicit, and optimized at run time. Note that control-dependencies can also be specified by links that define precedence conditions among processors. Taverna’s workflow engine is centralized; a unique coordinator manages the coordination of all computation blocks.
Kepler is a centralized workflow engine built upon Ptolemy II [13], initiated in part by the members of the Science Environment for Ecological Knowledge (SEEK)\(^1\). It also relies on a data-driven model for simulating and designing real-time and concurrent workflows, using a proprietary modeling markup language called MoML. This language is based on the actor-oriented modeling paradigm, which consists of a composition of computation blocks called actors representing operations or data sources. Thanks to its data-driven behavior, Kepler provides an intuitive and implicit parallel execution. However, it may hinder the execution of more complex workflow patterns. In recent versions, some control structures can be supported through more sophisticated programming.

\(^1\)http://seek.ecoinformatics.org/
The limitation of both Taverna and Kepler is the lack of (i) facilities to describe more complex control-flow patterns, and (ii) support for a decentralized coordination of the workflow execution.
2.2. Rule-Based Chemical Programming
Nature analogies, and more specifically bio-chemical metaphors, have recently gained momentum in the construction of programming models coping with the requirements of the Internet of Services [9]. Initially proposed to naturally express highly parallel programs, the chemical programming paradigm exhibits properties required in emerging service platforms and naturally expresses autonomic coordination.
According to the chemical metaphor, molecules (data) float in a chemical solution, and react according to reaction rules (program) producing new molecules (resulting data). These reactions take place in an implicitly parallel, autonomous, and non-deterministic way until no more reactions are possible, a state referred to as inertia. The computation is carried out according to local conditions without any central coordination, ordering or serialization. This programming style allows writing programs cleared of any artificial sequentiality, so the programmer can concentrate on the functional aspects of the problem solved. The execution model is reactive, in the sense that the presence of a molecule suffices to trigger a reaction requiring such a molecule. Nevertheless, as it will be shown, it can express sequentiality if needed.
Such a model takes its roots in concurrent multiset rewriting and was formalized in [14], and then put in practice through the Higher-Order Chemical Language (HOCL) [10]. In HOCL, every entity is a molecule, including reaction rules. A program is a solution of molecules, formally a multiset of atoms, denoted \( A_1, A_2, \ldots, A_n \), “,” being the associative and commutative operator of construction of compound molecules. Atoms can be constants (integers, booleans, etc.), reaction rules, tuples of \( n \) atoms, denoted \( A_1:A_2: \ldots :A_n \), or sub-solutions, denoted \( \langle M_i \rangle \), where \( M_i \) is the molecule content of the sub-solution. A reaction involves a reaction rule replace \( P \) by \( M \) if \( V \) and a molecule \( N \) satisfying the pattern \( P \) and the reaction condition \( V \).
The reaction consumes the molecule $N$ to produce a new molecule $M$. This rule can react as long as a molecule satisfying the pattern $P$ exists in the solution. Its one-shot variant, denoted one $P$ by $M$ if $V$, reacts only once and is consumed in the reaction. Rules can either appear explicitly or be named using the let operator, so that only this name appears in the solution. Let us consider the simple HOCL program below, which extracts the maximum even number from a set of integers.
```plaintext
let selectEvens = replace x, ω by ω if x % 2 ≠ 0 in
let getMax = replace x, y by x if x ≥ y
in
⟨
  ⟨ selectEvens, 2, 3, 5, 6, 8, 9 ⟩,
  replace-one ⟨ selectEvens = s, ω ⟩ by getMax, ω
⟩
```
The selectEvens rule removes odd numbers from the solution by repeated reactions with an integer $x$, $\omega$ denoting the whole solution in which selectEvens floats, deprived of $x$. The getMax rule reacts with two integers $x$ and $y$ such that $x \geq y$ and replaces them by $x$. In a solution of integers, this rule, by its repeated application, extracts the maximum value. The main solution is composed of (i) a sub-solution containing the input integers along with the selectEvens rule, and (ii) a higher-order rule (the last line of the program) that will open the sub-solution, extract the remaining (even) numbers, and introduce the getMax rule.
Solving the problem requires the sequentiality of the reactions of the two rules. This can be achieved by the higher order: in an HOCL program, a sub-solution can react with other elements as soon as it has reached the state of inertia. In other terms, the higher-order rule will react with the sub-solution only when no more reactions are possible within it, i.e., when it contains only even numbers. (Note that the order in which odd numbers are deleted is non-deterministic.) The result is then as follows:
```plaintext
⟨ ⟨ selectEvens, 2, 6, 8 ⟩, replace-one ⟨ selectEvens = s, ω ⟩ by getMax, ω ⟩
```
Then, the higher-order rule reacts with it, extracting the remaining numbers and dynamically introducing the getMax rule, thereby triggering the second phase of the program where the maximum value is kept. The solution is then:
$\langle 2, 6, 8, \text{getMax} \rangle$
getMax then reacts with pairs of integers until only 8 remains. Note that, without the higher-order staging, putting both rules directly in the solution of integers could entail a wrong behavior, as the pipeline between the two rules would be broken, possibly leading to a wrong result. For instance, if getMax reacted first with molecules 8 and 9, 8 would be deleted.
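To make this execution model more tangible, the following Java sketch simulates the two phases of the program above on a plain multiset of integers. It is only an illustration of the semantics, not the HOCL interpreter used later in this paper; the shuffling mimics the non-deterministic choice of reactions, and the getMax phase only starts once the first phase has reached inertia, as the higher-order rule enforces.

```java
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

public class ChemicalMaxOfEvens {
    public static void main(String[] args) {
        List<Integer> solution = new ArrayList<>(List.of(2, 3, 5, 6, 8, 9));

        // Phase 1: selectEvens -- repeatedly remove one odd integer until inertia.
        boolean reacted = true;
        while (reacted) {
            reacted = false;
            Collections.shuffle(solution);          // non-deterministic choice of molecule
            for (int i = 0; i < solution.size(); i++) {
                if (solution.get(i) % 2 != 0) {     // reaction condition: x % 2 != 0
                    solution.remove(i);             // consume the odd molecule
                    reacted = true;
                    break;                          // one reaction at a time
                }
            }
        }
        // Inertia reached: only even numbers remain; the higher-order rule
        // would now open the sub-solution and introduce getMax.

        // Phase 2: getMax -- replace two integers x >= y by x until one molecule remains.
        while (solution.size() > 1) {
            Collections.shuffle(solution);
            int x = solution.remove(0);
            int y = solution.remove(0);
            solution.add(Math.max(x, y));           // keep the larger molecule
        }
        System.out.println("Result: " + solution.get(0)); // prints 8
    }
}
```

Running the sketch always prints 8, whatever order the odd numbers are removed in, which is exactly the point of staging the two rules through the higher order.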
While this example is quite simple, it already provides the intuition behind autonomic coordination and adaptation, and behind the simplicity of this programming style. These features are explored in more detail in [15, 16, 17]. Furthermore, as a rule-based language, HOCL provides a high level of abstraction for the modelling of service interactions, as rules allow the collaborations to be defined without having to interact with the individual services.
3. Chemistry-Inspired Workflow Management
In this section, we describe an HOCL-based workflow management system. First, the coordination mechanisms developed, which build upon higher-order chemistry, are presented. Then, the architecture underlying it, for both centralized and decentralized coordination, is described. The concepts presented in this section take their origin in the founding work presented in [18].
3.1. Workflow Representation
Let us consider a simple workflow expressed using BPMN (Business Process Modeling Notation) [19], composed of the four services $S_1$, $S_2$, $S_3$ and $S_4$, as illustrated in Figure 1. In this example, after $S_1$ completes, $S_2$ and $S_3$ can be invoked in parallel. Once $S_2$ and $S_3$ have both completed, $S_4$ can be invoked. Using any of the existing BPMN editors [20], we assume that data and control dependencies are translated to a traditional workflow definition language, such as the well-known BPEL [1] or SCUFL [21]. For instance, a BPEL specification could be translated into a chemical program, as detailed in [22]. In the following, HOCL is used to describe and execute workflow specifications directly, as our purpose is to show its potential as an executable workflow language. Thus, the general shape of the chemical representation of a workflow is as follows: the main solution is composed of as many sub-solutions as there are WSes in the workflow. Each sub-solution represents a WS with its own data and control dependencies with other WSes. More formally, a WS is a molecule of the form $WS_i : \langle \ldots \rangle$, where $WS_i$ refers to the symbolic name given to the service, whose connection details and physical location are hidden.
Based on the workflow shown in Figure 1, an example of its chemical representation is illustrated by Figure 2. In this example, $WS_1 : \langle \ldots \rangle$ to $WS_4 : \langle \ldots \rangle$ represent WSes in the solution. The relations between WSes are expressed through molecules of the form $\text{Dest}:WS_i$ with $WS_i$ being the destination WS where some information needs to be transferred. For instance, we can see in the $WS_1$ sub-solution that $WS_1$ will transfer some information (its outcome) to $WS_2$ and $WS_3$ (Line 2.02).
Figure 1: Simple workflow example.

Figure 2: Chemical workflow representation.

Let us have a closer look at these dependencies. $WS_2$ contains a data dependency: it requires a molecule $\text{RESULT}:WS1:value1$ containing
the result of \( S_1 \) to be invoked (second part of Line 2.03). The two molecules produced by the reaction represent the call to \( S_2 \) and their input parameters. They are expressed using a molecule of the form \( \text{Call}:S_i \) and a molecule \( \text{Param}: \langle in_1, \ldots, in_n \rangle \), where \( in_1, \ldots, in_n \) represent the input parameters to call service \( S_i \). In Figure 2, this input parameter corresponds to the result of some previous service \( S_j \). WS3 works similarly. WS4 performs a control pattern known as synchronization. It needs to wait until both WS2 and WS3 have completed, in other words, until the molecules \( \text{Result}:WS2:value2 \) and \( \text{Result}:WS3:value3 \) appear in its own sub-solution, to start its execution. In addition, a data dependency is also expressed in WS4: the result of \( S_2 \) is required to call \( S_4 \).
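Before turning to the generic rules, it may help to restate the structure of this example outside the chemical notation. The short Java fragment below gives a rough, non-chemical rendering of the multiset of Figure 2: each WS sub-solution holds its destinations and its data dependencies. The map-based encoding and the "needs ..." shorthand are ours, purely a reading aid, not actual HOCL molecules.

```java
import java.util.List;
import java.util.Map;

public class ChemicalWorkflowSketch {
    public static void main(String[] args) {
        // WS1 performs a parallel split towards WS2 and WS3;
        // WS4 synchronizes on the results of WS2 and WS3.
        Map<String, List<String>> multiset = Map.of(
                "WS1", List.of("Dest:WS2", "Dest:WS3"),
                "WS2", List.of("Dest:WS4", "needs Result:WS1"),
                "WS3", List.of("Dest:WS4", "needs Result:WS1"),
                "WS4", List.of("needs Result:WS2", "needs Result:WS3"));
        multiset.forEach((ws, content) -> System.out.println(ws + ": " + content));
    }
}
```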
To ensure the execution of a chemical workflow, additional generic chemical rules (i.e., independent of any specific workflow) must be defined. These rules consume and generate additional molecules to manage the transfer of information between services, condition checking, fault detection, and more complex control flows. To express the whole logic of a workflow, these rules are composed, relying on the analogy of molecular composition. This consists of composing several molecules, which are combined based on data molecule dependencies, and whose reactions produce new molecules reacting in their turn, and so on, until the workflow is completed.
### 3.2. Generic Rules for Invocation and Transfer
Common tasks in a workflow of services are service invocation and information transfer between services. We now review three generic rules, illustrated in Algorithm 1, responsible for these basic tasks, which will be commonly encountered in the compositions presented later. The \( \text{invokeServ} \) rule encapsulates the actual invocation of services. Upon reaction, it invokes the Web service \( S_i \), by consuming the tuple \( \text{Call}:S_i \) representing the invocation itself and \( \text{Param}: \langle in_1, \ldots, in_n \rangle \) representing the input parameters, and it generates the molecules containing the results of the invocation in the \( WSi \) sub-solution. The molecule Flag_Invoke is a flag whose presence in the solution indicates that the invocation can take place. The \( \text{preparePass} \) rule prepares the messages aimed at transferring the results to their destination services, which will in turn trigger the execution of the \( \text{passInfo} \) rule.
Rule \( \text{passInfo} \) transfers molecules of information between WSes. This rule reacts with a molecule \( \text{WSi}:\langle \text{Pass}:d: \langle \omega_1 \rangle \rangle \) indicating that some molecules (here denoted \( \omega_1 \)) from \( WSi \) need to be transferred to \( d \).
**Algorithm 1** Basic generic rules.

3.01 let invokeServ = replace WSi:⟨Call:Si, Param:⟨in1, ..., inn⟩, Flag_Invoke, ω⟩ by WSi:⟨Result:WSi:⟨value⟩, ω⟩

3.02 let preparePass = replace WSi:⟨Result:WSi:⟨value⟩, Dest:WSj, ω⟩ by WSi:⟨Pass:WSj:⟨Completed:WSi:⟨value⟩⟩, ω⟩

3.03 let passInfo = replace WSi:⟨Pass:WSj:⟨ω1⟩, ω2⟩, WSj:⟨ω3⟩ by WSi:⟨ω2⟩, WSj:⟨ω1, ω3⟩
These molecules, once inside the sub-solution of \( d \), will trigger the next step of the execution. Therefore, the molecule \( \omega_1 \) is transferred from sub-solution \( WSi \) to sub-solution \( d \) when reacting with the passInfo rule.
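To illustrate the effect of passInfo outside the chemical notation, here is a minimal Java sketch that models two sub-solutions as plain lists of molecules and performs the same transfer. The Pass record, the string encoding of molecules, and the passInfo method are our own simplifications, not part of the HOCL runtime.

```java
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class PassInfoSketch {
    // A Pass molecule carries a destination sub-solution name and a payload ω1.
    record Pass(String dest, List<String> payload) {}

    public static void main(String[] args) {
        // Two sub-solutions of the multiset, modeled as plain lists of molecules.
        Map<String, List<Object>> multiset = new HashMap<>();
        multiset.put("WS1", new ArrayList<>(List.of(
                new Pass("WS2", List.of("Completed:WS1:<val>")))));
        multiset.put("WS2", new ArrayList<>(List.of("Dest:WS4")));

        passInfo(multiset, "WS1");
        System.out.println(multiset);
        // WS1 is now empty; WS2 holds Dest:WS4 and Completed:WS1:<val>.
    }

    // passInfo: consume every Pass:d:<ω1> molecule of the given sub-solution and
    // inject its payload ω1 into the sub-solution of d.
    static void passInfo(Map<String, List<Object>> multiset, String source) {
        List<Object> src = multiset.get(source);
        for (Object molecule : new ArrayList<>(src)) {   // iterate over a copy to allow removal
            if (molecule instanceof Pass pass) {
                src.remove(molecule);
                multiset.get(pass.dest()).addAll(pass.payload());
            }
        }
    }
}
```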
3.3. Complex Workflow Patterns
With the generic rules described so far, the engine can only support data flows, which impose a deterministic behavior on the execution of our programs. However, more complex control flows should be taken into account in order to solve a broader range of workflow definitions. We now illustrate how HOCL can be leveraged to deal with complex control flows, by detailing a particular pattern known as Simple Merge.
As illustrated in Figure 3, a simple merge pattern resembles an XOR operation. It involves a structure where two or more source service flows (denoted S₁ to Sₙ in Figure 3) converge asynchronously into a single destination (denoted S_D). The destination service must however be launched only once, regardless of the number of incoming branches. In other words, only the first source service to complete will influence the remainder of the workflow execution.
To enhance our workflow engine with the support of the simple merge pattern, we need to define the appropriate generic rules and dispatch them in the sub-solutions of the WSs involved. These rules are given in Algorithm 2. The `sm_preparePass` reaction rule is used to add, in the sub-solution of every incoming service, a particular MERGE molecule to the information to be transferred to the destination service (see Lines 4.01 and 4.03). The destination WS waits for this molecule, and only the first MERGE molecule received in its sub-solution will be consumed. Then the `sm_setFlag` reaction rule takes place, producing one molecule of the form `Flag_Invoke`, which triggers the service invocation. The following MERGE molecules received will be ignored. In terms of molecular composition, each source WS will have in its sub-solution one `sm_preparePass` rule (A on Figure 3) and one `passInfo` rule (denoted B on Figure 3), which are composed with the `sm_setFlag` rule (C) in the destination WS.
**Algorithm 2** Generic rules - Simple merge pattern.

4.01 let sm_preparePass = replace Dest:WSj, Result:WSi:⟨value⟩ by Pass:WSj:⟨Result:WSi:⟨value⟩, Merge⟩

4.03 let sm_setFlag = replace-one Merge by Flag_Invoke
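As a toy illustration of the one-shot semantics of sm_setFlag, the Java sketch below lets several Merge molecules arrive in the destination sub-solution and shows that only the first one produces the invocation flag. The arrival order and the string encoding of molecules are assumptions made for the example only.

```java
import java.util.ArrayList;
import java.util.List;

public class SimpleMergeSketch {
    public static void main(String[] args) {
        // Molecules arriving asynchronously in the destination sub-solution:
        // each completed source branch contributes one Merge molecule.
        List<String> subSolution = new ArrayList<>();
        boolean ruleConsumed = false;   // sm_setFlag is a one-shot (replace-one) rule

        for (String arrival : List.of("Merge", "Merge", "Merge")) {
            subSolution.add(arrival);
            if (!ruleConsumed && subSolution.remove("Merge")) {
                subSolution.add("Flag_Invoke");   // trigger the service invocation once
                ruleConsumed = true;              // the one-shot rule disappears after reacting
            }
        }
        System.out.println(subSolution); // [Flag_Invoke, Merge, Merge]
    }
}
```

The later Merge molecules simply remain inert, which mirrors the behavior described above: only the first completed branch influences the rest of the execution.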
Note that we omit more complex control flows, such as synchronizing merge, exclusive choice, or discriminator, as they are out of the scope of this section. The
description of the support for a wide range of control flow patterns, as well as its design process, can be found in the research report [23].
On the other hand, even though the design of large workflows could lead to the creation of a large number of rules, a problem inherent to rule-based languages, the HOCL engines themselves include the generic rules needed to execute the workflows described. More precisely, the workflow itself, as defined by the programmer, remains quite small, since, as discussed before, only the names of the rules to be used, and not the rules themselves, need to be included by the programmer. In our work, we consider that error handling is delegated to the HOCL compiler and runtime, which are in charge of validating the definition and executing the rules.
To sum up, the coordination is achieved through a set of autonomic and local reactions taking place within each WS’s sub-solution (or between two WSs’ sub-solutions), providing adequate abstractions for a natural expression of a decentralized coordination for virtually all identified workflow patterns [24].
3.4. Execution Example
To better understand how the coordination between chemical engines works, we here present the execution of the workflow example illustrated in Figure 2, focusing on each step of the coordination logic. These steps are listed in Figures 4 (steps 1-3), 5 (steps 4-7) and 6 (steps 8-10). Recall that, thanks to the higher-order property, reaction rules themselves react with other molecules. As discussed already, the example is composed of four ChWSes applying the parallel split and synchronization patterns. More precisely, the execution is as follows: after ChWS1 completes, it forwards its result to ChWS2 and ChWS3 in parallel. Once ChWS2 and ChWS3 have completed, ChWS4 can start. Recall that each local chemical engine is responsible for the reactions taking place within its sub-solution in the multiset, thus respecting at runtime the decentralization designed. Note that, for the sake of clarity, we only mention the molecules that take part in the logic of the coordination.
The first step (Lines 5.02-5.05) corresponds to the initial state of the multiset, illustrated in Figure 4. Initially, the only possible reaction is inside ChWS1: the invokeServ rule is triggered by the HOCL interpreter of ChWS1, producing the outcome molecule RESULT:ChWS1:⟨val⟩. This molecule represents the result of the invocation of S1. Then, the preparePass rule consumes the molecules DEST:destination and RESULT:ChWS1:⟨val⟩, preparing the parallel split. It therefore produces two new molecules for the distribution of this result to ChWS2 and ChWS3 (Line 6.02). Finally, still within ChWS1, the passInfo rule is triggered, transferring the outcome of ChWS1 to both destinations in parallel.

Once the information is received by ChWS2 and ChWS3, the reactions (Lines 6.09 and 6.11) are triggered in parallel, producing the molecules needed to invoke S2 and S3. Thus, molecules of the form CALL:Si and PARAM:⟨val⟩, contained in ChWS2 and ChWS3 respectively, launch the invokeServ rule (Lines 6.08-6.10), which generates the results of S2 and S3. As in ChWS1, the molecules RESULT:ChWS2:⟨val2⟩ and RESULT:ChWS3:⟨val3⟩ react with the preparePass rule. Finally, in ChWS2 and ChWS3, the passInfo rule propagates the molecule PASS:ChWS4:⟨information⟩ to ChWS4 (Lines 7.03-7.04).

The execution ends with the last steps of Figure 6, processed by ChWS4's local engine. Once the information from ChWS2 and ChWS3 is received by ChWS4, the reaction rule (Line 7.12) can react with the result molecules to produce two new molecules for invoking service S4 (Line 7.18). Finally, the invokeServ rule takes place, producing the final result RESULT:ChWS4:⟨val4⟩.

To sum up, the local engines of the ChWSes are co-responsible for applying workflow patterns, invoking services, and propagating the information to other ChWSes. The coordination is achieved as reactions become possible, in an asynchronous and decentralized manner.
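The following Java sketch replays this coordination logic in a deliberately simplified, sequential way: each ChWS is reduced to its dependencies and destinations, and the invokeServ/preparePass/passInfo steps are applied until global inertia. The data structures and the scheduling loop are our own approximation; the real engines react locally and concurrently, and services are remote Web services rather than printed lines.

```java
import java.util.*;

public class ChemicalCoordinationSketch {
    static class ChWS {
        final String name;
        final List<String> requires;      // Completed:<src> molecules needed before invocation
        final List<String> destinations;  // ChWSes that receive this service's result
        final Set<String> solution = new HashSet<>();
        boolean invoked = false;
        ChWS(String name, List<String> requires, List<String> destinations) {
            this.name = name; this.requires = requires; this.destinations = destinations;
        }
    }

    public static void main(String[] args) {
        Map<String, ChWS> multiset = new LinkedHashMap<>();
        multiset.put("ChWS1", new ChWS("ChWS1", List.of(), List.of("ChWS2", "ChWS3")));
        multiset.put("ChWS2", new ChWS("ChWS2", List.of("Completed:ChWS1"), List.of("ChWS4")));
        multiset.put("ChWS3", new ChWS("ChWS3", List.of("Completed:ChWS1"), List.of("ChWS4")));
        multiset.put("ChWS4", new ChWS("ChWS4",
                List.of("Completed:ChWS2", "Completed:ChWS3"), List.of()));

        boolean reacted = true;
        while (reacted) {                       // apply the rules until global inertia
            reacted = false;
            for (ChWS ws : multiset.values()) {
                // invokeServ: all dependencies satisfied -> invoke the service once.
                if (!ws.invoked && ws.solution.containsAll(ws.requires)) {
                    System.out.println("invoke " + ws.name);
                    ws.invoked = true;
                    ws.solution.add("Result:" + ws.name);
                    reacted = true;
                }
                // preparePass + passInfo: forward the result to every destination.
                if (ws.solution.remove("Result:" + ws.name)) {
                    for (String dest : ws.destinations) {
                        multiset.get(dest).solution.add("Completed:" + ws.name);
                    }
                    reacted = true;
                }
            }
        }
    }
}
```

Running it prints the invocations in the order ChWS1, ChWS2, ChWS3, ChWS4, matching the parallel split after ChWS1 and the synchronization before ChWS4 described above.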
4. Architectures and Implementations
To put into practice and validate the concepts presented before, we have developed an architectural framework (whose main ideas are taken from our initial work in [18]) and three software prototypes exhibiting different levels of decentralization regarding processing and communications. Firstly, we developed a shared-space-based architecture, referred to as HOCL-TS, inspired by [18]. Secondly, and also for the sake of comparison and discussion, two other architectures were developed, namely HOCL-C and HOCL-P2P, fully centralized and fully distributed, respectively. These three architectures all rely on an HOCL-based workflow engine, enhanced with the molecular-composition-based rules for the modelling of service interactions. Let us briefly introduce these architectures. Figure 7 illustrates their relationships.
Figure 4: Workflow execution, steps 1-3.
6.01
6.02 ChWS1: ⟨ passInfo, Pass:ChWS2:⟨Completed:ChWS1:⟨val⟩⟩, Result:ChWS1:⟨val⟩, Pass:ChWS3:⟨Completed:ChWS1:⟨val⟩⟩ ⟩,
6.03 ChWS2: ⟨ Dest:ChWS4, invokeServ, preparePass, passInfo, replace Completed:ChWS1:⟨val⟩ by CALL:S2, PARAM:⟨val⟩ ⟩,
6.04 ChWS3: ⟨ Dest:ChWS4, invokeServ, preparePass, passInfo, replace Completed:ChWS1:⟨val⟩ by CALL:S3, PARAM:⟨val⟩ ⟩,
6.05 ChWS4: ⟨ invokeServ, replace Completed:ChWS2:⟨val2⟩, Completed:ChWS3:⟨val3⟩ by CALL:S4, PARAM:⟨val2⟩ ⟩ ⟩  ↓
6.06
6.07 ChWS1: ⟨ RESULT:ChWS1:⟨val⟩ ⟩,
6.08 ChWS2: ⟨ Dest:ChWS4, invokeServ, preparePass, passInfo, Completed:ChWS1:⟨val⟩, replace Completed:ChWS1:⟨val⟩ by CALL:S2, PARAM:⟨val⟩ ⟩,
6.09 ChWS3: ⟨ Dest:ChWS4, invokeServ, preparePass, passInfo, Completed:ChWS1:⟨val⟩, replace Completed:ChWS1:⟨val⟩ by CALL:S3, PARAM:⟨val⟩ ⟩,
6.10 ChWS4: ⟨ invokeServ, replace Completed:ChWS2:⟨val2⟩, Completed:ChWS3:⟨val3⟩ by CALL:S4, PARAM:⟨val2⟩ ⟩ ⟩  ↓
6.11
6.12 ChWS1: ⟨ RESULT:ChWS1:⟨val⟩ ⟩,
6.13 ChWS2: ⟨ Dest:ChWS4, invokeServ, preparePass, passInfo, CALL:S2, PARAM:⟨val⟩ ⟩,
6.14 ChWS3: ⟨ Dest:ChWS4, invokeServ, preparePass, passInfo, CALL:S3, PARAM:⟨val⟩ ⟩,
6.15 ChWS4: ⟨ invokeServ, replace Completed:ChWS2:⟨val2⟩, Completed:ChWS3:⟨val3⟩ by CALL:S4, PARAM:⟨val2⟩ ⟩ ⟩  ↓
6.16
6.17 ChWS1: ⟨ RESULT:ChWS1:⟨val⟩ ⟩,
6.18 ChWS2: ⟨ Dest:ChWS4, Result:ChWS2:⟨val2⟩, preparePass, passInfo ⟩,
6.19 ChWS3: ⟨ Dest:ChWS4, Result:ChWS3:⟨val3⟩, preparePass, passInfo ⟩,
6.20 ChWS4: ⟨ invokeServ, replace Completed:ChWS2:⟨val2⟩, Completed:ChWS3:⟨val3⟩ by CALL:S4, PARAM:⟨val2⟩ ⟩ ⟩
Figure 5: Workflow execution, steps 4-7.
Figure 6: Workflow execution, steps 7-11.
Their design and implementation will be detailed in Sections 4.1 and 4.2, respectively:
- **HOCL-C.** Centralized, HOCL-C is an architecture composed of a single chemical engine playing a role similar to that of traditional workflow engines.
- **HOCL-TS.** Inspired by the architectural framework proposed in [18], HOCL-TS (for TupleSpace) is composed of a set of distributed chemical engines coordinated through reading and (re)writing the multiset, which now acts as a shared space containing the information about the workflow to be executed. This architecture provides loosely-coupled interactions between services. The execution is now decentralized, while the multiset remains a central means for services to communicate.
- **HOCL-P2P.** Fully decentralized, HOCL-P2P is based on the direct, point-to-point communication of chemical engines, when executing the workflow. The multiset is now distributed on the nodes involved prior to the execution. Note that HOCL-P2P shares some similarities with the work presented in [25].
### 4.1. Architectures
We now detail how an HOCL-based workflow engine can be powered over both centralized and decentralized architectures.
4.1.1. HOCL-C
Following traditional workflow management systems, the coordination can be managed by a single node, referred to as the chemical workflow service, as illustrated by Figure 8. First, notice the S components, which act as interfaces with the actual remote Web services to be called. Then, the multiset contains the chemical workflow definition and its coordination information (as presented before). It is accessed by the chemical engine to perform the required reactions.

4.1.2. HOCL-TS
Distributing the workflow execution means that each service involved will participate in the coordination process. In HOCL-TS, each Web service is chemically encapsulated, to form what we refer to as a Chemical Web Service (ChWS). There are as many ChWSes as there are Web services participating in a service composition. Each ChWS is equipped with a chemical engine and a local copy of a part of the multiset, on which its chemical interpreter will act. The complete multiset, containing the workflow definition and thus all required coordination information, now acts as a space shared by all ChWSes involved in the workflow. In other words, ChWSes communicate by reading and writing it, as illustrated by Figure 9. This architecture follows a loosely coupled interaction model, as ChWSes only keep a physical connection with the shared space, not with the other ChWSes. Note, however, that the communication remains based on a centralized data space, which may become a bottleneck.
4.1.3. HOCL-P2P
Suppressing this central space led to the design of HOCL-P2P, where both computations and communication are fully decentralized. In HOCL-P2P, a set of engines interact to execute a service composition in a peer-to-peer fashion, as proposed before for instance in works such as [25] or [26]. ChWSes now rely only on message passing to coordinate the workflow execution, as illustrated by Figure 10. This communication mechanism involves the participants in a more tightly coupled interaction, as they have to keep a physical reference to other ChWSes they are supposed to interact with. Each ChWS contains one portion of the workflow definition. These portions will be processed by the chemical engines of each ChWS. Consequently, this architecture assumes that the workflow portions are distributed prior to the execution.
4.2. Software Prototypes
In this section, we discuss the actual implementation of three software prototypes, one for each of the previously described architectures. The low layer of our prototypes is an HOCL interpreter based on the on-the-fly compilation of HOCL specifications [27]. The prototypes are fully written in Java.
4.2.1. HOCL-C Prototype
The HOCL-C prototype is illustrated by Figure 11. As mentioned in Section 3.1, the workflow definition is executed as a chemical program by the chemical workflow service. The low layer of the architecture is the HOCL interpreter. Given a workflow specification as input (an HOCL program), it executes the workflow by processing the multiset initially fed with the workflow definition, like any other HOCL program. The interface between the chemical engine and the distant services themselves is realized through the service caller component, which has been implemented with the DAIOS framework [28]. DAIOS provides an abstraction layer allowing dynamic connection to different flavors of services (SOAP or RESTful), abstracting out the target service’s internals. Note that for our purpose, DAIOS was specifically extended to automatically generate dynamic bindings, as well as the correct input and output messages required to realize the interface between
the chemical engines and a Web service. As such, web services can be easily changed by only specifying a web service description file (WSDL).

4.2.2. HOCL-TS Prototype
The HOCL-TS prototype is illustrated on Figure 12. From a software point of view, the main difference between the HOCL-TS and HOCL-C prototypes, beyond the obvious architectural difference, lies in the multiset implementation, as it now represents a shared space playing the role of a communication mechanism and a storage system.
The multiset is initially fed with the HOCL specification of the workflow. As we have detailed in Section 3.1, the workflow definition is comprised of one sub-solution per Web service involved. The information in one sub-solution can only be accessed by the corresponding ChWS. On each ChWS, a simple local storage space acts as a temporary container for the sub-solution to be processed by a local HOCL interpreter. The interface between a ChWS and a concrete Web service is still realized by the service caller based on the DAIOS framework, mentioned earlier.
ChWSes communicate with the multiset using Java Message Service (JMS) publisher/subscriber modules. Concretely, we use ActiveMQ (version 5.4.1), an implementation of the JMS 1.1 specification, which can be embedded in a Java application server. The multiset itself is encapsulated into a JMS server to allow concurrent reading and writing operations. The ChWSes and the multiset use the publish/subscribe messaging model, whereby message producers, called publishers, push each message to each interested party, called a subscriber.
Initially, the multiset, through its JMS publisher (denoted PUB on Figure 12), pushes the content of each WSi sub-solution to the JMS listener (denoted LIS on Figure 12) of the corresponding ChWS. Upon receipt, the content of the ChWSi solution is copied into the local multiset. When the HOCL interpreter of a ChWS detects inertia in its sub-solution, its publisher sends the content back to the multiset's listener.
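The following Java fragment sketches, under simplifying assumptions, how such a ChWS could wire its JMS listener and publisher with ActiveMQ: it subscribes to a topic carrying its own sub-solution and publishes the locally inert sub-solution back to the multiset. The topic names, the textual encoding of the sub-solution, and the reactUntilInertia placeholder are illustrative choices of ours, not the actual interfaces of the HOCL-TS prototype.

```java
import javax.jms.*;
import org.apache.activemq.ActiveMQConnectionFactory;

public class ChWSMessaging {
    public static void main(String[] args) throws JMSException {
        ConnectionFactory factory = new ActiveMQConnectionFactory("tcp://localhost:61616");
        Connection connection = factory.createConnection();
        connection.start();
        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);

        // The multiset pushes the content of the ChWS2 sub-solution on this topic.
        Topic inbound = session.createTopic("multiset.out.ChWS2");
        // The ChWS pushes its locally inert sub-solution back on this topic.
        Topic outbound = session.createTopic("multiset.in");
        MessageProducer publisher = session.createProducer(outbound);

        MessageConsumer listener = session.createConsumer(inbound);
        listener.setMessageListener(message -> {
            try {
                String subSolution = ((TextMessage) message).getText();
                // Hand the sub-solution to the local chemical engine (placeholder call),
                // let it react until inertia, then send the result back to the multiset.
                String inertContent = reactUntilInertia(subSolution);
                publisher.send(session.createTextMessage(inertContent));
            } catch (JMSException e) {
                e.printStackTrace();
            }
        });
    }

    // Placeholder for the local chemical engine: in the real prototype this is
    // the HOCL interpreter processing the local copy of the sub-solution.
    private static String reactUntilInertia(String subSolution) {
        return subSolution;
    }
}
```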
4.2.3. HOCL-P2P Prototype
The HOCL-P2P prototype, illustrated by Figure 13, can be seen as a static interconnection of HOCL-C prototypes, which are now Chemical Web Services (ChWSes), each one corresponding to a service involved in the workflow. The workflow definition, which is comprised of one sub-solution per Web service, is now dispatched to each ChWS at build time, statically informing each Web service about which other Web services it has to communicate with.
Thus, ChWSes still communicate among themselves through Java Message Service (JMS) publishers/subscribers, but without the need for the multiset to act as a shared space. A JMS server is included in each ChWS. When one node detects local inertia, its JMS publisher sends the outcome directly to the JMS listeners of its successors in the workflow. DAIOS is again used to implement the service caller.
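In this peer-to-peer variant, the same JMS machinery could be used without a shared multiset: each ChWS embeds a broker and, on local inertia, sends its outcome straight to a queue of its successor. The sketch below is again only a hedged illustration; the broker port, queue name, peer host name, and the message payload are assumptions, not the prototype's actual configuration.

```java
import javax.jms.*;
import org.apache.activemq.ActiveMQConnectionFactory;
import org.apache.activemq.broker.BrokerService;

public class ChWSPeer {
    public static void main(String[] args) throws Exception {
        // Each ChWS embeds its own broker so that other peers can connect to it directly.
        BrokerService broker = new BrokerService();
        broker.setPersistent(false);
        broker.addConnector("tcp://0.0.0.0:61617");
        broker.start();

        // On local inertia, push the outcome directly to a successor ChWS (hypothetical host).
        ConnectionFactory factory = new ActiveMQConnectionFactory("tcp://chws4.example.org:61617");
        Connection connection = factory.createConnection();
        Session session = connection.createSession(false, Session.AUTO_ACKNOWLEDGE);
        MessageProducer producer = session.createProducer(session.createQueue("ChWS4.in"));
        producer.send(session.createTextMessage("Completed:ChWS2:<val2>"));
        connection.close();
    }
}
```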
5. Experimentations
This section explores the viability and shows the benefits of using a chemistry-inspired system for service coordination. For the sake of validation, a series of experiments were conducted on our chemistry-inspired workflow system with the following objectives in mind: (i) capture the behavior of our approach when processing different types of workflows; (ii) evaluate the benefits of a decentralized coordination compared to a centralized one when
modelling and executing different workflow structures; (iii) establish the viability of a chemistry-based workflow management system in comparison with what appear to be the most mature workflow management systems (WMSes).
In the following, we present and analyse our experimental results. Five engines have been used: Taverna Workbench 2.2.0 [2], Kepler 2.0 [3], HOCL-C, HOCL-TS and HOCL-P2P. Note that we consider Taverna and Kepler as validated references against which to assess our objectives.
5.1. Workflow-based Applications
Three scientific workflows have been used. Illustrated by Figure 14 (left), BlastReport is a home-built bioinformatics workflow which retrieves a blast report of a protein in a database given its protein ID. The second one, CardiacAnalysis, illustrated on Figure 14 (right), is a cardiovascular image analysis workflow which extracts the heart’s anatomy from a series of image sequences by applying image processing algorithms, developed by the CREATIS-LRMN biomedical laboratory\(^2\). The third one, illustrated by Figure 15 is the well known Montage\(^3\), a classic astronomical image mosaic workflow processing large images of the sky.\(^4\)
In order to transform these applications into chemical workflow definitions, we first analyzed their code, exposing their building functions or executables as Web services, which are part of the service composition. Finally, we composed those services based on their control and data dependencies to obtain the final outcome. For instance, the CardiacAnalysis application relies, at a given point of its execution, on an executable script called Image_Pyramid_Decomposition. To construct the CardiacAnalysis workflow, this executable was installed as a Web service named pyramideDecom and composed with the other services, as shown in Figure 14 (right). Please refer to [22] for more details.
\(^2\)http://www.creatis.insa-lyon.fr/site/

\(^3\)http://montage.ipac.caltech.edu/

\(^4\)The workflow definitions used for each WMS are available at https://www.irisa.fr/myriads/members/hfernand/hocl/workflows and http://www.myexperiment.org/workflows/2058.html.

These three workflows present different characteristics related to (i) the number of services involved, (ii) the amount of data exchanged and (iii) the complexity of the coordination required (data processing included, such as iterations of lists of objects). We attempt to characterize these workflows as follows:
- The *BlastReport* workflow includes 5 services, and presents a medium level of data exchange (simple objects, lists) and low coordination overhead – it is composed mostly of sequences.
- The *CardiacAnalysis* workflow includes 6 services, presenting a high amount of data exchange (complex objects, lists) and a high coordination overhead (synchronizations, loop iteration, parallelism). This overhead does not appear on Figure 14 (right); it is due to the re-entrant nature of the services. For each workflow instance, multiple task instances are created from the *interpolation* service to the *borderDetection* and *gradient* services (lists of lists of elements to be processed). Some services produce lists of objects that need to be extracted one by one by iterators and transferred to the next service asynchronously.
- The *Montage* workflow includes 27 services, and exhibits a low amount of data exchange (simple objects) and medium coordination overhead (parallelism and synchronization patterns).
In these workflows, each service invocation was deployed in a different ChWS to evaluate the decentralized coordination in the *HOCL-TS* and *HOCL-P2P* prototypes.
5.2. Centralized Experiments
The workflows were first run using Taverna, Kepler, and HOCL-C, on a local machine equipped with an Intel Core 2 Duo T9600 2.8 GHz processor and 4 GB of memory. Figures 16, 17 and 18 present the results. In Figure 16, a first encouraging result is that the execution times for the Montage workflow (i.e., a workflow with limited data exchange and coordination overhead) on Kepler, Taverna and HOCL-C are quite similar, and even slightly reduced on the HOCL-C WMS.
For the *BlastReport* workflow on Figure 17, while results are again similar for the different WMSes, HOCL-C takes a little more time. This can be explained by the increased size of the multiset for the *BlastReport* workflow (in terms of number of molecules). However, in terms of ratio, execution times remain very close among the *HOCL-* prototypes.
Finally, we can see in Figure 18 the increased coordination overhead of the *CardiacAnalysis* workflow. As mentioned before, this workflow relies on a lot of data processing related to the coordination itself, which, in the case
of HOCL-C, results in a significant increase of the size and processing time of the multiset. Also, no support for parallel execution has been implemented in the HOCL interpreter. These two optimization aspects will be investigated in the future.
5.3. Decentralized Experiments
The workflows were also executed with the HOCL-TS and HOCL-P2P prototypes. The experiments were conducted on the Grid'5000 platform [29], specifically on the adonis and edel clusters, located in Grenoble, each node being equipped with two quad-core Intel Xeon E5520 processors, 24 GB of RAM and a 40 Gbit/s InfiniBand interconnect. We now focus on the two rightmost bars of Figures 16, 17 and 18.
A first observation is the performance degradation of HOCL-TS and HOCL-P2P on the Montage workflow, as illustrated on Figure 16. Even though the coordination is executed locally on each ChWS (here the coordination is shared among 27 services in both designs), the time lost to network latency when coordinating the chemical nodes is higher than the workload incurred by HOCL-C to coordinate the involved services. We can also notice that HOCL-TS performs slightly better than HOCL-P2P. This shows that some nodes in HOCL-P2P can become bottlenecks, for instance when performing synchronization operations. In this case, in HOCL-P2P, when the number of incoming branches increases for a node, its workload can become important. On the contrary, with HOCL-TS, such a load is distributed between this node and the multiset.
On BlastReport, a performance gain over HOCL-C is obtained with HOCL-TS and HOCL-P2P, thanks to the distribution of the coordination over the 5 services involved, as shown by Figure 17. The BlastReport workflow starts to show the benefits of using decentralized prototypes: an increase in the amount of data exchanged and in the coordination workload causes some degradation with centralized architectures. The decentralized prototypes present an acceptable performance in comparison with Kepler and Taverna, as depicted in Figure 17. For this workflow, HOCL-TS and HOCL-P2P have similar performance. (There are no synchronization structures.)
For the CardiacAnalysis workflow, a considerable performance gain is again obtained using HOCL-TS and HOCL-P2P, demonstrating the benefits of a decentralized workflow execution when workflows present a high coordination overhead like CardiacAnalysis, which is considered a computation- and data-intensive workflow, as depicted in Figure 18. Exploiting the processing resources of each ChWS, the list handling and adaptation tasks are managed separately by each ChWS. Therefore, the time lost to network latency is now compensated by the reduced workload of a central engine. As for BlastReport, HOCL-TS and HOCL-P2P perform identically due to the absence of synchronization patterns in CardiacAnalysis.
5.4. Discussion
This series of experiments leads to several conclusions. They constitute a proof of the viability of a chemistry-based workflow engine, as for some representative workflows, its performance is similar to, and sometimes better than, that of Kepler and Taverna, which are broadly considered the de facto standards.
Nevertheless, network latency comes up as a limitation for decentralized workflow engines when processing workflows such as Montage. Its reduced computational load and low rate of data exchange make the coordination time in a decentralized architecture higher than in a centralized engine, due to the communications (network latency). Even though the workflow execution time is affected by the network latency, the decentralized workflow systems are highly competitive when processing large workflows, as detailed in our previous work [30].
These experiments also show how HOCL-TS can perform slightly better than a fully decentralized architecture such as HOCL-P2P, even if HOCL-TS uses a central shared space as a communication mechanism. This should be further investigated. To deal with the decentralization of the multiset itself,
and build a fully decentralized solution with loosely-coupled interactions, some solutions based on peer-to-peer protocols, able to distribute and retrieve objects (here, workflow molecules) at large scale [31], are being proposed. One of the next steps of this work is to build the HOCL-TS environment on top of such approaches in order to remove the potential bottleneck, and thus propose a fully decentralized workflow engine.
6. Related Works
This section gives a more detailed comparison of our approach with some close recent works. We distinguish two approaches to distributed coordination. In the first one, nodes interact directly. In the second one, they use a shared space for coordination.
Earlier works proposed decentralized architectures where nodes achieve the coordination of a workflow through the exchange of messages [32, 33]. Recently, some works [34, 25, 26] have shown the increasing interest in this type of coordination mechanism. In [34], the authors introduce service invocation triggers, a lightweight infrastructure that routes messages directly from a producing service to a consuming one, where each service invocation trigger corresponds to the invocation of a service. In [25], an engine is proposed based on a peer-to-peer architecture wherein nodes (similar to local engines) are distributed across multiple computer systems. These nodes collaborate in order to execute a workflow, with every node executing a part of it. Lately, a continuation-passing style, where information on the remainder of the execution is carried in messages, has been proposed [26]. Nodes interpret such messages and thus conduct the execution of services without consulting a centralized engine. However, this coordination mechanism implies a tight coupling of services in terms of spatial and temporal composition. Nodes need to know explicitly which other nodes they will potentially interact with, and when, in order to be active at the same time. Likewise, a distributed workflow system based on mobile libraries playing the role of engines was presented in [35]. The authors, however, do not give much detail about the coordination itself, nor about where the data and control dependencies are located.
Our work deals with the information exchange among ChWSes by writing and reading the multiset, which acts as a space shared by all ChWSes. The communication can then be completely asynchronous, since the multiset guarantees the persistence of data and control dependencies. This gives an increased loose coupling to our proposal, making it able to deal with dynamic changes in the workflow itself (although this was not the scope of this paper).
Following this second method of distributed coordination, a series of works proposed to rely on a shared space, more specifically a tuplespace [36, 37, 38], as a mechanism to exchange information between the nodes of a decentralized architecture. This idea was initially used in the Linda language [39].
A tuplespace works as a piece of memory shared by all interacting parties. Thus, when using a tuplespace for coordination, the execution of a part of a workflow within each node is triggered when tuples matching the templates registered by the respective nodes are present in the tuplespace. In the same vein, works such as [40] propose a distributed architecture based on Linda where distributed tuplespaces store data and programs as tuples, allowing mobile computations by transferring programs from one tuplespace to another. However, the chemical paradigm allows an increased abstraction level while providing support for dynamics.
Using a tuplespace for the execution of workflows, works such as [36], [37] and [38] replace a centralized BPEL engine by a set of distributed, loosely coupled, cooperating nodes. In [36] and [37], the authors present a coordination mechanism where the data is managed using a tuplespace and the control is driven by asynchronous messages exchanged between nodes. This message exchange pattern for the control is derived from a Petri net expression of the workflow. In [37], the workflow definition is transformed into a set of activities, that are distributed by passing tokens in the Petri net. However, while in these works, the tuplespace is only used to store data information, our coordination mechanism stores both control and data information in the multiset, which is made possible by the use of the chemical execution model for the coordination of all data and control dependencies.
The recent work in [38] uses a shared tuplespace as a communication infrastructure through which processes exchange control and data dependencies, making the different nodes interact with each other. The authors transform a centralized BPEL definition into a set of coordinated processes using the tuplespace as a communication space. In contrast, the use of BPEL as the coordination language hinders the expression of dynamic and self-adaptive behaviors.
7. Conclusion
Scientific applications are more and more built as workflows of services. Workflow management systems have recently gained a lot of attention in this context. However, the emergence of new distributed platforms, where elasticity and dynamic adaptation are strong requirements, led to a high demand for new models able to represent both workflows and platforms, as well as their inherent characteristics.
The chemical model is a promising paradigm naturally capturing parallelism, distribution and dynamics. While its advantages are now well-established, this model still suffers from a lack of proof of concepts and actual deployments.
In this paper, we have proposed concepts and software prototypes for a family of chemistry-inspired workflow management systems. A workflow description language and its execution model inspired by such abstractions are discussed. The wide expressiveness (data flows, control flows, natural decentralization) of the paradigm is highlighted. Then, its implementation based on the HOCL language, for both centralized and decentralized environments, is given. Finally, the experiments conducted show the viability of the concept, lifting a barrier on the path to its actual adoption.
References
[34] W. Binder, I. Constantinescu, B. Faltings, Decentralized orchestration of composite web services, in: Proceedings of the IEEE International
S-Store: Streaming Meets Transaction Processing
John Meehan, Nesime Tatbul, Stan Zdonik, Cansu Aslantas, Ugur Cetintemel, Jiang Du, Tim Kraska, Samuel Madden, David Maier, Andrew Pavlo, Michael Stonebraker, Kristin Tufte, Hao Wang
Brown University, Intel Labs, MIT, University of Toronto, Portland State University, CMU
ABSTRACT
Stream processing addresses the needs of real-time applications. Transaction processing addresses the coordination and safety of short atomic computations. Heretofore, these two modes of operation existed in separate, stove-piped systems. In this work, we attempt to fuse the two computational paradigms in a single system called S-Store. In this way, S-Store can simultaneously accommodate OLTP and streaming applications. We present a simple transaction model for streams that integrates seamlessly with a traditional OLTP system, and provides both ACID and stream-oriented guarantees. We chose to build S-Store as an extension of H-Store - an open-source, in-memory, distributed OLTP database system. By implementing S-Store in this way, we can make use of the transaction processing facilities that H-Store already provides, and we can concentrate on the additional features that are needed to support streaming. Similar implementations could be done using other main-memory OLTP platforms. We show that we can actually achieve higher throughput for streaming workloads in S-Store than an equivalent deployment in H-Store alone. We also show how this can be achieved within H-Store with the addition of a modest amount of new functionality. Furthermore, we compare S-Store to two state-of-the-art streaming systems, Esper and Apache Storm, and show how S-Store can sometimes exceed their performance while at the same time providing stronger correctness guarantees.
1. INTRODUCTION
A decade ago, the database research community focused attention on stream data processing systems. These systems [10, 16], including our own system, Aurora/Borealis [7, 8], were largely concerned with executing SQL-like operators on an unbounded and continuous stream of input data. The main optimization goal of these systems was reducing the latency of results, since they mainly addressed what might be called monitoring applications, which were open to potential inconsistencies and offered only weak guarantees for isolation and recovery. These first-generation streaming systems could be viewed as real-time analytics systems. After all, the input was made up of an infinite stream of new tuples. The notion of some of these tuples representing updates of previously viewed tuples (or causing updates to other stored data that is related) was not made explicit in the model. This is fine if time is the key. In this case, if each tuple is given a unique timestamp, the update pattern is append-only. However, there are cases when the identifying attribute is something else. Consider a stock ticker application in which stock symbol is the key. Here a new tuple for, say, IBM is really an update to the previously reported price. Traders want to see the current stock book as a consistent view of the 6000 stocks on the NYSE, with all prices reported in a consistent way. Thus, these applications introduce the need for shared mutable state in streaming systems.
We are beginning to see the rise of second-generation streaming systems [1, 2, 9, 32, 33, 37, 40]. These systems do not enforce a relational view on their users. Instead, they allow users to create their own operators that are invoked and managed by a common infrastructure. Note that it is reasonable to have libraries of common operators (including relational) that manipulate tables. The infrastructure enforces some model of failure semantics (e.g., at-least-once or exactly-once processing), but still ignores needs of proper isolation and consistent storage in the context of updates.
Meanwhile, the advent of inexpensive, high-density RAM has led to a new generation of distributed on-line transaction processing (OLTP) systems that store their data in main memory, thereby enabling very high throughput with ACID guarantees for workloads with shared mutable state (e.g., [6, 18, 29]). However, these systems lack the notion of stream-based processing (e.g., unbounded data, push-based data arrival, ordered processing, windowing).
Many applications that involve shared mutable state in fact need aspects of both streaming and transaction processing. In this paper, we propose to combine these two computational paradigms in a single system called S-Store.
1.1 Example Use Cases
Applications that benefit from this kind of hybrid system generally include those that use the streaming facilities to record persistent state or views in shared tables (in near real-time), and at the same time use the transactional facilities to ensure a consistent representation or summary of this state (e.g., dashboards or leaderboards [14]). We now describe two selected use cases as examples.
Real-Time Data Ingestion. An analytics warehouse must be updated periodically with recent activity. It was once the case that this was done once a day (typically at night) when there was little
to no load on the system. Nowadays, systems must be available at all times and the latency window for loading new data is quickly shrinking. Also, new data must be added to the warehouse in a consistent fashion (e.g., groups of updates must be added atomically) [23]. This suggests that a transaction mechanism is needed. Even more interesting is the fact that incoming data is typically in different formats and is often dirty. ETL tools can address some of the problems of data cleaning and integration, but they work with files of bulk updates. This is slow and cumbersome, and cannot load the warehouse in near real time. Thus, there is a need for something similar to ETL that instead works on streaming data. S-Store is well-positioned to satisfy this need, and in fact is already being used for this purpose in the BigDAWG system [19].
**Shared Mutable State.** S-Store is useful beyond real-time ETL, as illustrated in the example depicted in Figure 1. In the figure, rectangles represent transactions; oil drums represent stored, shared data; skinny arrows represent reads and writes of stored data; and block arrows represent streams. This example is based on customer experience at TIBCO StreamBase, Inc [4]. It is a simplified version of intelligent order routing with FIX (Financial Information Exchange) data.
Notice that FIX data arrives on a stream and is processed by a transaction (Check and Debit Order Amount) that checks the buyer’s account balance and puts a temporary hold on the funds involved in that transaction in the Buying Power database. When this is successful, the Venue Selection transaction determines to which exchange the order is to be sent. This can be a complex process that involves checking, e.g., the history of a particular exchange with the given security, and may involve retrieving data from other databases not shown in the figure. Thus, it is modeled as a separate transaction so that the Buying Power database is available to other transactions, before the Venue Selection transaction is complete.
Also, Venue Selection requires isolation, since it has to make its decision based on a consistent state (e.g., there may be other, independent OLTP transactions accessing the Customer Orders database as shown in the figure). The bold red arrow that connects these two transactions expresses a dependency between them which requires that for a particular FIX input, Check and Debit Order Amount must precede Venue Selection. This illustrates the need for transaction ordering. Moreover, when Check and Debit Order Amount commits, Venue Selection needs to be triggered (push-based processing). At the bottom of the figure, the Update Order transaction takes input from the exchanges, and confirms or denies previously placed orders. In the case of a failed order, it will return funds to the customer's account. This can obviously conflict with new orders from the same customer. Thus, Check and Debit Order Amount and Update Order must both be transactions to guarantee consistency through isolation.
If we used only a pure stream processing system to implement this use case, we would be able to ensure ordering and push-based processing. However, the isolation requirements of the application would not be expressible. If we used a pure OLTP DBMS instead, we would be able to ensure isolation, but would be unable to take advantage of push-based processing. Transaction ordering would need to be managed at the client, requiring unnecessary context switches and a need to poll the interface for new data. Today, use cases like this are implemented with in-memory data structures, careful custom coding, and recovery based on replaying message logs. We believe that a platform like S-Store reduces user code complexity.
### 1.2 Contributions and Outline
This paper introduces the design and implementation of S-Store, a single system for processing streams and transactions with well-defined correctness guarantees. Our approach to building such a system is to start with a fully transactional OLTP main-memory database system and to integrate additional streaming functionality. By doing so, we are able to leverage infrastructure that already addresses many of the implementation complexities of transaction processing. This choice is very natural, since streaming systems largely run in main memory to achieve low latency. More specifically, this work makes the following key contributions:
**Model.** We define a novel, general-purpose computational model that allows us to seamlessly mix streaming transactions with ordinary OLTP transactions. Stream processing adds additional semantics to an OLTP engine’s operating model. In particular, stream processing introduces the notion of order to the transaction mix. That is, it is possible to say that one transaction must precede another, something that is missing from the non-deterministic semantics of a standard transaction model. Further, since streams are unbounded and arrive on a continuous basis, there is a need to add the necessary primitives for bounding computation on streams, such as batch-based processing [10, 27] and windowing [12, 24]. Finally, streaming transactions support a push-based processing model, whereas OLTP transactions access state in a pull-based manner. Our hybrid model provides uniform access to state for all transactions.
**Architecture and Implementation.** We show how our hybrid computational model can be cleanly and efficiently implemented on top of a state-of-the-art main-memory OLTP engine (H-Store [29]). Our architectural extensions are general enough to be applied to any main-memory OLTP engine, and include: (i) streams and windows represented as time-varying state, (ii) triggers to enable push-based processing over such state, (iii) a streaming scheduler that ensures correct transaction ordering, and (iv) a variant on H-Store’s recovery scheme that ensures exactly-once processing for streams. Note that the discussion in this paper is confined to the single-node case; multi-node S-Store is the topic for follow-on research.
**Performance.** We provide a detailed study of S-Store’s performance characteristics, specifically the benefits of integrating transactional state processing with push-based processing. For streaming workloads that require transactional state, S-Store demonstrates improved throughput over both pure OLTP systems and pure streaming systems. In both cases, the advantage is a direct result of integrating state and processing, removing blocking during communication between the dataflow manager and the data-storage engine.
The rest of this paper is organized as follows: We first describe our computational model for transactional stream processing in Section 2. Section 3 presents the design and implementation of the S-Store system, which realizes this model on top of the H-Store main-memory OLTP system [29]. In Section 4, we present an experimental evaluation of S-Store in comparison to H-Store, as well as
to two representative stream processing systems - Esper [3] (first generation) and Storm [37] (second generation). We discuss related work in Section 5, and finally conclude the paper with a summary and a discussion of future research directions in Section 6.
2. THE COMPUTATIONAL MODEL
In this section, we describe our computational model for transactional stream processing. This model allows us to support hybrid workloads (i.e., independent OLTP transactions and streaming transactions) with well-defined correctness guarantees. As we will discuss in more detail shortly, these guarantees include:
1. **ACID** guarantees for individual transactions (both OLTP and streaming)
2. **Ordered Execution** guarantees for dataflow graphs of streaming transactions
3. **Exactly-Once Processing** guarantees for streams (i.e., no loss or duplication)
S-Store acquires ACID guarantees from the traditional OLTP model (Sections 2.1 and 2.2), and adds ordered execution guarantees to capture stream-based processing semantics (Sections 2.3 and 2.4) and exactly-once processing guarantees for correctly recovering from failures (Section 2.5).
2.1 Overview
Our model adopts well-accepted notions of OLTP and stream processing, and fuses them into one coherent model. We assume that the reader is already familiar with the traditional notions, and strive to keep our model description brief and informal for them.
We assume that both OLTP and streaming transactions can share state and at the same time produce correct results. S-Store supports three different kinds of state: (i) public tables, (ii) windows, and (iii) streams. Furthermore, we make a distinction between OLTP transactions, which only access public tables, and streaming transactions, which can additionally access windows and streams. In our model, the notion of a "batch" of tuples in a stream forms an important basis for transaction atomicity. A streaming transaction essentially operates over non-overlapping "atomic batches" of tuples from its input streams. Thus, an atomic batch corresponds to a finite, contiguous subsequence of a stream that must be processed as an indivisible unit. Atomic batches for input streams must be defined by the application programmer, and can be based on timestamps (like in [10, 40]) or tuple counts.
**Processing Model.** Stream processing systems commonly define computations over streams as dataflow graphs. Early streaming systems focused on relational-style operators as computations (e.g., Filter, Join), whereas current systems support more general user-defined computations [1, 2, 9, 32, 33, 37, 40]. Following this trend and consistent with our OLTP model, we assume that computations over streams are expressed as dataflow graphs of user-defined stored procedures. More formally, a *dataflow graph* is a directed acyclic graph (DAG), in which nodes represent streaming transactions (defined as stored procedures) or nested transactions (described in Section 2.4), and edges represent an execution ordering. If there is an edge between node $T_i$ and node $T_j$, there is also a stream that is output for $T_i$ and input for $T_j$. We say that $T_i$ precedes $T_j$ and is denoted as $T_i \prec T_j$.
Furthermore, given the unbounded nature of streams, all stream processing systems support windowing as a means to restrict state and computation for stateful operations (e.g., Join, Aggregate). A *window* is a finite, contiguous subsequence of a stream. Windows can be defined in many different ways [12, 24], but for the purposes of this work, we will restrict our focus to the most common type: sliding windows. A sliding window is a window which has a fixed size and a fixed slide, where the slide specifies the distance between two consecutive windows and must be less than or equal to the window size (if equal to window size, it has been called a *tumbling window*). A sliding window is said to be *time-based* if its size and slide are defined in terms of tuple timestamps, and *tuple-based* if its size and slide are defined in terms of the number of tuples. Note that atomic batches and tumbling windows are similar in definition, but their use is orthogonal: batches are external to a streaming transaction $T$ and are mainly used to set atomic boundaries for $T$’s instances, whereas windows are internal to $T$ and are used to bound computations defined inside $T$.
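To make the size/slide semantics concrete, the following is a minimal, illustrative Java sketch of a tuple-based sliding window (class and method names are our own, not part of S-Store's API): incoming tuples are buffered until a full slide's worth has arrived, at which point the oldest tuples expire and the window advances.

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Tuple-based sliding window: fixed size, fixed slide (slide <= size).
// When slide == size, this behaves as a tumbling window.
public class SlidingWindow<T> {
    private final int size;
    private final int slide;
    private final Deque<T> active = new ArrayDeque<>();   // tuples visible to window queries
    private final Deque<T> staged = new ArrayDeque<>();   // arrived, but not yet visible

    public SlidingWindow(int size, int slide) {
        if (slide > size) throw new IllegalArgumentException("slide must be <= size");
        this.size = size;
        this.slide = slide;
    }

    // Insert one tuple; returns the new full window contents if the window slid,
    // or null if no complete window is available yet.
    public List<T> insert(T tuple) {
        staged.add(tuple);
        boolean firstWindow = active.isEmpty() && staged.size() == size;
        boolean laterWindow = !active.isEmpty() && staged.size() == slide;
        if (!firstWindow && !laterWindow) return null;
        // Expire the oldest slide-worth of tuples, then activate the staged ones.
        for (int i = 0; i < slide && active.size() > size - slide; i++) active.removeFirst();
        while (!staged.isEmpty()) active.addLast(staged.removeFirst());
        return new ArrayList<>(active);   // snapshot of the current window
    }
}
```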
Atomic batches of tuples arrive on a stream at the input to a dataflow graph from push-based data sources. We adopt the data-driven execution model of streams, where arrival of a new atomic batch causes a new invocation for all the streaming transactions that are defined over the corresponding stream. We refer to execution of each such transaction invocation as a *transaction execution* (TE). (In the rest of this paper, we use the terms “transaction” and “stored procedure” interchangeably to refer to the definition of a
transaction, whereas we use the term "transaction execution" (TE) to refer to a specific invocation of that definition.) A TE essentially corresponds to an atomic batch and its subsequent processing by a stored procedure. For example, in Figure 2, a dataflow graph with two stored procedures (i.e., $T_1$ and $T_2$) is defined above the dashed line, labeled "Definition", but each of those is executed twice for two contiguous atomic batches on their respective input streams (i.e., $s_1.b_1, s_1.b_2$ for $T_1$, and $s_2.b_1, s_2.b_2$ for $T_2$), yielding a total of four TE's shown below the dashed line, labeled "Execution" (i.e., $T_{1,1}(s_1.b_1, w_1)$, $T_{1,2}(s_1.b_2, w_1)$, $T_{2,1}(s_2.b_1)$, $T_{2,2}(s_2.b_2)$). Note, $s_1.b_2$ denotes the second batch on stream $s_1$ and $T_{1,2}$ denotes the second execution of $T_1$ on that batch.
Given a dataflow graph, it is also useful to distinguish between border transactions (those that ingest streams from the outside, e.g., $T_1$ in Figure 2) and interior transactions (others, e.g., $T_2$ in Figure 2). Border transactions are instantiated by each newly arriving atomic batch (e.g., $s_1.b_1, s_1.b_2$), and each such execution may produce a group of output stream tuples labeled with the same batch-id as the input that produced them (e.g., $s_2.b_1, s_2.b_2$, respectively). These output tuples become the atomic batch for the immediately downstream interior transactions, and so on.
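As an illustration of how batch-ids travel through a dataflow graph, here is a hedged Java sketch (all names invented): a border transaction consumes an externally supplied atomic batch and emits an output batch carrying the same batch-id, which in turn drives the interior transaction.

```java
import java.util.ArrayList;
import java.util.List;
import java.util.function.BiFunction;

// Illustrative only: shows how a batch-id travels from a border transaction
// to the interior transactions downstream of it (names are ours, not S-Store API).
public class BatchPropagation {

    record Batch(int batchId, List<String> tuples) {}

    // A "streaming transaction": consumes one atomic batch and emits one output
    // batch labeled with the same batch-id as the input that produced it.
    static Batch execute(String name, Batch in, BiFunction<String, String, String> op) {
        List<String> out = new ArrayList<>();
        for (String t : in.tuples()) out.add(op.apply(name, t));
        System.out.printf("TE %s.%d processed %d tuples%n", name, in.batchId(), in.tuples().size());
        return new Batch(in.batchId(), out);   // same batch-id flows downstream
    }

    public static void main(String[] args) {
        // Two atomic batches arriving on stream s1 from an external source.
        Batch s1b1 = new Batch(1, List.of("a", "b"));
        Batch s1b2 = new Batch(2, List.of("c"));

        for (Batch input : List.of(s1b1, s1b2)) {
            Batch s2 = execute("T1", input, (n, t) -> n + "(" + t + ")"); // border transaction
            Batch s3 = execute("T2", s2, (n, t) -> n + "(" + t + ")");    // interior transaction
            System.out.println("final output batch " + s3.batchId() + ": " + s3.tuples());
        }
    }
}
```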
Figure 2 also illustrates the different kinds of state accessed and shared by different transaction instances (shown below the dashed line, labeled “State”). $T_1$ takes as input the stream $s_1$ and the window $w_1$, and produces as output the stream $s_2$, whereas $T_2$ takes as input the stream $s_2$ and produces as output the stream $s_3$. Thus, TE’s of $T_1$ (i.e., $T_{1,1}$ and $T_{1,2}$) share access to $s_1$, $w_1$, and $s_2$, whereas TE’s of $T_2$ (i.e., $T_{2,1}$ and $T_{2,2}$) do so for $s_2$ and $s_3$. Note, there are two ways to output final results of a dataflow graph (e.g., $s_3$ in Figure 2); (i) write them to a public table, or (ii) push them to a sink outside the system (e.g., a TCP connection).
In order to ensure a correct execution, shared state accesses must be properly coordinated. We discuss this issue in more detail next.
### 2.3 Correct Execution for Dataflow Graphs
A standard OLTP transaction mechanism guarantees the isolation of a transaction’s operations from others’. When a transaction $T$ commits successfully, all of $T$’s writes are installed and made public. During $T$’s execution, all of $T$’s writes remain private.
S-Store adopts such standard transaction semantics as a basic building block for its streaming transactions (thus ensuring ACID guarantees in this way); however, the ordering of stored procedures in the dataflow graph as well as the inherent order in streaming data puts additional constraints on allowable transaction execution orders. As an example, consider again the dataflow graph shown in Figure 2. The four TE’s illustrated in this example can be ordered in one of two possible ways: $[T_{1,1}, T_{2,1}, T_{1,2}, T_{2,2}]$ or $[T_{1,1}, T_{1,2}, T_{2,1}, T_{2,2}]$. Any other orderings would not lead to a correct execution. This is due to the precedence relation between $T_1$ and $T_2$ in the graph as well as the ordering of the atomic batches on their input streams. This requirement is in contrast to most OLTP transaction processors which would accept any serializable schedule (e.g., one that is equivalent to any of the 4! possible serial execution schedules if these were 4 independent transactions).
Note that we make no ACID claims for the dataflow graph as a whole. The result of running a dataflow graph is to create an ordered execution of ACID transactions.
Furthermore, in streaming applications, the state of a window must be shared differently than other stored state. To understand this, consider again the simple dataflow graph shown in Figure 2. Let us assume for simplicity that the transaction input batch size for $T_1$ is 1 tuple. Further, suppose that $T_1$ constructs a window of size 2 that slides by 1 tuple, i.e., two consecutive windows in $T_1$ overlap by 1 tuple. This means that window state will carry over from $T_{1,1}$ to $T_{1,2}$. For correct behavior, this window state must not be publicly shared with other transaction executions. That is, the state of a window can be shared among consecutive executions of a given transaction, but should not be made public beyond that. Returning to Figure 2, when $T_{1,1}$ commits, the window in $T_{1,1}$ will slide by one and will then become available to $T_{1,2}$, but not to $T_{2,1}$.
This approach to window visibility is necessary, since it is this way of sharing window state that is the basis for continuous operation. Windows evolve and, in some sense, “belong” to a particular stored procedure. Thus, a window’s visibility should be restricted to the transaction executions of its “owning” stored procedure.
We will now describe what constitutes a correct execution for a dataflow graph of streaming transactions more formally. Consider a dataflow graph $D$ of $n$ streaming transactions $T_i, 1 \leq i \leq n$. $D$ is a directed acyclic graph $G = (V, E)$, where $V = \{ T_1, \ldots, T_n \}$ and $E \subseteq V \times V$, where $(T_i, T_j) \in E$ means that $T_i$ must precede $T_j$ (denoted as $T_i < T_j$). Being a DAG, $G$ has at least one topological ordering. A topological ordering of $G$ is an ordering of its nodes $T_i \in V$ such that for every edge $(T_i, T_j) \in E$ we have $i < j$. Each topological ordering of $G$ is essentially some permutation of $V$.
Without loss of generality: (i) Let us focus on one specific topological ordering of $G$ and call it $O$; (ii) For ease of notation, let us simply assume that $O$ corresponds to the identity permutation such that it represents: $T_1 < T_2 < \ldots < T_n$.
$T_i$ represents a transaction definition $T_i(s_i, w_i, p_i)$, where $s_i$ denotes all stream inputs of $T_i$ (at least one), $w_i$ denotes all window inputs of $T_i$ (optional), and $p_i$ denotes all table partition inputs of $T_i$ (optional). Similarly, $T_{i,j}$ represents the $j$th transaction execution of $T_i$ as $T_{i,j}(s_i.b_j, w_i, p_i)$, where $s_i.b_j$ denotes the $j$th atomic batches of all streams in $s_i$.
A dataflow graph $D$ is executed in rounds of atomic batches $1 \leq r < \infty$, such that for any round $r$, atomic batch $r$ from all streaming inputs into $D$ generates a sequence of transaction executions $T_{i,r}(s_i.b_r, w_i, p_i)$ for each $T_i$. Note that this execution generates an unbounded schedule. However, as of a specific round $r = R$, we generate a bounded schedule that consists of all $R \times n$ transaction executions $T_{i,r}(s_i.b_r, w_i, p_i)$, for $1 \leq r \leq R$, $1 \leq i \leq n$.
In the traditional ACID model of databases, any permutation of these $R \times n$ transaction executions would be considered to be a valid/correct serial schedule. In our model, we additionally have:
1. **Dataflow graph order constraint**: Consider the topological ordering $O$ of $G$ as we defined above. Then for any given execution round $r$, it must hold that:
\[ T_{1,r}(s_1.b_r, w_1, p_1) < T_{2,r}(s_2.b_r, w_2, p_2) < \ldots < T_{n,r}(s_n.b_r, w_n, p_n) \]
2. **Stream order constraint**: For any given transaction $T_i$, as of any execution round $R$, the following must hold:
\[ T_{i,1}(s_i.b_1, w_i, p_i) < T_{i,2}(s_i.b_2, w_i, p_i) < \ldots < T_{i,R}(s_i.b_R, w_i, p_i) \]
Any bounded schedule of $D$ that meets the above two ordering constraints is a correct schedule. If $G$ has multiple topological orderings, then the dataflow graph order constraint must be relaxed to accept any of those orderings for any given execution round of $D$.
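For concreteness, the following sketch (our own illustration, assuming the identity topological ordering $T_1 < T_2 < \ldots < T_n$) checks whether a proposed bounded schedule of TEs satisfies both ordering constraints.

```java
import java.util.List;

// Checks the two ordering constraints of Section 2.3 for a bounded schedule.
// A TE is identified by (i, r): the r-th execution of transaction T_i, with the
// topological ordering assumed to be T_1 < T_2 < ... < T_n (identity permutation).
public class ScheduleChecker {

    record TE(int i, int r) {}

    static boolean isCorrect(List<TE> schedule) {
        for (int a = 0; a < schedule.size(); a++) {
            for (int b = a + 1; b < schedule.size(); b++) {
                TE first = schedule.get(a), later = schedule.get(b);
                // Stream order: T_{i,r} must precede T_{i,r'} whenever r < r'.
                if (first.i() == later.i() && first.r() > later.r()) return false;
                // Dataflow order: within the same round, T_i must precede T_j for i < j.
                if (first.r() == later.r() && first.i() > later.i()) return false;
            }
        }
        return true;
    }

    public static void main(String[] args) {
        // The two valid schedules for Figure 2 ...
        System.out.println(isCorrect(List.of(new TE(1, 1), new TE(2, 1), new TE(1, 2), new TE(2, 2)))); // true
        System.out.println(isCorrect(List.of(new TE(1, 1), new TE(1, 2), new TE(2, 1), new TE(2, 2)))); // true
        // ... and an invalid one: T_{2,1} may not run before T_{1,1}.
        System.out.println(isCorrect(List.of(new TE(2, 1), new TE(1, 1), new TE(1, 2), new TE(2, 2)))); // false
    }
}
```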
### 2.4 Correct Execution for Hybrid Workloads
S-Store’s computational model allows OLTP and streaming transactions to co-exist as part of a common transaction execution schedule. This is particularly interesting if those transactions access shared public tables. Given our formal description of a correct schedule for a dataflow graph $D$ that consists of streaming transactions, any OLTP transaction execution $T_i(p_i)$ (defined on one or more public table partitions $p_i$) is allowed to interleave anywhere in such a schedule. The resulting schedule would still be correct.
We have also extended our transaction model to include nested transactions. Fundamentally, this allows the application programmer to build higher-level transactions out of smaller ones, giving her the ability to create coarser isolation units among stored procedures, as illustrated in Figure 3. In this example, two streaming transactions, $T_1$ and $T_2$, in a dataflow graph access a shared table partition $p$. $T_1$ writes to the table and $T_2$ reads from it. If another OLTP transaction also writes to $p$ in a way to interleave between $T_1$ and $T_2$, then $T_2$ may get unexpected results. Creating a nested transaction with $T_1$ and $T_2$ as its children will isolate the behavior of $T_1$ and $T_2$ as a group from other transactions (i.e., other OLTP or streaming). Note that nested transactions also isolate multiple instances of a given streaming dataflow graph (or subgraph) from one another. We describe such a scenario in Section 4.1.1.
More generally, an S-Store nested transaction consists of two or more stored procedures with a partial order defined among them [36]. The stored procedures within a nested transaction must execute in a way that is consistent with that partial order. A nested transaction will commit, if and only if all of its stored procedures commit. If one or more stored procedures abort, the whole nested transaction will abort.
Nested transactions fit into our formal model of streaming transactions in a rather straightforward way. More specifically, any streaming transaction $T_i$ in dataflow graph $D$ can be defined as a nested transaction that consists of children $T_{i1}, \ldots, T_{im}$. In this case, $T_{i1}, \ldots, T_{im}$ must obey the partial order defined for $T_i$ for every execution round $r$, $1 \leq r < \infty$. This means that no other streaming or OLTP transaction instance will be allowed to interleave with $T_{i1}, \ldots, T_{im}$ for any given execution round.
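The commit semantics of a nested transaction can be summarized by the following hedged sketch (names are illustrative; undo of partially executed children is omitted): the children run as one isolation unit, in an order consistent with their partial order, and the parent commits only if every child commits.

```java
import java.util.List;
import java.util.function.Supplier;

// Illustrative commit semantics of an S-Store nested transaction: the children
// execute as one isolation unit (no other TE interleaves), in an order consistent
// with their partial order, and the parent commits iff every child commits.
public class NestedTransaction {

    // Each child returns true if it committed, false if it aborted.
    static boolean runNested(List<Supplier<Boolean>> childrenInOrder) {
        synchronized (NestedTransaction.class) {   // stand-in for "no interleaving"
            for (Supplier<Boolean> child : childrenInOrder) {
                if (!child.get()) {
                    // One child aborted: the already-executed children must be undone
                    // and the whole nested transaction aborts (undo logic omitted here).
                    return false;
                }
            }
            return true;   // all children committed, so the nested transaction commits
        }
    }

    public static void main(String[] args) {
        boolean committed = runNested(List.of(
                () -> { System.out.println("T1: write shared table p"); return true; },
                () -> { System.out.println("T2: read shared table p"); return true; }));
        System.out.println("nested transaction " + (committed ? "committed" : "aborted"));
    }
}
```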
### 2.5 Fault Tolerance
Like any ACID-compliant database, in the face of failure, S-Store must recover all of its state (including streams, windows, and public tables) such that any committed transactions (including OLTP and streaming) remain stable, and, at the same time, any uncommitted transactions are not allowed to have any effect on this state. A TE that had started but had not yet committed should be undone, and it should be reinvoked with the proper input parameters once the system is stable again. For a streaming TE, the invocation should also take proper stream input from its predecessor.
In addition to ACID, S-Store strives to provide exactly-once processing guarantees for all streams in its database. This means that each atomic batch $b_i$ on a given stream $s$ that is an input to a streaming transaction $T_i$ is processed exactly once by $T_i$. Note that such a TE $T_i(s, b_i)$, once it commits, will likely modify the database state (streams, windows, or public tables). Thus, even if a failure happens and some TE’s are undone / redone during recovery, the database state must be “equivalent” to one that is as if $s$ were processed exactly once by $T_i$.
For example, consider the streaming transaction $T_1(s_1, w_1)$ in Figure 2. If a failure happens while TE $T_{1,1}(s_1.b_1, w_1)$ is still executing, then: (i) $T_{1,1}$ should be undone, i.e., any modifications that it may have done on $s_1$, $w_1$, and $s_2$ should be undone; (ii) $T_{1,1}$ should be reinvoked for the atomic batch $s_1.b_1$. Similarly, if a failure happens after TE $T_{1,1}(s_1.b_1, w_1)$ has already committed, then all of its modifications on $s_1$, $w_1$, and $s_2$ should be retained in the database. In both of these failure scenarios, the recovery mechanism should guarantee that $s_1.b_1$ is processed exactly once by $T_1$ and the database state will reflect the effects of this execution.
Note that a streaming TE may have an external side effect other than modifying the database state (e.g., delivering an output tuple to a sink that is external to S-Store, as shown for $s_3$ at the top part of Figure 2). Such a side effect may get executed multiple times due to failures. Thus, our exactly-once processing guarantee applies only to state that is internal to S-Store (e.g., if $s_3$ were alternatively stored in an S-Store table as shown at the bottom part of Figure 2). This is similar to other exactly-once processing systems such as Spark Streaming [40].
If the dataflow graph definition allows multiple TE orderings or if the transactions within a dataflow graph contain any nondeterministic operations (e.g., use of a random number generator), we provide an additional recovery option that we call weak recovery. Weak recovery will produce a correct result in the sense that it will produce results that could have been produced if the failure had not occurred, but not necessarily the one that was in fact being produced. In other words, each atomic batch of each stream in the database will still be processed exactly once and the TE’s will be ordered correctly (as described in Sections 2.3 and 2.4), but the final database state might look different than that of the original execution before the failure. This is because the new execution might follow a different (but valid) TE ordering, or a non-deterministic TE might behave differently every time it is invoked (even with the same input parameters and database state).
### 3. ARCHITECTURE & IMPLEMENTATION
We chose to build S-Store on top of the H-Store main-memory OLTP system [29]. This allows us to inherit H-Store’s support for high-throughput transaction processing, thereby eliminating the need to replicate this complex functionality. We also receive associated functionality that will be important for streaming OLTP applications, including indexing, main-memory operation, and support for user-defined transactions.
In this section, we briefly describe the H-Store architecture and the changes required to incorporate S-Store’s hybrid model described in the previous section. Nevertheless, we believe that the architectural features that we have added to H-Store are conceptually applicable to any main-memory OLTP system.
#### 3.1 H-Store Overview
H-Store is an open-source, main-memory OLTP engine that was developed at Brown and MIT [29], and formed the basis for the design of the VoltDB NewSQL database system [6].
All transactions in H-Store must be predefined as stored procedures with input parameters. The stored procedure code is a mixture of SQL and Java. Transaction executions (TEs) are instantiated by binding input parameters of a stored procedure to real values and running it. In general, a given stored procedure definition will, over time, generate many TEs. TEs are submitted to H-Store, and
the H-Store scheduler executes them in whatever order is required to provide ACID guarantees.
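To give a flavor of such a definition, the sketch below uses the VoltDB procedure API, which grew out of H-Store and offers a very similar programming interface; the procedure, table, and column names are invented for illustration.

```java
import org.voltdb.SQLStmt;
import org.voltdb.VoltProcedure;
import org.voltdb.VoltTable;

// A stored procedure mixing SQL and Java: the class body is Java, and the
// parameterized SQL statements are declared as SQLStmt fields. Binding the
// run() arguments to concrete values yields one transaction execution (TE).
public class RecordVote extends VoltProcedure {

    public final SQLStmt checkContestant = new SQLStmt(
        "SELECT contestant_id FROM contestants WHERE contestant_id = ?;");
    public final SQLStmt insertVote = new SQLStmt(
        "INSERT INTO votes (phone_number, contestant_id, ts) VALUES (?, ?, ?);");

    public long run(long phoneNumber, int contestantId, long ts) {
        voltQueueSQL(checkContestant, contestantId);
        VoltTable[] results = voltExecuteSQL();
        if (results[0].getRowCount() == 0) {
            return -1;                       // unknown contestant: reject the vote
        }
        voltQueueSQL(insertVote, phoneNumber, contestantId, ts);
        voltExecuteSQL(true);                // final batch of SQL in this TE
        return 0;
    }
}
```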
H-Store follows a typical distributed DBMS architecture in which a client initiates the transaction in a layer (in H-Store, called the partition engine (PE)) that is responsible for managing transaction distribution, scheduling, coordination, and recovery. The PE manages the use of another layer (in H-Store, called the execution engine (EE)) that is responsible for the local execution of SQL queries. This layering is very much like the transaction manager / transaction coordinator division of labor in a standard distributed DBMS architecture.
A client program connects to the PE via a stored procedure execution request. If the stored procedure requires SQL processing, then the EE is invoked with these sub-requests.
An H-Store database is partitioned across multiple sites [34], where a site corresponds to a CPU core. The available DRAM for a node is divided equally among the partitions, and each stores a horizontal slice of the database. A transaction is executed on the sites that hold the data that it needs. If the data is partitioned carefully, most transactions will only need data from a single site. Single-sited transactions are run serially on that site, thereby eliminating the need for fine-grained locks and latches.
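A minimal sketch of this routing idea follows (our own illustration, not H-Store code): each transaction is sent to the partition that owns its partitioning-key value, and every partition executes its transactions serially on a single thread, standing in for one core.

```java
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

// Illustration of single-sited execution: route each transaction to the
// partition owning its key, and run each partition's work serially on a
// dedicated single thread (one thread per partition, i.e., per "core").
public class PartitionRouter {

    private final ExecutorService[] partitions;

    public PartitionRouter(int numPartitions) {
        partitions = new ExecutorService[numPartitions];
        for (int i = 0; i < numPartitions; i++) {
            partitions[i] = Executors.newSingleThreadExecutor();   // serial per partition
        }
    }

    // A single-sited transaction touches only the partition that owns its key,
    // so no fine-grained locks or latches are needed.
    public void submit(Object partitioningKey, Runnable transaction) {
        int p = Math.floorMod(partitioningKey.hashCode(), partitions.length);
        partitions[p].submit(transaction);
    }

    public static void main(String[] args) {
        PartitionRouter router = new PartitionRouter(4);
        router.submit("customer-42", () -> System.out.println("debit customer-42"));
        router.submit("customer-17", () -> System.out.println("debit customer-17"));
        for (ExecutorService p : router.partitions) p.shutdown();
    }
}
```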
H-Store provides recovery through a checkpointing and command-logging mechanism [31]. Periodically, the system creates a persistent snapshot or checkpoint of the current committed state of the database. Furthermore, every time H-Store commits a transaction, it writes a command-log record containing the name of that stored procedure along with its input parameters. This command-log record must be made persistent before its transaction can commit. In order to minimize interactions with the slow persistent store, H-Store offers a group-commit mechanism.
On recovery, the system’s state is restored to the latest snapshot, and the command-log is replayed. That is, each command-log record causes the system to re-execute the same stored procedures with the same arguments in the same order that it did before the failure. Note that an undo-log is unnecessary, as neither the previous checkpoint nor the command-log will contain uncommitted changes.
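The checkpoint-plus-command-log scheme can be summarized by the following hedged sketch (illustrative names, not the H-Store implementation): committed transactions append a record of their procedure name and input parameters, and recovery restores the latest snapshot and re-executes the log in commit order.

```java
import java.util.ArrayList;
import java.util.List;

// Minimal sketch of checkpoint + command logging as described above: each
// committed transaction appends a log record (procedure name + parameters);
// recovery restores the latest snapshot and replays the logged procedures
// in their original commit order. No undo log is needed because neither the
// snapshot nor the command-log contains uncommitted changes.
public class CommandLogSketch {

    record LogRecord(String procedure, Object[] params) {}

    static final List<LogRecord> commandLog = new ArrayList<>();

    // Called once the transaction is known to commit; the record must be made
    // durable before the commit is acknowledged (group commit batches these writes).
    static void logCommit(String procedure, Object... params) {
        commandLog.add(new LogRecord(procedure, params));
    }

    static void recover(Runnable restoreLatestSnapshot) {
        restoreLatestSnapshot.run();
        for (LogRecord rec : commandLog) {
            // Re-invoke the same stored procedure with the same arguments, in commit order.
            System.out.println("replaying " + rec.procedure());
        }
    }

    public static void main(String[] args) {
        logCommit("RecordVote", 5551234L, 3, 42L);
        logCommit("UpdateLeaderboard", 3);
        recover(() -> System.out.println("restored latest snapshot"));
    }
}
```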
### 3.2 S-Store Extensions
The high-level architecture of S-Store, directly adapted from H-Store, is shown in Figure 4. S-Store makes a number of extensions to H-Store to enable stream processing in the engine (shown in boldface in Figure 4). These include management of: (i) inputs from streaming clients and dataflow graphs of stored procedures at the PE layer, (ii) triggers at both the PE and the EE layers, (iii) stream- and window-based queries at the EE layer, (iv) in-memory stream and window state.
#### 3.2.1 Streams
S-Store implements a stream as a time-varying H-Store table. Using this approach, stream state is persistent and recoverable. Since tables are unordered, the order of tuples in a stream is captured by timestamps. An atomic batch of tuples is appended to the stream table as it is placed on the corresponding stream, and conversely, an atomic batch of tuples is removed from the stream table as it is consumed by a downstream transaction in the dataflow. The presence of an atomic batch of tuples within a stream can activate either a SQL plan fragment or a downstream streaming transaction, depending on what “triggers” are attached to the stream (described in Section 3.2.2). In case of the latter, the current stream table serves as input for the corresponding downstream streaming transaction.
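The following sketch (illustrative names only) captures the stream-as-table idea: batches are appended when they arrive on the stream and removed again once the downstream transaction consumes them, so the table always holds exactly the unconsumed portion of the stream.

```java
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Deque;
import java.util.List;

// Sketch of a stream implemented as a time-varying table (names are ours):
// an atomic batch is appended when it is placed on the stream and removed
// again when the downstream transaction that consumes it commits.
public class StreamTable {

    record Row(int batchId, long ts, String payload) {}

    private final Deque<Row> rows = new ArrayDeque<>();   // order kept via timestamps

    public void appendBatch(int batchId, List<String> tuples) {
        long ts = System.nanoTime();
        for (String t : tuples) rows.addLast(new Row(batchId, ts, t));
        // At this point any trigger attached to the stream table would fire.
    }

    // Consume (and garbage-collect) the oldest batch for a downstream TE.
    public List<Row> consumeOldestBatch() {
        if (rows.isEmpty()) return List.of();
        int batchId = rows.peekFirst().batchId();
        List<Row> batch = new ArrayList<>();
        while (!rows.isEmpty() && rows.peekFirst().batchId() == batchId) {
            batch.add(rows.removeFirst());
        }
        return batch;
    }
}
```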
#### 3.2.2 Triggers
Triggers enable push-based, data-driven processing needed to implement S-Store dataflow graphs. A trigger is associated with a stream table or a window table. When new tuples are appended to such a table, downstream processing will be automatically activated. The alternative to triggers would be polling for newly-arriving tuples, which would reduce throughput.
There are two types of triggers in S-Store to reflect the two-layer design of H-Store and of many other distributed database systems:
- **Partition engine (PE) triggers** can only be attached to stream tables, and are used to activate downstream stored procedures upon the insertion and commit of a new atomic batch of tuples on the corresponding streams. As the name implies, PE triggers exist to create a push-based dataflow within the PE by eliminating the need to return back to the client to activate downstream stored procedures. In Figure 4, the horizontal arrows between stored procedures inside the PE layer denote PE triggers.
- **Execution Engine (EE) triggers** can be attached to stream or window tables, and are used to activate SQL queries within the EE. These triggers occur immediately upon the insertion of an atomic batch of tuples in the case of a stream, and upon the insertion of an atomic batch of tuples that also cause a window to slide in the case of a window. The SQL queries are executed within the same transaction instance as the batch insertion which triggered them, and
can also activate further downstream EE triggers. EE triggers are designed to eliminate unnecessary communication between the EE and PE layers, for example when the execution of downstream processing is conditional. In Figure 4, the horizontal arrows between SQL queries inside the EE layer denote EE triggers.
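Reduced to their control-flow effect, the two trigger types just listed can be sketched as follows (a hedged illustration with invented names): an EE trigger runs additional SQL work inside the same transaction execution, while a PE trigger enqueues the downstream stored procedure once the inserting transaction commits, avoiding a round-trip to the client.

```java
import java.util.ArrayDeque;
import java.util.List;
import java.util.Queue;
import java.util.function.Consumer;

// Control-flow sketch of S-Store triggers (illustrative, not the real engine):
// EE triggers extend the current transaction with more SQL work, PE triggers
// enqueue the next stored procedure in the dataflow without a client round-trip.
public class TriggerSketch {

    static final Queue<Runnable> partitionEngineQueue = new ArrayDeque<>();

    // EE trigger: fired on insert into a stream/window table, runs inside the same TE.
    static void insertWithEeTrigger(List<String> batch, Consumer<List<String>> triggeredSql) {
        System.out.println("insert batch into stream table: " + batch);
        triggeredSql.accept(batch);               // same transaction, no PE round-trip
    }

    // PE trigger: fired when the inserting TE commits, schedules the downstream SP.
    static void commitWithPeTrigger(String downstreamProcedure, List<String> batch) {
        partitionEngineQueue.add(() ->
            System.out.println("run " + downstreamProcedure + " on " + batch));
    }

    public static void main(String[] args) {
        List<String> batch = List.of("vote-1", "vote-2");
        insertWithEeTrigger(batch, b -> System.out.println("EE trigger: filter/transform " + b));
        commitWithPeTrigger("UpdateLeaderboard", batch);
        while (!partitionEngineQueue.isEmpty()) partitionEngineQueue.poll().run();
    }
}
```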
#### 3.2.3 Windows
Windows are also implemented as time-varying H-Store tables. A window is processed only when a new complete window state is available. For a sliding window, a new full window becomes available every time that window has one slide-worth of new tuples. Therefore, when new tuples are inserted into a window, they are flagged as “staged” until slide conditions are met. Staged tuples are not visible to any queries on the window, but are maintained within the window. Upon sliding, the oldest tuples within the window are removed, and the staged tuples are marked as active in their place. All window manipulation is done at the EE level, and output can be activated using an EE trigger.
Due to the invisible “staging” state of a window table as well as the transaction isolation rules discussed earlier in Section 2.3, special scoping rules are enforced for window state. A window table must not be accessed in general by TE’s other than those of the stored procedure that defined it. In fact, a window table must only be visible to consecutive TE’s of the stored procedure that contains it. As a consequence, one is not allowed to define PE triggers on window state, but only EE triggers. In other words, windows must be contained within the TE’s of single stored procedures and must not be shared across other stored procedures in the dataflow graph.
S-Store provides automatic garbage collection mechanisms for tuples that expire from stream or window state, after any triggers associated with them have all been fired and executed.
It should be noted that some optimizations, such as incremental window processing, have been left as future work.
#### 3.2.4 Streaming Scheduler
Being an OLTP database that implements the traditional ACID model, the H-Store scheduler can execute transaction requests in any order. On a single H-Store partition, transactions run in a serial fashion by design [29]. H-Store serves transaction requests from its clients in a FIFO manner by default.
As we discussed in Section 2.3, streaming transactions and dataflow graphs require TE’s for dependent stored procedures to be scheduled in an order that is consistent with the dataflow graph (i.e., not necessarily FIFO). This is, of course, true for other streaming schedulers, but here we must obey the rules defining correct schedules as stated earlier in Section 2.3. Additionally, as discussed in Section 2.4, the application can specify (via defining nested transactions) additional isolation constraints, especially when shared table state among streaming transactions is involved. The simplest solution is to require the TE’s in a dataflow graph for a given input batch to always be executed in an order consistent with a specific topological ordering of that dataflow graph.
Although our ordering rules described earlier would allow transaction schedules that are “equivalent” to any topological ordering of the dataflow graph, our current scheduler implementation admits only one of them. We have found this approach to be practical in that it is amenable to a low-overhead implementation in H-Store and good enough to support all the S-Store use cases and benchmarks that we have so far studied (see Section 4). As we consider scaling to larger collections of workloads and nodes going forward, issues of fairness and locality may require more sophisticated approaches, such as flow-based scheduling [26].
#### 3.2.5 Recovery Mechanisms
As described in Section 2.5, S-Store provides two different recovery options: (i) strong recovery, which is guaranteed to produce exactly the same state as was present before the failure (note that this guarantee is feasible only if the workload does not contain any non-determinism), and (ii) weak recovery, which will produce a legal state that could have existed, but is not necessarily the exact state lost. Both of these options leverage periodic checkpointing and command-logging mechanisms of H-Store. However, they differ in terms of which transactions are recorded in the command-log during normal operation and how they are replayed during crash recovery.
**Strong Recovery.** S-Store's strong recovery is very similar to H-Store's recovery mechanism. All committed transactions (both OLTP and streaming) are recorded in the command-log along with their input arguments. When a failure occurs, the system reads the command-log starting from the latest snapshot. The log is replayed in the order in which the transactions appear, which is the same as the order they were originally committed. This guarantees that the reads-from and writes-to relationships between the transactions are strictly maintained.
There is one variation on H-Store’s recovery, however. Before the log replay, we must first disable all PE triggers so that the execution of a stored procedure does not redundantly trigger the execution of its successor(s) in the dataflow graph. Because every transaction is logged in strong recovery, failing to do this would create duplicate invocations, and thus potentially incorrect results. Once triggers are disabled, the snapshot is applied, and recovery from the command-log can begin.
When recovery is complete, we turn PE triggers back on. At that point, we also check if there are any stream tables that contain tuples in them. For such streams, PE triggers will be fired to activate their respective downstream transactions. Once those transactions have been queued, then the system can resume normal operation.
**Weak Recovery.** In weak recovery, the command-log need not record all stored procedure invocations, but only the ones that ingest streams from the outside (i.e., border transactions). We then use a technique similar to upstream backup [25] to reinvoke the other previously committed stored procedures (i.e., interior transactions). In upstream backup, the data at the inputs to a dataflow graph are cached so that in the event of a failure, the system can replay them in the same way that it did on first receiving them in the live system. Because the streaming stored procedures in an S-Store dataflow have a well-defined ordering, the replay will necessarily create a correct execution schedule. While transactions may not be scheduled in the exact order that took place on the original run, some legal transaction order is ensured.
When recovering using weak recovery, we must first apply the snapshot, as usual. However, before applying the command-log, S-Store must first check existing streams for data recovered by the snapshot, and fire any PE triggers associated with those streams. This ensures that interior transactions that were run post-snapshot but not logged are re-executed. Once these triggers have been fired, S-Store can begin replaying the log. Unlike for strong recovery, we do not need to turn off PE triggers during weak recovery. In fact, we rely on PE triggers for the recovery of all interior transactions, as these are not recorded in the command-log. Results are returned through committed tables.
Weak recovery is a light-weight alternative to strong recovery, since it need not log all committed transactions. Section 4.2.3 provides an experimental comparison of our strong and weak recovery mechanisms.
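The sketch below contrasts the two recovery modes at a very high level (assumed names, not S-Store code): strong recovery replays a log of every committed transaction with PE triggers disabled, whereas weak recovery replays only border transactions and lets PE triggers regenerate the interior ones in some legal order.

```java
import java.util.List;

// High-level contrast of the two recovery modes described above (illustrative).
public class RecoveryModes {

    // Strong recovery: every committed TE was logged; replay it verbatim with
    // PE triggers disabled so downstream SPs are not invoked a second time.
    static void strongRecovery(List<String> fullCommandLog) {
        System.out.println("restore snapshot; disable PE triggers");
        fullCommandLog.forEach(te -> System.out.println("replay " + te));
        System.out.println("re-enable PE triggers; fire triggers for non-empty stream tables");
    }

    // Weak recovery: only border TEs were logged; after restoring the snapshot,
    // fire PE triggers on any recovered stream data, then replay the border TEs
    // and let PE triggers regenerate the interior TEs in some legal order.
    static void weakRecovery(List<String> borderCommandLog) {
        System.out.println("restore snapshot; fire PE triggers on recovered stream tables");
        borderCommandLog.forEach(te -> System.out.println("replay border TE " + te + " (triggers cascade)"));
    }

    public static void main(String[] args) {
        strongRecovery(List.of("T1(b1)", "T2(b1)", "T1(b2)", "T2(b2)"));
        weakRecovery(List.of("T1(b1)", "T1(b2)"));
    }
}
```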
4. EXPERIMENTS
In this section, we present the results of our experimental study that evaluates S-Store with respect to existing alternatives in OLTP and stream processing. First, we demonstrate the benefits of integrating state management with push-based processing in Section 4.1. Specifically, we compare S-Store to H-Store, Esper, and Storm in terms of overall throughput on a transactional stream processing workload. Then, Section 4.2 further explores a number of microbenchmarks that focus on evaluating specific architectural features of S-Store in comparison to its base system H-Store (i.e., EE triggers, PE triggers, and recovery modes).
To properly evaluate streaming workloads, we record throughput in terms of “input batches per second”. This number represents the number of input batches that are processed to completion, regardless of the number of transactions executed. In order to simplify comparison to other systems, these experiments set the batch size to be a single tuple. For example, if any system processes 1,000 tuples / sec, we consider it to be processing 1,000 batches / sec.
All experiments were run on a cluster of machines using the Intel® Xeon® E7-4830 processors running at 2.13 GHz. Each machine contains a total of 64 cores and 264 GB of memory. Because we focus on single-node S-Store deployments in this paper and due to the partitioned architecture of S-Store, effectively only a single core is used for data access. In order to create data isolation for an apples-to-apples comparison, we limit data access to a single core on all featured systems. The experiments were run using a single non-blocking client which asynchronously sends requests to the system. Command-logging was enabled unless otherwise stated.
4.1 State-of-the-Art Comparison
In order to provide the best comparison between S-Store and state-of-the-art systems, we chose to implement a Leaderboard Maintenance benchmark that exercises all of the architectural additions of S-Store described in Section 3. We measure S-Store’s performance against a main-memory OLTP system (H-Store [29]), a traditional single-node CEP engine (Esper [3]), and a modern distributed streaming system (Storm [37]).
4.1.1 Leaderboard Maintenance Benchmark
Consider a TV game-show in which viewers vote for their favorite candidate. Leaderboards are periodically updated with the number of votes each candidate has received. Each viewer may cast a single vote via text message. Suppose the candidate with the fewest votes will be removed from the running every 20,000 votes, as it has become clear that s/he is the least popular. When this candidate is removed, votes submitted for him or her will be deleted, effectively returning the votes to the people who cast them. Those votes may then be re-submitted for any of the remaining candidates. This continues until a single winner is declared. During the course of the voting, each incoming vote needs to be validated and recorded. Furthermore, several leaderboards are maintained: one representing the top-3 candidates, another for the bottom-3 candidates, and a third one for the top-3 trending candidates of the last 100 votes. With each incoming vote, these leaderboards are updated with new statistics regarding the number of votes each candidate has received.
As shown in Figure 5, the dataflow graph contains three separate stored procedures: one to validate and insert a new vote, a second to maintain the leaderboard, and a third to delete a candidate if necessary. In order to ensure the correctness of the result in the presence of shared tables, as well as to maintain consistency of the tables across the dataflow graph, these three stored procedures must execute in sequence for each new vote.
4.1.2 OLTP Systems (H-Store)
As discussed at the beginning of Section 2, S-Store provides three primary guarantees: ACID, ordered execution, and exactly-once processing. When evaluating S-Store against an OLTP system (H-Store), it is important to consider which of these guarantees are being provided.
By default, H-Store provides only one of the three processing guarantees of S-Store: ACID. H-Store has no ordering guarantees, as it has no concept of a dataflow graph. It can instead choose any serializable transaction schedule (Section 3.1). In fact, we have previously shown that, in a workload in which multiple stored procedures within a dataflow share state like the one in Figure 5, H-Store may produce incorrect results [14]. H-Store also does not guarantee that a dataflow will be fully processed exactly once in the event of a system failure (again due to the lack of concept of a dataflow graph).
Because ordering guarantees are not considered, H-Store can asynchronously queue transactions for the engine to process. Thus, H-Store can send a transaction request and immediately send another without waiting for the response. The queue provides the system with a continuous supply of work, meaning H-Store is almost constantly doing transactional work. As a result, H-Store is able to process an impressive 5,300 input batches per second, as can be seen in Table 1.
By comparison, S-Store is able to achieve 2,200 input batches per second, while providing all three correctness guarantees. The primary performance difference lies within the ordered execution guarantee. To provide this, S-Store’s scheduler must determine the proper order in which to run the transactions in its queue (discussed in Section 3.2.4). This scheduling does reduce the number of transactions per second that S-Store is able to process, but it is necessary to ensure correct results.
<table>
<thead>
<tr>
<th>System</th>
<th>ACID</th>
<th>Order</th>
<th>Exactly-Once</th>
<th>Max Tput (batches/sec)</th>
</tr>
</thead>
<tbody>
<tr>
<td>H-Store (async)</td>
<td>✓</td>
<td>✗</td>
<td>✗</td>
<td>5300</td>
</tr>
<tr>
<td>H-Store (sync)</td>
<td>✓</td>
<td>✓</td>
<td>✗</td>
<td>210</td>
</tr>
<tr>
<td>Esper+ VoltDB</td>
<td>✓</td>
<td>✓</td>
<td>✓</td>
<td>570</td>
</tr>
<tr>
<td>Storm+ VoltDB</td>
<td>✓</td>
<td>✓</td>
<td>✓</td>
<td>600</td>
</tr>
<tr>
<td>S-Store</td>
<td>✓</td>
<td>✓</td>
<td>✓</td>
<td>2200</td>
</tr>
</tbody>
</table>
Table 1: Guarantees vs Max Tput (Leaderboard Maintenance)
It is possible to execute the Leaderboard Maintenance benchmark on H-Store in a way that provides ordering guarantees. This is accomplished by designing a pseudo-“dataflow-graph” within the client. The parameters of a downstream procedure depend on the result from an upstream procedure, and transaction ordering must be ensured by the client. As a result, all procedures are forced to be invoked synchronously, meaning that a response must be received before the next request can be made.
This method ensures that the end results of the benchmark are correct, but performance suffers severely in the process. H-Store is only able to process 210 input batches per second when ordering is enforced by the client (see Table 1). Because all transaction calls are synchronous, H-Store’s transaction queue never holds more than one transaction at a time. As a result, the client and the PE of H-Store must constantly wait for each other, severely hindering performance. S-Store, on the other hand, provides all three correctness guarantees while maintaining reasonable throughput.
4.1.3 Streaming Systems (Esper and Storm)
To compete with pure streaming systems, S-Store’s performance must be comparable to both first-generation, single-node CEP engines as well as second-generation, distributed real-time streaming systems. We chose Esper and Storm as representative systems for their respective categories.
As further discussed in Section 5, neither Esper nor Storm is transactional. In order to provide comparable (though not comprehensive) guarantees to S-Store, only serialized tuple processing was allowed. All of Esper's default delivery ordering guarantees remain activated, meaning each tuple must run to completion before the next tuple may begin processing. For the Storm implementation, we opted to use Trident [5], an extension of Storm that supports stateful stream processing and exactly-once semantics. Data durability in both systems is provided by command-logging each of the three atomic processing units in the dataflow graph.
On stateless, pure streaming workloads that do not require transactional guarantees, both Esper and Storm would easily outperform S-Store. However, shared state management is key to many workloads, including our Leaderboard Maintenance benchmark. Like many stream processing systems, both Esper and Storm rely on external data storage for durable, shared state.
We added VoltDB[6], a main-memory, transactional database, as the backend for both Esper and Storm. VoltDB is an optimized, commercial version of H-Store, making the comparison with S-Store fair. Esper and Storm serve as the driving push-based engines, choosing when to access state based on the results received from the database. To maximize VoltDB’s potential and batch requests from Esper / Storm to the database, we compile the three operations in Leaderboard Maintenance as VoltDB stored procedures. Each streaming system sends stored procedure requests via JDBC. Command-logging was unavailable in the open-source version of VoltDB, so asynchronous command logging was implemented in Esper and Storm.
After adding VoltDB, both Esper and Storm with Trident provide comparable guarantees to S-Store, outlined in Table 1. Esper (+VoltDB) provides two of the three processing guarantees of S-Store (ACID and ordered execution guarantees), but has no support for exactly-once semantics. Storm with Trident (+VoltDB) provides all three correctness guarantees.
As shown in Table 1, both Esper and Storm with Trident achieve roughly 600 batches per second, with data access being the significant bottleneck. At all times, either Esper or Storm is waiting for VoltDB, or vice-versa. Because tuples must be processed sequentially, only a single transaction request can be sent to VoltDB at a time, and the database must at a minimum wait for a full round-trip to and from the streaming system before it can process more work. Meanwhile, Esper and Storm must wait for VoltDB to process its transaction request before evaluating the response and continuing to process the dataflow graph.
By contrast, S-Store processes 2,200 batches per second. S-Store is able to handle multiple asynchronous transaction requests from the client and still preserve the tuple processing order. This is because all of the transaction ordering is handled directly by the S-Store partition engine. By combining the push-based semantics and fully-integrated state management, S-Store avoids the costly blocking communication between the streaming system and the database.
4.2 Micro-Benchmarks
A number of micro-experiments were performed to evaluate the optimizations achieved by S-Store over its predecessor, H-Store, in the presence of transactional stream processing workloads. For the experiments in Sections 4.2.1 and 4.2.2, command-logging was disabled to emphasize the feature being measured.
4.2.1 Execution Engine Triggers
In this experiment, we evaluate the benefit of S-Store’s EE triggers. The micro-benchmark contains a single stored procedure that consists of a sequence of SQL statements (Figure 6(a)). In S-Store, these SQL statements can be activated using EE triggers such that all execution takes place inside the EE layer. H-Store, on the other hand, must submit the set of SQL statements (an insert and a delete) for each query as a separate execution batch from PE to EE. Figure 6(a) illustrates the case for 3 streams and 3 queries. S-Store’s EE triggers enable it to trade off trigger execution cost for a reduction in the number of PE-to-EE round-trips (e.g., 2 triggers instead of 2 additional round-trips). Note also that the DELETE statements are not needed in S-Store, since garbage collection on streams is done automatically as part of our EE trigger implementation.
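The round-trip saving can be illustrated with a toy model. The class and trigger registration below are illustrative only and do not reflect S-Store's actual EE interface.

```python
# Toy model of EE trigger chaining: an insert into a stream fires the next SQL
# statement inside the "EE", so downstream queries cost no extra PE round trips.

class ToyExecutionEngine:
    def __init__(self):
        self.triggers = {}        # stream name -> function applied on insert
        self.pe_round_trips = 0

    def register_trigger(self, stream, fn):
        self.triggers[stream] = fn

    def execute_batch(self, stream, tuples):
        self.pe_round_trips += 1              # one batch shipped from PE to EE
        self._insert(stream, tuples)

    def _insert(self, stream, tuples):
        fn = self.triggers.get(stream)
        if fn is not None:
            next_stream, out = fn(tuples)
            self._insert(next_stream, out)    # fired inside the EE, no round trip

ee = ToyExecutionEngine()
ee.register_trigger("s1", lambda ts: ("s2", [t + 1 for t in ts]))   # query 2
ee.register_trigger("s2", lambda ts: ("s3", [t * 2 for t in ts]))   # query 3
ee.execute_batch("s1", [1, 2, 3])      # query 1 arrives as a single PE batch
assert ee.pe_round_trips == 1          # three chained queries, one round trip
```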
Figure 6(b) shows how maximum throughput varies with the number of EE triggers. S-Store outperforms H-Store in all cases, and its relative performance further increases with the number of
EE triggers, reaching up to a factor of 2.5x for 9 triggers. This trend continues as more EE triggers are added.
4.2.2 Partition Engine Triggers
This experiment compares the performance of S-Store’s PE triggers to an equivalent implementation in H-Store, which has no such trigger support in its PE. As illustrated in Figure 7(a), the microbenchmark consists of a dataflow graph with a number of identical stored procedures (SPs). Each SP removes tuples from its input stream, and then inserts these tuples into its output stream. We assume that the dataflow graph must execute in exact sequential order. In H-Store, the scheduling request of a new transaction must come from the client, and because the dataflow order of these transactions must be maintained, transactions cannot be submitted asynchronously. Serializing transaction requests severely limits H-Store’s performance, as the engine will be unable to perform meaningful work while it waits for a client request (as discussed in Section 4.1.2). In S-Store, a PE trigger can activate the next transaction directly within the PE and can prioritize these triggered transactions ahead of the current scheduling queue using its streaming scheduler. Thus, S-Store is able to maintain dataflow order while both avoiding blockage of transaction executions and reducing the number of round-trips to the client layer.
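A toy scheduler makes the contrast explicit. The queue discipline below is a sketch of the behaviour just described, not S-Store's actual partition-engine code.

```python
# Toy streaming scheduler: when an SP commits, its PE trigger places the downstream
# SP at the head of the queue, ahead of ordinary client requests, so dataflow order
# is preserved without a client round trip.

from collections import deque

class ToyPartitionEngine:
    def __init__(self, dataflow):
        self.dataflow = dataflow          # SP name -> downstream SP (or None)
        self.queue = deque()
        self.executed = []

    def submit_from_client(self, sp, batch):
        self.queue.append((sp, batch))    # normal requests join the tail

    def run(self):
        while self.queue:
            sp, batch = self.queue.popleft()
            self.executed.append((sp, batch))            # "execute" the transaction
            downstream = self.dataflow.get(sp)
            if downstream is not None:                   # PE trigger fires on commit
                self.queue.appendleft((downstream, batch))

pe = ToyPartitionEngine({"SP1": "SP2", "SP2": "SP3", "SP3": None})
pe.submit_from_client("SP1", "batch-1")
pe.submit_from_client("SP1", "batch-2")   # queued asynchronously by the client
pe.run()
# batch-1 flows through SP1 -> SP2 -> SP3 before batch-2 starts.
assert pe.executed[:3] == [("SP1", "batch-1"), ("SP2", "batch-1"), ("SP3", "batch-1")]
```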
Figure 7(b) shows how throughput (plotted in log scale) changes with increasing dataflow graph size (shown as the number of PE triggers for S-Store). H-Store’s throughput tapers off due to the PE’s need to wait for the client to determine which transaction to schedule next. Thanks to its PE triggers, S-Store is able to process roughly an order of magnitude more input batches per second.
Our experiments also show that, for trigger-heavy workloads, weak recovery can accomplish a run-time effect similar to the use of group commit. As shown in Figure 8(a), without group commit, logging quickly becomes a bottleneck in the strong recovery case: each committed transaction is logged, so throughput quickly degrades as the number of transactions in the dataflow graph increases. By contrast, weak recovery logs only the committed border transactions, allowing up to 4x the throughput because it writes a much smaller fraction of log records to disk.
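The difference in log volume can be sketched as follows, assuming a dataflow of four SPs in which only the first (border) SP receives input batches; the record format is purely illustrative.

```python
# Sketch of the logging difference: strong recovery logs every committed
# transaction, while weak recovery logs only committed border transactions.

def strong_log(committed):
    return list(committed)                          # one record per committed txn

def weak_log(committed, border_sps):
    return [t for t in committed if t["sp"] in border_sps]

committed = [{"sp": sp, "batch": b}
             for b in range(100)
             for sp in ("SP1", "SP2", "SP3", "SP4")]    # 4-SP dataflow, 100 batches
assert len(strong_log(committed)) == 400
assert len(weak_log(committed, border_sps={"SP1"})) == 100   # a quarter of the records
```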
For the recovery experiment, we ran 5,000 input batches through the same PE micro-benchmark, recording logs for both weak and strong recovery. We then measured the amount of time it took S-Store to recover from scratch using each command-log.
As shown in Figure 8(b), weak recovery not only achieves better throughput during normal operation, but it also provides lower recovery time. Typically during recovery, the log is read by the client and transactions are submitted sequentially to the engine. Each transaction must be confirmed as committed before the next can be sent. Because weak recovery activates interior transactions within the engine, the transactions can be confirmed without a round-trip to the client. As a result, recovery time stays roughly constant for weak recovery, even for dataflow graphs with larger numbers of stored procedures. For strong recovery, recovery time increases linearly with the size of the dataflow graph.
As previously stated, we expect the need for recovery to be rare, and thus prioritize throughput at run time over total recovery time. However, in real-time systems in which recovery time can be crucial, weak recovery can provide a significant performance boost while also improving run-time throughput.
5. RELATED WORK
In the early 2000’s, there was a lot of interest in the database community for stream processing. The main goal of this work was to process continuous queries with low latency as data streamed into the system. This was largely inspired by the emergence of sensor-based applications. Many academic prototypes (Aurora / Borealis [7, 8], STREAM [10], TelegraphCQ [16], NiagaraCQ [17]) were built, and several commercial products were spawned as a result of this work (e.g., TIBCO StreamBase, CISCO Truviso, SAP Coral8 / ESP, IBM InfoSphere Streams, Microsoft StreamInsight, Oracle CEP, Esper). With the exception of STREAM and Coral8, these systems did not support an explicit notion of transactions. STREAM did not directly claim to have transactions, but its execution model was based on logical timestamps which could be interpreted as transaction IDs. Batches of tuples with the same timestamp were executed atomically. While this could be used to provide isolation, recovery was not discussed. Furthermore, modeling transactions as the execution of an entire query graph did not allow finer-grained transaction definitions. Similarly, Coral8 provided so-called “atomic bundles” as a configurable isolation/recovery unit embedded in its execution model, but did not provide any transactional guarantees beyond “at least once” for processing events. Furthermore, none of these early systems considered integrating stream processing with traditional OLTP-style query processing.
Fault tolerance issues have been investigated as stream processing systems have been moved into distributed settings [8, 16]. A few fundamental models and algorithms have been established by
this work [11, 25, 35], including the upstream backup technique that we leverage in our weak recovery mechanism [25].
There have also been several efforts in addressing specific transactional issues that arise in stream processing settings. For example, Golab et al. have studied the concurrency control problem that arises when a sliding window is advanced (write) while it is being accessed by a query (read) [22]. This work proposes sub-windows to be used as atomic access units and two new isolation levels that are stronger than conflict serializability. Such a problem never arises in S-Store, since window state is accessed by a single TE at a time (and never by TEs of different SPs). As another example, Wang et al. have considered concurrency issues that arise when adding active rule support to CEP engines in order to monitor and react to streaming outputs [38]. In this case, the rules may require accessing state shared with other queries or rules. This work defines a stream transaction as a sequence of system state changes that are triggered by a single input event, and proposes a timestamp-based notion of correctness enforced through appropriate scheduling algorithms. S-Store investigates transactional stream processing in a more general context than active CEP engines.
Botan et al.’s work was the first to recognize the need for an explicit transaction model to support queries across both streaming and stored data sources [13]. This work proposed to extend the traditional page model [39] to include streams of events (as time-varying relations) and continuous queries (as a series of one-time queries activated by event arrivals). As a result, each one-time query execution corresponds to a sequence of read/write operations, and operations from one or more such sequences can be grouped into transaction units based on the application semantics. Transactions must then be executed in a way to ensure conflict serializability and event arrival ordering. Thus, this work focused on the correct ordering of individual read/write operations for a single continuous query, and not so much on transaction-level ordering for complex dataflow graphs like we do.
Recently, a new breed of stream processors has emerged. Unlike the majority of the earlier-generation systems, these do not adopt a select-project-join operator environment. Instead, they expect the user to supply their own operators (UDF’s), and the system controls their execution in a scalable fashion over a cluster of compute nodes. Typically, these systems provide fault tolerance and recoverability, but do not support fully-ACID transactions. Essentially, they all aim at providing a MapReduce-like framework for real-time computations over streaming data. Representatives include Storm [37], Spark Streaming [40], Samza [2], Naiad [32], Flink [1], MillWheel [9], and S4 [33].
Storm provides two types of semantic guarantees: at-least-once and at-most-once. For at-least-once, each tuple is assigned a unique message-id and its lineage is tracked. For each output tuple \( t \) that is successfully delivered by a topology, a backflow mechanism is used to acknowledge the tasks that contributed to \( t \) with the help of a dedicated acker bolt. The data source must hold the tuple until a positive ack is received and the tuple can be removed (similar to upstream backup [25]). If an ack is not received within a given timeout period, then the source will replay the tuple again. Storm can only provide the weaker at-most-once semantics when the ack mechanism is disabled. Trident provides a higher-level programming abstraction over Storm which provides a stronger, exactly-once processing guarantee based on automatic replication [5]. While these guarantees ensure some level of consistency against failures, they are not sufficient to support atomicity and isolation as in the case of ACID guarantees. Furthermore, Storm focuses on purely streaming topologies and thus lacks support for dealing with persistent state and OLTP transactions.
Spark Streaming extends the Spark batch processing engine with support for discretized streams (D-Streams) [40]. Analytical computations are divided into a series of stateless, deterministic transformations over small batches of input tuples. Like STREAM, tuples are processed atomically within each of these batches. All state in Spark Streaming is stored in in-memory data structures called Resilient Distributed Datasets (RDDs). RDDs are partitioned and immutable. Like Storm+Trident, Spark Streaming provides exactly-once consistency semantics. Furthermore, the RDD-based state management model incurs high overhead for transactional workloads that require many fine-grained update operations (due to maintaining a large number of RDDs and managing their lineage).
Several of the new-generation streaming systems adopt a stateful dataflow model with support for in-memory state management. SEEP decouples a streaming operator’s state from its processing logic, thereby making state directly manageable by the system via a well-defined set of primitive scale-out and fault-tolerance operations [20]. SEEP has also been extended to support iterative cyclic computations [21]. Naiad extends the MapReduce model with support for structured cycles and streaming [32]. Naiad’s timely dataflow model uses logical timestamps for coordination. Samza isolates multiple processors by localizing their state and disallowing them from sharing data, unless data is explicitly written to external storage [2]. Like S-Store, all of these systems treat state as mutable and explicitly manageable, but since they all focus on analytical and cyclic dataflow graphs, they do not provide inherent support for transactional access to shared state, thus their consistency guarantees are weaker than S-Store’s.
Microsoft Trill is a new analytics engine that supports a diverse spectrum of queries (including streaming, historical, and progressive/exploratory) with real-time to offline latency requirements [15]. Trill is based on a tempo-relational query model that incrementally processes events in batches organized as columns. Trill’s adaptive batching and punctuation mechanisms enable trading off throughput for latency in case of higher loads. Both Trill and S-Store target hybrid workloads that include streaming, strive to maximize throughput while controlling latency, and are capable of in-memory processing of events in adjustable batch granularity. However, S-Store focuses more on OLTP settings with shared mutable state, whereas Trill focuses more on OLAP settings with read-mostly state. Therefore, S-Store pays more attention to providing correctness guarantees in the face of concurrent access, processing dependencies, and failures without sacrificing performance.
Figure 8: Recovery Mechanisms ((a) Logging, (b) Recovery)
6. SUMMARY & FUTURE DIRECTIONS
This paper has defined a new model of transactions for stream processing. We have presented the design and implementation of a novel system called S-Store that seamlessly combines OLTP transaction processing with our transactional stream processing model. We have also shown how this symbiosis can be implemented in the context of a main-memory, OLTP DBMS in a straightforward way. S-Store is shown to outperform H-Store, Esper, and Storm on a streaming workload that requires transactional state access, while at the same time providing stronger correctness guarantees.
Future work includes extending S-Store to operate on multiple nodes. We plan to address a number of research issues including data and workload partitioning, distributed recovery, and distributed transaction scheduling. We also plan to investigate handling of dynamic and hybrid (OLTP+streaming) workloads.
Acknowledgments. We thank Richard Tibbets for sharing his experience about StreamBase use cases, as well as Chenggang Wu and Hong Quach for their contributions. This research was funded by the NSF under grants NSF IIS-1111423 and NSF IIS-1110917, and by the Maseeh Professorship in Emerging Technologies.
7. REFERENCES
Website accessibility: An Australian view
Jonathon Grantham*, Elizabeth Grantham†, David Powers*
*School of Computer Science, Engineering and Mathematics,
Flinders University of South Australia
†School of Education
Flinders University of South Australia
PO Box 2100, Adelaide 5001, South Australia
Jonathon.grantham@gmail.com
Abstract
For nearly 20 years Australian and international legal requirements have existed around the development of accessible websites. This paper briefly reviews the history of legislation against web disability discrimination, along with the current legal requirements for website development as indicated by current international accessibility specifications, and reports on a manual examination of the accessibility of 40 Australian private and governmental websites. Not one of the 20 largest Australian companies, nor the 20 Australian Federal Government portfolios, was found to have produced a legally accessible website as per Australian standards.
Keywords: accessibility, disabilities, Disabilities Discrimination Act, web development.
1 Introduction
"The power of the Web is in its universality. Access by everyone regardless of disability is an essential aspect.”
Tim Berners-Lee, director and founder of World Wide Web Consortium (W3C), 2002
Website accessibility refers to the practice of making websites accessible to all users inclusive of race, nationality, religion and disability. Website accessibility includes, but is not limited to, the communication style of the text as well as the technical development of the website. Users have come to expect web accessibility, and Huang (2002) notes that, “Access to the Internet, to a large extent, decides whether or not one can fully participate in the increasingly turbulent and networked world.” Most governments have implemented laws and policies regarding their own websites, communication plans and technology mediums. The Australian Bureau of Statistics (2009) states that 18.5% of Australians have a disability. This figure does not include the significant percentage of Australians with temporary injury or disability, nor does it cover the aging population who, although without disability, can find themselves with similar accessibility difficulties.
However, of greater significance to the field of website and application design is the percentage of individuals (estimated at 10%) who have a disability that affects their use of Information and Communication Technologies (ICT) (Royal National Institute of Blind People (RNIB), 2011). In addition, approximately 6.2 million Australians have poor literacy or numeracy skills, and of this figure, over a third (2.6 million) (ABS, 1996) have very poor literacy or numeracy skills. Low literacy and numeracy skills can significantly affect an individual’s access to and understanding of websites and can, in turn, limit ability to complete tasks such as forms and surveys online.
1.1 Why develop accessible websites?
There are social, economic and legal arguments in favour of the development of accessible websites. Traditionally, corporate social responsibility has been based around environmental impact and anti-discrimination guidelines in the workplace (Australian Human Rights and Equal Opportunities Commission (AHREOC), 2010). Social responsibility towards web accessibility seems to have been largely left up to the individual person or organisation. In 2008, an Australian Senate motion emphasised the role of the Australian government and its responsibility to “foster a corporate culture respectful of human rights at home and abroad”. This motion encouraged all government portfolios to adhere to a common standard of website accessibility.
Huang (2002) notes the economic advantages to making a website accessible. Non-accessible websites run the risk of the potential alienation of between 10% (AHREOC, 2010) and 20% (Specific Learning Difficulties (SPELD), 2011) of the population. In the competitive corporate world, website accessibility can win or lose clientele and have significant impact on a company’s profits (Loiacono and McCoy, 2004). Limited access will encourage users with disabilities to find more accessible websites offering similar products or access more expensive channels such as call centres and walk-in branches.
Aside from the possible alienation of a significant percentage of potential clientele, the development of websites that comply with disability discrimination standards can potentially increase exposure and thus, increase the number of clientele both with and without disability.
1.2 Australian legal view
“Accessible web pages promote equal access to information and opportunities”
- Spindler, 2002
The Australian Human Rights Commission (AHREOC) is responsible for investigating discrimination on any grounds including race, colour, ethnic origin, sexual preference, gender, marital status, pregnancy and disability. The AHREOC states that website owners are obliged to make websites accessible to everyone, without discrimination.
The Australian Human Rights Commission Disability Discrimination Act states;
“Provision of information and other material through the web is a service covered by the DDA. Equal access for people with a disability in this area is required by the DDA where it can reasonably be provided... This requirement applies to any individual or organisation developing a World Wide Web page in Australia, or placing or maintaining a web page on an Australian server... whether providing for payment or not.”
Websites that do not conform to the DDA and accessibility guidelines run the risk of information provided within the website not being accessible to those who have a right to use it. Websites in which information is not accessible to all are in breach of the DDA, and therefore, the owners of the website can be prosecuted for discrimination. The most commonly referenced case of this nature is Maguire versus the Sydney Organising Committee for the Olympic Games (SOCOG). Maguire claimed that the SOCOG had created a website that was inaccessible for individuals with vision impairment. The website left individuals with vision impairment unable to access the ticketing information, event schedules or posted event results. The court ruled in favour of Maguire and under the DDA fined the SOCOG $20,000. The court case cost the SOCOG in excess of $500,000 (FCA, 2000).
On the 30th June 2010 the Minister for Finance and Deregulation, Lindsay Tanner, and Parliamentary Secretary for Disabilities, Bill Shorten, released the Website Accessibility National Transition Strategy and Implementation Plan for Australian Government agencies. The plan states that in a four year period ending in June 2014 all government department websites will meet the technical requirements of the Web Content Accessibility Guidelines 2.0 (WCAGv2, 2008). The WCAGv2 is a series of guidelines that ‘covers a range of recommendations for making web content more accessible’ (W3C, 2008). By meeting these guidelines organisations can create websites that offer accessibility for all.
South Australia and Victoria have the strictest guidelines of all Australian states in regards to disability discrimination legislation. South Australia commissioned websitecriteria (a private organisation focused around web accessibility) to write guidelines for website development, and later regulated that all South Australian Government websites must adhere to the guidelines stated by websitecriteria (2008) as well as the WCAGv2 (SAG, 2011). Websitecriteria is a detailed document that proposes guidelines for communication style and accessibility as opposed to just the technical syntactic requirement of a web language which is covered in the WCAGv2.
The Victorian Government took a similar approach, producing the “Victorian Government Accessibility Toolkit”, a recommendation for all Victorian Government websites. The “Victorian Government Accessibility Toolkit” is mostly derived from the WCAGv2 with a significant number of criteria existing in both specifications (VGAT, 2011). There is very little in the Toolkit referring to language communication styles.
1.3 International legal view
In 1993 the United Nations released guidelines on the Equalisation of Opportunities of Persons with Disabilities. This document, although not strict law, outlines the need to meet a uniform standard in website development (ILI, 2011).
Most western countries have laws against the discrimination of people with disabilities. The United Kingdom has the Disability Discrimination Act of 1995 which was later extended with the Equality Act of 2010 (Office for Disability Issues, 2011). The United States has the Americans With Disabilities Act (1990) which rules out any discrimination based on a person’s disability.
Although Canada does not have a Disability Discrimination Act per se, it operates under the Federal Accountability Act of 2006 (CWDA, 2011). The Federal Accountability Act does not directly address website accessibility; however, it was extended by government policy revolving around declaring website management roles. The policy separates professionals involved in the development of websites into categories such as developers, graphic designers and content managers. The policy then places legal responsibility for accessibility issues associated with each category. This system relies on a specific staffing structure which causes limitations for small organisations and larger organisations that use a different structure.
Across the European Union (EU) a mixture of disability discrimination laws are in place. The EU states that compliance with the WCAGv2 will be mandatory by 2010 (EIS, 2006). In each of the formerly mentioned countries the WCAGv2 is referenced as the common website accessibility standard. The United States has an additional standard entitled “Section 508”, which makes reference not only to the technical requirements for accessibility but also to the language and communication issues surrounding accessibility. Section 508 will not be considered further here; the focus of this paper is on website compliance with the internationally recognised WCAGv2.
2 Background
In a time when users are pushing for ever more advanced website functionality, websites are becoming rapidly more complicated, and less accessible for those facing difficulties. Milliman (2002) conducted a survey of webmasters, including representatives from many different demographics: large and small, business-to-business and business-to-consumer, not-for-profit and profit-seeking organisations. Over 98% of websites examined in the survey failed the Bobby test (CAST, 2011) for website accessibility and thus did not comply with US Federal Regulation Section 508 nor the W3C’s WCAGv2 accessibility standard.
The results of the Milliman (2002) survey also indicated that 42% of the survey population did not consider persons with disabilities as part of their target audience. Further, only about 13% of the surveyed population claimed that they had insufficient funds to make their site compliant, theoretically leaving 87% of surveyed organisations with the funding to create accessible websites but making the choice not to.
2.1 Barriers to web access
Little research surrounds the effect that disabilities can have on web accessibility. Many and varied conditions can affect website accessibility, including, but not limited to: cognitive impairment, motor skill impairment, sensory impairments such as hearing and vision impairment, processing disorders and learning disorders such as dyslexia.
Vassallo (2003) notes a number of common interface design flaws that can affect access for individuals with disabilities, including: small fonts, poor contrast backgrounds (either too low or too high), large blocks of text, cluttered pages, animated images or blinking/moving text, automated page or form redirects, excessive use of capitals or italics, fully justified text (resulting in uneven spacing between words), and wordy and confusing use of English.
Assistive technologies designed to boost web accessibility cater to the varied needs of different individuals and different disabilities. Commonly used assistive technologies include: high contrast monitors, low-resolution (high-magnification) monitors, digital Braille devices, screen readers, voice recognition / digital transcribing, low sensitivity input devices, joysticks, track balls and alternate keyboards (manipulated by head movements).
The technologies and development work behind these assistive technologies have a significant effect on how developers and designers create websites. Huang (2002) notes that rules such as using the “ALT” tag when displaying an image, or avoiding calling a button or link “click here”, are considered best practice as for someone using a screen reader, “click here” does not portray context.
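As a simple illustration, the two best-practice rules mentioned above can be checked mechanically. The snippet below assumes the BeautifulSoup library is available and covers only these two rules; it is nothing like a full audit.

```python
# Rough check for images without ALT text and for context-free link labels
# such as "click here"; illustrative only.

from bs4 import BeautifulSoup

def quick_checks(html):
    soup = BeautifulSoup(html, "html.parser")
    issues = []
    for img in soup.find_all("img"):
        if not img.get("alt"):
            issues.append(f"image without ALT text: {img.get('src')}")
    for link in soup.find_all("a"):
        if link.get_text(strip=True).lower() in {"click here", "here"}:
            issues.append(f"link text gives no context: {link.get('href')}")
    return issues

print(quick_checks('<img src="logo.png"><a href="/tickets">click here</a>'))
```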
2.2 Methods of evaluating accessibility
Methods of evaluating website accessibility broadly fall into three categories: automatic validation/tools, manual evaluation against the WCAGv2 specification, and accessibility testing via a group of test users.
2.2.1 Automatic validation/tools
Automatic validation is by far the simplest and most cost-effective method for evaluating accessibility. Most online automatic validation tools systematically crawl through websites measuring compliance through examining the code structure of the website.
Although this is a very easily implemented and cost-effective strategy, sites that function as applications rather than the more traditional information websites rate poorly. Websites such as Facebook initially open with a login screen asking for a username and password - a common occurrence in restricted web applications. The crawler would not have links to bypass this page, therefore rendering the site non-compliant.
A solution to this may be to temporarily disable web security during the testing and development phases. Another may be to use a client-based validator which will follow a user’s navigation path through the website; however, this process has limitations, as only pages visited by the user will be checked.
The use of automated checkers appears to be an effective method of detecting syntactic errors in coding. Killam and Holland (2001) note that in traditional information-based websites automatic checkers are less likely to miss accessibility issues. However, automated checkers do not detect or warn users about formatting, cascading style sheet, display or colour errors (Rowan et al, 2000). Automated checkers are also known to have difficulties in evaluating non-English websites (Cooper & Rejmer, 2001). None of the currently available tools check reading order, or how the website will be interpreted by a screen reader (Cooper & Rejmer, 2001).
2.2.2 Manual Evaluation against the WCAGv2
The method of manually checking a website against WCAGv2 criteria, although more cost-effective than user testing, requires more labour, in terms of training and implementation, than the use of automated validation. Familiarity with the WCAGv2 and consistency are vital for a person undertaking the role of evaluator, as this approach runs the risk of being very subjective. Manual evaluation is likely to identify a wider range of accessibility issues (Lang, 2003); however, it is less likely to highlight usability issues which may prevent users, with or without a disability, from completing their task (Killam & Holland, 2001). It has also been noted that manually checking a large number of pages is not practical and can lead to the overlooking of pages or inconsistent application of criteria (Rowan et al, 2000).
2.2.3 User-Based Testing
User-based testing is generally regarded as the most accurate method of accessibility testing. Although authors debate the specific methodology involved in user testing, the general concept remains consistent: a test group of users systematically work through the website, testing usability and accessibility from their point of view.
As with all testing methods, user-based testing has its limitations, and users are likely to return accessibility issues specifically related to their particular needs. A group of test subjects with vision impairment are likely to focus their feedback around text size and colour contrast whereas a test group consisting of people who have dyslexia are more likely to focus on text content, writing styles and menu systems as possible issues (LaPlant et al, 2001).
Regardless of the nature of test groups, user-based testing is likely to be the most expensive of the three methods and also poses the added challenge of finding a large enough group of diverse, experienced testers to challenge the accessibility of the website. However, this method is an effective way to uncover usability issues that affect all users, both with and without a disability.
2.3 Limitations of the WCAGv2
Colwell & Petrie (1999) investigated the accessibility of web pages developed under the WCAG guidelines. They compared the different web pages in relation to different browsers and screen readers using a test group of 15 users with vision impairment. The results showed that even though the web pages were WCAG-compliant, some major usability issues still persisted. Six out of the 15 users could not view the “ALT” text that was available (this appeared to be linked to the test subjects’ experience). Other results showed that some deviations away from the WCAG guidelines actually improved accessibility.
Colwell & Petrie (1999) remarked that companies following the WCAG guidelines could develop a false sense of security as simply passing the WCAG criteria does not necessarily make a website accessible. As most western countries reference the WCAGv2 as the recognised legal document for website accessibility, this is cause for concern. Rowan et al. (2000) affirm that although guidelines provide a good starting point, common sense and user testing are the most effective way to carry out accessible development. Unreflective adherence to the WCAGv2 or any other guidelines, especially in the dynamic and creative field of website development, will lead to restricted and inferior products (Sloan et al. 2006).
3 Methodology
Assessment criteria were selected to test the compliance with DDA standards of the websites of the top 20 Australian companies and the 20 Australian Federal Government portfolios. Websites were examined manually in order to assess compliance with each of the criteria.
3.1 Selection of websites
The AHREOC (1999) states that, “Equal access for people with a disability in this area is required by the DDA where it can reasonably be provided...” By choosing the top 20 Australian companies, financial hardship as a defence for noncompliance can be eliminated.
Companies can be ranked in a variety of ways, including company wealth (assets), number of employees, turnover, net profit, physical land, etc. The Australian Stock Exchange ranks the top 200 traded publicly listed companies; however, this measurement has limited validity as it is a measure of stock trading and neglects other influencing factors of size or wealth. Therefore, for the purpose of this paper the top 20 Australian companies will be derived from the Thomson Financials world scope database. The Thomson list is derived from roughly 1,800 publicly traded Australian companies. Companies are ranked into four equally weighted lists of biggest sales, profit, assets and market value. Companies receive points based on their rank within each category. If a company does not appear in any of the four lists, they will receive no points for that category. Rank positions are then summed to create the final top 20 companies list.
3.2 Assessment criteria
The WCAGv2 covers a wide range of requirements and recommendations for making website content more accessible. These guidelines cover coding, colours, size, accessibility, media, error correction and business logic. Following the WCAGv2 guidelines will ensure content is accessible to a wider range of users including those with disabilities. The guidelines specifically target vision impairment, hearing impairment, learning disabilities, cognitive limitations, physical disabilities, speech disabilities, photosensitivity and combinations of these conditions. WCAGv2 criteria have been written as non-technology-specific testable statements allowing for application across various mediums.
For the purpose of this paper, twelve criteria have been selected directly from the WCAGv2 based on experience and observations of web development industry practices. Although the chosen criteria are based on the WCAGv2, by no means are they a complete substitution for the WCAGv2. This means it is possible for a website to pass all twelve criteria used in this paper and still not meet the WCAGv2 standard. However, if a website fails any one of the chosen criteria, the website has failed to meet the WCAGv2 standard.
3.2.1 Criterion 1 – W3C validation service
Most web documents are written using a markup language such as HTML or XHTML. These markup languages are defined in the technical specifications covered in the International Standard ISO/IEC 15445-HyperText Markup Language and the International Standard ISO 8879-Standard Generalized Markup Language. These technical specifications include detailed rules regarding syntax or grammar in relation to specific elements within a document. These rules include which elements can be contained inside which elements as well as what types of data can be contained inside a specific element.
The W3C markup validation service (http://validator.w3.org/) is a free web application produced by the World Wide Web Consortium (W3C) which allows the user to enter the URL of a publicly accessible website and check whether the website meets the technical specification of the specific markup language. The W3C validator can process documents written in most markup languages including HTML 1.0 – 4.01, XHTML 1.0 and 1.1, MathML, SMIL, SVG 1.0 and 1.1. In addition to being a syntax error detector the W3C validator will check some (but not all) of the accessibility specifications specified by the WCAGv2.
A website will be deemed to have failed on Criterion 1 if the website is found to have any errors after being passed through the W3C validator.
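This check can be automated against the validator's machine-readable interface. The sketch below uses the current W3C Nu HTML Checker's JSON output rather than the interactive form described above; the endpoint, parameters and response layout are assumptions about that public service and are not taken from the paper.

```python
# Probe of Criterion 1 via the W3C Nu HTML Checker (https://validator.w3.org/nu/).
# Assumes the requests library is installed and the service is reachable.

import requests

def passes_criterion_1(url):
    resp = requests.get(
        "https://validator.w3.org/nu/",
        params={"doc": url, "out": "json"},
        headers={"User-Agent": "accessibility-audit-sketch"},
        timeout=30,
    )
    messages = resp.json().get("messages", [])
    errors = [m for m in messages if m.get("type") == "error"]
    return len(errors) == 0      # any reported error fails Criterion 1
```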
3.2.2 Criterion 2 – Images without “ALT” tags
Section 1.4.5 of the WCAGv2 specifies that websites should not contain images of text, the exception being when the images can be visually customized to the user’s requirements. This one section of the WCAGv2 alone results in non-compliance from nearly every website.
3.2.3 **Criterion 3 – Minimum colour contrast**
Section 1.4.3 of the WCAGv2 specifies that the text on a website should have a contrast ratio of at least 4.5:1 for AA standard and 7:1 for AAA standard. The only exception to this is logos and trademarks, in which no minimum colour contrast applies and large text (18pt and above) in which a lower contrast ratio of at least 3:1 is required.
**Colour brightness formula:**
\[
\frac{(\text{Red} \times 299) + (\text{Green} \times 587) + (\text{Blue} \times 114)}{1000}
\]
**Colour difference formula:**
\[
(\max(\text{Red}_1, \text{Red}_2) - \min(\text{Red}_1, \text{Red}_2)) + (\max(\text{Green}_1, \text{Green}_2) - \min(\text{Green}_1, \text{Green}_2)) + (\max(\text{Blue}_1, \text{Blue}_2) - \min(\text{Blue}_1, \text{Blue}_2))
\]
For a website to pass criterion 3, the text colour of all text on the home page and the “about us” page must reach at least AA standard by having a brightness difference greater than 125 and a colour difference greater than 500.
*Source:* [http://snook.ca/technical/colour_contrast/colour.html](http://snook.ca/technical/colour_contrast/colour.html)
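The two formulas and the thresholds above translate directly into code; a minimal transcription follows, with colours given as 0–255 RGB triples.

```python
# Criterion 3 check: brightness difference > 125 and colour difference > 500.

def brightness(rgb):
    r, g, b = rgb
    return (r * 299 + g * 587 + b * 114) / 1000

def colour_difference(c1, c2):
    return sum(max(a, b) - min(a, b) for a, b in zip(c1, c2))

def passes_criterion_3(text_colour, background_colour):
    return (abs(brightness(text_colour) - brightness(background_colour)) > 125
            and colour_difference(text_colour, background_colour) > 500)

assert passes_criterion_3((0, 0, 0), (255, 255, 255))            # black on white
assert not passes_criterion_3((119, 119, 119), (255, 255, 255))  # mid grey on white
```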
3.2.4 **Criterion 4 – Text size increase**
Section 1.4.4 of the WCAGv2 specifies that, with the exception of captions and images of text, the user should be able to increase the size of the text by 200 percent without the loss of content or functionality. For the purpose of this test the definition of "loss of content or functionality" will be defined as: the text should be clear to read by not overflowing over another element, background image or other text.
A website will fail on Criterion 4 if, by increasing the text size by 200 percent, there is a loss of content or functionality or if the website has restricted the user from adjusting the text size by using specified font sizes in their style sheets.
3.2.5 **Criterion 5 – Flash / PDFs as content**
The document "Techniques for WCAGv2: Techniques and Failures for Web Content Accessibility Guidelines 2.0" specifies the accessibility best practices for Flash and PDF development. Included in the specification is the requirement that any Flash and PDF text content needs to be accessible for assistive technologies, including but not limited to: Job Access With Speech (JAWS) (4.5 and newer), Window-Eyes (4.2 and newer), Non Visual Desktop Access (NVDA), ZoomText (8 and newer).
For the purpose of this paper, a website will fail against criterion 5 if JAWS 4.5 cannot read any text contained in Flash or PDF documents. In the event that the website does not contain any Flash or PDF documents then the website will be considered to have passed Criterion 5.
3.2.6 **Criterion 6 – Breadcrumbs**
Breadcrumbs are a series of hyperlinks showing the user’s position and history within the website. Section 2.4.5 of the WCAGv2 specifies that there must be more than one way to locate a page within a website, with the exception of pages which are the result of a process. Section 2.4.8 of the WCAGv2 states that the user should be able to easily identify where he/she is in the website.
For the purpose of this paper a website will be regarded as failing on criterion 6 if it does not display a breadcrumb trail for pages deeper than two levels in the navigation tree.
3.2.7 **Criterion 7 – Time dependent menus**
Principle 2 of the WCAGv2 states that the website’s user interface components and navigation must be operable. Specifically, this paper is assessing the functionality of dynamic menus. Many dynamic menus are built using a timer. Hence, if the user is a slow reader or is unable to move the mouse quickly, timed menus can make a website unusable. To test this criterion, dynamic menus will be navigated by moving the mouse pointer at a slow, uniform speed over the menu. To pass this criterion, a website’s dynamic menus must be operable at slow speed. The website will automatically pass against criterion 7 in the event that the website does not have any dynamic menus.
3.2.8 **Criterion 8 – URL error detection**
Missing pages or 404 errors can be caused by users typing in a webpage URL incorrectly or, on occasion, by poor web content or link management. Section 3.3.3 of the WCAGv2 states that any user input error should be met with a correct usage suggestion. In the situation where a user misspells the “Contact Us” URL, the website should redirect the user to a “Page not found” page which will, in turn, suggest where the user will find the “Contact Us” page. This criterion will be tested through manual attempts to access the “Contact Us” and “About Us” pages by misspelling the page URL by one character.
To pass against criterion 8 a website will need to either catch the error and provide a URL suggestion, or include a site map in a “Page not found” page. If the website does not catch the 404 error or provide a “page not found” page it will fail against criterion 8.
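The paper tests this criterion manually; a rough scripted approximation is sketched below, assuming the requests library and using a deliberately misspelled "Contact Us" path as the probe (the path name is illustrative).

```python
# Rough probe of Criterion 8: request a URL misspelled by one character and see
# whether the response offers a suggestion or a site map. Illustrative only; the
# paper's judgement of whether an error page is helpful was made manually.

import requests

def probe_missing_page(base_url, misspelled_path="/contcat-us"):
    resp = requests.get(base_url.rstrip("/") + misspelled_path, timeout=30)
    body = resp.text.lower()
    return {
        "status_code": resp.status_code,
        "serves_not_found_page": resp.status_code == 404 or "not found" in body,
        "offers_suggestion_or_sitemap": ("contact" in body or "site map" in body
                                         or "sitemap" in body),
    }
```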
3.2.9 **Criterion 9 – Page titles**
Section 2.4.2 of the WCAGv2 states that all pages must have meaningful page titles that describe the topic or purpose of the page. This criterion will be tested by navigating through the website and observing whether the page title changes from page to page. A website will fail on criterion 9 if the page titles do not change or if the developer has not specified a page title.
3.2.10 **Criterion 10 – Use of PDF / Flash forms**
PDF and Flash solutions for data entry forms create usability issues for people with text readers or users who require magnification. A website will fail on criterion 10 if the forms used in the “Search” or “Contact Us” functionality are found to be built using Flash or PDF technology.
A website will pass on this criterion if there are no forms present on the website or if the forms have been built using traditional HTML.
3.2.11 Criterion 11 - Form sample answers
Section 3.3.5 of the WCAGv2 states that user input forms must contain sample answers, assuming the sample answers do not jeopardize the security or validity of the input / form. To pass against criterion 11, websites will need to have sample answers in the “Contact Us” forms and search forms. In the event that neither form is present the website automatically passes criterion 11.
3.2.12 Criterion 12 - Form validation and bypass
Section 3.3.6 of the WCAGv2 states that all forms must provide error identification / validation. This type of validation is designed to, for example, stop a user from accidentally inputting a letter in a telephone number field, or to warn a user that he/she has entered an incorrect piece of data or omitted data. The WCAGv2 also states in section 2.4.1 that the user should be able to bypass any blocks. An example of a failure to provide a bypass is a website that features a compulsory home telephone number field.
This criterion will be tested in the context of a form on the website. Data that does not correspond to the prescribed fields will be entered and the website will be expected to provide an error message. If the website displays an error message, a bypass route will be sought.
A website will pass against criterion 12 if the form has validation and a bypass mechanism, or if the website does not contain a form. For the purposes of this paper, the authors will recognise the organisation’s contact details as a bypass mechanism.
4 Results
The results are displayed in Tables 1 and 2 below. Both tables show criteria one through twelve along the top and indicate a pass or a fail for each criterion with a ‘tick’ or a ‘cross’ respectively.
Table 1 shows the largest 20 private Australian companies as derived from the Thomson Financials world scope database represented as A – T.
Table 2 shows the 20 organisations which make up the Australian federal portfolio represented as A – T.
Table 1: 20 largest Australian private companies (websites A–T, pass/fail against criteria 1–12)
Table 2: Australian federal government portfolio (websites A–T, pass/fail against criteria 1–12)
5 Discussion
When reviewing the results it is important to remember that the criteria are not comparable, and that individuals with different needs will place different importance on certain criteria. A user who relies on a screen reader will regard criterion one (W3C checker) as of higher importance than colour contrast (criterion 3), however this may not be the same for another user.
The Australian government is in the process of enforcing the WCAGv2, and this is evident with three government portfolios achieving a pass in criterion one. A number of websites, both government and private industry, failed on criterion one with only one or two errors. It is possible that when these websites were developed they did meet criterion one (W3C checker) but through normal content editing and content changing, minor mistakes were made, resulting in the website failing to meet criterion one. Content Management Systems (CMS) have been largely blamed for this; however, it would be unfair to say that this is the CMS’s fault as by and large they are designed to the WCAGv2 specification. A more likely reason for the error is that a content editor has made a process mistake. An example of this would be adding an image without including the “ALT” text: this caused at least two websites to fail criterion one.
This is an issue which can be easily addressed with adequate staff training. Although the authors take issue with the specific level of government dictation and specification, the Canadian system of specifying website management roles (developer, content manager, designer) and assigning legal responsibility has merit. Companies and government departments would benefit from assigning specific individuals the responsibility of maintaining sections of website accessibility.
Criteria 10, 11 and 12 are based around the accessibility of web forms. There is no legal requirement for a corporate or a government website to include a “contact us” form, and it was noted that the government portfolio websites were less likely to include them. This is a limitation of the methodology used in this paper, in that the criteria used reward websites with less functionality. Because of this, it is in an organisation’s interest to limit the use of technically “clever” designs, as these increase the likelihood of creating accessibility issues. Taking the example in criteria 11 and 12 surrounding the provision of sample answers to form questions: by providing the example of ‘Joe Bloggs’, it could be argued that the user may be inclined to copy the example rather than entering their own data, raising questions around the validity of the form. The WCAGv2 also instructs that a “bypass” capability should be available for required fields. Taken literally, this means that a user who mistypes a password confirmation field should be able to bypass the confirmation step entirely. This is an example where following the accessibility guidelines too closely will result in an inaccessible website.
The results show that there is a general trend for federal government websites to be more accessible than websites in private enterprise. Partly this can be explained by federal government’s unwillingness to use “contact us” forms and technically challenging designs. Website accessibility is a complicated problem and is specific to individual users, therefore, as website content keeps changing it will be near impossible to make a completely accessible website. That being said, it is the authors’ belief that there is no excuse for making a website which is syntactically flawed, and that passing the W3C automated checker should become an industry standard.
DEFINING OPEN SOURCE SOFTWARE PROJECT SUCCESS
Kevin Crowston, Hala Annabi, and James Howison
School of Information Studies
Syracuse University
Syracuse, NY USA
crowston@syr.edu hpannabi@syr.edu jhowison@syr.edu
Abstract
Information systems success is one of the most widely used dependent variables in information systems research. In this paper, we identify a range of measures that can be used to assess the success of open source software (OSS) projects. We identify measures based on a review of the literature, a consideration of the OSS development process and an analysis of the opinions of OSS developers. For each measure, we provide examples of how they might be used in a study of OSS development.
Keywords: Open source software, software development, system success
Introduction
Information systems (IS) success is one of the most widely used dependent variables in information systems research. Not surprisingly, much attention has been given to how best to measure it (e.g., DeLone and McLean 1992, 2002, 2003; Rai et al. 2002; Seddon 1997; Seddon et al. 1999). In this paper, we identify measures that can be applied to assess the success of open source software (OSS) projects based on a brief review of the literature, a consideration of the OSS development process and an analysis of the opinions of OSS developers. Since project success is likely to be a multidimensional construct, especially for OSS, our goal is to present a range of measures for future researchers to consider. As Seddon (1999) says, “a diversity of IS effectiveness measures is to be encouraged.”
The remainder of this paper is organized as follows. To ground our investigations, we first present the research context, namely OSS development. We then briefly review the literature on IS success to see what measures might be adopted and to identify problems in applying others. We then reconsider the underlying vision of system development to identify additional success measures that might be appropriate for OSS. For each measure, we provide examples of how they might be used in the context of a study of OSS. Finally, we compare our selection of measures to the opinions of OSS developers as expressed on SlashDot, a popular Web-based discussion board (http://slashdot.org/). The comparison suggests additional measures that might be incorporated to develop a fuller understanding of OSS project success. We conclude by making some suggestions for future research.
Research Context
OSS is a broad term used to embrace software that is developed and released under some sort of “open source” license. There are many licenses with a range of different features (Gacek et al., n.d.), but all allow inspection of the software’s source code. There are thousands of OSS projects, spanning a wide range of applications. Due to their size, success, and influence, the GNU/Linux operating system and the Apache Web Server are probably the most well known, but hundreds of other projects are
in widespread use, including projects on Internet infrastructure (e.g., sendmail, bind), user applications (e.g., the GIMP, OpenOffice), programming languages (e.g., Perl, Python, gcc), and games (e.g., Paradise). Much (though not all) open source software is also free software, in two senses: “free as in speech,” meaning that the code may be freely redistributed and reused in other OSS projects, and “free as in beer,” meaning that the software is available for download without charge. As well, many (though by no means all) OSS developers contribute to projects as volunteers without working for a common organization or being paid. As we will see, these two characteristics have implications for the applicability of certain measures of success.
It is important to develop measures of success for OSS projects for at least two reasons. First, having such measures should be useful for OSS project managers in assessing their projects. In some cases, OSS projects are sponsored by third parties, so measures are useful for sponsors to understand the return on their investment. Second, OSS is an increasingly visible and copied mode of systems development. Millions of users depend on OSS systems such as Linux (and the Internet, which is heavily dependent on OSS tools), but as Scacchi (2001) notes, “little is known about how people in these communities coordinate software development across different settings, or about what software processes, work practices, and organizational contexts are necessary to their success.” A recent EU/NSF workshop on priorities for OSS research identified the need both for learning “from open source modes of organization and production that could perhaps be applied to other areas” and for “a concerted effort on open source in itself, for itself” (Ghosh 2002). But to be able to learn from teams that are working well, we need to have a definition of “working well.”
**Literature Review**
OSS is a form of system development, so we begin our hunt for success measures in the Information Systems (IS) literature. Note, however, that we are not attempting an exhaustive review of this extensive literature, but rather are using the literature to identify success measures relevant to OSS. The most commonly cited model for IS success is the one developed by DeLone and McLean (1992, 2002, 2003), shown in Figure 1. This model suggests six interrelated measures of success: system quality, information quality, use, user satisfaction, individual impact, and organizational impact. Seddon (1997) proposed a related model that includes system quality, information quality, perceived usefulness, user satisfaction, and IS use. Taken together, these models suggest a number of possible measures that could be applied to OSS.

**System and Information Quality**
*Code quality* has been studied extensively in software engineering. This literature provides many possible measures of the quality of software including understandability, completeness, conciseness, portability, consistency, maintainability, testability, usability, reliability, structuredness, and efficiency (Boehm et al. 1976; Gorton and Liu 2002). To this list might be added the *quality of the system documentation*. Code quality measures would seem to be particularly applicable for studies of OSS, since the code is publicly available. Indeed, a few studies have already examined this dimension. For example, Stamelos et al. (2002) suggested that OSS code is generally of good quality. Mishra et al. (2002) offer an analytic model that suggests factors contributing to OSS code quality, such as number of developers, mix of talent level, etc. The dimension of information quality seems to be less applicable since not many OSS systems include information (data) per se.
**User Satisfaction**
User satisfaction is an often-used measure of system success. For example, it is common to ask stakeholders if they felt a project was a success (e.g., Guinan et al. 1998). There is some data available regarding user satisfaction with OSS projects. For example, Freshmeat, a Web-based system that tracks releases of OSS (http://freshmeat.net/), collects user ratings of projects. Unfortunately, these ratings are based on a non-random sample (i.e., users who take the time to volunteer a rating), making their representativeness suspect. Furthermore, we have observed that the scores seem to have low variance: in a recent sample of 59 projects, we found that scores ranged only from 7.47 to 9.07. It seems likely that users who do not like a piece of software simply do not bother to enter ratings. There do not seem to be any easily obtainable data on the related measures of perceived ease of use and usefulness (Davis 1989). *Opinions expressed on project mailing lists* are a potential source of qualitative data on these facets, although again there would be questions about the representativeness of the data.
In principle, it should be possible to *survey users* to collect their satisfaction with or perceptions of the software. However, to do so properly poses a serious methodological problem. Because most OSS projects are freely distributed through multiple channels, the population of users is unknown, making it impossible to create a true random sample of users. In this respect, OSS differs greatly from information systems developed in an organizational setting that have a clearly defined user population. The situation is also different than for the Web, another non-traditional systems environment, because with a Web site, users are by definition the ones who visit the site, making the population effectively self-identifying. To achieve the same effect for OSS, although it may annoy some users, the best solution might be to build the survey into the software. For example, recent versions of the Mozilla Web browser include a program that offers to report crashes and collect other feedback.
**Use**
Although there is some debate about its appropriateness (DeLone and McLean 2003; Seddon 1997), many studies employ system use as an indication of information systems success. For software for which use is voluntary, as is the case for most OSS, use seems like a potentially relevant indicator of the project’s success. Some interesting data are available. Avery Pennarun’s *Debian Popularity Contest* (http://people.debian.org/~apenwarr/popcon/) collects statistics on the usage of software on Linux machines running the Debian distribution. Users install a program that collects and reports usage information daily and the resulting statistics show which packages have been installed and which of these have been recently used. Unfortunately, these data are collected from a nonrandom sample of machines, running a particular Linux distribution, so the results are likely not representative of use in the broader population.
Rather than measuring actual use, it may be sufficient to count the actual or potential *number of users* of the software, which we label *popularity* (Stewart and Ammeter 2002). For rare projects, these numbers can be directly measured. For example, Netcraft conducts a survey of Web server deployment (http://news.netcraft.com/archives/webserver_survey.html), which estimates the market share of the Apache Web server. Other projects that require some kind of network connection could potentially be measured in the same way (e.g., instant messaging or peer-to-peer file sharing clients), but this approach does not seem to be widely applicable.
A simple measure of popularity is the *number of downloads* made of a project. These numbers are readily available from various sites. Of course not all downloads result in use, so variance in the conversion ratio will make downloads an unreliable indicator of use. Furthermore, because OSS can be distributed through multiple outlets, on-line as well as offline (e.g., on CDs), the count from any single source is likely to be quite unreliable as a measure of total users. A particularly important channel is distributions such as RedHat, SuSE or Debian. Distributions provide purchasers with preselected bundles of software packaged for easy installation and are often sold on a CD-ROM to obviate the need to download everything. Indeed, the most popular software might be downloaded only rarely because it is already installed on most users’ machines and stable enough to not require the download of regular updates. While the inclusion of software in a distribution might confound the usefulness of counts of downloads, the selection processes undertaken by distributions reflect expert judgment on the worth of the packages. Therefore *inclusion in distributions* is an independently important success measure.
Other sources of data reflecting on users are available. Freshmeat provides a popularity measure for packages it tracks, though a better name might be interest, as it is one step further removed from actual use. The measure is calculated as the geometric mean of subscriptions and two counts of page viewings of project information (for the precise details, see http://freshmeat.net/faq/view/30/). Similarly, SourceForge provides information on the number of page views of the information pages for projects it supports.
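The precise formula is documented in the Freshmeat FAQ cited above; the following Python sketch only illustrates the idea of a geometric mean over subscription and page-view counts, using made-up numbers.

```python
# Illustrative only: geometric mean of hypothetical interest counts.
def geometric_mean(values):
    product = 1.0
    for v in values:
        product *= v
    return product ** (1.0 / len(values))

subscriptions, record_views, url_views = 120, 4500, 1800  # invented counts
popularity = geometric_mean([subscriptions, record_views, url_views])
print(round(popularity, 1))
```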
Finally, it may be informative to measure use from perspectives other than that of an end user. In particular, the openness of OSS means that other projects can build on top of it. Therefore, one measure of a project’s success may be that many other projects use it. Package dependency information between projects can be obtained from the package descriptions available through the various distributions’ package management systems. Analysis of source code could reveal the reuse of code from project to project (although identifying the true origin could be difficult, as SCO’s claim of ownership of parts of the Linux kernel demonstrates).
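As a hedged illustration of how such a count might be derived (the package names and dependency data below are invented, not drawn from any real distribution), a reverse-dependency tally can be computed from a package-to-dependencies mapping:

```python
# Count how many other packages depend on each project, as a rough
# "used by other projects" indicator. Data below is purely hypothetical.
from collections import Counter

dependencies = {
    "webapp":     ["libpng", "zlib", "openssl"],
    "gameengine": ["libpng", "zlib"],
    "mailer":     ["openssl", "zlib"],
}

reverse_deps = Counter(dep for deps in dependencies.values() for dep in deps)
for package, count in reverse_deps.most_common():
    print(package, count)  # zlib 3, libpng 2, openssl 2
```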
**Individual or Organizational Impacts**
The final measures in DeLone and McLean’s (1992) model are individual and organizational impacts for the users. Although there is considerable interest in the economic implications of OSS, these measures are hard to define for regular I/S projects and doubly hard for OSS projects, because of the problems defining the intended user base and expected outcomes. Therefore, these measures are likely to be unusable for most studies of individual OSS projects.
**Summary**
To summarize, existing models of information systems success suggest a range of potential success measures for OSS projects as shown in Table 1. However, a number of the measures are inapplicable, while others are difficult to apply in the OSS environment. We note that many of these measures are based on a vision of system development in an organization and do not take into account the unique characteristics of the OSS development environment. Therefore, in the next section, we examine the process of OSS development in more detail to identify additional potential measures of success.
| Measure of Success | Indicators | Audience |
|---|---|---|
| System and information quality | Code quality (e.g., understandability, completeness, conciseness, portability, consistency, maintainability, testability, usability, reliability, structuredness, efficiency); documentation quality | Users, developers |
| User satisfaction | User ratings; opinions on mailing lists; user surveys | Users, developers |
| Use | Use (e.g., Debian Popularity Contest); number of users; downloads; inclusion in distributions; popularity or views of information page; package dependencies; reuse of code | Developers |
| Individual and organizational impacts | Economic and other implications | Users, developers |
The Process of OSS Development
In this section, we reexamine the vision of systems development underlying DeLone and McLean’s success model to identify additional measures that might be used for OSS project success. DeLone and McLean state that their model was built by considering “a process model [that] has just three components: the creation of a system, the use of the system, and the consequences of this system use” (2002), which we have shown graphically in Figure 2. We note that the measures included in the model focus on the use and consequences of the system (the right side of the figure), and do not open up either box in the process. While this focus may be appropriate given the traditional concern of information systems research with the organizational implication of IS, it seems to unduly restrict the range of measures considered.
The choice of measures also seems to be influenced by the relative ease of access to the use environment compared to the development environment (especially true for packaged or commercial software). In the context of OSS, however, researchers are frequently faced with the opposite situation, in that the development process is publicly visible and the use environment is difficult to study or even identify. For both reasons, we believe that it will be useful to complement existing success measures with ones that take advantage of the availability of data on the development process. The following discussion examines such measures of success, some of which have been previously discussed in the information systems literature.
Measures of the Output of Systems Development
Two of the measures in the DeLone and McLean model concern the product of the systems development process, namely systems quality and information quality. We first consider possible additional measures of this process step.
First, given the large number of abandoned projects (Ewusi-Mensah 1997), simply completing a project may be a sign of success. However, many OSS projects are continually in development, making it difficult to say when they are completed. Faced with this problem, Crowston and Scozzi (2002) instead measured success as the progress of a project from alpha to beta to stable status, as self-reported on SourceForge.
Second, another commonly used measure of success is whether the project achieved its goals. This assessment is typically made by a comparison of the project outcomes with the formal requirements specifications. However, OSS projects often do not have such specifications. Scacchi (2002) examined the process of requirements engineering in open source projects and provided a comparison with traditional processes (e.g., Davis 1990; Jackson 1995). He argues that rather than a formal process, OSS requirements are developed through what he terms software informalisms, which do not result in agreed requirements documentation that could later be analyzed to see whether the project has met its goals. Scacchi’s ethnography suggests that for OSS, goals will likely come from within through a discursive process centered on the developers. Therefore, a key measure for OSS may be simply developer satisfaction with the project, which could be measured by surveying developers. The developer community is much more clearly delineated than users, making such a survey feasible. Indeed, there have already been several OSS developer surveys (e.g., Ghosh 2002; Hertel et al. n.d.), although not on this topic specifically. Since in many projects there is a great disparity in the contribution of developers—a few developers contribute the bulk of the code (Mockus et al. 2000)—it may be desirable to weight developers’ opinions in forming an overall assessment of a project.
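One hypothetical way to implement such weighting (the ratings and commit counts below are invented for illustration) is a contribution-weighted average of developer satisfaction scores:

```python
# Weight each developer's satisfaction rating by their share of commits.
ratings = {"alice": 4.5, "bob": 3.0, "carol": 5.0}   # satisfaction on a 1-5 scale
commits = {"alice": 320, "bob": 15, "carol": 65}      # proxy for contribution

total_commits = sum(commits.values())
weighted_satisfaction = sum(
    ratings[dev] * commits[dev] / total_commits for dev in ratings
)
print(round(weighted_satisfaction, 2))  # dominated by the most active developer
```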
Measures of the Process of Systems Development
In DeLone and McLean’s (1992) process model, systems development is implicitly treated as a one-off event. However, for OSS projects (and indeed many other types of projects) development is instead an ongoing activity, as the project continues to release “often and early” (Raymond 1998). In other words, an OSS project is characterized by a continuing process of developers fixing bugs, adding features, and releasing software. This characteristic of the OSS development process suggests a number of possible indicators of success.
**Number of Developers**
First, since many OSS projects are dependent on volunteer developers, being able to attract developers to a project on an on-going basis is important for their success. Thus the number of developers involved in a project could be an indicator of success. The number of developers can be measured in at least two ways. OSS development systems such as SourceForge list developers who are formally associated with each project. Examination of the mailing lists and other fora associated with projects can reveal the number of individuals who actively participate.
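As a rough sketch of the second approach, assuming the project’s developer list has been archived to a local mbox file (the path below is hypothetical), Python’s standard mailbox module can count distinct posters:

```python
# Estimate active participants by counting distinct senders on the list.
import mailbox
from email.utils import parseaddr

senders = set()
for message in mailbox.mbox("dev-list-archive.mbox"):  # hypothetical archive
    _, address = parseaddr(message.get("From", ""))
    if address:
        senders.add(address.lower())

print(len(senders), "distinct posters on the developer list")
```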
**Level of Activity**
More important than the sheer number of developers is their contribution to a project. Thus the level of activity of developers in submitting code and bug reports may be useful as an indicator of project success. For example, SourceForge computes and reports a measure of project activity based on the activities of developers. Researchers could also examine development logs for evidence of software being written and released.
**Cycle Time**
Another measure related to the group activity is time between releases. In OSS development, there is a strong community norm to “release early and release often,” which implies that an active release cycle is a sign of a healthy development process and project. For example, FreshMeat provides a “vitality score” (Stewart and Ammeter 2002) that assesses how recently a project has made an announcement of progress on the FreshMeat site (http://freshmeat.net/faq/view/27/).
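A minimal sketch of the cycle-time idea, using invented release dates rather than data from any real project, is simply the distribution of gaps between successive releases:

```python
# Compute gaps between successive releases as a cycle-time indicator.
from datetime import date
from statistics import median

releases = [date(2003, 1, 10), date(2003, 2, 2),
            date(2003, 2, 25), date(2003, 4, 1)]  # hypothetical release history

gaps = [(later - earlier).days
        for earlier, later in zip(releases, releases[1:])]
print(gaps, "median days between releases:", median(gaps))
```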
In addition, detailed examination of bug-fixing and feature-request fulfillment activities might yield useful process data indicative of the project’s status. These processes involve interaction with the user community and can involve applying patches of contributed code supplied by non-core developers. Bug reports and feature requests are typically managed through a task-management system that records the developer and community discussion, permits labeling of priority items, and sometimes includes informal voting mechanisms to allow the community to express its level of interest in a bug or new feature.
We are currently exploring an analysis of the time to close bugs (or implement requested features) as a measure of project success. To collect data on bugs, we spidered the bug report data from SourceForge and extracted the length of time taken to fix bugs as well as other characteristics of each bug (priority, issue area, and explicit developer assignment). Our preliminary analysis of the bug and project-level data suggests that this measure shows interesting variance between projects and so may be a useful measure of this dimension of success.
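As an illustrative sketch only (the bug records below are invented, not the spidered SourceForge data), the time-to-close measure reduces to the elapsed time between a bug’s opened and closed timestamps:

```python
# Median time to close a bug, computed from (opened, closed) timestamps.
from datetime import datetime
from statistics import median

bugs = [  # hypothetical tracker records
    (datetime(2003, 3, 1, 9, 0),  datetime(2003, 3, 3, 17, 0)),
    (datetime(2003, 3, 2, 12, 0), datetime(2003, 3, 20, 8, 0)),
    (datetime(2003, 3, 5, 10, 0), datetime(2003, 3, 6, 10, 0)),
]

days_to_close = [(closed - opened).total_seconds() / 86400
                 for opened, closed in bugs]
print("median days to close a bug:", round(median(days_to_close), 1))
```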
**Project Effects on Projects**
Finally, because the projects are on-going, it seems important to consider the impact of a project on the abilities of the project team itself and its ability to continue or improve the development process. As Shenhar et al. put it, “how does the current project help prepare the organization for future challenges?” (2001).
**Employment Opportunities**
Some literature on the motivation of OSS developers suggests that developers participate to improve their employment opportunities (e.g., Lerner and Tirole 2000). Thus, one can consider salary (Hann et al. 2002) or jobs acquired through involvement in a particular project as possible measures of success. For example, Hann et al. (2002) found that higher status within the Apache Project was associated with significantly higher wages. Again, one might measure these indicators by surveying developers. While for a single developer these measures are confounded with innate talent, training, luck, etc., aggregating across many developers and across time may provide a useful project-level measure of success.
Individual Reputation
Similarly, literature also suggests that developers participating in OSS projects are rewarded with reputation in the community, and that this reputation is a sufficient reward for interaction. Kelty (2001) suggests that reputation might be measured through an analysis of credits located in source code (which he terms “greputation”). Alternative measures of OSS reputation might include the OSS communities’ implementation of a “Web of Trust” at the community site Advogato (http://www.advogato.org/trust-metric.html) where developer status is conferred through peer-review. Analyses of this kind of measure face the difficulty of tying the earning of reputation to the success of a particular project.
Knowledge Creation
Projects can also lead to creation of new knowledge for individuals as well as on the group level (Arent and Nørbjerg 2000). Through their participation in a project, individual developers may acquire new procedural and programming skills that would benefit them on future projects. This effect could be measured by surveying the developers for their perceived learning.
In addition, following Grant’s (1996) knowledge-based view of the firm, we view a firm (or in this case, a project) as a structure to integrate members’ knowledge into products. In this view, the project’s rules, procedures, norms and existing products are a reflection of knowledge being created by the project activities. This knowledge creation can be measured by observing and qualitatively analyzing changes in the written rules and procedures over time and may be reflected and transferred through the development of systems for OSS project support, such as SourceForge and Savannah. Analysis of the development of interactions and support systems closely linked to a project might give some insight into this aspect of project success.
Summary
In summary, consideration of the process of developing OSS suggests a number of additional measures indicative of success for these projects. These measures are summarized in Table 2. We note that as the measures move further back in the process model, they become increasingly removed from the user. As such, there may be a concern about their validity as measures of success: Is it a success if a project attracts developers but not users? Or if it develops high quality processes but not high quality code? We have two replies to this concern. First, the apparent disconnect may be an accurate representation of the reality of OSS projects, in which the developers frequently are the users. Second, the measures developed in this section should be viewed as complements to rather than replacements for the more conventional measures of success. Using a variety of measures will provide a richer picture of the status of a project. As well, because many of the measures seem likely to have measurement problems, adopting a portfolio of measures seems prudent.
| Measure of Success | Indicators | Audience |
|---|---|---|
| Project output | Movement from alpha to beta to stable; achieved identified goals; developer satisfaction | Developers |
| Process | Number of developers; level of activity (developer and user contributions, number of releases); time between releases; time to close bugs or implement features | Developers, users |
| Outcomes for project members | Individual job opportunities and salary; individual reputation; knowledge creation | Developers |
Open Source Developer Opinions
In the previous two sections, we developed a list of possible success measures for OSS projects based on a review of the literature and consideration of a simple model of OSS development. To determine whether these measures had content validity as indicators of OSS project success and to identify additional possible measures, we sought input from OSS developers. The goal of the study was to assess the completeness and validity of our list of factors, rather than the relative importance of each one or the relations among them. Therefore, we chose a method to generate a range of ideas rather than one to support statistical inference. In this section, we discuss our data elicitation and analysis techniques and results from the analysis.
Methods and Data
We submitted a question to the moderators of SlashDot (http://slashdot.org/), a Web-based discussion forum. SlashDot attracts interest and participation from OSS developers and users. The question was judged to be of interest to the audience and posted on the front page of the site. This data elicitation technique was more like an on-line focus group (or perhaps the initial stage of a Delphi study) than a survey, as respondents were a nonrandom sample and could see and respond to earlier postings. This approach was chosen to match our goal of generating ideas about success measures, rather than testing a theory or making inferences from generalizable data. To elicit comments, the following question was posted on SlashDot on April 22, 2003 (http://slashdot.org/article.pl?sid=03/04/21/239212):
> There have been a number of discussions on Slashdot and elsewhere about how good projects work (e.g., Talk to a Successful Free Software Project Leader), but less about how to tell if things are going well in the first place. While this may seem obvious, most traditional definitions of software project success seem inapplicable (e.g., profit) or nearly impossible to measure for most projects (e.g., market share, user satisfaction, organizational impact). In an organizational setting, developers can get feedback from their customers, the marketplace, managers, etc.; if you’re Apache, you can look at Netcraft’s survey of server usage; but what can the rest do? Is it enough that you’re happy with the code? I suspect that the release-early-and-often philosophy plays an important role here. I’m asking not to pick winners and losers (i.e., NOT a ranking of projects), but to understand what developers look at to know when things are going well and when they’re not.
The question received 201 responses within a few days.
Participants
Many of the individuals posting answers to our question identified themselves as developers or contributors to OSS projects. As a check on their qualifications, we searched SourceForge (a popular OSS development site) for information about the posters. Although SourceForge and SlashDot are separate sites, many developers have strong attachments to their user IDs and use the same one whenever possible, providing a possible link between the two systems. For example, it seems reasonable to expect that the user ID Abcd1234 identifies the same individual on both systems. We identified the SlashDot IDs of 72 posters who provided useful responses (some responses were anonymous). Of these 72, 34 IDs matched a SourceForge ID exactly, and 6 could be matched with a bit of research (e.g., by matching the real name of the individual; real names are available for a few SlashDot posters and many SourceForge developers). Of the matched IDs, 16 and 3 respectively were listed as members of SourceForge projects (i.e., about half). A few other posters had pointers to non-SourceForge OSS projects on their SlashDot information page or information about their employment, generally as software developers. These data are not conclusive, but do suggest that a number of the contributors to the study had sufficient background as OSS developers to be able to comment knowledgeably.
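The matching step itself is mechanical; the following sketch, using invented user IDs, shows a first-pass case-insensitive exact match between the two ID sets before any manual research:

```python
# First-pass exact matching of SlashDot IDs against SourceForge IDs.
slashdot_ids = {"Abcd1234", "hackerbob", "kernel_jane", "anon42"}   # invented
sourceforge_ids = {"abcd1234", "kernel_jane", "foodev"}             # invented

sourceforge_lower = {f.lower() for f in sourceforge_ids}
matches = {s for s in slashdot_ids if s.lower() in sourceforge_lower}
print(matches)  # {'Abcd1234', 'kernel_jane'}
```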
Analysis
A transcript of responses was downloaded on April 26, 2003, and content analyzed by two coders. The content analysis process was carried out using Atlas-ti, a qualitative data analysis software package. Messages were coded using the thematic unit as the unit of analysis. Once a measure was identified within a message, the coder selected the text containing the measure and coded that text using categories from our coding scheme. A total of 170 thematic units were identified and coded in 91 responses (i.e., some postings contained multiple units; the remaining responses did not contain text addressing the question, e.g., a posting containing an advertisement). The content analysis process employed a mixture of deductive and inductive procedures. The initial content analytic scheme was based on the literature review described above. During the process of content analysis, additional themes emerged from the data. Saturation was reached through the content scheme presented in Appendix A. The two raters agreed on the codes for 78 percent of the units. We felt that this level of agreement was sufficient for the purposes of the analysis (identification of measures to compare to the literature review), so we did not go on to refine the definitions of codes or retrain the coders to increase agreement.
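For readers unfamiliar with the statistic, percent agreement is simply the share of units to which both coders assigned the same code; the toy example below (invented labels, not the study’s data) illustrates the computation:

```python
# Percent agreement between two coders over the same thematic units.
coder_a = ["developers", "users", "process", "users", "product"]
coder_b = ["developers", "users", "recognition", "users", "product"]

agreements = sum(a == b for a, b in zip(coder_a, coder_b))
print(agreements / len(coder_a))  # 0.8 for this toy example
```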
**Results**
The outcome of the content analysis is summarized in Table 3. The codes were organized into a two-level hierarchy for presentation, with detailed codes (level 2 in the table) clustered into meta-categories (level 1). In all, 32 percent of the units included elements from the developers meta-category, indicating that respondents felt a project is successful if its developers are involved, satisfied, and enjoy the process, and if there is a variety of them. The users meta-category also had a large number of responses: 23 percent of units indicated that the poster felt a project was successful if it satisfies users (other than developers) and if the users are involved in discussions and bug reports. Involvement of both users and developers was frequently mentioned, accounting for 31 percent of the units. Project recognition codes were found in 11 percent of the units, exceeding the number of responses indicating use as a measure of success, which accounted for 5 percent of instances. Finally, the product’s quality (13 percent) and process (13 percent) were suggested to be measures of success by respondents as well. Note that percentages are reported only for completeness. Given the nonrandom sample of contributors and the open data elicitation technique, the frequency of a response should not be interpreted as importance. The relative importance of these factors remains to be tested in future research.
<table>
<thead>
<tr>
<th>Level 1</th>
<th>Level 2</th>
<th>Frequency</th>
<th>Percentage</th>
</tr>
</thead>
<tbody>
<tr>
<td>User</td>
<td>Satisfaction</td>
<td>14</td>
<td>8%</td>
</tr>
<tr>
<td></td>
<td>Involvement</td>
<td>25</td>
<td>15%</td>
</tr>
<tr>
<td>Product</td>
<td>Meets requirements</td>
<td>9</td>
<td>5%</td>
</tr>
<tr>
<td></td>
<td>Code quality</td>
<td>11</td>
<td>6%</td>
</tr>
<tr>
<td></td>
<td>Portability</td>
<td>1</td>
<td>1%</td>
</tr>
<tr>
<td></td>
<td>Availability</td>
<td>2</td>
<td>1%</td>
</tr>
<tr>
<td>Process</td>
<td>Activity</td>
<td>5</td>
<td>3%</td>
</tr>
<tr>
<td></td>
<td>Adherence to process</td>
<td>10</td>
<td>6%</td>
</tr>
<tr>
<td></td>
<td>Bug Fixing</td>
<td>4</td>
<td>2%</td>
</tr>
<tr>
<td></td>
<td>Time</td>
<td>2</td>
<td>1%</td>
</tr>
<tr>
<td></td>
<td>Age</td>
<td>1</td>
<td>1%</td>
</tr>
<tr>
<td>Developers</td>
<td>Involvement</td>
<td>16</td>
<td>9%</td>
</tr>
<tr>
<td></td>
<td>Varied developers</td>
<td>2</td>
<td>1%</td>
</tr>
<tr>
<td></td>
<td>Satisfaction</td>
<td>29</td>
<td>17%</td>
</tr>
<tr>
<td></td>
<td>Enjoyment</td>
<td>8</td>
<td>5%</td>
</tr>
<tr>
<td>Use</td>
<td>Competition</td>
<td>4</td>
<td>2%</td>
</tr>
<tr>
<td></td>
<td>Number of users</td>
<td>2</td>
<td>1%</td>
</tr>
<tr>
<td></td>
<td>Downloads</td>
<td>3</td>
<td>2%</td>
</tr>
<tr>
<td>Recognition</td>
<td>Referral</td>
<td>3</td>
<td>2%</td>
</tr>
<tr>
<td></td>
<td>Attention and recognition</td>
<td>9</td>
<td>5%</td>
</tr>
<tr>
<td></td>
<td>Spin offs</td>
<td>6</td>
<td>4%</td>
</tr>
<tr>
<td>Influence</td>
<td></td>
<td>4</td>
<td>2%</td>
</tr>
<tr>
<td></td>
<td>Total</td>
<td>170</td>
<td></td>
</tr>
</tbody>
</table>
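The meta-category percentages quoted above can be reproduced by aggregating the level 2 frequencies in Table 3; the sketch below simply re-tabulates the table’s counts and adds no new data:

```python
# Aggregate level 2 frequencies from Table 3 into meta-category shares.
counts = {
    "User": {"Satisfaction": 14, "Involvement": 25},
    "Developers": {"Involvement": 16, "Varied developers": 2,
                   "Satisfaction": 29, "Enjoyment": 8},
    "Use": {"Competition": 4, "Number of users": 2, "Downloads": 3},
    "Recognition": {"Referral": 3, "Attention and recognition": 9,
                    "Spin offs": 6},
}

total_units = 170
for category, subcodes in counts.items():
    share = sum(subcodes.values()) / total_units
    print(f"{category}: {share:.0%}")  # e.g. Developers: 32%, User: 23%
```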
Discussion
Overall, the responses of the developers posting on SlashDot were in general agreement with the list of success measures we developed from the literature and our reexamination of the process. The analysis indicates that developers found their personal involvement, satisfaction, and enjoyment to be measures of the success of a project, consistent with the view of OSS as “software that scratches an itch.” More interestingly, some new themes did emerge from the coding.
- First, a number of respondents suggested recognition (e.g., mention on other sites) as a measure of project success. Similarly, another suggested measure was the influence of the product or project’s process on other OSS groups and other commercial settings. These responses are consistent with the literature on OSS developers’ motivations that suggest recognition as a primary motivation for involvement.
- A second category that emerged was the level of involvement of the users as indicated by involvement of the users in submitting bug reports and participating in the project mailing lists. We had considered contributions from developers, but these responses reflect the fact that OSS projects are also dependent on help from users to identify problems and post suggestions.
- A final category that emerged from the data was the issue of porting. Developers consider porting of a product to different systems (especially to Windows) and requests for such ports as a measure of the success of the product. This theme might be considered a special case of popularity.
What was also surprising was what respondents did not say, in that respondents did not mention a few of the measures of success we had identified. For example, although several authors have suggested that developers are motivated by the chance to learn and perhaps get a better job, none of the respondents mentioned these factors. A possible explanation is the strong community norm that endorses altruism over expressions of self-interest, which may have restricted discussion in the non-anonymous and community-moderated SlashDot forum.
Conclusion
This paper makes a contribution to the developing body of empirical research on OSS by identifying a collection of success measures that might be applied to OSS. We have identified a range of possible measures by applying a popular model of IS success and by more detailed consideration of the vision of software development underlying that model. Furthermore, we have identified where data is available for particular measures. Finally, we checked our list of factors by comparing it to opinions of community members, which suggested additional measures beyond those we identified and raised questions about others.
We emphasize again that we do not view any single measure as the final word on success. As the measures focus on different aspects of the process, we expect that they will offer different perspectives on the process. Therefore, we suggest using a portfolio of measures, or perhaps developing synthetic measures that draw on different perspectives. Using multiple measures might be particularly interesting for examining how projects change their emphasis from one measure to another at different points in their evolution (Heo and Han 2003).
Acknowledgements
The authors gratefully acknowledge Chengetai Masango for his contribution to the content analysis process.
References
Gacek, C., Lawrie, T., and Arief, B. “The Many Meanings of Open Source,” unpublished manuscript, Centre for Software Reliability, Department of Computing Science, University of Newcastle, Newcastle Upon Tyne, United Kingdom, no date.
Hertel, G., Niedner, S., and Herrmann, S. “Motivation of Software Developers in Open Source Projects: An Internet-Based Survey of Contributors to the Linux Kernel,” unpublished manuscript, University of Kiel, Kiel, Germany, no date.
## Appendix A
### Content Analytic Scheme
<table>
<thead>
<tr>
<th>Level 1</th>
<th>Level 2</th>
<th>Description</th>
<th>Examples</th>
<th>Responses</th>
</tr>
</thead>
<tbody>
<tr>
<td>User</td>
<td>Satisfaction</td>
<td>Users of the product satisfied (code serves their need)</td>
<td>Every piece of software has an intended client, user or audience.</td>
<td>Are the users happy, overall?</td>
</tr>
<tr>
<td></td>
<td>Involvement</td>
<td>Users of the product are involved and interested, by submitting to mailing lists or bug reports or other forms of contribution</td>
<td>Second: I’ve had dozens of email’s asking for support as well as asking how to contribute. Traffic: both developer and user. Is there a relatively continuous level of input/interest in the project? If developers don’t want to develop, and users don’t want to use, it’s probably going nowhere, even if it’s the best thing since the BeOS. More activity in a mailing list usually indicates the size/success of a project. Small error in the makefile which causes something liek [sic] 50% of people [to] come back for help on compiling it. This gives me [a] pretty good estimate of how many people are actually using the package.</td>
<td></td>
</tr>
<tr>
<td>Product</td>
<td>Meets requirements</td>
<td>The product meets the requirement of design</td>
<td>How well does it fit the need for which it was designed?</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Code quality</td>
<td>Code structure and documentation is organized, clear, maintainable</td>
<td>Does what it is supposed to do, cleanly and efficiently, then by definition it is successful. It is well documented and maintained. Is the code maintainable? Take a look at the end product. Does it do what it’s supposed to without too many bugs? Stable in relation to the time invested.</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Portability</td>
<td>Software portable to and compatible with other systems and programs</td>
<td>Successful Open Source Software tends to have a greater scope of use than it’s [sic] original conception. The programs I find myself using are programs that can interact with each other in a modular fashion; whether that be thought [sic] a piped command, or simply support for “generic” file formats (such as XML, CSV, etc. etc). Win32 port: Win32 port of a project.</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Availability</td>
<td>The product is available through a number of avenues</td>
<td>Availability around the Internet (if your program is on say on most of the distributions).</td>
<td></td>
</tr>
<tr>
<td><strong>Process</strong></td>
<td>Activity</td>
<td>The project is active; fixing bugs, writing updates, documentation and releases</td>
<td>If there hasn’t been an update to it in like 2 years, then chances are, unless it was perfect the first time around, it will fail.</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Adherence to process</td>
<td>The project has goals and objectives and have an established process that members adhere to</td>
<td>How is the process? Are there goals and are they being met? How is testing coverage and how often is testing being done? Milestones: establish concrete goals when you start the project, along with a timeline. Many goals, and projects evolve</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Bug fixing</td>
<td>Bug reports are attended to and fixed in reasonable time</td>
<td>Are issues being addressed in a timely manner?</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Time</td>
<td>How established is the software and how often do they release new features</td>
<td>Time is where you can measure your progress. This is where you can do things like determine milestones, develop feature lists and so on, then during the project you have a standard to compare yourself to.</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Age</td>
<td>How long has the group been active</td>
<td></td>
<td></td>
</tr>
<tr>
<td><strong>Developers</strong></td>
<td>Contribution</td>
<td>There are a number of developers contributing to the project</td>
<td>Second: I’ve had dozens of email’s asking for support as well as asking how to contribute. Traffic: both developer and user. Is there a relatively continuous level of input/interest in the project? If developers don’t want to develop, and users don’t want to use, it’s probably going nowhere, even if it’s the best thing since the BeOS.</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Varied developers</td>
<td>Developers from different projects, having different expertise contribute</td>
<td>Software developers!</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Satisfaction</td>
<td>Developers satisfy their need to innovate and develop code</td>
<td>Open source is scratching an itch, right? Is the itch scratched? If yes, then it’s [sic] a success.</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Enjoyment</td>
<td>Developers enjoy working on the code and with the group</td>
<td>Do you enjoy working on it? Then it’s successful.</td>
<td></td>
</tr>
<tr>
<td><strong>Use</strong></td>
<td>In relation to competition</td>
<td>Replaced competitive products</td>
<td>So clearly and overwhelmingly superior to its predecessors that it supplanted them.</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Number of users</td>
<td>How many users are using the product in addition to the developers</td>
<td>Are there more people using the project than developers? If so, it’s successful. If a project has no user base, then it is doomed to fail. How is a project going to succeed without a user base.</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Downloads</td>
<td>How many downloads of the product</td>
<td>Can usually judge the success of your project by counting downloads from your site. First: I’ve had hundreds of downloads, and since I run this project on a Cable Modem connection, my ISP hasn’t become unhappy :)</td>
<td></td>
</tr>
<tr>
<td>Recognition</td>
<td>Referral</td>
<td>Other sites, projects, organizations recognize and refer to the project</td>
<td>Links to site: and Third and finally (I think this one is a very good indicator): There are other websites out there that link to my site. Oh, and there’s a fourth optional measure of success...more for bragging rights...my site is THE FIRST result when querying google with “Java X10.” Use Google to see how often the name of the project comes up. Discussion in Google groups is also a good sign.</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Attention</td>
<td>The project attracted negative or positive attention from other institutions</td>
<td>You have been sued by a huge mega corp with a team of lawyers over patent infringement and the EFF comes to your rescue. Stallman demands that people call it GNU</td>
<td></td>
</tr>
<tr>
<td></td>
<td>Spin offs</td>
<td>New projects or spin-offs of the original project</td>
<td></td>
<td></td>
</tr>
<tr>
<td>Influence</td>
<td></td>
<td>Other projects adopt code or process from the project</td>
<td>Also adoption by other developers into the development group shows others are interested, so you must be doing something right.</td>
<td></td>
</tr>
</tbody>
</table>
EbbRT: Elastic Building Block Runtime - overview
Schatzberg, Dan
Computer Science Department, Boston University
Schatzberg, Dan; Cadden, James; Dong, Han; Krieger, Orran; Appavoo, Jonathan. EbbRT: Elastic Building Block Runtime - Overview. Technical Report BU-CS-TR 2015-005, Computer Science Department, Boston University, May 1, 2015.
http://hdl.handle.net/2144/21764
Boston University
EbbRT: Elastic Building Block Runtime - Overview
Dan Schatzberg, James Cadden, Orran Krieger, Jonathan Appavoo
Boston University
1 Introduction
Infrastructure as a Service (IaaS) provides a developer the ability to construct applications that dynamically acquire and release potentially large numbers of raw virtual or physical machines (nodes). The Elastic Building Block Runtime (EbbRT) is a new runtime for constructing and retro-fitting applications to take advantage of this opportunity.
EbbRT is a realization of the MultiLibOS model[?]. This model is based on the simple idea that not all IaaS nodes, used by a single application, need a general purpose OS. Rather, an asymmetric system software structure can be adopted where an application is distributed across a mix of general purpose OSs and specialized library OSs. The general purpose OS nodes support complete OS functionality and legacy compatibility. The rest of the nodes execute simple, customized, library operating systems that support a single application operation.
EbbRT provides a lightweight runtime that enables the construction of reusable, low-level system software which can integrate with existing, general purpose systems. It achieves this by providing a library that can be linked into a process on an existing OS, and as a small library OS that can be booted directly on an IaaS node.
The two core primitives that EbbRT provides are:
- **Events**: A lightweight, non-preemptive execution model that allows for an event-driven programming style to map directly to hardware interrupts.
- **Elastic Building Blocks**: An object oriented programming model that separates interface design from distributed implementation which enables the construction of composable and reusable software.
Context for EbbRT and our choice of primitives is presented in section 2. Section 3 provides an overview of EbbRT’s architecture and describes our prototype. Section 4 evaluates the prototype using three use cases, namely: 1) an EbbRT implementation of memcached[15], 2) a port of the V8[17] JavaScript engine and node.js[20], and 3) the integration of an elastically allocated distributed matrix object into the Sage[1] environment. These use cases demonstrate that EbbRT:
- A. enables applications to achieve high performance by customizing low-level system software,
- B. can support rich complex applications and run times, and
- C. allows an application to be modified incrementally to exploit the elasticity and scale of an IaaS.
2 EbbRT Context
EbbRT’s value and novelty lies in its unique combination of ideas from prior work. Specifically, EbbRT draws from work on library OSs, event driven software, and the use of partitioned object models in both multiprocessor and distributed systems software construction. In this section we provide the context for each and state how it is realized in EbbRT.
2.1 Library OSes
Library operating systems[14] organize a single application and the OS functionality it requires into a single address space and protection domain. The application code directly links to the OS code and invokes it via a standard function call. Library OSs enable reductions in overheads and the opportunity to specialize and tailor system functionality and interfaces for a particular application’s needs.
In recent years, several efforts have explored how virtualization can be leveraged to provide benefits by directly executing applications in their own VMs linked with a library OS [9, 8, 10, 26, 32, 22, 11, 30, 25, 29, 3]. These benefits range from improved security to higher performance. The basic approach is to extract out a particular function of an application and run it along with a library OS in its own virtual machine.
Generally, library OSs provide some level of ABI[32, 10] or API[26, 22, 25, 30, 3] compatibility. This has been done in three ways: 1) supporting C and C++ standard libraries, 2) porting of managed language runtimes such as Java[3, 22] and Ocaml[26], and/or 3) using a shim layer to forward system calls to an instance of a standard OS running in a different VM.
**EbbRT:** EbbRT provides a distributed runtime which allows processes of general purpose systems to launch back-end nodes running a lightweight library OS. The runtime allows for function offloading from the library OS to the general purpose system and vice-versa. From a user's perspective, an EbbRT application appears like any other process that is launched, owned and managed by the user. This front-end process serves both as the user's access point to the application, and also as the access point for the back-end nodes to the front-end OS's resources such as files and external I/O channels. There are cases under which an EbbRT application might exploit more than one front-end node to reduce contention and improve fault tolerance. Our current work, however, focuses on the case of an application having a single front-end node and front-end process.
EbbRT exploits library OSs for the back-ends to allow application and hardware specific optimizations. In particular, the event and Ebb primitives described in the next sections can interact with the hardware at a very low level. All services implemented in the library OS can be tuned to the specific needs of the application.
The EbbRT library OS is distributed with a port of the C and C++ standard libraries. OS functionality is provided to these libraries by a set of manually constructed functions that invoke methods of EbbRT components. These components can be implemented to communicate with the front-end to alleviate the burden for native local implementation where appropriate. While labor intensive, this approach to compatibility is tractable for supporting managed runtime environments as demonstrated by our port to the node.js runtime.
Other library OSs [3, 26] have been developed to be deployed on a cloud, but EbbRT is the first distributed library OS we are aware of. Other research groups exploring new operating systems for the cloud [34, 39] are not focused on a library OS model. We believe that the asymmetric model adopted by EbbRT, that includes both general purpose and library OSs, is both unique and critical to allowing us to aggressively explore new technologies while supporting real applications.
### 2.2 Event Driven Software
Event driven architectures and associated programming models are designed to reflect and enable applications that must respond to asynchronous actions. Typically, this is done with a callback model such that when an action occurs, a programmer specified routine is invoked by the system in response.
Hardware inherently supports an event driven model through its interrupt and exception support. As such, the lowest level software of most operating systems is written in an event driven manner directly on top of the hardware mechanisms. Operating system research has also explored how systems software can be better structured to directly support network based application processing which is inherently event driven[38, 23, 28].
The suitability of event driven programming to network application programming has made it popular for cloud and internet applications, so much so that the legacy process and thread models of commodity OSs are often abandoned in favor of lighter weight user-level primitives for supporting event driven programming via some form of explicit stack switching. Similarly, many user-level libraries such as Boost.ASIO and libuv have been developed to ease the burden of writing portable event driven applications on top of commodity OS features. Further, web application runtimes and languages have widely embraced event driven models and incorporated features such as promises and anonymous functions to better facilitate the use of continuations that are often required when programming in an event driven fashion.
**EbbRT:** EbbRT supports a non-preemptive event driven execution model. Not only does this match the trends of IaaS application programming, but it also allows for a lightweight implementation which maps directly to hardware mechanisms. Hardware interrupts cause application event handlers to be invoked. Event handlers run to completion with interrupts disabled. This allows application software to execute directly off of hardware interrupts without the need for thread scheduling and context switches. In order to support blocking programming interfaces, software can voluntarily yield the processor, saving its state, in order to dispatch further events. In contrast to other event driven operating systems [37, 2], where the continuations required by an event driven system added tremendous programmer complexity, we make extensive use of C++11 language features, such as lambdas, to reduce this complexity.
This execution model allows for a number of optimizations. Per-core data structures can be reused across many events without the need to synchronize due to the lack of pre-emption. Additionally, because interrupts are only
enabled at the termination of an event-handler, state does not need to be saved when an interrupt occurs. At a larger level, much of the complexity of a scheduling infrastructure is avoided, allowing applications to easily control event execution.
The lack of preemption means that a long running event will make the system non-responsive to new events. Software developed directly to the base event model needs to be carefully designed to avoid this. We have so far found it natural to implement the core system software under this constraint. In scenarios where a more complex execution model is required, the event infrastructure can serve as a natural foundation for threads and schedulers to be constructed [4].
### 2.3 Partitioned Object Models
The development of high-performance, parallel software is non-trivial. The concurrency and locality management needed for good performance can add considerable complexity. Prior work has demonstrated that a partitioned object model can facilitate the construction of parallel system software, both for distributed and shared memory systems. In a partitioned object model, an object is internally composed of a set of distributed representatives. Each representative locally services requests, possibly collaborating with one or more other representatives of the same partitioned object instance. Cooperatively, all the representatives of the partitioned object implement the complete functionality of the object. To the clients of a partitioned object, the object appears and behaves like a traditional object.
The distributed nature of partitioned object models makes them ideally suited for the design of both multi-processor and distributed system software, which often requires a high degree of modularity and yet benefits from the sharing, replicating and partitioning of data on a per-resource (object) basis. Fragmented Objects (FOs) [12, 27, 35] and Distributed Shared Objects (DSOs) [7, 18] both explore the use of a partitioned object model as a programming abstraction for coping with the latencies in a distributed network environment, LAN and WAN respectively. Clustered Objects [16, 24, 5] demonstrated the effectiveness of a partitioned object model in the construction of multi-processor operating systems.
**EbbRT:** Core to EbbRT is a partitioned object model called Elastic Building Blocks (Ebbs) that provide a model for software components to independently and elastically expand to react to system-wide demand. An Ebb is associated with an EbbId, a system-wide unique identifier. When a client invokes an interface of an Ebb, the request is directed to a per-core representative which may communicate with other representatives on other cores or nodes within the system to fulfill the request. EbbRT puts no restrictions on how the representatives of an object must communicate or organize themselves, allowing Ebbs to be used for a wide range of different software components.
Object invocations are directed efficiently to a representative by exploiting a virtual memory region backed by different physical pages on each core. Representatives are created on demand. When a request is made to a non-existent representative a programmer specified fault handler is invoked in order to construct it.
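The translation path can be sketched as follows. The names below (EbbId, EbbRef, HandleFault) are illustrative rather than the actual EbbRT API, and thread-local storage stands in for the per-core translation memory; the point is only to show how a dereference resolves an EbbId to a per-core representative, constructing it on a miss.

```cpp
// Sketch of EbbId-to-representative translation (hypothetical names).
#include <cstdint>
#include <iostream>
#include <unordered_map>

using EbbId = uint32_t;

template <typename T>
class EbbRef {
 public:
  explicit EbbRef(EbbId id) : id_(id) {}

  // Dereference: translate the EbbId to this core's representative,
  // constructing it on first use (the "miss" path described above).
  T* operator->() const {
    thread_local std::unordered_map<EbbId, T*> translation;  // per-core stand-in
    auto it = translation.find(id_);
    if (it == translation.end()) {
      it = translation.emplace(id_, T::HandleFault(id_)).first;  // build the rep
    }
    return it->second;
  }

 private:
  EbbId id_;
};

// A trivial Ebb whose representative just counts invocations on this core.
struct Counter {
  static Counter* HandleFault(EbbId) { return new Counter(); }
  void Inc() { ++count_; }
  int count_ = 0;
};

int main() {
  const EbbId kCounterId = 42;   // assume a statically assigned id
  EbbRef<Counter> counter(kCounterId);
  counter->Inc();                // first call constructs the representative
  counter->Inc();
  std::cout << "per-core count = " << counter->count_ << "\n";
}
```

In EbbRT itself the lookup is a load from the per-core translation region rather than a hash table, and the miss path invokes the programmer-specified fault handler described above.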
### 3 Architecture and Prototype
In this section we present the architecture of EbbRT and our prototype of it. The final subsection discusses an example code fragment from our prototype to clarify and illustrate salient concepts and features.
#### 3.1 Architecture
As discussed, EbbRT is structured as a MultiLibOS and supports a single instance of an application distributed across a set of IaaS provided nodes. Figure 1 illustrates the three layers of the EbbRT software architecture. The lowest layer provides the base software mechanisms for constructing the address spaces that the Ebb application will run within. The rest of the EbbRT software, illustrated in the top two layers, takes the form of Ebbs, and execution occurs on light-weight, non-preemptive events. The system base Ebb layer is mandatory and provides the support for Ebbs, events and off-node communication. While the Ebbs in this layer are mandatory, the implementations themselves can be customized for any particular application's needs or hardware features. Collectively, these Ebbs define the base interfaces and function of EbbRT; everything else is specific to an application and linked in as necessary. While implementations may differ, the same interfaces are provided on the front and back ends.
### 3.1.1 Base Software mechanisms
In the case of a front-end, a standard user process, linked to the EbbRT library, serves as the EbbRT address space on the node. Back-ends, however, use custom boot images that contain the base EbbRT Library OS software that bootstraps the node and maps all the node's resources to a single virtual address space running with ring zero privilege. All physical memory is identity mapped into this address space.
The base software mechanisms include interfaces for establishing flexible virtual mappings. This includes, for example, arbitrarily large virtual memory regions to be used for stacks, core-specific memory regions, and the ability for applications to specify their own fault handler for these regions. A full discussion of the base software mechanisms is out of scope for this paper.
### 3.1.2 System Base Ebbs
The EbbRT system is fundamentally composed of a set of Ebb instances that provide interfaces that additional Ebbs can be developed to. A single well known static instance of each of these Ebbs is provided when the application is initialized. In general, they are fully replicated and their internal representative construction happens on demand as nodes are added to the application and the instances themselves are accessed on a particular node. Below we briefly describe the role of each.
**Memory Allocator:** On back-end nodes, after the base EbbRT Library OS initializes the virtual memory subsystem, the Memory Allocator is initialized to serve as the general purpose memory allocator. The C and C++ runtimes are configured to use it.
**Event Manager:** This Ebb is responsible for providing the event interfaces and implementing a basic non-preemptive event loop per-core. It does not provide threads but rather a set of interfaces for specifying callbacks to execute in response to hardware events, in the form of interrupts, and software events. Software events are function calls that can, but do not have to, execute asynchronously with the caller. Events run to completion unless they manually switch stacks via calls to Event Manager methods.
**Ebb Allocator and Local Id Map:** These two Ebbs provide the base support for additional Ebbs to be created. On each node the Ebb Allocator manages a range of EbbIds that are known to be unique and can be used to identify a single instance of an Ebb. The Local Id Map can be used by an Ebb instance to store data common to all of its representatives on a node.
**Network Manager and Messenger:** These two Ebbs provide a basic set of communication facilities so that cross node communication is possible. In particular, the Network Manager provides an event driven interface to the networking facilities of the local node. The Messenger uses the Network Manager to provide an interface for sending a message to an Ebb on a particular node. The arrival of a message can cause a representative to be created on that node.
**Global Id Map:** This Ebb provides an application wide table that serves as a place for Ebb instances to store data accessible across all nodes of the system. When an Ebb is instantiated, it can place data into the Global Id Map that can be used to construct representatives on an arbitrary node. In particular, when an Ebb is first accessed on a node and it cannot find information in the Local Id Map, the Ebb can then consult the Global Id Map. The data obtained can be used to populate the accessing node's Local Id Map, which in turn can be used to construct representatives of the Ebb on that node as needed.
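A minimal sketch of that lookup chain is shown below, with plain in-memory maps standing in for the Local and Global Id Maps and hypothetical names throughout; in EbbRT the Global Id Map lookup may involve a message to another node.

```cpp
// Minimal sketch of the miss path described above (all names hypothetical).
#include <cstdint>
#include <iostream>
#include <map>
#include <string>

using EbbId = uint64_t;

std::map<EbbId, std::string> local_id_map;   // per-node cache of creation data
std::map<EbbId, std::string> global_id_map;  // application-wide table

struct Representative {
  explicit Representative(std::string d) : creation_data(std::move(d)) {}
  std::string creation_data;
};

Representative* ConstructOnMiss(EbbId id) {
  auto local = local_id_map.find(id);
  if (local == local_id_map.end()) {
    // Not yet seen on this node: consult the Global Id Map (a message send in
    // the real system) and cache the data locally for future representatives.
    auto global = global_id_map.find(id);
    if (global == global_id_map.end()) return nullptr;  // unknown Ebb
    local = local_id_map.emplace(id, global->second).first;
  }
  return new Representative(local->second);
}

int main() {
  global_id_map[7] = "matrix: 4x4 tiles, home node 10.0.0.1";  // set at creation
  Representative* rep = ConstructOnMiss(7);
  if (rep) std::cout << "built rep with: " << rep->creation_data << "\n";
}
```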
**Node Allocator:** This Ebb provides an interface to the rest of the Ebb software for acquiring and booting a node. Its implementation is specific to a particular IaaS’s interfaces and performance characteristics. While on a commodity IaaS it can take tens of minutes to provide a node to a client, at least one IaaS is capable of providing hundreds of physical nodes to a client in sub-second time frames[6]. The Node Allocator can bridge this gap by internally creating a free pool of pre-allocated nodes loaded with a special image that puts the node into a dormant state waiting to be released for application use.
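The pooling idea can be illustrated with a small sketch; the interface and node naming below are hypothetical, and a real implementation would refill the pool asynchronously through the IaaS API rather than inline.

```cpp
// Sketch of the pre-allocation idea: keep a pool of already-booted, dormant
// nodes so Allocate() can return well under the provider's cold-boot latency.
#include <deque>
#include <iostream>
#include <string>

class NodeAllocator {
 public:
  explicit NodeAllocator(int target_pool) : target_pool_(target_pool) { Refill(); }

  // Hand out a dormant node immediately, then top the pool back up.
  std::string Allocate() {
    std::string node = pool_.front();
    pool_.pop_front();
    Refill();  // a real system would do this asynchronously
    return node;
  }

 private:
  void Refill() {
    while (static_cast<int>(pool_.size()) < target_pool_) {
      // Stand-in for an IaaS call that boots a node with a dormant image.
      pool_.push_back("node-" + std::to_string(next_id_++));
    }
  }

  std::deque<std::string> pool_;
  int target_pool_;
  int next_id_ = 0;
};

int main() {
  NodeAllocator alloc(4);
  std::cout << "got " << alloc.Allocate() << " and " << alloc.Allocate() << "\n";
}
```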
### 3.1.3 Application Specific Ebbs
Above the base EbbRT layer, arbitrary application specific Ebbs can be constructed. A critical goal of the architecture is to permit a high degree of specialization and customization for an application’s needs through composition and configuration.
EbbRT's component architecture was chosen to make it viable to construct reusable libraries of Ebbs, and its fine-grain decomposition provides many degrees of freedom to customize even its most basic functionality, such as the event processing loop and interrupt dispatch, by providing an application-customized implementation of the Event Manager. In keeping with this compositional theme, EbbRT expects many traditional features of a library OS to be provided as independent libraries of Ebbs. This includes things like additional device support, files, network protocols and abstractions. Similarly, enabling libraries of application-specific Ebbs, such as scalable and elastic matrices, is a core value of the architecture. The final runtime structure of an Ebb application should be a composition of Ebb instances that are solely focused on and necessary for the application specific processing that is to be done.
3.2 Prototype
Our EbbRT prototype consists of a main body of C++ software from which two libraries are generated. The source is composed of approximately 9600 lines of C++ and 330 lines of assembly code. One library generated is a standard Linux library. This front-end library can be linked either statically or dynamically to a Linux application. The other library is an x86-64 custom EbbRT back-end library that can be used to create a boot image that contains the EbbRT library OS and can be launched in a KVM virtual machine. All software targeting the EbbRT library OS is built using a port of the GNU C++ toolchain.
In order to explore EbbRT, we have constructed a simple synthetic IaaS that launches KVM instances. Using our IaaS interface, a user can dynamically acquire nodes and boot them with arbitrary images. All nodes of a particular user are placed on a user specific private virtual network. In our prototype, the Node Allocator is a simple implementation that just calls out to our IaaS daemon. This daemon launches KVM virtual machines to boot with the specified image and set of arguments.
The Global Id Map of our prototype is a very simple centralized implementation where the representative on the launching front-end maintains the entire hash table. We expect other implementations of the Global Id Map to take on a much more robust and complex structure. In some scenarios a Chord [36] or Zookeeper [19] based implementation is likely to be appropriate.
The Memory Allocator has been implemented based on the SLQB design[31]. It is naturally realized as an Ebb given its per-core design. We chose SLQB for its multi-core and NUMA friendliness and expect it to be a good match for multi-core optimized Ebbs. It is perfectly reasonable, however, for alternative Memory Allocator implementations to be developed as the need arises.
As illustrated by our case studies (see 4.1), our prototype Event Manager is simple but effective. We expect that a wide range of Event Manager variants will be useful in tightly tailoring the event loop to an application's specific needs beyond the ones that we have explored so far. While our prototype implementation lacks preemption and threading, our design allows for the Event Manager implementation to be evolved to serve as the foundation layer for scheduler activation[4] inspired Ebb libraries that provide pre-emption and threading as needed.
The Network Manager and Messenger implementations in our prototype have been deeply influenced by our current use of traditional Ethernet and IP based communication.
Finally, the prototype, as stated above, is realized on KVM. Nothing precludes EbbRT from running on a physical host, and we expect that as IaaS providers evolve to hardware systems that make physical provisioning viable, our prototype can be modified to provide even greater value.
3.3 Example
We conclude our discussion of the EbbRT architecture and our prototype with an example. The top of Figure 2 presents a fragment of code that spawns a new software
event to be executed. To do so, the Spawn method of the Event Manager Ebb is invoked. This code fragment can be run in any EbbRT address space, front or back end, of an application.
The Event Manager is accessible via the global symbol `ebbrt::event_mgr`. Its value is the EbbId of the single statically provided Event Manager instance. Every Ebb instance must have a unique EbbId that is obtained either statically or dynamically from the Ebb Allocator. The Ebb Allocator manages the EbbId values to ensure uniqueness. In our prototype, we utilize the C++ support to override the dereference operator (`->`) to invoke logic that translates the EbbId to a representative pointer.
The middle of Figure 2 illustrates the logical structure of the Event Manager. It is a fully replicated Ebb having one representative per-core, per-node. When the Spawn method is invoked the Ebb's dereference operator automatically invokes it on the representative associated with the core executing the call. The bottom of Figure 2 illustrates how this structure is realized by the Event Manager implementation using the Ebb infrastructure.
Within each EbbRT address space is a region of per-core translation memory that appears at the same virtual address but is backed by per-core physical memory\(^1\). An EbbId translates to an offset in the translation memory region. On each core, the location in the translation memory associated with the Event Manager's EbbId caches a pointer to the core specific representative. The Event Manager uses the Local Id Map to map its EbbId to a single instance of a node specific data structure. In the case of the Event Manager the data structure contains two maps, a replist and a nodelist. The former is a master list of representatives that exist on a node and the cores they map to; the latter records the network addresses of all the nodes on which the Event Manager has representatives. Finally, the Event Manager, during initialization of the application, places in the Global Id Map the network address of a node that serves as the home node. The home node is responsible for maintaining the master nodelist, which is cached as needed on the other nodes.
Using this logical structure and layout in the Ebb infrastructure, the representatives of the prototype Event Manager implement the Event Manager interfaces. In the code fragment, we see the Spawn interface being invoked. This method specifies a function to be dispatched as an event. The default Event Manager behavior is for this function to be synchronously invoked on its own stack while the caller’s stack is placed aside. If this function or any of its subsequent functions attempt to block via other methods provided by the Event Manager, the calling stack will be reinstated and execution of the caller will be resumed at the return of Spawn. In this fashion, the Event Manager implements a form of manual handoff scheduling to spawned events. From the programmer’s perspective, however, spawned functions should be assumed to be asynchronous with respect to the caller of Spawn. Explicit spawn interfaces are provided that allow the programmer to force the function to be asynchronous in which case the Event Manager will place it on a list to be dispatched from the Event Managers event loop when execution returns to it.
In the case of the example code listed, the function to be executed is an anonymous function or, more precisely, a C++11 lambda. The syntax allows the code for the function to be specified in-line and provides the ability to create a closure that captures values that are in scope when the Spawn method is invoked. The EbbRT prototype makes frequent use of lambdas to simplify continuation based programming. In this case the function to be invoked calls three methods of the Event Manager: GetNum, GetNumNode and GetNumCore, which respectively return the number of functions that have been dispatched by the Event Manager across the entire application, on this node, and on this core.
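Figure 2 itself is not reproduced here; the fragment below is a hedged reconstruction of the call it describes. The signatures are illustrative stubs, and the EbbId-based dereference is replaced by a plain pointer so the sketch stays self-contained.

```cpp
// Hedged reconstruction of the Figure 2 fragment (illustrative stubs, not the
// actual EbbRT API): spawn a lambda that queries the three counters.
#include <cstdio>
#include <functional>

namespace ebbrt {
struct EventManager {
  // Stub of Spawn: simply invokes the function. The prototype described in
  // the text runs it synchronously on its own stack and resumes the caller
  // if the spawned event blocks.
  void Spawn(std::function<void()> func) { func(); }
  int GetNum() const { return 128; }     // app-wide dispatch count (stubbed value)
  int GetNumNode() const { return 64; }  // dispatches on this node (stubbed value)
  int GetNumCore() const { return 16; }  // dispatches on this core (stubbed value)
};
EventManager event_mgr_rep;                // stands in for the per-core representative
EventManager* event_mgr = &event_mgr_rep;  // the text's ebbrt::event_mgr symbol
}  // namespace ebbrt

int main() {
  ebbrt::event_mgr->Spawn([] {
    // The lambda is the spawned event; in EbbRT it may block and be resumed.
    std::printf("dispatched: total=%d node=%d core=%d\n",
                ebbrt::event_mgr->GetNum(),
                ebbrt::event_mgr->GetNumNode(),
                ebbrt::event_mgr->GetNumCore());
  });
}
```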
By exploiting a fully replicated structure, the Event Manager representatives by default implement Spawn and the event loop on a per-core basis using their own member data structures. As part of dispatching functions, each representative maintains a counter that it increments and thus tracks the number of functions called on its core. Given the non-preemptive nature of execution and the per-core representative structure, operations on these counters do not need to be synchronized. Additionally, features of the memory allocator are used to ensure that the data structures of each representative are on distinct cache lines. This ensures that there is no false sharing between the counters, and thus good performance and scalability will be achieved on event dispatch while accurate counts are maintained. To implement the various GetNum operations the representatives perform gathers as necessary using the replist and nodelist maintained on each node, sending messages as necessary.
There are of course several alternative approaches to organizing representatives in the infrastructure. For example, rather than, or in addition to, maintaining master replists and a master nodelist, the representatives could contain pointers that link them into a ring, and similarly the nodes could contain network addresses of neighbors to also form a ring. This flexibility exists to allow Ebbs to utilize the infrastructure in a manner that is most appropriate for their needs, the application they are designed to serve, and the features of IaaS interconnection networks.
\(^1\)On front-ends thread local storage (TLS) is exploited to provide similar function albeit with greater overhead.
4 Evaluation
We evaluate and explore the EbbRT prototype through three case studies that evaluate and demonstrate different aspects of the system. The first use case, memcached, demonstrates the performance potential possible with our approach. The second, node.js, discusses our experience porting a rich managed runtime and demonstrates the viability of supporting rich unmodified applications. The third, Sage, demonstrates the value of the asymmetric model, allowing software packages to be incrementally modified to exploit the elasticity and scale of IaaS environments.
4.1 Memcached
This case study describes a memcached[15] server, implemented with EbbRT, to produce a bootable image. This use case demonstrates that EbbRT can be used to enable very simple application code to fully exploit the (virtualized) hardware and illustrates the use of the event driven execution model for supporting a cloud application.
Memcached implements a simple key-value store. It is designed to be highly performant, and has become a common benchmark in the examination and optimization of networked systems. It has also been shown by previous work to incur significant OS overhead [21], and hence is a natural target for a library OS.
4.1.1 Implementation
The back-end EbbRT memcached server is a simple single-core application that supports the standard memcached binary protocol. Our implementation is only 277 lines of original C++ code [40]. To a developer with knowledge of the EbbRT interfaces, this basic application can be developed in a single afternoon.
The lower portion of Figure 3 illustrates how the EbbRT library OS internals interact with the memcached application. At the bottom the Network Interface Card (The VirtIO Net paravirtualized device) deposits Ethernet frames into memory buffers. When a buffer is written to, the device marks an associated descriptor as dirty. When all buffers are used, the device will drop new Ethernet frames received.
In the steady, higher-load states, the EbbRT network device driver Ebb (shown at the bottom of the diagram) uses a recurring idle event, which runs when no other events exist. This poll event inspects the device state to check for used buffers. If none are found, interrupts are enabled on the device and the idle event handler is unregistered. If a used buffer is found, a descriptor to the buffer is passed to the Network Manager for further processing.
The memory containing the payload is never copied; a descriptor is passed through the networking stack all the way to the application.
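The driver's idle-event logic can be sketched as follows; the device and driver interfaces shown are stand-ins, not the actual EbbRT driver code.

```cpp
// Sketch of the idle-event polling logic described above (hypothetical types).
#include <cstdio>
#include <deque>
#include <string>

struct VirtioNetDevice {                 // stand-in for the virtio-net ring
  std::deque<std::string> used;          // frames the device has written
  bool interrupts_enabled = false;
  bool HasUsedBuffer() const { return !used.empty(); }
  std::string TakeBuffer() { auto b = used.front(); used.pop_front(); return b; }
  void EnableInterrupts() { interrupts_enabled = true; }
};

struct Driver {
  VirtioNetDevice* dev;
  bool idle_handler_registered;

  // Called by the Event Manager when no other events are pending.
  void OnIdle() {
    if (!dev->HasUsedBuffer()) {
      dev->EnableInterrupts();           // fall back to interrupt-driven mode
      idle_handler_registered = false;   // unregister the idle event
      return;
    }
    // Pass the buffer descriptor up the stack without copying the payload.
    std::printf("to Network Manager: %s\n", dev->TakeBuffer().c_str());
  }
};

int main() {
  VirtioNetDevice dev;
  dev.used = {"frame-0", "frame-1"};
  Driver drv{&dev, true};
  while (drv.idle_handler_registered) drv.OnIdle();  // the event loop's idle slot
}
```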
The Network Manager wraps the Light Weight IP (lwIP) [13] networking stack that is linked and ported to the EbbRT Library OS. This software processes the frame and identifies it with a TCP connection. The Network Manager then invokes the application registered callback for data reception on that connection.
The memcached application logic will then run to completion (including any network sends) and return to this point. This code will continue to return until the top frame of the event is exited. At this point, the Event Manager's event loop logic will briefly enable interrupts to process any pending interrupts. In the memcached scenario, the only interrupts that might occur are timer interrupts associated with network processing. The event loop will disable interrupts and then execute the idle handler.
Jointly, the upper and lower portions of Figure 3 illustrate the entire path of input processing. What is critical to note is how packet data and memory move up from the NIC to the application on a single event, with no preemption and no memory copies. This creates a run-to-completion packet processing model that encompasses all of the software logic: device driver, protocol stack and application. So much so that the application hash table stores the received data directly, without copies.
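A condensed sketch of the application side of this path is shown below. The interfaces are illustrative, not the 277-line EbbRT memcached; it only shows the shape of a receive callback that services a request and replies before returning to the event loop, keeping a reference to the received buffer rather than copying the value.

```cpp
// Illustrative receive handler for a memcached-style key-value store.
#include <cstdio>
#include <memory>
#include <string>
#include <unordered_map>

struct Request { std::string op, key, value; };  // stands in for a parsed buffer

class Memcached {
 public:
  // Invoked (hypothetically by the Network Manager) for each parsed request;
  // runs to completion and returns the reply before the event ends.
  std::string OnReceive(std::shared_ptr<Request> req) {
    if (req->op == "set") {
      table_[req->key] = req;            // keep the received buffer; no value copy
      return "STORED";
    }
    auto it = table_.find(req->key);
    return it == table_.end() ? "NOT_FOUND" : it->second->value;
  }

 private:
  std::unordered_map<std::string, std::shared_ptr<Request>> table_;
};

int main() {
  Memcached server;
  std::printf("%s\n", server.OnReceive(std::make_shared<Request>(
                          Request{"set", "k", "v"})).c_str());
  std::printf("%s\n", server.OnReceive(std::make_shared<Request>(
                          Request{"get", "k", ""})).c_str());
}
```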
4.1.2 Evaluation
**Environment** Experimental measurements were gathered on a single Dell PowerEdge R620 server, equipped with two 10-core Intel Xeon E5-2670v2 processors, the Intel C6202 chipset, and 32GB of DDR3 RAM. The host system ran CentOS 6.5 with Linux 2.6.32. Guest Linux VMs ran Debian 7.4 with Linux 3.2.0. Our IaaS simulation daemon, which ran on the host, was configured to deploy qemu-kvm (version 1.7.5) instances. Each KVM guest (EbbRT or Linux) was given a single VCPU, pinned explicitly to an inactive physical core, and 4GB of memory. The guests were connected to the physical network via an Ethernet bridge on the host machine, and used KVM vhost-net.
To evaluate the performance of our memcached implementation we ran the memaslap benchmark included with memcached. Memaslap is run on a remote machine connected to the host machine via a switch and a gigabit Ethernet link. In this way, each test accounts for the round trip latencies of a single network hop (0.10 ms). Memaslap is configured to do a 9:1 ratio of Get operations to Set operations. We run the same experiments on the standard Linux memcached implementation to provide a comparison.
Figure 4 shows the throughput of memcached for a small payload as we increase the number of concurrent requests from the client. We see that the EbbRT implementation’s throughput peaks at around 64 connections, with about 1.7 times the throughput of the Linux implementation with the same concurrency.
With more than 64 connections, we then see that Linux's throughput maintains the same rate, while EbbRT's gradually degrades. The source of degradation for EbbRT is shown in Figure 5. We see that the main degradation is in lwIP receive processing. Examining the code, we find that each receive ends up traversing a linked list of all the connections. Moreover, lwIP moves the most recently used connection to the front of the list, causing most accesses for this benchmark to traverse the entire list. We believe that this degradation stops once we hit a concurrency of 256 because the number of concurrent packets exceeds the ring buffer of the device, resulting in TCP retries and slowing down some clients, so that the lwIP optimization has less of a negative consequence.
This figure also demonstrates another important point. Even in our highly optimized environment, at low concurrency, the total time spent in application code is only around 15% of the total execution time for a single memcached operation. This demonstrates the importance of optimizing system software for this kind of application.
Figure 6 shows the performance of memcached, with a fixed concurrency of 64 sockets (our peak), as we increase the payload size. We see that EbbRT is able to process packets at a high enough rate to saturate the network at around 800 bytes, while the implementation on Linux is not able to saturate the network until packets are around 2 kilobytes. Note, the dip in performance for both implementations occurs when they segment packets across multiple Ethernet frames. We are investigating the source of the sawtooth shown in EbbRT's throughput as payload sizes become large.
**Discussion** We see from the performance data above that we are able to achieve major performance gains over the Linux-based implementation. Under peak conditions with small payloads, the EbbRT memcached implementation is able to handle 1.7 times as many requests as the Linux implementation and saturate the network at a much smaller packet size. The current performance degradation is due to the lwIP library we use for TCP/IP. While lwIP is small and simple to port, it is not designed or optimized for high performance compared to Linux's mature, server grade protocol stack. Not only is its performance under load problematic, but it does not have support for hardware optimizations like segmentation offload. As with other systems [26], we expect to need to implement or port a more performant TCP implementation over time.
To understand why performance is so much better with the EbbRT implementation, it's worthwhile to compare what has to happen with the Linux implementation to the EbbRT based one. With Linux, the application calls epoll (a context switch), the kernel wakes it up when a packet arrives (context switch), the application then reads the data (context switch and copy) and then writes a reply (another context switch and copy). In contrast, with EbbRT all these system calls and copies are avoided; EbbRT results in fewer context switches and buffer copies than Linux on every client request.
One option with an EbbRT implementation is to make optimizations that are brittle in that they are application specific and perform poorly for other applications. For example, we found that under extremely heavy load a minor performance improvement resulted when we handled packets in reverse order, since starving some clients under very heavy load limited the number of TCP timeouts observed. This same change resulted in orders of magnitude degradation on a simple tcp streaming benchmark. While this optimization is not that significant (and was not used while gathering the earlier results), it demonstrates how a very brittle change, that only performs well for just one application, is a reasonable option in a system like EbbRT.
The results we have obtained are consistent with Chronos[21], which achieved similar performance on top of Linux by bypassing the operating system. The two projects have adopted very different approaches to achieve the same goal. It is certainly possible to provide functions on a general purpose OS that allow applications to be developed that bypass any specific OS functionality. However, doing so results in significant OS complexity. Now that we can easily provision nodes on an IaaS cloud, we believe the EbbRT design provides a natural alternative for supporting the performance demanding applications that are a poor match for our general purpose systems, and alleviates the burden on commodity OSs to be simultaneously general purpose and robust while also needing to be special purpose and customized for a single application.
Perhaps the most important result of this memcached experiment is that the application took only 277 lines of original code to implement. The effort of developing EbbRT has made it possible for very simple applications to be written very close to the hardware.
Our experience developing memcached also demonstrates that EbbRT is a natural match for the intrinsic event driven nature of this kind of application. Applications like memcached, which are fundamentally about handling networking events, are a mismatch for general purpose OSes, on which they have to construct an event model on top of threads within a protection domain isolated from the device.
It should be noted that our current implementation of memcached is limited to one core. While EbbRT is designed to efficiently support multi-core applications, the lwIP library we are using limits our multicore performance. Removing this barrier is, for now, future work.
4.2 NodeJS
This case study describes the port of node.js, a javascript environment for server-side applications, to EbbRT. It illustrates three points: 1) That EbbRT can support complex managed code environments, allowing existing software to run unmodified on the library OS back ends. 2) How EbbRT’s non-preemptive, event-driven execution environment is suitable for even large, complex applications such as node.js. 3) That OS functionality can be offloaded to a general purpose OS, easing the effort of porting to the EbbRT library OS.
Node.js links with several libraries to provide its event-driven environment. In particular, the two libraries which involved the most effort to port were V8, Google’s javascript engine written in C++, and libuv, a library written in C which abstracts OS functionality and callback based event-driven execution. Porting V8 was relatively straightforward as EbbRT supports the C++ standard library which V8 depends on. Additional OS dependent functionality such as clocks, timers and virtual memory are provided by the base Ebbs of the system.
Porting libuv required significantly more effort. There are over one hundred functions in the libuv interface which have OS specific implementations. We did not implement all of these functions, only those we reached in the process of running various Node.js applications.
Libuv manages an event loop which dispatches callbacks installed by the application. The application must execute the uv_run function to begin dispatching events. In the Linux implementation, installing a callback for reading data from a TCP socket is implemented by informing Linux, via the epoll system call, of the desire to be notified when the socket is available for reading. In contrast, the EbbRT implementation installs a callback to be invoked by the Network Manager when TCP data arrives. The implementation of uv_run must then save its context (stack and a few general purpose registers) to be woken up when the callback is invoked. However, this callback executes on a separate stack from the uv_run stack. The callback can synchronously activate the previously saved context to execute the callback handler installed by the application. These context switches are much simpler than equivalent context switches on general purpose OSes because they do not involve a protection domain crossing and, due to the lack of pre-emption, do not have to save many registers (only those that are callee saved as mandated by the ABI). This allows the libuv callbacks to be invoked synchronously from the hardware interrupt that caused them, in much the same way that the memcached application was able to.
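The control flow can be sketched in simplified form. The functions below are hypothetical stand-ins (the real port wires libuv's own entry points to the Event Manager's context save and activate operations, and the actual stack switch is elided here); the sketch only shows a Network Manager callback resuming the logic waiting in uv_run and then running the application's callback.

```cpp
// Simplified sketch of the EbbRT libuv control flow (hypothetical names).
#include <functional>
#include <iostream>
#include <string>
#include <vector>

// Stand-in for the Network Manager: callbacks registered per connection.
std::vector<std::function<void(std::string)>> net_callbacks;

// Stand-in for the saved uv_run context: the work to resume when data arrives.
std::function<void(std::string)> saved_uv_run_context;

void uv_read_start_ebbrt(std::function<void(std::string)> app_cb) {
  // Instead of an epoll registration, hook straight into the Network Manager.
  net_callbacks.push_back([app_cb](std::string data) {
    // In the real port this reactivates the saved uv_run stack; here it is a
    // plain call, followed by the application's libuv callback.
    if (saved_uv_run_context) saved_uv_run_context(data);
    app_cb(std::move(data));
  });
}

void uv_run_ebbrt() {
  // "Save" the context: record what to do when the next event resumes us.
  saved_uv_run_context = [](const std::string& data) {
    std::cout << "uv_run resumed by event carrying " << data.size() << " bytes\n";
  };
  // A real implementation would now yield to the Event Manager's event loop.
}

int main() {
  uv_read_start_ebbrt([](std::string d) { std::cout << "app callback: " << d << "\n"; });
  uv_run_ebbrt();
  // Simulate TCP data arriving as an event:
  for (auto& cb : net_callbacks) cb("GET / HTTP/1.1");
}
```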
We were able to implement the networking interfaces provided by libuv in this fashion by installing callbacks to reactivate the uv_run context and invoke the application callbacks. This was sufficient to allow us to run node.js applications including TCP stream processors and web servers.
Filesystem access was implemented by invoking a FileSystem Ebb linked into the application. Rather than implement a file system and hard disk driver, our implementation offloaded calls to a representative running in a Linux process. Specifically, our implementation of libuv invokes the FileSystem Ebb, which performs the offload by sending messages between its representatives. Our implementation of the FileSystem Ebb is naïve, sending messages and incurring round trip costs for every access rather than caching data. This allowed us to quickly get the rich functionality provided by the Linux filesystem with minimal development effort. Implementing a caching layer would require only changes to the FileSystem Ebb, without modifications to libuv.
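The offloading pattern is sketched below with hypothetical representative classes; the message send between representatives is collapsed into a direct call, and, as in our prototype, every read incurs one round trip with no caching.

```cpp
// Sketch of the offload pattern: the back-end representative has no local
// file system, so it forwards each call to the front-end representative.
#include <fstream>
#include <iostream>
#include <sstream>
#include <string>

// Front-end representative: runs in the Linux process and uses the host FS.
struct FrontEndFileSystemRep {
  std::string Read(const std::string& path) {
    std::ifstream f(path);
    std::ostringstream out;
    out << f.rdbuf();
    return out.str();
  }
};

// Back-end representative: every call becomes a round trip to the front end.
// (Here the "message" is a direct call; in EbbRT it goes via the Messenger.)
struct BackEndFileSystemRep {
  FrontEndFileSystemRep* front_end;
  std::string Read(const std::string& path) {
    return front_end->Read(path);  // naive: no caching, one round trip per call
  }
};

int main() {
  FrontEndFileSystemRep fe;
  BackEndFileSystemRep be{&fe};
  std::cout << be.Read("/etc/hostname").size() << " bytes fetched via front end\n";
}
```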
Offloading allows us to execute node.js by launching a Linux process linked with the EbbRT library, which then allocates a node loaded with the EbbRT library OS linked to node.js. Node.js can, via an Ebb, read the command line arguments that were originally passed to the Linux process. This then indicates the filename of the node.js script, which is fetched from the Linux process and then loaded. This model allows us to rapidly spawn node.js instances on their own machine with integration via the front-end file system.
4.2.1 Discussion
In the memcached scenario, we demonstrated that EbbRT benefits applications by allowing them to map more closely to the hardware. Our memcached implementation was written directly to our base interfaces. Many applications, however, are too large to consider completely rewriting to target EbbRT, despite potential performance
improvements. Node.js offers us a number of different layers to consider for porting. One could have ported at the system call layer and emulated Linux and linked directly to the Linux libuv implementation. The Linux libuv implementation in many cases uses a thread pool to make blocking system calls in cases where Linux has poor support for non-blocking interfaces. EbbRT can support the libuv interfaces more directly and so we opted to do the port at that layer. This illustrates EbbRT’s suitability for an increasingly popular programming paradigm. Many cloud applications such as nginx [33], memcached, node.js, are designed to be event driven and depend on various event libraries to abstract the OS interfaces designed for event dispatch. EbbRT natively provides these interfaces and provides natural mappings for these applications.
The port of node.js (including V8 and libuv) is 1585 lines of code, of which the majority (1237) is in the port of libuv. The port took a single graduate student two weeks to bring to a level of completion where we were able to run node.js web servers capable of serving files (exercising both the networking and file access interfaces). The final boot image which is generated is 5.76 megabytes in total size.
A key result of this port is the ability to run complex applications without requiring modification to the system's base layers. The node.js application uses the same Event Manager and Network Manager as the memcached application. We found no need for pre-emption while porting this application. This provides evidence that our approach leads to constructing reusable software, without which the effort to port applications to EbbRT would be daunting.
Had we needed to construct an execution environment for node.js which was orthogonal to the environment used by memcached, it would be difficult to argue that our approach is practical. The software written for one environment would not be able to interact with the software in another. The primitives provided by EbbRT are simple and lightweight, allowing for the optimizations exploited in the memcached application, yet the same primitives are also expressive enough to be suitable for a wide range of different applications.
From a networking perspective, node.js running on EbbRT has the same kind of performance advantages as in the memcached use case described above. This work also opens up the door for other performance advantages in optimizing the managed runtime to take full advantage of direct control of the page table and event dispatching for sake of improved garbage collection, memory management, and thread management [3, ?].
4.3 Sage
In this case study we extend Sage (mathematics software)[1] with EbbRT. This study demonstrates how a process running on a general purpose OS can elastically exploit an IaaS by offloading functionality to specialized library OSs.
Sage is an open source mathematics environment similar to Matlab. It provides many common math library routines and objects through a Python interface (typically accessed via an interactive shell). One limitation of Sage is that all of its standard routines and objects are designed to execute on a single machine and do not scale. Sage does support MPI interfaces, but this puts the burden on a mathematical user to write explicit parallel code and requires users to set up a dedicated static MPI cluster. EbbRT integration into Sage provides a path for using IaaS resources to transparently enable a user to do large scale parallel computation with no additional burden.

Sage incorporates many software libraries; porting the entire Sage environment would be a significant investment in developer time. Instead, we explore the ability to use EbbRT to incrementally modify existing applications. We created a Python module which can be dynamically loaded into the Sage environment. This module links with the EbbRT Linux library and provides a Python matrix object which wraps a matrix Ebb. When this Python matrix is instantiated at the command line, an instance of the matrix Ebb is constructed to back it. When calls are made to the Python matrix object they are forwarded to the matrix Ebb, which may internally distribute its functionality to satisfy its interface. Figure 7 illustrates the realized runtime structure.
In our particular matrix Ebb implementation, the representative running within the Sage process on Linux allocates nodes from the Node Allocator, booted with the EbbRT Library OS, to hold a fixed tile of the matrix values and perform the core computations on that tile. The matrix Ebb links with the Boost uBLAS library to provide local matrix operations. Nodes are allocated lazily, when an operation requires a particular portion of
the matrix for the first time. This structure allows for matrix operations to be done both lazily and in parallel. For example, as matrix elements are set, the Linux representative will allocate nodes as necessary to store the tile of the matrix that the element belongs to. Operations of the matrix Ebb such as element-wise randomization can naturally be done in parallel across the tiles. Our matrix Ebb implements a number of matrix operations such as summation, multiplication, element-wise randomization, and element access.
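The lazy tile placement can be sketched as follows; the class and method names are hypothetical, and the Node Allocator call is reduced to generating a placeholder node name.

```cpp
// Sketch of lazy, per-tile node allocation for a distributed matrix Ebb.
#include <cstdio>
#include <map>
#include <string>
#include <utility>

struct MatrixEbbFrontEnd {
  int tile_size;                                     // elements per tile edge
  std::map<std::pair<int, int>, std::string> tiles;  // tile index -> back-end node

  std::string& NodeForTile(int row, int col) {
    auto key = std::make_pair(row / tile_size, col / tile_size);
    auto it = tiles.find(key);
    if (it == tiles.end()) {
      // First touch of this tile: allocate and boot a back-end node for it
      // (stand-in for a Node Allocator call booting the EbbRT library OS).
      std::string node = "backend-" + std::to_string(tiles.size());
      it = tiles.emplace(key, std::move(node)).first;
    }
    return it->second;
  }

  void Set(int row, int col, double v) {
    std::printf("set (%d,%d)=%g on %s\n", row, col, v, NodeForTile(row, col).c_str());
  }
};

int main() {
  MatrixEbbFrontEnd m{1024, {}};
  m.Set(0, 0, 1.0);     // allocates the first tile's node
  m.Set(0, 1, 2.0);     // same tile, no new node
  m.Set(2048, 0, 3.0);  // different tile, new back-end node
}
```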
The EbbRT Library OS is well suited for offloading computationally expensive functionality because it allows the application complete control of the hardware. For example, interrupts are disabled which prevents context switches from causing cache pollution to slow down the computation. Additionally, complete control over memory allows the use of large pages to reduce TLB contention.
From the perspective of a user at the Sage console, the matrix behaves just as any other Python object. In fact, if an instance of the matrix object is garbage collected (perhaps due to the Python variable going out of scope), the underlying Ebb is destroyed and any nodes that were allocated are freed to the Node Allocator. This is a feature of the particular matrix Ebb implementation. A different implementation may colocate matrices on the same nodes, in which case its destruction logic would encapsulate the dependency. Ebb encapsulation ensures that such differences in implementation would not impact Sage or the Python module.
### 4.3.1 Discussion
The performance of the matrix operations that we have implemented as Ebbs is what one would expect from a distributed, tile oriented matrix implementation. Fundamentally, the point of this exercise was not a demonstration of our particular matrix Ebb's parallel superiority, but rather of EbbRT's ability to extend Sage with a distributed matrix Ebb and have it naturally and transparently used.
EbbRT's MultiLibOS design and implementation enable a customized version of a front-end library and back-end library OS to be developed that targets a core application function and can be integrated into an existing complex application stack. Our implementation was able to introduce fine grain elasticity into Sage, where IaaS node consumption grows and shrinks not only with the number of matrix instances but, even more finely, with the active tiles of large matrices.
This study illustrates that EbbRT can be an effective tool for evolving the use of IaaS resources. Novel uses of an IaaS can be evolved by extending existing applications with EbbRT. Libraries of reusable Ebbs that target core primitives such as various types of matrices and associated operations can be developed. The libraries can be used to explore the incremental acceleration of many applications in addition to the wholesale development of new applications. As IaaS providers evolve support for higher performance data-center interconnects with features such as RDMA and native support for collective and reduce operators[6], EbbRT libraries can enable direct application use.
## 5 Conclusion
We have introduced a new system software runtime called EbbRT. EbbRT explores a unique system architecture, where general purpose OSs are augmented by small library OSs to exploit the features of an IaaS provider. Our system adopts a non-preemptive execution model which allows the event driven nature of modern cloud applications to take advantage of the hardware directly. We also explore a new partitioned object model, called Ebbs, which encapsulate distributed software, allowing components to be independently customized and reused.
Our runtime allows applications to run software on our lightweight library operating system without requiring large investment in porting existing, non-performance critical functionality. We have demonstrated through our memcached implementation that by allowing applications to more directly exploit the hardware, significant performance advantages can be realized. Our node.js port shows that by offloading functionality, we can rapidly port rich applications to reap the benefits of library operating systems. Finally, our Sage application shows how we can integrate our library with existing applications to enable the use of IaaS resources in a fine-grain fashion.
In contrast to a conventional operating system, which at some level can be defined to be complete, EbbRT is intended to provide a structure for constantly evolving system software to meet new application needs and hardware. Results presented in this paper give us some confidence that the architecture will be flexible enough to meet this challenge.
Serious open questions remain about our system design. One important assumption of this work is that IaaS providers will further improve the ability to rapidly provision hardware on demand. We fear that some value of our system will be lost if this does not hold true. Another significant concern is that the development of different applications will lead to large vertical stacks of software which do not compose. Many different implementations of system software may also cause a significant configuration
challenge. It remains to be seen how these challenges impact the system.
References
|
{"Source-Url": "http://dcommon.bu.edu/bitstream/handle/2144/21764/2015-005-ebbrt-overview.pdf?isAllowed=y&sequence=1", "len_cl100k_base": 10511, "olmocr-version": "0.1.50", "pdf-total-pages": 16, "total-fallback-pages": 0, "total-input-tokens": 48546, "total-output-tokens": 14054, "length": "2e13", "weborganizer": {"__label__adult": 0.0003330707550048828, "__label__art_design": 0.0003676414489746094, "__label__crime_law": 0.00023818016052246096, "__label__education_jobs": 0.0005908012390136719, "__label__entertainment": 8.177757263183594e-05, "__label__fashion_beauty": 0.00014770030975341797, "__label__finance_business": 0.0002541542053222656, "__label__food_dining": 0.0003139972686767578, "__label__games": 0.000530242919921875, "__label__hardware": 0.0019092559814453125, "__label__health": 0.00043702125549316406, "__label__history": 0.00035691261291503906, "__label__home_hobbies": 9.453296661376952e-05, "__label__industrial": 0.0004520416259765625, "__label__literature": 0.0002231597900390625, "__label__politics": 0.0002338886260986328, "__label__religion": 0.0004799365997314453, "__label__science_tech": 0.05584716796875, "__label__social_life": 7.957220077514648e-05, "__label__software": 0.00946044921875, "__label__software_dev": 0.92626953125, "__label__sports_fitness": 0.0002267360687255859, "__label__transportation": 0.0005984306335449219, "__label__travel": 0.0002244710922241211}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 61740, 0.03041]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 61740, 0.25881]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 61740, 0.90043]], "google_gemma-3-12b-it_contains_pii": [[0, 392, false], [392, 4134, null], [4134, 9406, null], [9406, 13700, null], [13700, 18796, null], [18796, 22466, null], [22466, 28035, null], [28035, 30581, null], [30581, 35580, null], [35580, 38555, null], [38555, 43768, null], [43768, 48576, null], [48576, 53792, null], [53792, 55806, null], [55806, 59860, null], [59860, 61740, null]], "google_gemma-3-12b-it_is_public_document": [[0, 392, true], [392, 4134, null], [4134, 9406, null], [9406, 13700, null], [13700, 18796, null], [18796, 22466, null], [22466, 28035, null], [28035, 30581, null], [30581, 35580, null], [35580, 38555, null], [38555, 43768, null], [43768, 48576, null], [48576, 53792, null], [53792, 55806, null], [55806, 59860, null], [59860, 61740, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 61740, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 61740, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 61740, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 61740, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 61740, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 61740, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 61740, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 61740, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 61740, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 61740, null]], "pdf_page_numbers": [[0, 392, 1], [392, 4134, 2], [4134, 9406, 3], [9406, 13700, 4], [13700, 18796, 5], [18796, 22466, 6], [22466, 28035, 7], [28035, 
30581, 8], [30581, 35580, 9], [35580, 38555, 10], [38555, 43768, 11], [43768, 48576, 12], [48576, 53792, 13], [53792, 55806, 14], [55806, 59860, 15], [59860, 61740, 16]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 61740, 0.0]]}
|
olmocr_science_pdfs
|
2024-11-30
|
2024-11-30
|
2a841f3b0dcfd48a84049472cd628340df1c0b51
|
Cloud computing presents a new way to supplement the current consumption and delivery model for IT services based on the Internet, by providing for dynamically scalable and often virtualized resources as a service over the Internet. To date, there are a number of notable commercial and individual cloud computing services, including Amazon, Google, Microsoft, Yahoo, and Salesforce [19]. Details of the services provided are abstracted from the users, who no longer need to be experts in technology infrastructure. Moreover, users may not know the machines which actually process and host their data. While enjoying the convenience brought by this new technology, users also start worrying about losing control of their own data. The data processed on clouds are often outsourced, leading to a number of issues related to accountability, including the handling of personally identifiable information. Such fears are becoming a significant barrier to the wide adoption of cloud services [30].
To allay users’ concerns, it is essential to provide an effective mechanism for users to monitor the usage of their data in the cloud. For example, users need to be able to ensure that their data are handled according to the service-level agreements made at the time they sign on for services in the cloud. Conventional access control approaches developed for closed domains such as databases and operating systems, or approaches using a centralized server in distributed environments, are not suitable, due to the following features characterizing cloud environments. First, data handling can be outsourced by the direct cloud service provider (CSP) to other entities in the cloud and these entities can also delegate the tasks to others, and so on. Second, entities are allowed to join and leave the cloud in a flexible manner. As a result, data handling in the cloud goes through a complex and dynamic hierarchical service chain which does not exist in conventional environments.
To overcome the above problems, we propose a novel approach, namely the Cloud Information Accountability (CIA) framework, based on the notion of information accountability [44]. Unlike privacy protection technologies which are built on the hide-it-or-lose-it perspective, information accountability focuses on keeping the data usage transparent and trackable. Our proposed CIA framework provides end-to-end accountability in a highly distributed fashion. One of the main innovative features of the CIA framework lies in its ability to maintain lightweight and powerful accountability that combines aspects of access control, usage control, and authentication. By means of the CIA, data owners can not only track whether the service-level agreements are being honored, but also enforce access and usage control rules as needed. Associated with the accountability feature, we also develop two distinct modes for auditing: push mode and pull mode. The push mode refers to logs being periodically sent to the data owner or stakeholder, while the pull mode refers to an alternative approach whereby the user (or another authorized party) can retrieve the logs as needed.
The design of the CIA framework presents substantial challenges, including uniquely identifying CSPs, ensuring the reliability of the log, adapting to a highly decentralized infrastructure, etc. Our basic approach toward addressing these issues is to leverage and extend the programmable capability of JAR (Java ARchives) files to automatically log the usage of the users’ data by any entity in the cloud. Users will send their data along with any policies such as access control policies and logging policies that they want to
enforce, enclosed in JAR files, to cloud service providers. Any access to the data will trigger an automated and authenticated logging mechanism local to the JARs. We refer to this type of enforcement as "strong binding" since the policies and the logging mechanism travel with the data. This strong binding exists even when copies of the JARs are created; thus, the user will have control over his data at any location. Such a decentralized logging mechanism suits the dynamic nature of the cloud but also imposes challenges on ensuring the integrity of the logging. To cope with this issue, we provide the JARs with a central point of contact which forms a link between them and the user. It records the error correction information sent by the JARs, which allows it to monitor the loss of any logs from any of the JARs. Moreover, if a JAR is not able to contact its central point, any access to its enclosed data will be denied.
Currently, we focus on image files since images represent a very common content type for end users and organizations (as is proven by the popularity of Flickr [14]) and are increasingly hosted in the cloud as part of the storage services offered by the utility computing paradigm featured by cloud computing. Further, images often reveal social and personal habits of users, or are used for archiving important files from organizations. In addition, our approach can handle personally identifiable information provided it is stored as an image file (i.e., an image of the textual content, for example, an SSN stored as a .jpg file).
We tested our CIA framework in a cloud testbed, the Emulab testbed [42], with Eucalyptus as middleware [41]. Our experiments demonstrate the efficiency, scalability, and granularity of our approach. In addition, we also provide a detailed security analysis and discuss the reliability and strength of our architecture in the face of various nontrivial attacks, launched by malicious users or due to a compromised Java Runtime Environment (JRE).
In summary, our main contributions are as follows:
- We propose a novel automatic and enforceable logging mechanism in the cloud. To our knowledge, this is the first time a systematic approach to data accountability through the novel usage of JAR files is proposed.
- Our proposed architecture is platform independent and highly decentralized, in that it does not require any dedicated authentication or storage system in place.
- We go beyond traditional access control in that we provide a certain degree of usage control for the protected data after these are delivered to the receiver.
- We conduct experiments on a real cloud testbed. The results demonstrate the efficiency, scalability, and granularity of our approach. We also provide a detailed security analysis and discuss the reliability and strength of our architecture.
This paper is an extension of our previous conference paper [40]. We have made the following new contributions. First, we integrated integrity checks and the oblivious hashing (OH) technique into our system in order to strengthen its dependability in case of a compromised JRE. We also updated the log record structure to provide additional guarantees of integrity and authenticity. Second, we extended the security analysis to cover more possible attack scenarios. Third, we report the results of new experiments and provide a thorough evaluation of the system performance. Fourth, we have added a detailed discussion on related works to give readers a better understanding of the background. Finally, we have improved the presentation by adding more examples and illustrative figures.
The rest of the paper is organized as follows: Section 2 discusses related work. Section 3 lays out our problem statement. Section 4 presents our proposed Cloud Information Accountability framework, and Sections 5 and 6 describe the detailed algorithms for automated logging mechanism and auditing approaches, respectively. Section 7 presents a security analysis of our framework, followed by an experimental study in Section 8. Finally, Section 9 concludes the paper and outlines future research directions.
2 RELATED WORK
In this section, we first review related works addressing the privacy and security issues in the cloud. Then, we briefly discuss works which adopt similar techniques as our approach but serve for different purposes.
2.1 Cloud Privacy and Security
Cloud computing has raised a range of important privacy and security issues [19], [25], [30]. Such issues are due to the fact that, in the cloud, users’ data and applications reside—at least for a certain amount of time—on the cloud cluster which is owned and maintained by a third party. Concerns arise since in the cloud it is not always clear to individuals why their personal information is requested or how it will be used or passed on to other parties. To date, little work has been done in this space, in particular with respect to accountability. Pearson et al. have proposed accountability mechanisms to address privacy concerns of end users [30] and then develop a privacy manager [31]. Their basic idea is that the user’s private data are sent to the cloud in an encrypted form, and the processing is done on the encrypted data. The output of the processing is deobfuscated by the privacy manager to reveal the correct result. However, the privacy manager provides only limited features in that it does not guarantee protection once the data are being disclosed. In [7], the authors present a layered architecture for addressing the end-to-end trust management and accountability problem in federated systems. The authors’ focus is very different from ours, in that they mainly leverage trust relationships for accountability, along with authentication and anomaly detection. Further, their solution requires third-party services to complete the monitoring and focuses on lower level monitoring of system resources.
Researchers have investigated accountability mostly as a provable property through cryptographic mechanisms, particularly in the context of electronic commerce [10], [21]. A representative work in this area is given by [9]. The authors propose the usage of policies attached to the data and present a logic for accountability data in distributed settings. Similarly, Jagadeesan et al. recently proposed a logic for designing accountability-based distributed systems [20]. In [10], Crispo and Ruffo proposed an interesting approach
related to accountability in case of delegation. Delegation is complementary to our work, in that we do not aim at controlling the information workflow in the clouds. In summary, all these works stay at a theoretical level and do not include any algorithm for tasks like mandatory logging.
To the best of our knowledge, the only work proposing a distributed approach to accountability is from Lee and colleagues [22]. The authors have proposed an agent-based system specific to grid computing. Distributed jobs, along with the resource consumption at local machines are tracked by static software agents. The notion of accountability policies in [22] is related to ours, but it is mainly focused on resource consumption and on tracking of subjobs processed at multiple computing nodes, rather than access control.
2.2 Other Related Techniques
With respect to Java-based techniques for security, our methods are related to self-defending objects (SDO) [17]. Self-defending objects are an extension of the object-oriented programming paradigm, where software objects that offer sensitive functions or hold sensitive data are responsible for protecting those functions/data. Similarly, we also extend the concepts of object-oriented programming. The key difference in our implementations is that the authors still rely on a centralized database to maintain the access records, while the items being protected are held as separate files. In previous work, we provided a Java-based approach to prevent privacy leakage from indexing [39], which could be integrated with the CIA framework proposed in this work since they build on related architectures.
In terms of authentication techniques, Appel and Felten [13] proposed the Proof-Carrying Authentication (PCA) framework. The PCA includes a higher-order logic language that allows quantification over predicates, and focuses on access control for web services. While related to ours to the extent that it helps maintain safe, high-performance, mobile code, the PCA's goal is quite different from our research, as it focuses on validating code rather than monitoring content. Another work is by Mont et al., who proposed an approach for strongly coupling content with access control, using Identity-Based Encryption (IBE) [26]. We also leverage IBE techniques, but in a very different way. We do not rely on IBE to bind the content with the rules. Instead, we use it to provide strong guarantees for the encrypted content and the log files, such as protection against chosen plaintext and ciphertext attacks.
In addition, our work may look similar to works on secure data provenance [5], [6], [15], but in fact greatly differs from them in terms of goals, techniques, and application domains. Works on data provenance aim to guarantee data integrity by securing the data provenance. They ensure that no one can add or remove entries in the middle of a provenance chain without detection, so that data are correctly delivered to the receiver. Differently, our work is to provide data accountability, to monitor the usage of the data and ensure that any access to the data is tracked. Since it is in a distributed environment, we also log where the data go. However, this is not for verifying data integrity, but rather for auditing whether data receivers use the data following specified policies. Along the lines of extended content protection, usage control [33] is being investigated as an extension of current access control mechanisms. Current efforts on usage control are primarily focused on conceptual analysis of usage control requirements and on languages to express constraints at various level of granularity [32], [34]. While some notable results have been achieved in this respect [12], [34], thus far, there is no concrete contribution addressing the problem of usage constraints enforcement, especially in distributed settings [32]. The few existing solutions are partial [12], [28], [29], restricted to a single domain, and often specialized [3], [24], [46]. Finally, general outsourcing techniques have been investigated over the past few years [2], [38]. Although only [43] is specific to the cloud, some of the outsourcing protocols may also be applied in this realm. In this work, we do not cover issues of data storage security which are a complementary aspect of the privacy issues.
3 PROBLEM STATEMENT
We begin this section by considering an illustrative example which serves as the basis of our problem statement and will be used throughout the paper to demonstrate the main features of our system.
Example 1. Alice, a professional photographer, plans to sell her photographs by using the SkyHigh Cloud Services. For her business in the cloud, she has the following requirements:
- Her photographs are downloaded only by users who have paid for her services.
- Potential buyers are allowed to view her pictures first before they make the payment to obtain the download right.
- Due to the nature of some of her works, only users from certain countries can view or download some sets of photographs.
- For some of her works, users are allowed to only view them for a limited time, so that the users cannot reproduce her work easily.
- In case any dispute arises with a client, she wants to have all the access information of that client.
- She wants to ensure that the cloud service providers of SkyHigh do not share her data with other service providers, so that the accountability provided for individual users can also be expected from the cloud service providers.
With the above scenario in mind, we identify the common requirements and develop several guidelines to achieve data accountability in the cloud. A user who subscribes to a certain cloud service usually needs to send his/her data as well as associated access control policies (if any) to the service provider. After the data are received by the cloud service provider, the service provider will be granted access rights, such as read, write, and copy, on the data. Using conventional access control mechanisms, once the access rights are granted, the data will be fully available at the service provider. In order to track the actual usage of the
data, we aim to develop novel logging and auditing techniques which satisfy the following requirements:
1. The logging should be decentralized in order to adapt to the dynamic nature of the cloud. More specifically, log files should be tightly bound to the corresponding data being controlled, and require minimal infrastructural support from any server.
2. Every access to the user’s data should be correctly and automatically logged. This requires integrated techniques to authenticate the entity who accesses the data, verify, and record the actual operations on the data as well as the time that the data have been accessed.
3. Log files should be reliable and tamper proof to avoid illegal insertion, deletion, and modification by malicious parties. Recovery mechanisms are also desirable to restore damaged log files caused by technical problems.
4. Log files should be sent back to their data owners periodically to inform them of the current usage of their data. More importantly, log files should be retrievable by their data owners at any time when needed, regardless of the location where the files are stored.
5. The proposed technique should not intrusively monitor data recipients' systems, nor should it introduce heavy communication and computation overhead, which would otherwise hinder its feasibility and adoption in practice.
4 CLOUD INFORMATION ACCOUNTABILITY
In this section, we present an overview of the Cloud Information Accountability framework and discuss how the CIA framework meets the design requirements discussed in the previous section.
The Cloud Information Accountability framework proposed in this work conducts automated logging and distributed auditing of relevant access performed by any entity, carried out at any point of time at any cloud service provider. It has two major components: logger and log harmonizer.
4.1 Major Components
There are two major components of the CIA, the first being the logger, and the second being the log harmonizer. The logger is the component which is strongly coupled with the user’s data, so that it is downloaded when the data are accessed, and is copied whenever the data are copied. It handles a particular instance or copy of the user’s data and is responsible for logging access to that instance or copy. The log harmonizer forms the central component which allows the user access to the log files.
The logger is strongly coupled with user’s data (either single or multiple data items). Its main tasks include automatically logging access to data items that it contains, encrypting the log record using the public key of the content owner, and periodically sending them to the log harmonizer. It may also be configured to ensure that access and usage control policies associated with the data are honored. For example, a data owner can specify that user X is only allowed to view but not to modify the data. The logger will control the data access even after it is downloaded by user X.
The logger requires only minimal support from the server (e.g., a valid Java virtual machine installed) in order to be deployed. The tight coupling between data and logger results in a highly distributed logging system, therefore meeting our first design requirement. Furthermore, since the logger does not need to be installed on any system or require any special support from the server, it is not very intrusive in its actions, thus satisfying our fifth requirement. Finally, the logger is also responsible for generating the error correction information for each log record and sending it to the log harmonizer. The error correction information combined with the encryption and authentication mechanism provides a robust and reliable recovery mechanism, therefore meeting the third requirement.
The log harmonizer is responsible for auditing.
Being the trusted component, the log harmonizer generates the master key. It holds on to the decryption key for the IBE key pair, as it is responsible for decrypting the logs. Alternatively, the decryption can be carried out on the client end if the path between the log harmonizer and the client is not trusted. In this case, the harmonizer sends the key to the client in a secure key exchange.
It supports two auditing strategies: push and pull. Under the push strategy, the log file is pushed back to the data owner periodically in an automated fashion. The pull mode is an on-demand approach, whereby the log file is obtained by the data owner as often as requested. These two modes allow us to satisfy the aforementioned fourth design requirement. In case there exist multiple loggers for the same set of data items, the log harmonizer will merge log records from them before sending back to the data owner. The log harmonizer is also responsible for handling log file corruption. In addition, the log harmonizer can itself carry out logging in addition to auditing. Separating the logging and auditing functions improves the performance. The logger and the log harmonizer are both implemented as lightweight and portable JAR files. The JAR file implementation provides automatic logging functions, which meets the second design requirement.
4.2 Data Flow
The overall CIA framework, combining data, users, logger and harmonizer is sketched in Fig. 1. At the beginning, each user creates a pair of public and private keys based on Identity-Based Encryption [4] (step 1 in Fig. 1). This IBE scheme is a Weil-pairing-based IBE scheme, which protects us against one of the most prevalent attacks to our architecture as described in Section 7. Using the generated key, the user will create a logger component which is a JAR file, to store its data items.
The JAR file includes a set of simple access control rules specifying whether and how the cloud servers, and possibly other data stakeholders (users, companies) are authorized to access the content itself. Then, he sends the JAR file to the cloud service provider that he subscribes to. To authenticate the CSP to the JAR (steps 3-5 in Fig. 1), we use OpenSSL-based certificates, wherein a trusted certificate authority certifies the CSP. In the event that the access is requested by a user, we employ SAML-based authentication [8], wherein a trusted identity provider issues certificates verifying the user’s identity based on his username.
Once the authentication succeeds, the service provider (or the user) will be allowed to access the data enclosed in the JAR. Depending on the configuration settings defined at the time of creation, the JAR will provide usage control associated with logging, or will provide only logging functionality. As for the logging, each time there is an access to the data, the JAR will automatically generate a log record, encrypt it using the public key distributed by the data owner, and store it along with the data (step 6 in Fig. 1). The encryption of the log file prevents unauthorized changes to the file by attackers. The data owner could opt to reuse the same key pair for all JARs or create different key pairs for separate JARs. Using separate keys can enhance the security (detailed discussion is in Section 7) without introducing any overhead except in the initialization phase. In addition, some error correction information will be sent to the log harmonizer to handle possible log file corruption (step 7 in Fig. 1). To ensure trustworthiness of the logs, each record is signed by the entity accessing the content. Further, individual records are hashed together to create a chain structure, able to quickly detect possible errors or missing records. The encrypted log files can later be decrypted and their integrity verified. They can be accessed by the data owner or other authorized stakeholders at any time for auditing purposes with the aid of the log harmonizer (step 8 in Fig. 1).
As discussed in Section 7, our proposed framework prevents various attacks such as detecting illegal copies of users’ data. Note that our work is different from traditional logging methods which use encryption to protect log files. With only encryption, their logging mechanisms are neither automatic nor distributed. They require the data to stay within the boundaries of the centralized system for the logging to be possible, which is however not suitable in the cloud.
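To make the packaging step of this data flow concrete, the following Java sketch bundles a data item, an access policy, and the owner's public key into a logger JAR (steps 1-2 in Fig. 1). It is a minimal illustration under our own assumptions: the class name, entry names, and policy syntax are hypothetical, an RSA key pair stands in for the Weil-pairing IBE key pair, and the data item is written unencrypted for brevity.

```java
import java.io.FileOutputStream;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;

// Minimal packaging sketch: the owner generates a key pair (an RSA pair stands in
// for the Weil-pairing IBE pair), then bundles the data item, its access policy,
// and the encryption public key into a logger JAR. File names are hypothetical.
public class LoggerJarBuilder {
    public static void main(String[] args) throws Exception {
        KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
        kpg.initialize(2048);
        KeyPair ownerKeys = kpg.generateKeyPair();

        byte[] dataItem = Files.readAllBytes(Paths.get("photo.jpg"));   // would be stored encrypted
        String policy = "action=view;payers-only=true;countries=US,CA"; // hypothetical policy syntax

        try (JarOutputStream jar = new JarOutputStream(new FileOutputStream("logger.jar"))) {
            jar.putNextEntry(new JarEntry("data/photo.enc"));
            jar.write(dataItem);
            jar.closeEntry();

            jar.putNextEntry(new JarEntry("policy/access.policy"));
            jar.write(policy.getBytes(StandardCharsets.UTF_8));
            jar.closeEntry();

            jar.putNextEntry(new JarEntry("keys/owner-public.key"));
            jar.write(ownerKeys.getPublic().getEncoded());
            jar.closeEntry();
        }
    }
}
```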
Example 2. Considering Example 1, Alice can enclose her photographs and access control policies in a JAR file and send the JAR file to the cloud service provider. With the aid of control associated logging (called AccessLog in Section 5.2), Alice will be able to enforce the first four requirements and record the actual data access. On a regular basis, the push-mode auditing mechanism will inform Alice about the activity on each of her photographs as this allows her to keep track of her clients’ demographics and the usage of her data by the cloud service provider. In the event of some dispute with her clients, Alice can rely on the pull-mode auditing mechanism to obtain log records.
5 AUTOMATED LOGGING MECHANISM
In this section, we first elaborate on the automated logging mechanism and then present techniques to guarantee dependability.
5.1 The Logger Structure
We leverage the programmable capability of JARs to conduct automated logging. A logger component is a nested Java JAR file which stores a user’s data items and corresponding log files. As shown in Fig. 2, our proposed JAR file consists of one outer JAR enclosing one or more inner JARs.
The main responsibility of the outer JAR is to handle authentication of entities which want to access the data stored in the JAR file. In our context, the data owners may not know the exact CSPs that are going to handle the data. Hence, authentication is specified according to the server's functionality (which we assume to be known through a lookup service), rather than the server's URL or identity. For example, a policy may state that Server X is allowed to download the data if it is a storage server. As discussed below, the outer JAR may also have the access control functionality to enforce the data owner's requirements, specified as Java policies, on the usage of the data. A Java policy specifies which permissions are available for a particular piece of code in a Java application environment. The permissions expressed in the Java policy are in terms of File System Permissions. However, the data owner can specify the permissions in user-centric terms as opposed to the usual code-centric security offered by Java, using Java Authentication and Authorization Services. Moreover, the outer JAR is also in charge of selecting the correct inner JAR according to the identity of the entity who requests the data.
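The following sketch illustrates, under hypothetical paths and permissions, how owner requirements could be expressed as Java file-system permissions and checked by the outer JAR before an inner JAR is handed out; it is not the actual policy format used by our implementation.

```java
import java.io.FilePermission;
import java.security.Permission;
import java.security.PermissionCollection;
import java.security.Permissions;

// Sketch of a user-centric permission check performed by the outer JAR.
// The granted set and the requested paths are hypothetical examples.
public class OuterJarPolicyCheck {
    public static void main(String[] args) {
        PermissionCollection granted = new Permissions();
        // Grant read-only access to the photos enclosed in inner JAR J1.
        granted.add(new FilePermission("/cia/innerJ1/photos/-", "read"));

        Permission view  = new FilePermission("/cia/innerJ1/photos/alps.jpg", "read");
        Permission write = new FilePermission("/cia/innerJ1/photos/alps.jpg", "write");

        System.out.println("view allowed?  " + granted.implies(view));   // true
        System.out.println("write allowed? " + granted.implies(write));  // false
    }
}
```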
Example 3. Consider Example 1. Suppose that Alice's photographs are classified into three categories according to the locations where the photos were taken. The three groups of photos are stored in three inner JAR J1, J2, and J3, respectively, associated with different access control policies. If some entities are allowed to access only one group of the photos, say J1, the outer JAR will just render the corresponding inner JAR to the entity based on the policy evaluation result.
Each inner JAR contains the encrypted data, class files to facilitate retrieval of log files and display enclosed data in a suitable format, and a log file for each encrypted item. We support two options:
- **PureLog.** Its main task is to record every access to the data. The log files are used for pure auditing purpose.
- **AccessLog.** It has two functions: logging actions and enforcing access control. In case an access request is denied, the JAR will record the time when the request is made. If the access request is granted, the JAR will additionally record the access information along with the duration for which the access is allowed.
The two kinds of logging modules allow the data owner to enforce certain access conditions either proactively (in case of AccessLogs) or reactively (in case of PureLogs). For example, services like billing may just need to use PureLogs. AccessLogs will be necessary for services which need to enforce service-level agreements such as limiting the visibility to some sensitive content at a given location.
To carry out these functions, the inner JAR contains a class file for writing the log records, another class file which corresponds with the log harmonizer, the encrypted data, a third class file for displaying or downloading the data (based on whether we have a PureLog or an AccessLog), and the public key of the IBE key pair that is necessary for encrypting the log records. No secret keys are ever stored in the system. The outer JAR may contain one or more inner JARs, in addition to a class file for authenticating the servers or the users, another class file for finding the correct inner JAR, and a third class file which checks the JVM's validity using oblivious hashing. Further, a class file is used for managing the GUI for user authentication and the Java policy.
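A compilable sketch of the two logging modules is given below; the interface and class names are ours and only mirror the behavior described above, and the helper interfaces stand in for the actual inner-JAR class files.

```java
// Sketch of the two logging modules. Names and interfaces are illustrative.
interface LogWriter { void append(String requesterId, String action); }
interface ContentStore { byte[] decryptForDisplay(); }
interface PolicyEvaluator { boolean permits(String requesterId, String action); }

interface LoggingModule {
    byte[] serveRequest(String requesterId, String action);
}

// PureLog: record every access, then release the decrypted content.
class PureLog implements LoggingModule {
    private final LogWriter log;
    private final ContentStore content;
    PureLog(LogWriter log, ContentStore content) { this.log = log; this.content = content; }
    public byte[] serveRequest(String requesterId, String action) {
        log.append(requesterId, action);          // pure auditing, no enforcement
        return content.decryptForDisplay();
    }
}

// AccessLog: evaluate the owner's policy first; log the outcome either way.
class AccessLog implements LoggingModule {
    private final LogWriter log;
    private final ContentStore content;
    private final PolicyEvaluator policy;
    AccessLog(LogWriter log, ContentStore content, PolicyEvaluator policy) {
        this.log = log; this.content = content; this.policy = policy;
    }
    public byte[] serveRequest(String requesterId, String action) {
        boolean granted = policy.permits(requesterId, action);
        log.append(requesterId, granted ? action : action + ":denied");
        if (!granted) throw new SecurityException("access denied by owner policy");
        return content.decryptForDisplay();
    }
}
```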
5.2 Log Record Generation
Log records are generated by the logger component. Logging occurs at any access to the data in the JAR, and new log entries are appended sequentially, in order of creation \( LR = (r_1, \ldots, r_k) \). Each record \( r_i \) is encrypted individually and appended to the log file. In particular, a log record takes the following form:
\[
r_i = \langle ID, Act, T, Loc, h((ID, Act, T, Loc)|r_{i-1}| \ldots |r_1), sig \rangle
\]
Here, \( r_i \) indicates that an entity identified by ID has performed an action Act on the user's data at time T at location Loc. The component \( h((ID, Act, T, Loc)|r_{i-1}| \ldots |r_1) \) corresponds to the checksum of the records preceding the newly inserted one, concatenated with the main content of the record itself (we use | to denote concatenation). The checksum is computed using a collision-free hash function [37]. The component \( sig \) denotes the signature of the record created by the server. If more than one file is handled by the same logger, an additional ObjID field is added to each record. An example of a log record for a single file is shown below.
Example 4. Suppose that a cloud service provider with ID Kronos, located in the USA, read the image in a JAR file (but did not download it) at 4:52 pm on May 29, 2011. The corresponding log record is
\[
(Kronos, View, 2011-05-29 16:52:30, USA, 45rftT024g, r94gm30130ff).
\]
The location is converted from the IP address for improved readability.
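The following Java sketch shows how a record of this form could be appended to a hash-chained log. SHA-256 and an RSA signature are used as stand-ins for the hash function and the signing scheme, the IBE encryption of each record is omitted, and all class and variable names are illustrative.

```java
import java.nio.charset.StandardCharsets;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.MessageDigest;
import java.security.Signature;
import java.util.ArrayList;
import java.util.Base64;
import java.util.List;

// Sketch of record generation: each record carries a checksum chaining it to all
// earlier records, plus the accessor's signature over the body and checksum.
public class HashChainedLog {
    private final List<String> records = new ArrayList<>();
    private final MessageDigest sha256;

    public HashChainedLog() throws Exception {
        sha256 = MessageDigest.getInstance("SHA-256");
    }

    public String append(String id, String act, String time, String loc,
                         Signature signer) throws Exception {
        String body = id + "|" + act + "|" + time + "|" + loc;

        // h((ID, Act, T, Loc) | r_{i-1} | ... | r_1): hash the new body plus all prior records.
        sha256.reset();
        sha256.update(body.getBytes(StandardCharsets.UTF_8));
        for (int i = records.size() - 1; i >= 0; i--) {
            sha256.update(records.get(i).getBytes(StandardCharsets.UTF_8));
        }
        String checksum = Base64.getEncoder().encodeToString(sha256.digest());

        // sig: the entity accessing the content signs the record body and checksum.
        signer.update((body + "|" + checksum).getBytes(StandardCharsets.UTF_8));
        String sig = Base64.getEncoder().encodeToString(signer.sign());

        String record = body + "|" + checksum + "|" + sig;
        records.add(record);   // in the real logger the record is IBE-encrypted before storage
        return record;
    }

    public static void main(String[] args) throws Exception {
        KeyPairGenerator kpg = KeyPairGenerator.getInstance("RSA");
        kpg.initialize(2048);
        KeyPair cspKeys = kpg.generateKeyPair();
        Signature signer = Signature.getInstance("SHA256withRSA");
        signer.initSign(cspKeys.getPrivate());

        HashChainedLog log = new HashChainedLog();
        System.out.println(log.append("Kronos", "View", "2011-05-29 16:52:30", "USA", signer));
    }
}
```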
To ensure the correctness of the log records, we verify the access time, locations, as well as actions. In particular, the time of access is determined using the Network Time Protocol (NTP) [35] to avoid suppression of the correct time by a malicious entity. The location of the cloud service provider can be determined using the IP address. The JAR can perform an IP lookup and use the range of the IP address to find the most probable location of the CSP. More advanced techniques for determining location can also be used [16]. Similarly, if a trusted time stamp management infrastructure can be set up or leveraged, it can be used to record the time stamp in the accountability log [1]. The most critical part is to log the actions on the users' data. In the current system, we support four types of actions, i.e., Act has one of the following four values: view, download, timed access, and location-based access. For each action, we propose a specific method to correctly record or enforce it depending on the type of the logging module, as elaborated below (a minimal enforcement sketch in Java is given after the list):
- **View.** The entity (e.g., the cloud service provider) can only read the data but is not allowed to save a raw copy of it anywhere permanently. For this type of action, the PureLog will simply write a log record about the access, while the AccessLogs will enforce the action through the enclosed access control module. Recall that the data are encrypted and
stored in the inner JAR. When there is a view-only access request, the inner JAR will decrypt the data on the fly and create a temporary decrypted file. The decrypted file will then be displayed to the entity using the Java application viewer in case the file is displayed to a human user. Presenting the data in the Java application viewer disables the copying functions invoked via right click or hot keys such as PrintScreen. Further, to prevent the use of some screen capture software, the data will be hidden whenever the application viewer screen is out of focus. The content is displayed using the headless mode in Java on the command line when it is presented to a CSP.
- **Download.** The entity is allowed to save a raw copy of the data; once downloaded, this copy is no longer under the logger's control and no log records are generated for accesses to it.
If PureLog is adopted, the user’s data will be directly downloadable in a pure form using a link. When an entity clicks this download link, the JAR file associated with the data will decrypt the data and give it to the entity in raw form. In case of AccessLogs, the entire JAR file will be given to the entity. If the entity is a human user, he/she just needs to double click the JAR file to obtain the data. If the entity is a CSP, it can run a simple script to execute the JAR.
- **Timed access.** This action is combined with the view-only access, and it indicates that the data are made available only for a certain period of time.
The PureLog will just record the access starting time and its duration, while the AccessLog will enforce that the access is allowed only within the specified period of time. The duration for which the access is allowed is calculated using the Network Time Protocol. To enforce the limit on the duration, the AccessLog records the start time using NTP, and then uses a timer to stop the access. Naturally, this type of access can be enforced only when it is combined with the View access right and not when it is combined with Download.
- **Location-based access.** In this case, the PureLog will record the location of the entities. The AccessLog will verify the location for each such access. The access is granted and the data are made available only to entities located at locations specified by the data owner.
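A minimal enforcement sketch for these four action types follows. The viewer operations, timer handling, and location check are placeholders of our own; only the control flow mirrors the description above.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Dispatch sketch for the four action types. Names are illustrative only.
public class ActionEnforcer {
    enum Act { VIEW, DOWNLOAD, TIMED_ACCESS, LOCATION_BASED_ACCESS }

    interface Viewer {
        void displayWithCopyDisabled();   // decrypt on the fly, no raw copy released
        void releaseRawCopy();            // hand out the data in raw form
        void close();                     // stop displaying the content
    }

    private final ScheduledExecutorService timer = Executors.newSingleThreadScheduledExecutor();

    void enforce(Act act, Viewer viewer, long allowedSeconds,
                 String requesterCountry, String allowedCountry) {
        switch (act) {
            case VIEW:
                viewer.displayWithCopyDisabled();
                break;
            case DOWNLOAD:
                viewer.releaseRawCopy();   // no further control over this copy
                break;
            case TIMED_ACCESS:
                viewer.displayWithCopyDisabled();
                // stop the access once the owner-specified window (timed via NTP) expires
                timer.schedule(viewer::close, allowedSeconds, TimeUnit.SECONDS);
                break;
            case LOCATION_BASED_ACCESS:
                if (!allowedCountry.equals(requesterCountry)) {
                    throw new SecurityException("requester location not permitted");
                }
                viewer.displayWithCopyDisabled();
                break;
        }
    }
}
```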
5.3 Dependability of Logs
In this section, we discuss how we ensure the dependability of logs. In particular, we aim to prevent the following two types of attacks. First, an attacker may try to evade the auditing mechanism by storing the JARs remotely, corrupting the JAR, or trying to prevent them from communicating with the user. Second, the attacker may try to compromise the JRE used to run the JAR files.
5.3.1 JARs Availability
To protect against attacks perpetrated on offline JARs, the CIA includes a log harmonizer which has two main responsibilities: to deal with copies of JARs and to recover corrupted logs.
Each log harmonizer is in charge of copies of logger components containing the same set of data items. The harmonizer is implemented as a JAR file. It does not contain the user's data items being audited, but consists of class files for both a server and a client process to allow it to communicate with its logger components. The harmonizer stores error correction information sent from its logger components, as well as the user's IBE decryption key, to decrypt the log records and handle any duplicate records. Duplicate records result from copies of the user's data JARs. Since the user's data are strongly coupled with the logger component in a data JAR file, the logger will be copied together with the user's data. Consequently, the new copy of the logger contains the old log records with respect to the usage of data in the original data JAR file. Such old log records are redundant and irrelevant to the new copy of the data. To present the data owner an integrated view, the harmonizer will merge log records from all copies of the data JARs by eliminating redundancy.
For recovering purposes, logger components are required to send error correction information to the harmonizer after writing each log record. Therefore, logger components always ping the harmonizer before they grant any access right. If the harmonizer is not reachable, the logger components will deny all access. In this way, the harmonizer helps prevent attacks which attempt to keep the data JARs offline for unnoticed usage. If the attacker took the data JAR offline after the harmonizer was pinged, the harmonizer still has the error correction information about this access and will quickly notice the missing record.
In case of corruption of JAR files, the harmonizer will recover the logs with the aid of Reed-Solomon error correction codes [45]. Specifically, each individual logging JAR, when created, contains a Reed-Solomon-based encoder. For every \( n \) symbols in the log file, \( n \) redundancy symbols are computed and sent to the log harmonizer. This creates an error-correcting code of length \( 2n \), which allows the harmonizer to recover the \( n \) log symbols from any \( n \) of the \( 2n \) symbols, i.e., to tolerate up to \( n \) erasures (or correct up to \( n/2 \) corrupted symbols). We choose the Reed-Solomon code as it achieves equality in the Singleton bound [36], making it a maximum distance separable code and hence optimal for this kind of recovery.
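The sketch below illustrates the two availability mechanisms just described: the logger refuses to proceed when the harmonizer is unreachable, and ships parity symbols for each freshly written record. The harmonizer address handling and the RSEncoder interface are placeholders of our own, since the text does not name a concrete Reed-Solomon implementation.

```java
import java.io.IOException;
import java.io.OutputStream;
import java.net.InetSocketAddress;
import java.net.Socket;

// Availability sketch: ping-gate every access and ship error-correction symbols.
// Host, port, and the RSEncoder interface are illustrative placeholders.
public class HarmonizerLink {
    private final String host;
    private final int port;
    private final RSEncoder encoder;

    public HarmonizerLink(String host, int port, RSEncoder encoder) {
        this.host = host; this.port = port; this.encoder = encoder;
    }

    /** Deny access when the harmonizer cannot be reached (keeps offline JARs unusable). */
    public boolean harmonizerReachable(int timeoutMillis) {
        try (Socket s = new Socket()) {
            s.connect(new InetSocketAddress(host, port), timeoutMillis);
            return true;
        } catch (IOException e) {
            return false;
        }
    }

    /** Send the redundancy symbols for one freshly written (encrypted) log record. */
    public void shipParity(byte[] encryptedRecord) throws IOException {
        byte[] parity = encoder.paritySymbols(encryptedRecord);
        try (Socket s = new Socket(host, port); OutputStream out = s.getOutputStream()) {
            out.write(parity);
            out.flush();
        }
    }

    /** Placeholder for a Reed-Solomon encoder producing parity symbols for a record. */
    public interface RSEncoder {
        byte[] paritySymbols(byte[] data);
    }
}
```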
The log harmonizer is located at a known IP address. Typically, the harmonizer resides at the data owner's end as part of his local machine; alternatively, it can be hosted on a proxy server.
5.3.2 Log Correctness
For the logs to be correctly recorded, it is essential that the JRE of the system on which the logger components are running remain unmodified. To verify the integrity of the logger component, we rely on a two-step process: 1) we repair the JRE before the logger is launched and any kind of access is given, so as to provide guarantees of integrity of the JRE; 2) we insert hash codes, which calculate the hash values of the program traces of the modules being executed by the logger component. This helps us detect modifications of the JRE once the logger component has been launched, and is useful to verify whether the original code flow of execution has been altered.
These tasks are carried out by the log harmonizer and the logger components in tandem with each other. The log
harmonizer is solely responsible for checking the integrity of the JRE on the systems on which the logger components exist before the execution of the logger components is started. Trusting this task to the log harmonizer allows us to remotely validate the system on which our infrastructure is working. The repair step is itself a two-step process where the harmonizer first recognizes the Operating System being used by the cloud machine and then tries to reinstall the JRE. The OS is identified using nmap commands. The JRE is reinstalled using commands such as sudo apt install for Linux-based systems or $ <jre>.exe [lang=] [s] [IEXPLORE=1] [MOZILLA=1] [INSTALLDIR=:] [STATIC=1] for Windows-based systems.
The logger and the log harmonizer work in tandem to carry out the integrity checks during runtime. These integrity checks are carried out using oblivious hashing [11]. OH works by adding additional hash codes into the programs being executed. The hash function is initialized at the beginning of the program, the hash value of the result variable is cleared and the hash value is updated every time there is a variable assignment, branching, or looping. An example of how the hashing transforms the code is shown in Fig. 3.
As shown, the hash code captures the computation results of each instruction and computes the oblivious-hash value as the computation proceeds. These hash codes are added to the logger components when they are created. They are present in both the inner and outer JARs. The log harmonizer stores the values for the hash computations. The values computed during execution are sent to it by the logger components. The log harmonizer proceeds to match these values against each other to verify if the JRE has been tampered with. If the JRE is tampered, the execution values will not match. Adding OH to the logger components also adds an additional layer of security to them in that any tampering of the logger components will also result in the OH values being corrupted.
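As an illustration of the transformation sketched in Fig. 3, the example below shows a routine before and after oblivious-hashing instrumentation; the mixing function is a simple stand-in we chose for readability, not the actual OH construction of [11].

```java
// Illustration of oblivious-hashing instrumentation: every assignment and branch
// folds its result into a running hash that the log harmonizer later compares
// against the expected trace value.
public class ObliviousHashingExample {
    // Original routine: no trace of the execution path is kept.
    static int scalePlain(int x) {
        int y = x * 2;
        if (y > 10) {
            y = y - 10;
        }
        return y;
    }

    static long hash = 0;
    static void oh(long v) { hash = hash * 31 + v; }   // stand-in hash update

    // Instrumented routine: assignment results and branch decisions update the hash.
    static int scaleInstrumented(int x) {
        int y = x * 2;            oh(y);                // hash the assignment result
        boolean taken = y > 10;   oh(taken ? 1 : 0);    // hash the branch decision
        if (taken) {
            y = y - 10;           oh(y);
        }
        return y;
    }

    public static void main(String[] args) {
        System.out.println("plain result: " + scalePlain(7));
        System.out.println("instrumented result: " + scaleInstrumented(7));
        System.out.println("oblivious hash of the trace: " + hash);  // compared by the log harmonizer
    }
}
```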
6 END-TO-END AUDITING MECHANISM
In this section, we describe our distributed auditing mechanism including the algorithms for data owners to query the logs regarding their data.
6.1 Push and Pull Mode
To allow users to be timely and accurately informed about their data usage, our distributed logging mechanism is complemented by an innovative auditing mechanism. We support two complementary auditing modes: 1) push mode; 2) pull mode.
Push mode. In this mode, the logs are periodically pushed to the data owner (or auditor) by the harmonizer. The push action will be triggered by either of the following two events: one is that the time elapsed since the last dump reaches a certain period according to the temporal timer inserted as part of the JAR file; the other is that the JAR file exceeds the size stipulated by the content owner at the time of creation. After the logs are sent to the data owner, the log files will be dumped, so as to free space for future access logs. Along with the log files, the error correcting information for those logs is also dumped.
This push mode is the basic mode which can be adopted by both the PureLog and the AccessLog, regardless of whether there is a request from the data owner for the log files. This mode serves two essential functions in the logging architecture: 1) it ensures that the size of the log files does not explode and 2) it enables timely detection and correction of any loss or damage to the log files.
Concerning the latter function, we notice that the auditor, upon receiving the log file, will verify its cryptographic guarantees by checking the records' integrity and authenticity. By construction of the records, the auditor will be able to quickly detect forgery of entries, using the checksum added to each and every record.
Pull mode. This mode allows auditors to retrieve the logs anytime they want to check the recent access to their own data. The pull message consists simply of an FTP pull command, which can be issued from the command line. For naive users, a wizard comprising a batch file can be easily built. The request will be sent to the harmonizer, and the user will be informed of the data's locations and obtain an integrated copy of the authentic and sealed log file.
6.2 Algorithms
Pushing and pulling strategies have interesting tradeoffs. The pushing strategy is beneficial when there are a large number of accesses to the data within a short period of time. In this case, if the data are not pushed out frequently enough, the log file may become very large, which may increase the cost of operations like copying data (see Section 8). The pushing mode may be preferred by data owners who are organizations and need to keep track of the data usage consistently over time. For such data owners, receiving the logs automatically can lighten the load of the data analyzers. The maximum size at which logs are pushed out is a parameter which can be easily configured while creating the logger component. The pull strategy is most needed when the data owner suspects some misuse of his data; the pull mode allows him to monitor the usage of his content immediately. A hybrid strategy can actually be implemented to benefit from the consistent information offered by the push mode and the convenience of the pull mode. Further, as discussed in Section 7, supporting both pushing and pulling modes helps protect against some nontrivial attacks.
The log retrieval algorithm for the Push and Pull modes is outlined in Fig. 4.
1. Let TS(NTP) be the network time protocol timestamp
2. pull := 0
3. rec := (UID, OID, AccessType, Result, Time, Loc)
4. curtime := TS(NTP)
5. lsize := sizeof(log) // current size of the log
6. if ((curtime - tbeg) < time) && (lsize < size) && (pull == 0) then
7.   log := log + ENCRYPT(rec) // ENCRYPT is the encryption function used to encrypt the record
8.   if PING to CJAR then // send a PING to the harmonizer to check if it is alive
9.     PUSH RS(rec) // write the error correcting bits
10.  else
11.    EXIT(1) // error if no PING is received
12.  end if
13. end if
14. if ((curtime - tbeg) > time) || (lsize >= size) || (pull != 0) then
15.   // check if a PING response is received
16.   if PING to CJAR then
17.     PUSH log // write the log file to the harmonizer
18.     RS(log) := NULL // reset the error correction records
19.     tbeg := TS(NTP) // reset the tbeg variable
20.     pull := 0
21.   else
22.     EXIT(1) // error if no PING is received
23.   end if
24. end if
Fig. 4. Push and pull PureLog mode.
The algorithm presents logging and synchronization steps with the harmonizer in case of PureLog. First, the algorithm checks whether the size of the JAR has exceeded a stipulated size or the normal time between two consecutive dumps has elapsed. The size and time threshold for a dump are specified by the data owner at the time of creation of the JAR. The algorithm also checks whether the data owner has requested a dump of the log files. If none of these events has occurred, it proceeds to encrypt the record and write the error-correction information to the harmonizer.
The communication with the harmonizer begins with a simple handshake. If no response is received, the log file records an error. The data owner is then alerted through e-mails, if the JAR is configured to send error notifications. Once the handshake is completed, the communication with the harmonizer proceeds, using a TCP/IP protocol. If any of the aforementioned events (i.e., there is request of the log file, or the size or time exceeds the threshold) has occurred, the JAR simply dumps the log files and resets all the variables, to make space for new records.
In case of AccessLog, the above algorithm is modified by adding an additional check after step 6. Precisely, the AccessLog checks whether the CSP accessing the log satisfies all the conditions specified in the policies pertaining to it. If the conditions are satisfied, access is granted; otherwise, access is denied. Irrespective of the access control outcome, the attempted access to the data in the JAR file will be logged.
Our auditing mechanism has two main advantages. First, it guarantees a high level of availability of the logs. Second, the use of the harmonizer minimizes the amount of workload for human users in going through long log files sent by different copies of JAR files. For a better understanding of the auditing mechanism, we present the following example.
Example 5. With reference to Example 1, Alice can specify that she wants to receive the log files once every week, as it will allow her to monitor the accesses to her photographs. Under this setting, once every week the JAR files will communicate with the harmonizer by pinging it. Once the ping is successful, the file transfer begins. On receiving the files, the harmonizer merges the logs and sends them to Alice. Besides receiving log information once every week, Alice can also request the log file anytime when needed. In this case, she just needs to send her pull request to the harmonizer which will then ping all the other JARs with the “pull” variable to 1. Once the message from the harmonizer is received, the JARs start transferring the log files back to the harmonizer.
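A sketch of the harmonizer's merge step referenced in the example is shown below. Records are modeled as plain strings (in the real system they are decrypted first), and duplicate entries inherited by JAR copies are dropped while order is preserved; the class name is illustrative.

```java
import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;

// Harmonizer-side merge sketch: log files arriving from different copies of the
// same data JAR may share records (copies inherit the original's history), so
// duplicates are eliminated while first-seen order is kept.
public class LogMerger {
    public static List<String> merge(List<List<String>> logsFromCopies) {
        LinkedHashSet<String> merged = new LinkedHashSet<>();
        for (List<String> log : logsFromCopies) {
            merged.addAll(log);
        }
        return new ArrayList<>(merged);
    }

    public static void main(String[] args) {
        List<String> original = List.of("r1", "r2", "r3");
        List<String> copy     = List.of("r1", "r2", "r3", "r4"); // copy inherited r1..r3, then logged r4
        System.out.println(merge(List.of(original, copy)));       // [r1, r2, r3, r4]
    }
}
```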
7 SECURITY DISCUSSION
We now analyze possible attacks to our framework. Our analysis is based on a semihonest adversary model by assuming that a user does not release his master keys to unauthorized parties, while the attacker may try to learn extra information from the log files. We assume that attackers may have sufficient Java programming skills to disassemble a JAR file and prior knowledge of our CIA architecture. We first assume that the JVM is not corrupted, followed by a discussion on how to ensure that this assumption holds true.
7.1 Copying Attack
The most intuitive attack is that the attacker copies entire JAR files. The attacker may assume that doing so allows accessing the data in the JAR file without being noticed by the data owner. However, such attack will be detected by our auditing mechanism. Recall that every JAR file is required to send log records to the harmonizer. In particular, with the push mode, the harmonizer will send the logs to data owners periodically. That is, even if the data
owner is not aware of the existence of the additional copies of its JAR files, he will still be able to receive log files from all existing copies. If attackers move copies of JARs to places where the harmonizer cannot connect, the copies of JARs will soon become inaccessible. This is because each JAR is required to write redundancy information to the harmonizer periodically. If the JAR cannot contact the harmonizer, the access to the content in the JAR will be disabled. Thus, the logger component provides more transparency than conventional log file encryption; it allows the data owner to detect when an attacker has created copies of a JAR, and it makes offline files inaccessible.
7.2 Disassembling Attack
Another possible attack is to disassemble the JAR file of the logger and then attempt to extract useful information out of it or spoil the log records in it. Given the ease of disassembling JAR files, this attack poses one of the most serious threats to our architecture. Since we cannot prevent an attacker from gaining possession of the JARs, we rely on the strength of the cryptographic schemes applied to preserve the integrity and confidentiality of the logs.
Once the JAR files are disassembled, the attacker is in possession of the public IBE key used for encrypting the log files, the encrypted log file itself, and the *.class files. Therefore, the attacker has to rely on learning the private key or subverting the encryption to read the log records.
To compromise the confidentiality of the log files, the attacker may try to identify which encrypted log records correspond to his actions by mounting a chosen plaintext attack to obtain some pairs of encrypted log records and plain texts. However, the adoption of the Weil Pairing algorithm ensures that the CIA framework has both chosen ciphertext security and chosen plaintext security in the random oracle model [4]. Therefore, the attacker will not be able to decrypt any data or log files in the disassembled JAR file. Even if the attacker is an authorized user, he can only access the actual content file but he is not able to decrypt any other data including the log files which are viewable only to the data owner. From the disassembled JAR files, the attackers are not able to directly view the access control policies either, since the original source code is not included in the JAR files. If the attacker wants to infer access control policies, the only possible way is through analyzing the log file. This is, however, very hard to accomplish since, as mentioned earlier, log records are encrypted and breaking the encryption is computationally hard.
Also, the attacker cannot modify the log files extracted from a disassembled JAR. Should the attacker erase or tamper with a record, the integrity checks added to each record of the log will not match at the time of verification (see Section 5.2 for the record structure and hash chain), revealing the error. Similarly, attackers will not be able to write fake records to log files without being detected, since they would need to sign with a valid key and the chain of hashes would not match. Thanks to the Reed-Solomon encoding used to create the redundancy for the log files, the log harmonizer can easily detect a corrupted record or log file.
Finally, the attacker may try to modify the Java classloader in the JARs in order to subvert the class files when they are being loaded. This attack is prevented by the sealing techniques offered by Java. Sealing ensures that all packages within the JAR file come from the same source code [27]; combined with signing, it prevents the code inside the JAR file from being changed. More importantly, this attack is stopped as the JARs check the classloader each time before granting any access right. If the classloader is found to be a custom classloader, the JARs will throw an exception and halt. Further, JAR files are signed for integrity at the time of creation, to prevent an attacker from writing to the JAR. Even if an attacker can read from it by disassembling it, he cannot reassemble it with modified packages. In case the attacker guesses or learns the data owner's key from somewhere, all the JAR files using the same key will be compromised. Thus, using different IBE key pairs for different JAR files is more secure and prevents such an attack.
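The run-time guards described above can be sketched as follows; comparing against the system class loader and testing package sealing are illustrative simplifications of our own, not the exact checks performed by the logger.

```java
// Sketch of the environment checks performed before granting any access right:
// refuse to proceed when a custom class loader is in play, and verify that the
// logger's own package is sealed. Names are illustrative.
public class LoaderGuard {
    public static void checkEnvironment(Class<?> loggerClass) {
        ClassLoader cl = loggerClass.getClassLoader();
        // The system (application) class loader is expected; anything else is treated
        // as a potential attempt to subvert class loading.
        if (cl != null && cl != ClassLoader.getSystemClassLoader()) {
            throw new SecurityException("unexpected class loader: " + cl.getClass().getName());
        }
        Package pkg = loggerClass.getPackage();
        if (pkg == null || !pkg.isSealed()) {
            throw new SecurityException("logger package is not sealed");
        }
    }

    public static void main(String[] args) {
        try {
            checkEnvironment(LoaderGuard.class);
            System.out.println("class loader and sealing checks passed");
        } catch (SecurityException e) {
            System.out.println("environment rejected: " + e.getMessage());
        }
    }
}
```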
7.3 Man-in-the-Middle Attack
An attacker may intercept messages during the authentication of a service provider with the certificate authority, and replay the messages in order to masquerade as a legitimate service provider. There are two points in time that the attacker can replay the messages. One is after the actual service provider has completely disconnected and ended a session with the certificate authority. The other is when the actual service provider is disconnected but the session is not over, so the attacker may try to renegotiate the connection. The first type of attack will not succeed since the certificate typically has a time stamp which will become obsolete at the time point of reuse. The second type of attack will also fail since renegotiation is banned in the latest version of OpenSSL and cryptographic checks have been added.
7.4 Compromised JVM Attack
An attacker may try to compromise the JVM.
To quickly detect and correct these issues, we discussed in Section 5.3 how to integrate oblivious hashing to guarantee the correctness of the JRE [11] and how to correct the JRE prior to execution, in case any error is detected. OH adds hash code to capture the computation results of each instruction and computes the oblivious-hash value as the computation proceeds. These two techniques allow for a first quick detection of errors due to malicious JVM, therefore mitigating the risk of running subverted JARs. To further strengthen our solution, one can extend OH usage to guarantee the correctness of the class files loaded by the JVM.
8 PERFORMANCE STUDY
In this section, we first introduce the settings of the test environment and then present the performance study of our system.
8.1 Experimental Settings
We tested our CIA framework by setting up a small cloud, using the Emulab testbed [42]. In particular, the test environment consists of several OpenSSL-enabled servers:
one head node which is the certificate authority, and several computing nodes. Each of the servers is installed with Eucalyptus [41]. Eucalyptus is an open source cloud implementation for Linux-based systems. It is loosely based on Amazon EC2, therefore bringing the powerful functionalities of Amazon EC2 into the open source domain. We used Linux-based servers running Fedora 10 OS. Each server has a 64-bit Intel Quad Core Xeon E5530 processor, 4 GB RAM, and a 500 GB Hard Drive. Each of the servers is equipped to run the OpenJDK runtime environment with IcedTea6 1.8.2.
8.2 Experimental Results
In the experiments, we first examine the time taken to create a log file and then measure the overhead in the system. With respect to time, the overhead can occur at three points: during the authentication, during the encryption of a log record, and during the merging of the logs. Also, with respect to storage overhead, we notice that our architecture is very lightweight, in that the only data to be stored are given by the actual files and the associated logs. Further, the JAR acts as a compressor of the files that it handles. In particular, as introduced in Section 3, multiple files can be handled by the same logger component. To this extent, we investigate whether a single logger component, used to handle more than one file, results in storage overhead.
8.2.1 Log Creation Time
In the first round of experiments, we are interested in finding out the time taken to create a log file when there are entities continuously accessing the data, causing continuous logging. Results are shown in Fig. 5. It is not surprising to see that the time to create a log file increases linearly with the size of the log file. Specifically, the time to create a 100 KB file is about 114.5 ms while the time to create a 1 MB file averages at 731 ms. With this experiment as the baseline, one can decide the amount of time to be specified between dumps, keeping other variables like space constraints or network traffic in mind.
8.2.2 Authentication Time
The next point that the overhead can occur is during the authentication of a CSP. If the time taken for this authentication is too long, it may become a bottleneck for accessing the enclosed data. To evaluate this, the head node issued OpenSSL certificates for the computing nodes and we measured the total time for the OpenSSL authentication to be completed and the certificate revocation to be checked. Considering one access at the time, we find that the authentication time averages around 920 ms which proves that not too much overhead is added during this phase. As of present, the authentication takes place each time the CSP needs to access the data. The performance can be further improved by caching the certificates.
The time for authenticating an end user is about the same when we consider only the actions required by the JAR, viz. obtaining a SAML certificate and then evaluating it. This is because both the OpenSSL and the SAML certificates are handled in a similar fashion by the JAR. When we consider the user actions (i.e., submitting his username to the JAR), it averages at 1.2 minutes.
8.2.3 Time Taken to Perform Logging
This set of experiments studies the effect of log file size on the logging performance. We measure the average time taken to grant an access plus the time to write the corresponding log record. The time for granting any access to the data items in a JAR file includes the time to evaluate and enforce the applicable policies and to locate the requested data items.
In the experiment, we let multiple servers continuously access the same data JAR file for a minute and recorded the number of log records generated. Each access is just a view request, so the time for executing the action is negligible. The average time to log an action is about 10 seconds, which includes the time taken by a user to double-click the JAR or by a server to run the script to open the JAR. We also measured the log encryption time, which is about 300 ms per record and appears to be independent of the log size.
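As a rough illustration of the per-record encryption step being timed, the sketch below encrypts a single log record with AES via javax.crypto. The cipher choice, record format, and key handling are our assumptions for illustration only and are not taken from the paper; a real deployment would also use an authenticated mode rather than the default one selected by "AES".

```java
import javax.crypto.Cipher;
import javax.crypto.KeyGenerator;
import javax.crypto.SecretKey;
import java.nio.charset.StandardCharsets;

public class LogRecordEncryption {
    public static void main(String[] args) throws Exception {
        // Generate a symmetric key; in the actual system the decryption key is held elsewhere.
        KeyGenerator kg = KeyGenerator.getInstance("AES");
        kg.init(128);
        SecretKey key = kg.generateKey();

        // Hypothetical record layout: timestamp | accessor | action | item.
        String record = "2012-03-01T10:15:30 | CSP-7 | view | photo.jpg";
        Cipher cipher = Cipher.getInstance("AES");

        long start = System.nanoTime();
        cipher.init(Cipher.ENCRYPT_MODE, key);
        byte[] ciphertext = cipher.doFinal(record.getBytes(StandardCharsets.UTF_8));
        double ms = (System.nanoTime() - start) / 1_000_000.0;

        System.out.printf("Encrypted %d bytes in %.2f ms%n", ciphertext.length, ms);
    }
}
```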
8.2.4 Log Merging Time
To check whether the log harmonizer can become a bottleneck, we measure the amount of time required to merge log files. In this experiment, we ensured that each of the log files had 10 to 25 percent of its records in common with one another; the exact number of shared records was randomized for each repetition of the experiment, and the time was averaged over 10 repetitions. We tested the time to merge up to 70 log files of 100 KB, 300 KB, 500 KB, 700 KB, 900 KB, and 1 MB each. The results are shown in Fig. 6. The merging time increases almost linearly with the number and size of the log files: merging two 100 KB log files takes 59 ms, while merging 70 1 MB files takes 2.35 minutes.
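A minimal sketch of the deduplicating merge is shown below. It assumes (our assumption, not the paper's) that each log record occupies one line and that records shared between files are byte-identical; the file names are placeholders.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;

public class LogMerger {
    // Merge several log files, keeping only one copy of records that appear in more than one file.
    static List<String> merge(List<Path> logFiles) throws IOException {
        Set<String> merged = new LinkedHashSet<>();   // preserves first-seen order, drops duplicates
        for (Path log : logFiles) {
            merged.addAll(Files.readAllLines(log));
        }
        return List.copyOf(merged);
    }

    public static void main(String[] args) throws IOException {
        List<Path> logs = List.of(Path.of("node1.log"), Path.of("node2.log"));
        Files.write(Path.of("merged.log"), merge(logs));
    }
}
```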
8.2.5 Size of the Data JAR Files
Finally, we investigate whether a single logger, used to handle more than one file, results in storage overhead. We measure the size of the loggers (JARs) by varying the number and size of the data items they hold. In particular, we tested how the size of a logger containing 10 content files (i.e., images) of equal size grows as the file size increases. Notice that we purposely kept the log files small (less than 5 KB), so as to focus on the overhead added by having multiple content files in a single JAR. Intuitively, the larger the data items held by a logger, the larger the logger: its size grows from 3,500 to 4,035 KB as the size of the content items changes from 200 KB to 1 MB. Overall, due to the compression provided by JAR files, the size of the logger is dictated by the size of the largest files it contains. Results are shown in Fig. 7.
Fig. 7. Size of the logger component.
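The measurement can be reproduced in spirit with a few lines of Java. The sketch below is our own illustration, not the authors' logger: it simply packs a set of content files into one JAR and reports the resulting size.

```java
import java.io.IOException;
import java.io.OutputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.List;
import java.util.jar.JarEntry;
import java.util.jar.JarOutputStream;

public class LoggerSizeProbe {
    // Pack the given content files into one JAR and return the resulting size in KB.
    static long packAndMeasure(Path jarPath, List<Path> contentFiles) throws IOException {
        try (OutputStream out = Files.newOutputStream(jarPath);
             JarOutputStream jar = new JarOutputStream(out)) {
            for (Path file : contentFiles) {
                jar.putNextEntry(new JarEntry(file.getFileName().toString()));
                jar.write(Files.readAllBytes(file));
                jar.closeEntry();
            }
        }
        return Files.size(jarPath) / 1024;
    }

    public static void main(String[] args) throws IOException {
        // Placeholder content files; repeat with growing image sizes to reproduce the trend.
        List<Path> images = List.of(Path.of("img1.jpg"), Path.of("img2.jpg"));
        System.out.println(packAndMeasure(Path.of("logger.jar"), images) + " KB");
    }
}
```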
8.2.6 Overhead Added by JVM Integrity Checking
We investigate the overhead added by both the JRE installation/repair process, and by the time taken for computation of hash codes.
The time taken for JRE installation/repair averages around 6,500 ms. This time was measured by taking the system time stamp at the beginning and end of the installation/repair.
To calculate the time overhead added by the hash codes, we simply measure the time taken for each hash calculation. This time averages around 9 ms. The number of hash commands varies based on the size of the code in the log files; when this size does not change with the content, the number of hash commands remains constant.
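For illustration only, the per-file hash computation being timed can be sketched as follows. SHA-256 and the file path are our own choices here; the paper does not restate the hash algorithm in this section.

```java
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

public class IntegrityHash {
    // Compute a SHA-256 digest of one file; the integrity check repeats this per monitored file.
    static String hashFile(Path file) throws IOException, NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        byte[] digest = md.digest(Files.readAllBytes(file));
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) hex.append(String.format("%02x", b));
        return hex.toString();
    }

    public static void main(String[] args) throws Exception {
        long start = System.nanoTime();
        String h = hashFile(Path.of("logger.jar"));   // placeholder path
        System.out.printf("%s  (%.1f ms)%n", h, (System.nanoTime() - start) / 1_000_000.0);
    }
}
```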
9 Conclusion and Future Research
We proposed innovative approaches for automatically logging any access to the data in the cloud together with an auditing mechanism. Our approach allows the data owner to not only audit his content but also enforce strong back-end protection if needed. Moreover, one of the main features of our work is that it enables the data owner to audit even those copies of its data that were made without his knowledge.
In the future, we plan to refine our approach to verify the integrity of the JRE and the authentication of JARs [23]. For example, we will investigate whether it is possible to leverage the notion of a secure JVM [18] being developed by IBM. This research is aimed at providing software tamper resistance to Java applications. In the long term, we plan to design a comprehensive and more generic object-oriented approach to facilitate autonomous protection of traveling content. We would like to support a variety of security policies, like indexing policies for text files, usage control for executables, and generic accountability and provenance controls.
References
Smitha Sundareswaran received the bachelor's degree in electronics and communications engineering in 2005 from Jawaharlal Nehru Technological University, Hyderabad, India. She is currently working toward the PhD degree in the College of Information Sciences and Technology at the Pennsylvania State University. Her research interests include policy formulation and management for distributed computing architectures.
Anna C. Squicciarini received the PhD degree in computer science from the University of Milan, Italy, in 2006. She is an assistant professor at the College of Information Science and Technology at the Pennsylvania State University. During the years of 2006-2007, she was a postdoctoral research associate at Purdue University. Her main interests include access control for distributed systems, privacy, security for Web 2.0 technologies and grid computing. She is the author or coauthor of more than 50 articles published in refereed journals, and in proceedings of international conferences and symposia. She is a member of the IEEE.
Dan Lin received the PhD degree from National University of Singapore in 2007. She is an assistant professor at Missouri University of Science and Technology. She was a postdoctoral research associate from 2007 to 2008. Her research interests cover many areas in the fields of database systems and information security.
For more information on this or any other computing topic, please visit our Digital Library at www.computer.org/publications/dlib.
ABSTRACT
While problem solving is a crucial aspect of programming, few learning opportunities in computer science focus on teaching problem-solving skills like planning. In this paper, we present Pyrus, a collaborative game designed to encourage novices to plan in advance while programming. Through Pyrus, we explore a new approach to designing educational games we call behavior-centered game design, in which designers first identify behaviors that learners should practice to reach desired learning goals and then select game mechanics that incentivize those behaviors. Pyrus leverages game mechanics like a failure condition, distributed resources, and enforced turn-taking to encourage players to plan and collaborate. In a within-subjects user study, we found that pairs of novices spent more time planning and collaborated more equally when solving problems in Pyrus than in pair programming. These findings show that game mechanics can be used to promote desirable learning behaviors like planning in advance, and suggest that our behavior-centered approach to educational game design warrants further study.
CCS CONCEPTS
- Human-centered computing → Collaborative and social computing systems and tools;
KEYWORDS
Educational Games, Collaborative Learning, CS Education, Problem Solving, Behavior-Centered Game Design
The first and second authors contributed equally to this paper
work slowly and inefficiently, suggesting a need for further improvement. Researchers have concluded that novices often struggle to develop the skills needed to execute the problem-solving process effectively; in particular, studies of novice programmers have repeatedly shown that, unlike experts, novices do not practice problem-solving behaviors such as planning in advance [37, 54, 60, 63].
A variety of approaches have been explored to help novices develop problem-solving skills, including scaffolding problem solving [39, 40, 50], a new CS curriculum that focuses on problem-solving skills [34], and interventions that aim to help novices when they get stuck [12, 28, 49]. For example, Loksa et al. found that students have higher self-efficacy and metacognitive awareness when provided a list of problem-solving stages as guidance [40]. Linn et al. found that expert commentaries on the process of solving complex programming problems can help students learn problem-solving skills [39]. Cao et al. provide context-sensitive suggestions to help stuck users through an “Idea Garden” [12].
We contribute to research by exploring a new approach for encouraging novices to practice problem solving. Rather than directly teaching or scaffolding the problem-solving process, Pyrus uses game mechanics to incentivize behaviors like planning in advance, deepening our understanding of different approaches for teaching problem solving.
**Collaborative Learning**
Collaborative learning has been studied extensively in the learning sciences [30], and research shows that the active discussion of ideas in small groups can promote both engagement and critical thinking [26]. However, empirical studies show that not all collaborative learning experiences are equally effective [20]. Dillenbourg and Schneider identified a set of mechanisms that make collaborations effective, including explaining reasoning, considering and comparing different approaches, and regulating mutual understanding of a problem [21]. Researchers have explored a variety of methods for structuring collaborative activities to foster effective collaborations, often referred to as scripts [20]. One common scripting approach, known as Jigsaw, gives each member of a group exclusive access to a necessary part of the problem solution to prevent any individual from being able to solve the problem alone [4]. Jigsaw has been used to foster collaboration in a variety of learning environments, including cognitive tutoring systems [46] and CS1 labs [56].
In computer science education, the most widely adopted method of introducing collaboration is through pair programming [44, 45, 62], a core tenet of the eXtreme Programming methodology [6]. In pair programming, two partners work together to solve programming problems: the *pilot* controls the keyboard and implements the solution, while the *co-pilot* checks the implemented code for errors and bugs. Research shows that pair programming helps students solve problems more quickly and effectively [62] and that novices who pair
program are more confident in their work than students who do not pair [45]. However, pair programming is not strongly scripted and the pilot and co-pilot roles can break down in practice [13]. We are not aware of any research that aims to re-design the pair programming experience to improve its collaborative effectiveness.
We contribute to this body of research by exploring a new method of structuring collaboration between pairs of programmers by using game mechanics to directly incentivize effective collaborative behaviors. Pyrus’ design builds on scripting and Jigsaw by using mechanics like enforced turn-taking and distributed resources to promote collaboration.
Educational Games
An extensive body of prior work has investigated the potential of teaching programming through educational games. Researchers and practitioners have designed games that teach language constructs [3, 22], computational thinking and algorithm design [9, 29, 31, 33, 47], and program comprehension via testing and debugging [8, 38]. For example, in Wu’s Castle, novice programmers learn language constructs by writing arrays and loops to accomplish game goals [22], and in Code Hunt, players practice debugging skills by solving puzzles given only clues and test cases for the target algorithm [8]. While games have been developed to teach various aspects of programming, we are not aware of any that specifically promote planning during problem solving.
Researchers agree that educational games have the potential to support learning [14, 25, 42], but we still have a limited understanding of how to integrate learning theory into games to create effective educational experiences. As a result, researchers have created resources to help educational game designers. Aleven et al. developed a framework that combines learning objectives and instructional design principles with game mechanics, dynamics, and aesthetics [1]. Culyba developed a framework for designing transformational games that change their players by identifying desired transformations (e.g., learning outcomes) and the barriers that inhibit them [17]. Wendel et al. present a set of requirements for the design of collaborative learning games [61]. These approaches focus on identifying high-level instructional principles and game affordances that could support learning, rather than understanding how to choose low-level game mechanics based on a stated behavioral goal.
In this work, we explore a new behavior-centered game design approach that focuses on identifying game mechanics that will promote desirable low-level learning behaviors. We see this approach as one that could complement higher-level educational game design frameworks. Some educational games have been developed around stated behavioral goals: “brain points” were added to the math game Refraction to promote persistence and strategy [53], and GrACE teaches abstraction and algorithms by encouraging players to practice stepwise thinking [31]. We build on this work by formalizing the behavior-centered approach to game design.
3 BEHAVIOR-CENTERED GAME DESIGN
Our goal in this work is to design a game that encourages novice programmers to practice more programming problem-solving behaviors, in particular planning. To achieve this goal, we explore a new approach for creating educational games that we call behavior-centered game design. The key insight of this approach is that the game designer identifies specific low-level behaviors they want players to practice in order to reach high-level learning objectives, rather than targeting a high-level learning objective outright.
Specifically, behavior-centered game design is a process through which game designers (1) identify the obstacles learners face in reaching a learning outcome, (2) identify the behaviors that learners need to practice to reach the learning outcome, and (3) select game mechanics that will directly incentivize those behaviors to overcome the obstacles. These game mechanics are then assembled into a playable game which can be evaluated for its effectiveness in encouraging the targeted behaviors. For Pyrus, our desired learning outcome was for novice programmers to develop programming problem-solving skills, in part through effective collaboration. We detail our behavior-centered approach below.
Problem-solving
Obstacle: novice programmers struggle with the problem-solving process. Novices commonly make errors during problem solving and omit early stages of the process such as planning [36, 43, 58]. Without planning in advance, novices may work toward a solution with a poor understanding of the problem, which leads to mistakes like writing unnecessary code or not accounting for edge cases [23]. These types of errors are notoriously difficult for novices to debug [2]. In addition, a lack of planning means that novices are decomposing problems and composing solutions simultaneously, two steps which are already independently challenging [36]. These difficulties are present even when novices work in pairs [27].
Behavior: plan solutions in advance. To address these difficulties, we focused on incentivizing planning. In the planning stage of problem solving, programmers devise a potential solution and evaluate it through processes like mental simulation or writing pseudocode. While programmers should plan before implementing a solution, the programming problem-solving process is not strictly linear, and programmers may revisit the planning stage after implementing a partial solution. Our goal was to encourage planning in general, and planning in advance of implementation in particular.
Mechanics: discrete actions, distributed resources, and a failure condition. To encourage novices to plan their solutions in advance, we included three game mechanics in Pyrus that make it difficult for players to write programs without planning: discrete actions, distributed resources, and a failure condition. In Pyrus, players make progress by executing discrete actions, which are the building blocks that players use to compose their solutions. We hypothesized that discretizing the act of writing code using actions would encourage novices to work more deliberately, resulting in more planning. In addition, some programming constructs that players need to solve problems are only available if they possess the corresponding distributed resources, which are non-transferable and can only be used by the player who holds the resource. We hypothesized that distributed resources would encourage players to plan while determining how to use their respective resources most effectively. Finally, the number of actions a pair can execute is limited by a failure condition; if players do not solve the problem in a given number of actions, they lose the game. We hypothesized that the failure condition would create urgency and encourage players to use their discrete actions and distributed resources deliberately to avoid losing.
Collaboration
Obstacle: novices fail to collaborate effectively. While collaboration could encourage novices to discuss their ideas and plan more effectively, research on collaborative learning shows that people often struggle to collaborate without support. According to Dillenbourg et al., collaborative tasks must be structured to encourage effective interactions if they are to succeed [20, 21]. While pair programming is commonly employed in introductory computer science courses, this practice does not provide enough structure to guarantee effective collaboration. Partners are encouraged to take on the pilot and co-pilot roles, but these roles are not structured and switching between roles is often unenforced. In practice, research shows that the roles sometimes break down entirely, with little or no distinction between partners [13]. Furthermore, since partners take on the roles ad hoc, there is nothing to stop the pilot from dominating the conversation and implementation, leaving the co-pilot with nothing to do. Pairs are free to practice pre-established bad programming habits, even if they do so collaboratively.
Behavior: participate equally in problem solving. In the context of programming, we define an “effective collaboration” as one in which both partners are actively involved in discussing, designing, and implementing the solution to a problem. Rather than allowing one partner to dominate by spending more time in the pilot role, we would like to see both partners participate equally in the problem-solving process, and in particular in the construction of the solution.
Mechanics: enforced turn-taking and distributed resources. To encourage players to participate equally in problem solving, we included two game mechanics in Pyrus that foster positive interdependence, or the feeling that teammates are reliant on each other’s success as part of their own [65]: enforced turn-taking and distributed resources. In Pyrus, enforced turn-taking forces players to switch roles at regular intervals. Furthermore, turns are designed to be short enough to keep both players actively engaged. We hypothesized that frequent turn-taking would encourage equal participation. In addition, distributed resources provide each player with only some of the programming constructs required to solve a problem. While turns enforce a division of labor, resources enforce a division of responsibilities. We hypothesized that making it impossible for one partner to succeed without the help of their teammate would encourage partners to discuss and coordinate, resulting in equal participation.
4 PYRUS SYSTEM
Pyrus (Figure 1) is a two-player game in which programmers collaborate in person to solve programming problems. As in traditional programming, players write and debug real code. Unlike traditional programming, players must work with special game mechanics and rules to solve their problem. In Pyrus, two programmers work together to write a program to solve a problem specified in a prompt (§4) with the goal of passing all provided test cases (§7). Each player works on their own computer, but they write code in a shared editor.
During play, each player performs up to four actions per turn (§2). There are three types of actions (§6). The write code action allows the pilot to write a single statement that declares a variable of a primitive data type or performs an operation on existing variables (e.g., var x = 12). The consume card action allows the pilot to use a card in his or her hand to implement the corresponding construct in the editor; that card is then discarded. The discard card action allows the pilot to discard a card and draw a new one from the deck.
The pilot may elect to end his or her turn at any time (§11). At the end of each turn, the pilot draws two cards from the deck and the other player (the co-pilot) starts a new turn as the pilot. Players trade turns like this until they trigger either the win or failure condition. Players win if at any point during the game, they press the button to run their code against the test cases (§8), and all tests pass. Players fail if a player attempts to draw a card (either by using a discard action or automatically at the end of a turn) when the deck is empty. After failure, the game is reset to the start state.
In Pyrus, the cards are distributed resources. The game uses enforced turn-taking to structure play, and players perform up to four discrete actions per turn. Play ends when the players complete a challenge or when the failure condition is triggered. These four game mechanics were designed to work in concert to encourage our two target behaviors: planning in advance and equal participation in problem solving.
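To make the interplay of these mechanics concrete, the sketch below models the turn loop in a few lines of Java. It is a hypothetical simplification for illustration only (Pyrus itself is a web application and this is not its code); the four-actions-per-turn, draw-two-cards, and empty-deck failure rules are taken from the description above, while the deck size and card names are arbitrary.

```java
import java.util.ArrayDeque;
import java.util.Deque;

// A heavily simplified model of Pyrus' turn loop: four actions per turn,
// two cards drawn at the end of each turn, and failure when the deck runs out.
public class PyrusTurnModel {
    private final Deque<String> deck = new ArrayDeque<>();
    private int actionsLeftThisTurn = 4;
    private boolean failed = false;

    PyrusTurnModel(int deckSize) {
        for (int i = 0; i < deckSize; i++) deck.push("card-" + i);
    }

    // Any draw from an empty deck triggers the failure condition.
    void draw() {
        if (deck.isEmpty()) { failed = true; return; }
        deck.pop();
    }

    // Write code, consume a card, or discard: all count against the turn's action budget.
    void performAction() {
        if (failed || actionsLeftThisTurn == 0) return;
        actionsLeftThisTurn--;
    }

    void endTurn() {
        draw();
        draw();                          // the outgoing pilot draws two cards
        actionsLeftThisTurn = 4;         // the co-pilot becomes the new pilot
    }

    boolean hasFailed() { return failed; }

    public static void main(String[] args) {
        PyrusTurnModel game = new PyrusTurnModel(24);
        while (!game.hasFailed()) {
            for (int i = 0; i < 4; i++) game.performAction();
            game.endTurn();
        }
        System.out.println("Deck exhausted: players lose and the game resets.");
    }
}
```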
Representative scenario
To illustrate a game of Pyrus, consider the following example game played by novice programmers JoJo and Arby. When JoJo and Arby join the game, each player is dealt four cards and JoJo is selected as pilot. JoJo is shown the interface in Figure 1. Arby’s interface shows everything that JoJo’s does except the three action buttons and the End Turn button.
The solution requires a loop and conditional, and while JoJo has a loop card, only Arby has a conditional. They decide that JoJo should use a Write Code action to declare a variable which will keep track of the loop’s termination condition. JoJo clicks Write Code, declares a primitive variable (e.g., var count = 0;), and submits his action. Next, JoJo decides to implement his loop. He clicks Consume Card, selects his loop card, and writes for (var i = 0; i < arr.length; i++) { }. Now, both players agree that Arby should implement her conditional, so JoJo clicks End Turn. As JoJo’s turn ends, he draws two cards, and his interface displays that Arby is now pilot. Arby’s interface now gives her access to the actions, which she can use to write her conditional. After Arby consumes her conditional card to implement an if statement, JoJo and Arby decide to test their code, so JoJo clicks on the Run Code button. Their code is run against some test cases and the results are displayed below the prompt.
Play continues until JoJo and Arby deplete the deck, which triggers the failure condition and resets the game. Eventually, they pass all test cases and Arby clicks Submit Code to win.
5 STUDY DESIGN
To evaluate whether Pyrus encourages novices to plan and collaborate effectively, we conducted a within-subjects study in which pairs worked on programming challenges using both Pyrus and pair programming. The study was conducted as a half-day JavaScript bootcamp, meaning that everyone participated in the study on a single day. We were interested in evaluating (1) whether novices plan more in Pyrus, and (2) whether novices participate more equally in Pyrus.
Participants
Eighteen undergraduate students at a large private university (three female) participated in our study. They were recruited through a department mailing list. The study was presented as a bootcamp that would teach participants JavaScript fundamentals, give them an opportunity to practice solving programming challenges, and contribute to research. To maintain a consistent level of experience, we invited students who had taken CS1 and CS2 but had no experience with JavaScript. We chose this population because these students had enough experience to solve programming challenges that required problem-solving skills, and were unlikely to get stuck on syntax errors. While this population had more experience than the participants of most studies of novice programmers, we think they were appropriate for this study given our research goals. All participants provided informed consent for participation in the study and were compensated with a $60 Amazon gift card for their time.
Procedure
First, participants were led through a 60-minute JavaScript tutorial by the authors. Next, they were split into pairs and worked on problems in one of two environments (Pyrus and
pair programming) for 45 minutes. Following this problem-solving session, they were given a 30-minute break, during which some students were interviewed and others filled out an online survey. Next, they worked on problems in the other environment for 45 minutes, followed by another 30-minute break for interviews and surveys.
**JavaScript Tutorial.** While each of our participants had programming experience, none was familiar with JavaScript. Therefore, to maintain a relatively even skill level and ensure that participants could work on JavaScript programming challenges, we first provided a 60-minute tutorial on JavaScript. This tutorial was designed by the authors and led by the first author. It covered the fundamentals of JavaScript syntax and programming constructs that participants would need to complete the challenges, including variables, built-in methods, and control-flow structures. Students practiced writing code in exercises during the tutorial, and all authors were available to answer questions.
**Problem-Solving Sessions.** We counterbalanced the problem-solving sessions to reduce order effects. After the tutorial, participants were randomly split into two groups and paired, such that one group had five pairs and the other had four. Participants in the first group solved problems in pair programming during the first session and Pyrus during the second, while participants in the second group did the opposite.
Before each programming session, one of the researchers described the environment that the participants would be working with. For pair programming, the researcher played a tutorial video explaining that the partners would work on a single computer, and that one would be the pilot while the other would be the co-pilot. The partners were told they should switch roles every 10 minutes, but this was not enforced. For Pyrus, the researcher first described the game and the game rules. Then, the researcher played a tutorial video showing an example of someone interacting with the Pyrus interface. During the problem-solving sessions, partners sat next to each other and were able to discuss freely. The researchers were available to answer any questions participants had about the two interfaces and the Pyrus game rules. We did not answer any questions about JavaScript or approaches for solving the challenges, but instructed participants to use the Internet as a resource.
To keep the two conditions as similar as possible, participants worked on the pair programming challenges in a web interface that was equivalent to Pyrus, but without any of the game mechanics (Figure 2). During pair programming, the pairs shared a single computer, rather than each typing on their own computer as in Pyrus, to emulate the traditional pair programming protocol that has been studied in educational contexts [45, 62].
We designed a sequence of four JavaScript programming problems for each of the programming sessions, and participants worked on the problems in order. We sourced problems from the introductory programming website CodingBat¹ and also used simplified versions of problems found in *Cracking the Coding Interview*². The problems were ranked in difficulty based on length, complexity of the solution, and variance in possible solution approaches, and then ordered such that they increased in difficulty. Our goal was to ensure that students with higher levels of incoming skill would not finish all problems within the given 45-minute time period.
**Interviews and Surveys.** During the 30 minutes following each programming session, a subset of participants were interviewed about their experience. We interviewed ten randomly selected participants after the first session, and eight of those same participants after the second session (fewer due to a limited availability of researchers). We followed a semi-structured interviewing protocol, asking questions about the pair programming experience (e.g. “How did you behave when it was your partner’s turn to write the code?”) and the Pyrus experience (e.g. “How did you decide what actions you would take on your turn?”). At the end of the second interview, we also asked questions that encouraged participants to directly compare the two experiences (e.g. “Compare and contrast your experience pair programming in the normal editor and in Pyrus. Did you approach solving challenges differently?”). All participants who were not interviewed by a researcher completed online surveys that asked the same questions as the semi-structured interviews.
6 DATA ANALYSIS
We collected data from a variety of sources during our study, and apply both qualitative and quantitative approaches to analyze that data. Below we present each of our data sources, along with our measures and analysis process for each.
¹ https://codingbat.com/java
² http://www.crackingthecodinginterview.com/
Programming Session Transcripts
During programming sessions, we recorded audio of the dialog between pairs as they worked to solve problems, with the goal of learning about their problem-solving process and the quality of their collaborations. The resulting 13.5 hours of audio data were transcribed, with an associated timestamp for every line in the transcript. To address our research questions, we focused on understanding whether Pyrus encourages novices to plan in advance and participate equally in problem solving, as per our definition of effective collaboration. We developed a coding scheme that allowed us to quantify the amount of time pairs spent engaged in different stages of the problem-solving process, as well as the amount of time each partner spent in the pilot role.
To build our codebook for problem-solving stages, we took a deductive approach [48]. We first created codes for four problem-solving stages that have been identified as part of the programming process in the literature [19, 40]: (1) understanding the problem, (2) planning the solution, (3) implementing the solution, and (4) debugging the solution. We then amended and refined our codebook by following a data-driven inductive process [48]. We completed a comprehensive reading of the transcript files, refined our code definitions, and identified three additional problem-solving stages specific to the Pyrus environment: (5) planning around Pyrus, (6) interacting with Pyrus, and (7) implementing outside of the IDE. Each line of the transcript was coded with at most one problem-solving stage, and the stages could occur in any order. Two authors worked together to code a training set of data, and met to resolve any conflicts and iteratively refine the codebook definitions. Then they independently coded 11% of the data (two transcripts) and achieved an inter-rater reliability of Cohen's kappa = 0.85 (values above 0.8 are considered “almost perfect” [15, 35]). The two authors then divided up and independently coded the rest of the transcripts.
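For reference, Cohen's kappa compares observed agreement p_o with chance agreement p_e as κ = (p_o − p_e) / (1 − p_e). A minimal two-rater implementation is sketched below; this is our own illustration, not the authors' analysis code, and the code indices and example labels are arbitrary.

```java
public class CohensKappa {
    // labels1[i] and labels2[i] are the two raters' code indices for transcript line i.
    static double kappa(int[] labels1, int[] labels2, int numCodes) {
        int n = labels1.length;
        double[][] table = new double[numCodes][numCodes];
        for (int i = 0; i < n; i++) table[labels1[i]][labels2[i]]++;

        double observed = 0, expected = 0;
        for (int c = 0; c < numCodes; c++) {
            observed += table[c][c] / n;                 // proportion of exact agreements
            double row = 0, col = 0;
            for (int k = 0; k < numCodes; k++) { row += table[c][k]; col += table[k][c]; }
            expected += (row / n) * (col / n);           // chance agreement for code c
        }
        return (observed - expected) / (1 - expected);
    }

    public static void main(String[] args) {
        int[] rater1 = {0, 1, 1, 2, 0};
        int[] rater2 = {0, 1, 2, 2, 0};
        System.out.println(kappa(rater1, rater2, 3));
    }
}
```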
In addition to coding each line in the transcript with a problem-solving stage, the authors also coded which partner was in the pilot role for each line. In pair programming, where the partners shared one computer, the pilot was the person who was typing. In Pyrus, where the partners each worked on their own laptop, the pilot was the person whose turn it was in the game. It was straightforward to determine who was the pilot from the audio data; participants talked about switching roles, and typing was audible.
Once the transcripts were coded, we used a quantitative approach to analyze the data. To capture the pairs’ problem-solving process, we calculated the amount of time they spent in each problem-solving stage using the timestamps in the transcripts. To measure planning in advance, we also calculated the amount of time spent in the “planning the solution” stage before the first “implementing the solution” stage. To capture the equality of pairs’ collaboration, we calculated the amount of time each partner spent in the pilot role. For all of these measures, we computed time using the timestamps associated with each line in the transcript. We chose to analyze time rather than counts of coded statements to ensure that our analysis would not inflate code counts for the pairs who were more talkative. We used a repeated measures ANOVA to analyze these within-subjects measures.
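A sketch of this timestamp-based aggregation is shown below. The line format, the stage names, and the convention of attributing time to a stage until the next coded line begins are our assumptions for illustration; they are not the authors' analysis scripts.

```java
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;

public class StageTimeAggregator {
    // One coded transcript line: when it starts (seconds into the session) and its stage code.
    record CodedLine(double startSec, String stage) {}

    // Attribute the gap until the next line (or the session end) to the current line's stage.
    static Map<String, Double> minutesPerStage(List<CodedLine> lines, double sessionEndSec) {
        Map<String, Double> totals = new LinkedHashMap<>();
        for (int i = 0; i < lines.size(); i++) {
            double end = (i + 1 < lines.size()) ? lines.get(i + 1).startSec() : sessionEndSec;
            double minutes = (end - lines.get(i).startSec()) / 60.0;
            totals.merge(lines.get(i).stage(), minutes, Double::sum);
        }
        return totals;
    }

    public static void main(String[] args) {
        List<CodedLine> session = List.of(
                new CodedLine(0, "understanding"),
                new CodedLine(90, "planning"),
                new CodedLine(400, "implementing"));
        System.out.println(minutesPerStage(session, 2700));   // a 45-minute session
    }
}
```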
Programming Session Log Data
During the programming sessions we also collected log data from the Pyrus and pair programming interfaces. For each problem that the pair worked on we recorded their code in the editor every 30 seconds and the number of test cases the pair passed when they ran their code or submitted their final solution. Our goal in analyzing this data was to understand how quickly pairs wrote code and solved problems in the two conditions. We computed two measures with this data: (1) the amount of time it took for pairs to correctly pass all test cases for a problem, and (2) the number of problems solved (a problem is considered solved when all test cases have been passed). We used a repeated measures ANOVA to analyze these within-subjects measures.
Interview Transcripts and Survey Responses
Finally, we saved responses to the surveys and audio recorded interviews with participants. The resulting 3.6 hours of interview audio data were transcribed. We analyzed this data by conducting a thematic analysis [10]. All four authors began with a comprehensive reading of the transcripts to identify codes. Then we met and discussed our proposed codes to develop a codebook. Each interview was coded by one author, checked by another, and any disagreements between the two authors were discussed by all four. Our final codebook includes 73 codes, and is organized in a hierarchical structure with top-level codes such as “problem-solving process,” “collaboration roles,” and “Pyrus game mechanics.”
Two authors independently coded four interviews (22.22% of the data). We used a pooled PABAK kappa to determine inter-rater reliability, which accounts for the prevalence of codes and potential bias between observers [11, 15, 18]. Our kappa was 0.61 (0.61–0.8 is considered “substantial” strength of agreement [35]). After coding the interview and survey data, related codes were clustered into themes that represent the high-level findings from our analysis.
7 RESULTS
Our results show that Pyrus encouraged novices to plan in advance and to participate more equally, the two behavioral outcomes we targeted during the game design process. However, we also found that some participants found Pyrus frustrating and inefficient. We unpack these findings below.
**Figure 3: Amount of time pairs spent in each of the four main problem-solving stages while working on problems in Pyrus and pair programming.**
**Pyrus encouraged novices to plan in advance**
Across all three data sources, we found evidence that novices planned their solutions in advance more in Pyrus than in pair programming. We consider planning in advance to include time spent in two problem-solving stages, the planning the solution stage and the planning around Pyrus stage. When working in Pyrus, plans for how to solve the problem were intertwined with plans for how to manage resources like cards and turns, and thus both are essential parts of planning in advance. We found that pairs in Pyrus planned for 14.07 minutes per session on average, compared to 7.41 minutes in the control (F(1,8)=3.48, p<0.001), as shown in Figure 3. Even when we only consider time spent in the planning the solution stage, during which pairs exclusively planned their solution to the problem, we saw that pairs planned for an average of 10.4 minutes per session in Pyrus compared to 7.41 in pair programming. However, given the size of this difference and our small sample size, this was not statistically significant (F(1,8)=0.43, p=0.10).
This finding was further supported by our interview and survey data. Six out of 18 participants (representing five out of nine total pairs) mentioned that they did not plan in advance while pair programming, compared to zero participants in Pyrus. In contrast, 16 out of 18 participants (representing all nine pairs) mentioned that they planned in advance in Pyrus, compared to five out of 18 participants (representing four out of nine pairs) in pair programming.
Overall, we found that participants approached problem solving differently in the two conditions. We analyzed the time when pairs first entered the implementation stage in the problem-solving session transcript, and found that pairs start implementing significantly earlier in pair programming (F(1,8)=0.783, p<0.05), at an average time of 2.87 minutes compared to 7.45 minutes in Pyrus. In interviews, multiple participants mentioned that they used a trial-and-error approach while pair programming. For example, PS39 stated “We were able to tackle the code without having to spend too much time thinking of solutions. Instead we addressed issues as they came up.” This finding is consistent with prior work showing that novices do not plan solutions in advance [37, 60].
In contrast, pairs in Pyrus planned more of their solutions before beginning to implement. PS30 describes this difference: “In Pyrus, I had to think much further ahead. This actually made it easier to think about the project as a whole, though. Instead of tackling one bit at a time, I started to look more into the big picture of it all.” When talking about their strategy in Pyrus, five participants (representing three pairs) mentioned that they wrote their solutions on paper before implementing them in the editor. An analysis of the problem-solving codes showed that pairs spent an average of 2.53 minutes planning before writing their first line of code in Pyrus, compared to 0.47 minutes in pair programming (F(1,8)=0.68, p<0.05).
We also found that these different approaches to problem-solving necessitated different approaches to debugging. Pairs spent more time debugging in pair programming, an average of 7.53 minutes compared to 4.70 minutes in Pyrus, although this difference was not significant (F(1,8)=0.41, p=0.11). When talking about their pair programming experience, six participants (representing five distinct pairs) mentioned completely abandoning their partial solutions to start over from scratch. For example, PS41 said “There were several times that we had to start over when we realized our approach was too complicated and there were easier ways. Starting over allowed us to have a fresh slate.” PS31 expounds further on the debugging process in pair programming: “Even though we got more problems done, and we were doing them quicker, it was a lot more error. There was a lot of bugs and a lot of things we had to correct along the way.” This suggests that during pair programming, some participants had errors in their solution ideas or approaches that were not revealed until the debugging process. Participants also mentioned that they made fewer mistakes in Pyrus; PS45 described the difference in strategy in the two conditions, and how this impacted the number of mistakes he and his partner made:
“[In Pyrus] it was just a lot more organized. We would pretty much write down almost exactly what we were going to do, and then we would just put it in. Then, we’d have to kind of play the game where we figure out who has what things at their disposal. Then, for the pair programming it was like we could afford to make a lot more mistakes. We didn’t have to write down our whole approach and we could just kind of start writing... But, I do think that our process was kind of a lot less organized and clean.”
These findings suggest that the problem-solving approaches that participants took as a result of the programming environment impacted the amount of debugging that was necessary to arrive at a correct solution.
When talking about their problem-solving approach in Pyrus, many participants mentioned that the game mechanics influenced their behavior. Ten participants mentioned that they prioritized plans based on the available cards, and five participants mentioned being more careful in their implementations because of the failure condition. PS44 described the strategy he and his partner used in Pyrus: “we first looked at the cards in hand. We then wrote out our plans to solve the challenge on paper.” PS45 mentioned that the failure condition influenced his process, stating “[in Pyrus] you had to be careful, because otherwise you’d lose if you did it wrong... You really had to have a plan to be using your resources properly.” These statements indicate that the Pyrus game mechanics influenced the way participants approached the problem-solving process, incentivizing them to plan their solutions in advance and be more deliberate while programming.
**Pyrus encourages pairs to participate more equally**
Through our data analysis, we found evidence that pairs participated more equally in problem solving in Pyrus than when pair programming. We analyzed the codes that captured when each partner was in the pilot role by calculating the difference in typing time between the two partners. We found that this difference was significantly smaller in Pyrus (F(1,8)=1.17, p<0.05); in Pyrus the difference in time spent typing between participants was 5.69 minutes, compared to a 17.78 minute difference in pair programming. This can be partially explained by the fact that for five pairs in the pair programming condition, the less-active typer spent under 10 of the 45 minutes in the pilot role.
Overall, we found that participants took on different roles in pair programming. PS31 stated simply, “It was less of a contributing when you were not typing, and more contribution when you were typing.” Ten participants described the pilot as being the decision maker or leader. For example, PS46 states “Our process, I guess, just like kind of lead person who just kind of... did it, I guess? Unless they got like stuck or something.” PS35 said “We each implemented our own solutions when it was our time to work because we thought it would be easier to do our own solutions than explain our solution to the other person.” In contrast, many described the role of the co-pilot as following along, pointing out small errors, and helping to debug. For example, PS41 states “I let them code their solution to the problem because it was how we decided to work. I watched for errors and commented when I saw them.” Some participants also mentioned feeling lost when in the co-pilot role; PS32 states, “It’s really just like trying to interpret what they’re doing, as opposed to interpreting what we’re doing.” These findings suggest that the pilot and co-pilot serve very different roles in pair programming, and that working in pairs does not inherently encourage collaborative planning.
In Pyrus, participants rarely mentioned differences between the roles of the typer and the observer. For example, when describing the roles in Pyrus, PS38 states “We talked about what we needed to do first, and then whoever’s turn it was just wrote it. There wasn’t that much of a difference between whose turn it was.” Six participants explicitly mentioned that they felt they were on the same page with their partner when working in Pyrus. When discussing why they collaborated equally in Pyrus, participants mentioned the enforced turn-taking mechanic. PS33 stated:
> "In other classes when you work in pairs, normally one person does all the typing, and I feel like that can really easily lead to an imbalance as far as learning goes. Forcing each person [in Pyrus] to type this way, I think, really does help make both people be aware of what’s going on."
PS45 summarized the difference between Pyrus and pair programming nicely:
> "[In Pyrus] we were switching off who was writing what. It was just completely necessary to be really clear about what we were both doing from the outset. You really had to have a plan to be using your resources properly. And then, in [pair programming] it was just kind of like... as long as that person kind of had the framework in their mind we didn’t need everyone to know everything completely.”
These findings show that the enforced turns mechanic in Pyrus changed the way that pairs collaborated while working on programming problems, resulting in more active participation from the non-dominant partner and a deeper shared understanding of the problem approach and solution.
**Novices found Pyrus frustrating and inefficient**
While we found evidence that Pyrus encourages more planning in advance and more equal participation, many participants found that Pyrus restricted their ability to implement code. The lack of flexibility led to frustration. For example, PS46 said "It’s just really annoying when you can see... You know exactly what I need to type, but then it just won’t let you do it". Additionally, PS43 stated "I think a lot of coding is trying something, then failing. It’s a system which doesn’t allow you to fail, without failing completely, and restarting from scratch, which I think is really, really unhelpful”.
However, not all participants found the experience frustrating; for example PS45 stated: “I probably wouldn’t write out my whole process beforehand. But working in Pyrus, we did, which I actually think it made it a lot more efficient. And I think we realized a lot of things that we would run into earlier than if we would have just started writing, so that was good.”
Participants also noted that writing code in Pyrus was slower than in pair programming, a point that came up in
the interviews and surveys of ten participants, even though the bootcamp and activities placed no emphasis on programming speed. In our analysis of the log data, we found that pairs did work more slowly in Pyrus, completing an average of 0.78 problems per session compared to 2.56 for pair programming (F(1,8)=1.83, p<0.01). This is not surprising, given that Pyrus required pairs to plan not only their solutions, but also how to implement them with the available cards and actions. Our analysis of the problem-solving stages showed that pairs spent an average of 4.02 minutes planning around Pyrus and 2.53 minutes interacting with Pyrus, a substantial amount of additional time. Interestingly, attitudes of frustration towards slower modes of programming have also been observed among novices in pair programming [13].
Four participants did note that they would like to incorporate some of the strategies they used in Pyrus into their own programming more often. For example, PS39 stated: “When coding in Pyrus, I was forced to formulate a plan before beginning. We also had to come to an agreement on how to tackle the problem. This is something I rarely do when coding on my own, but something I wish I did more of.” When asked if there is any context in which they would like to use Pyrus, PS39 said “I would use Pyrus if I was working on a code with someone to learn or for fun.” However, several participants said they would not like to use Pyrus in any contexts. For example, PS43 said “I’m tempted to say that there is nothing that I would want my normal coding experience to resemble this coding experience.” This suggests that further iteration on the game design is needed to ensure that the experience is fun, particularly to make sure the constraints are not so restrictive as to cause frustration. However, it is also interesting to note that novices seem to value efficiency over equal participation and time spent planning their solutions, which could be a result of their prior programming experiences and their expectations around coding.
8 CONCLUSION
Our findings provide preliminary evidence that behavior-centered game design can be effective in guiding the design of a game that encourages players to practice a set of target behaviors, even when their natural inclination may be to do the opposite. In our within-subjects study, we found that Pyrus successfully encouraged novices to plan in advance and participate equally in problem solving, the behaviors we targeted in our design. Pairs in Pyrus spent twice the amount of time planning as they did in pair programming, a significant increase. There was also a significantly smaller difference in the amount of time each partner spent in the active pilot role. Most importantly, participants described how their problem-solving and collaborative behaviors were influenced by game mechanics like the failure condition, distributed resources, and enforced turns, showing how mechanics can be used to drive behavior in educational games.
While some participants enjoyed playing Pyrus, many found the game’s constraints frustrating and complained that Pyrus was less efficient than pair programming. We believe some of these frustrations could be addressed through design iterations to improve game balance. For example, if we ensure that players have access to the cards they need to solve each problem in the first half of the deck, they may experience less frustration. Participants’ complaints that Pyrus is “inefficient” and “slow” suggest that novices value solving problems quickly, and may prioritize this over developing a deep understanding of the problem and solution. Perhaps learners would be less likely to fixate on pace and performance if the game was explicitly framed around the goal of developing an effective problem-solving process.
Furthermore, while Pyrus successfully encouraged planning, we do not yet know whether practicing planning through Pyrus teaches novices transferable problem-solving skills. Even though some participants recognized that planning in advance was good for their understanding and problem-solving success, the pairs who played Pyrus first did not adopt those behaviors in the following pair programming session. Given that participants only interacted with Pyrus for 45 minutes, this lack of transfer is unsurprising. However, it is possible that Pyrus may over-scaffold planning and equal participation, which has been an issue in other popular educational games [41]. In the future, we envision integrating Pyrus into classroom lessons that teach problem-solving to help students practice behaviors like planning, and supporting transfer through activities like reflection [55]. Additional research in real-world contexts is needed to understand whether and how planning practice in Pyrus can be transferred outside of the game context.
This exploratory work contributes a new educational game Pyrus, which we see as a compelling proof-of-concept for behavior-centered game design. However, there is still much to learn about this methodology and how to apply it in service of higher-level learning goals. In future work, we plan to design additional games using a behavior-centered approach to better understand the strengths and weaknesses of this methodology. We also plan to analyze existing educational games through a behavior-centered lens to better understand the mechanics that lead to their success. However, this work takes an important initial step towards improving our understanding of how to design effective educational games.
ACKNOWLEDGMENTS
We thank the students and faculty in the Design, Technology, and Research program and the Delta Lab for their valuable feedback. This work was supported by Undergraduate Research Grants from Northwestern University.
The DUDE Runtime System: An Object-Oriented Macro-Dataflow Approach to Integrated Task and Object Parallelism
Dirk C. Grunwald and Suvas Vajracharya
Department of Computer Science, Campus Box 430, University of Colorado, Boulder, CO 80309-0430
(Email: {grunwald, suvas}@cs.colorado.edu)
CU-CS-779-95, August 1995
University of Colorado at Boulder
Copyright © 1995 by Dirk Grunwald and Suvas Vajracharya
Abstract
Modern parallel programming languages allow programmers to specify parallelism using implicitly parallel constructs such as data parallel or object parallel methods, and explicitly parallel constructs, such as doall, doacross, parallel section or programmer-level threads. In this paper, we present the design of a runtime system that executes data-parallel (or object-parallel) code in the presence of explicit parallelism. This facilitates load balancing between data-parallel computations running in threads of distinct parallel sections, as well as inter-loop load balancing. Although sufficient runtime structure is provided for most extant languages, the runtime system is extensible, allowing compilers to customize the runtime system.
To motivate why such a runtime system is desirable, we show performance improvements for programs with complex data dependence relations, such as multigrid solvers.
1 Introduction
Most efforts on simplifying or improving parallel programs have focused either on large compiler systems, such as HPF Fortran, or on small optimizations such as improved synchronization or barrier algorithms. A runtime system is the interface between a compiler and the underlying operating system and hardware; synchronization algorithms are one part of a runtime system. The design of runtime systems can dramatically affect the way compilers convert programs into a parallel form.
Scheduling is the central function of most runtime systems. Poor scheduling decisions can introduce significant performance problems in parallel programs. Runtime systems for “high performance” systems used for data-parallel languages usually provide a single virtual processor for each physical processor. These virtual processors then perform loop-level scheduling for the work of individual parallel constructs, such as doall loops. Other runtime systems support a large number of virtual processors, or threads, and concentrate on efficiently scheduling those threads [5]. Both loop-level and thread-level scheduling decisions require information about the machine architecture, taking into account the number of processors, memory bandwidth and communication delays.
Furthermore, such scheduling mechanisms should be portable across a range of architectures to simplify the code that must be generated by a compiler.
Increasingly, programming languages need support for a number of virtual threads while still providing an infrastructure for efficient loop-level scheduling. Virtual threads can be used to invoke multiple program sections in parallel and to mask communication latency for message passing programs. Loop-level scheduling is still needed for executing data-parallel operations, since the overhead of loop scheduling is usually significantly less than that for thread scheduling.
In this paper, we show how an integrated runtime system can be designed to perform both loop-level scheduling and task scheduling. Our runtime system is designed for shared memory computers that may be further connected using a message passing interface. We assume the shared memory computers have a pronounced memory hierarchy; examples of such architectures are the KSR-1 [21] and distributed shared memory systems [20]. Compilers must target a specific machine model supported by the runtime system, and we feel the art of designing a runtime system is to provide an interface with the most generality that can be implemented efficiently across a number of systems. More general constructs allow the compiler to defer scheduling decisions until execution time, when they can be optimized by the runtime system; however, this only works if the runtime system is efficient.
Our runtime system uses a macro-dataflow approach; the definition, or producer, of data and the use, or consumer, of that data are explicitly specified during execution. This distributes synchronization overhead and provides a very flexible scheduling construct. We call our runtime system the Definition-Use Description Environment, or DUDE, and it is currently implemented as a layer on top of the existing Awesime threads library [15]. Normally, dataflow execution models have been associated with dataflow processors [2, 26, 26], but the macro-dataflow model has been implemented in software as well [3, 29]. Often, as in the case of MENTAT, an entire language is designed around the macro-dataflow approach.
By comparison, we simply use the macro-dataflow notions to provide a description of the dependence relations in a program. In many ways, the DUDE system is a fusion of existing macro-dataflow techniques and thread and loop-level scheduling systems. We discuss these related systems in §4. For the moment, we describe a sample program implemented in the DUDE environment and the performance we have measured.
1.1 Performance for the Quasi-Geostrophic Multigrid Application
Figure 1 diagrammatically illustrates a program that specifies task parallelism using the cobegin construct and parallel iteration using the doall construct. This program illustrates one possible structure for the multigrid solver of a quasi-geostrophic multigrid (QGMG) application, used by an NSF Grand Challenge project with which we are associated. We describe the problem in more detail later; for now, it suffices to see that we have two independent tasks, represented by either fork of the cobegin statement. Each branch performs two data-parallel operations, specified by doall operations. In a true data-parallel language, other constructs may replace the doall operations, but the semantics would be similar. There are several details of our program not shown by this diagram. Each pair of doall loops in the cobegin statement has a dependence distance of one. Thus, the iteration marked '1' must finish before the iteration marked '7' can begin; however, iterations '1' and '4' may execute concurrently.
Figure 1: Example Program and Schedules on Two Processor System. (a) Diagrammatic illustration of a program combining task and data parallelism; (b) schedule for a conventional runtime system; (c) possible schedule for the proposed runtime system.
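To make this structure concrete, the following is a minimal C++ sketch of the same shape, with `std::thread` standing in for `cobegin` and ordinary loops standing in for the `doall` phases; the phase bodies (`smooth_even`, `smooth_odd`) are placeholder assumptions, not the QGMG kernels.

```cpp
#include <functional>
#include <thread>
#include <vector>

// Placeholder stand-ins for the two data-parallel phases of one solver.
void smooth_even(std::vector<double>& g) { for (std::size_t i = 0; i < g.size(); i += 2) g[i] *= 0.5; }
void smooth_odd (std::vector<double>& g) { for (std::size_t i = 1; i < g.size(); i += 2) g[i] *= 0.5; }

// One branch of the cobegin: two doall-style phases with a dependence
// distance of one between them.
void solver_task(std::vector<double>& g) {
    smooth_even(g);   // corresponds to the first doall
    smooth_odd(g);    // corresponds to the second doall, which depends on it
}

// cobegin { solve for q } { solve for psi }
void qgmg_step(std::vector<double>& q, std::vector<double>& psi) {
    std::thread t1(solver_task, std::ref(q));
    std::thread t2(solver_task, std::ref(psi));
    t1.join();
    t2.join();
}
```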
There is considerably more parallelism in this program than the `cobegin` and `doall` semantics imply. We assume the compiler may be able to determine some of this dependence information – computing dependence information has been extensively studied, and there has been recent work on analyzing explicitly parallel programs [12, 9]. A conventional runtime system might implement this program by closely following the structure of the original program. This is illustrated in Figure 1 for two processors. Each `doall` construct is executed in its entirety, and the execution of `doall` blocks is separated by barrier synchronization.
There have been numerous methods proposed to schedule the individual iterations of the `doall` loops, such as guided self scheduling and factoring [28, 17]. Conventional runtime systems use static, dynamic or some variant of adaptive scheduling to assign iterations to specific processors. Typically, a single dimension of a multidimensional iteration space is scheduled, although some researchers have considered scheduling nested loops [31]. Using a conventional runtime system, dependence constraints between iterations are enforced by event synchronization (`post` and `wait`) or by nesting sequential constructs within the outer parallel loop. Eager et al. [10] proposed a scheduling paradigm, called `chores`, that is similar to loop-level scheduling of multi-dimensional iteration spaces. The Chore system directly represents multi-dimensional iteration spaces using runtime data structures. The iteration space is dynamically subdivided, essentially providing the same scheduling decisions as existing dynamic scheduling algorithms. However, dependence constraints within an iteration space can be specified by a dependence function. The DUDE runtime system inherits much of its structure from the Chore system; we extend chores to include inter-operation dependence constraints and different mechanisms for implementing dependence functions.
A performance comparison of the conventional and the DUDE runtime systems for the two independent multigrid solvers having the structure described in Figure 1 is shown in Figure 2. The performance improvement shown in this figure can be attributed to two effects: the elimination of barrier synchronization and the scheduling of two tasks (each a multigrid solver) in parallel. The benefits of eliminating barrier synchronization alone can be seen in Figure 3, which is a comparison of conventional methods with the proposed runtime system for a single data-parallel task, the Red/Black SOR.
The rest of the paper is organized as follows. We start by explaining the QGMG application in §2, so that we can explain which language constructs are desirable and motivate the design of the DUDE runtime system. This sets the stage for §3, which describes our proposed runtime system. Section 4 surveys prior work and explains why the proposed runtime system is needed. In §5 we describe in detail the performance results foreshadowed in this section. Finally, we close with a discussion of future work and conclusions.
2 Sample Application: Quasi-Geostrophic Multigrid Solver
In this section we describe the Quasi-Geostrophic Multigrid (QGMG) solver to motivate the design decisions of the proposed runtime system. The quasi-geostrophic equations describe the nonlinear dynamics of rotating, stably stratified fluids and are used to numerically simulate the highly turbulent nature of planetary flows in the Earth's atmosphere and ocean. Planetary-scale fluid motions in the Earth's atmosphere are important to the study of Earth's climate. A more complete description of the QGMG application is available [7, 32]. Here, we concentrate only on the computational aspects of the problem as they relate to the DUDE runtime system. As described in [7],
Figure 2: Speedup for Multigrid Solver on the KSR1.
Figure 3: Speedup for Red/Black SOR on the KSR1
Figure 4: Dependence relations in a 1-Dimensional multigrid application during a single V-cycle
The QG equations of motion are
$$\frac{\partial q}{\partial t} + \frac{\partial \psi}{\partial x} \frac{\partial q}{\partial y} - \frac{\partial \psi}{\partial y} \frac{\partial q}{\partial x} + \beta \frac{\partial \psi}{\partial x} = -D$$
where
$$q = \frac{\partial^2 \psi}{\partial x^2} + \frac{\partial^2 \psi}{\partial y^2} + \frac{\partial}{\partial z} \left( \frac{1}{S(z)} \frac{\partial \psi}{\partial z} \right)$$
The best method for solving these equations is the multigrid solver. The two main variables in these equations are $q$ and $\psi$, each of which requires a multigrid solver and its associated data structures. In a language that supports cobegin, two tasks can be spawned to independently solve for the two variables. Each task executes data-parallel operations, as shown earlier in Figure 1, with additional interaction between the two tasks. The combined data and task parallelism provides many opportunities for improving performance.
The multigrid method has received much attention due to its fast convergence and the challenge of parallelizing the algorithm [6, 30, 8, 13]. The basic idea of the multigrid method is to obtain an initial approximation for a given grid by first solving on a coarser grid. Since the coarser grid is simple to compute, we can use an iterative method such as the Gauss-Seidel method to get an approximate solution on the coarse grid and then interpolate this approximation to the finer grid. A recursive use of this basic idea leads to the full multigrid algorithm: first use relaxation (smoothing) on an input matrix to obtain an approximation whose error is smooth. Using this error, a correction to this approximation is computed on a coarser grid. Computation is mapped to a coarser grid using a restriction operator, the simplest of which is to simply copy some of the points from the fine grid onto the coarse grid. The coarsest grid can be solved exactly, after which we begin interpolating (prolonging) the correction back to finer grids.
The algorithm we described uses a V-cycle, but there are many variants. Restricting and prolonging amounts to climbing up and down a pyramid of matrices where the base is the finest grid
and the coarsest grid is the top of the pyramid. It is easier to understand the dependence constraints using a simpler one-dimensional multigrid solver as an example. Figure 4 shows the dependence relations between levels of a V-cycle for a one-dimensional problem. Each circle represents the execution of a single iteration of the relaxation function. Each level of the pyramid consists of three operations: Smooth the even elements of the matrix, smooth the odd elements, obtain an approximation and restrict to next coarsest grid level if going up the pyramid or prolong to next finest grid level if going down. There is a dependence across the levels of the pyramids, as indicated by the arrows in Figure 4. Normally, the dependence relations shown in Figure 4 are satisfied by completing all iterations in each level before starting the next level. Figure 4 shows that this is not necessary; the iteration indicated by the lower darker circle can be started when the three iterations on which it depends finish.
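To make the V-cycle structure concrete, here is a minimal, self-contained sketch of a one-dimensional V-cycle for $-u'' = f$ with Gauss-Seidel smoothing, restriction by injection, and linear-interpolation prolongation. It illustrates only the smooth/restrict/prolong pattern discussed above; it is not the QGMG solver, and the grid size is assumed to be $2^k + 1$.

```cpp
#include <cstddef>
#include <vector>

using Grid = std::vector<double>;

// One Gauss-Seidel sweep for -u'' = f on the interior points (spacing h).
static void smooth(Grid& u, const Grid& f, double h) {
    for (std::size_t i = 1; i + 1 < u.size(); ++i)
        u[i] = 0.5 * (u[i - 1] + u[i + 1] + h * h * f[i]);
}

// Residual r = f - A u for the standard three-point discretization.
static Grid residual(const Grid& u, const Grid& f, double h) {
    Grid r(u.size(), 0.0);
    for (std::size_t i = 1; i + 1 < u.size(); ++i)
        r[i] = f[i] - (2.0 * u[i] - u[i - 1] - u[i + 1]) / (h * h);
    return r;
}

// Restriction by injection: copy every other fine point to the coarse grid.
static Grid restrict_to_coarse(const Grid& fine) {
    Grid coarse((fine.size() + 1) / 2, 0.0);
    for (std::size_t i = 0; i < coarse.size(); ++i) coarse[i] = fine[2 * i];
    return coarse;
}

// Prolongation by linear interpolation, added as a correction to the fine grid.
static void prolong_and_correct(Grid& fine, const Grid& coarse) {
    for (std::size_t i = 0; i < coarse.size(); ++i) fine[2 * i] += coarse[i];
    for (std::size_t i = 1; i < fine.size(); i += 2)
        fine[i] += 0.5 * (coarse[i / 2] + coarse[i / 2 + 1]);
}

// One V-cycle: smooth, restrict the residual, recurse, prolong, smooth again.
static void v_cycle(Grid& u, const Grid& f, double h) {
    smooth(u, f, h);
    if (u.size() <= 3) { smooth(u, f, h); return; }   // coarsest level: "solve"
    Grid r = restrict_to_coarse(residual(u, f, h));
    Grid e(r.size(), 0.0);                            // coarse-grid correction
    v_cycle(e, r, 2.0 * h);
    prolong_and_correct(u, e);
    smooth(u, f, h);
}
```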
Conventional parallel implementation of the multigrid method involves partitioning the finest grid matrix among the available processors. Processor utilization is acceptable on fine grids, but as the algorithm climbs up the pyramid to coarser grids, the majority of the processors will be idle. Recall that the QGMG problem must solve two multigrid problems; these can be executed concurrently, but many conventional runtime systems do not support the concurrent computation of two data parallel computations. A third problem is that barrier synchronization strictly forces an operation to complete before the next one begins. The operations described above (smooth, prolong, restrict) are only dependent on neighboring elements to complete, not the entire matrix. If we allow some processes to continue processing the next operation immediately after the neighboring points have been calculated, we can greatly improve processor utilization.
The DUDE runtime system addresses all of these problems. We eliminate the barriers implicit in the multigrid algorithm, substituting nested explicit dependence information. The dependence information indicates when higher levels of the V-cycle can start execution, and multiple parallel operations can be executed in parallel.
Data dependence constraints between groups of iterations, termed iterates in our system, are enforced by runtime dependence information determined during compilation. Control dependence operations, such as the barriers in the previous examples, are also controlled by runtime representations. In our current implementation of the runtime system, this example program runs without using barrier operations. All dependence information is specified using “precedence edges” in the runtime structures. Since this precedence information only involves a subset of the processors in the system, synchronization is faster, reducing overhead. In effect, the DUDE runtime system provides a concrete runtime implementation for the dependence information shown in Figure 4.
3 The Design of DUDE
The DUDE runtime system is based on AWESIME [15] (A Widely Extensible Simulation Environment), an existing object-oriented runtime system for shared-address parallel architectures. The AWESIME library currently runs on workstations using Digital Alpha AXP, SPARC, Intel 'x86', MIPS R3000 and Motorola 68K processors, as well as the KSR-1 massively parallel processor. The AWESIME library has been in use for a number of years, primarily for efficient process-oriented discrete event simulation – for example, Tera Computer Corporation uses AWESIME for operating system simulations.
We have extended the AWESIME run-time system to implement the Definition-Use Description Environment. In DUDE, objects of class Thread are a basic unit of task parallelism and objects
of class **Iterate** are a basic unit of data parallelism. Both **Thread** and **Iterate** are subclasses of the **PObject** (parallel object) class, which represents any unit of parallelism managed by the scheduler. A **Thread** has a stack and state information. As with many runtime systems, the overhead of saving this state information during context-switches can be minimized by creating only one **Thread** per processor, but programmers are able to create any number of threads. In related work, we are using whole-program compiler optimization to reduce the space and time overhead for threads. Precedence constraints due to data dependences in the application program can be satisfied for **Threads** using the synchronization mechanisms supported by DUDE, such as **barriers or semaphores**. These operations only make sense for stateful concurrent objects that can block and resume execution (i.e., threads). By comparison, **Iterates** run to completion and are not context switched. **Iterates** can not block on barriers or semaphores, since they have no state; instead, explicit precedence information is used. Because **Iterates** lack state, they can be created and managed much more efficiently than threads.
The abstraction to **PObject** over these two subclasses allows applications to use both **Thread** and **Iterate** objects. A **Thread** or **Iterate** can only be created by sub-classing the existing classes. For example, an iterate describing a particular computation would be represented by a subclass of **Iterate**. All behavior specific to that computation will be encapsulated in the subclass. In this paper, we frequently refer to the activity of a **Thread** or **Iterate**, but such references should be understood to refer to a subclass of those classes.
As with all objects in C++, the class constructor is invoked when an iterate or thread is created. Arguments to the iterate or thread are specified in the application program and are recorded in the corresponding instance variables. Any **PObject** can be bound to specific processors using the **CPUAffinity** method. The **PObject** class provides a **virtual function, main** to customize the activity of each thread or iterate. The **main** method is the starting point for a new **Thread** or **Iterate** and is provided by subclasses of **Thread** and **Iterate**. Thus, the body of **main** can be a unit of execution in a data parallel loop or the body of a task.
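As a rough illustration, the hierarchy might be declared as follows in C++; the class and method names (**PObject**, **Thread**, **Iterate**, **main**, **CPUAffinity**, and the descriptor accessors used later in Figure 6) follow the paper, but the member layout and signatures here are assumptions.

```cpp
// Base class for anything the scheduler can run.
class PObject {
public:
    virtual ~PObject() = default;
    virtual void main() = 0;                           // body of the task or loop chunk
    void CPUAffinity(int cpu) { preferredCpu = cpu; }  // bind to a specific processor
protected:
    int preferredCpu = -1;
};

// Stateful unit of task parallelism: has a stack and may block on barriers
// or semaphores (the context-switching machinery is omitted from this sketch).
class Thread : public PObject {
};

// Stateless unit of data parallelism: runs to completion over one chunk of
// the iteration space, described by the bounds and stride below.
class Iterate : public PObject {
public:
    int getSY() const { return startY; }
    int getEY() const { return endY; }
    int getSX() const { return startX; }
    int getEX() const { return endX; }
    int getST() const { return stride; }
protected:
    int startY = 0, endY = 0, startX = 0, endX = 0, stride = 1;
};
```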
Parallel objects are scheduled using a **CpuMux**, or CPU multiplexor. There are several subclasses of the **CpuMux** base class, defining the scheduling policy to be used for a specific application. Each CPU multiplexor repeatedly selects a **PObject** to execute, and executes that object. The execute method specialized for **Threads** will context switch at this point, while an **Iterate** will directly execute the function associated with the individual **Iterate** object.
Dynamic dispatch based on object type is used throughout **AWESIME** and **DUDE**. The **CpuMux** object represents a hardware processor. Using the object-oriented model provided by C++, we provide specialized **CpuMux** subclasses for different parallel architectures that provide different work-sharing strategies. The most common work-sharing mechanism uses a separate scheduler for each **CpuMux**, and **CpuMux**’s “steal” from each other if they are idle. As another example of dynamic dispatch, users can select a barrier algorithm that is most appropriate to the architecture [16] or problem.
The **DUDE** runtime system uses the abstraction and inheritance constructs of C++ to keep the scheduling policy, the underlying hardware, the type of objects being scheduled, the type of synchronization and other aspects of the system mutually orthogonal. As we will see, we need not sacrifice efficiency for this generality and modularity. Dynamic dispatch is also the basis of loop scheduling using the **Iterate** class, which we describe in some detail.
3.1 Data Parallelism: Computation using Iterates
The Iterate class is the core construct for data parallel computation in DUDE. The Iterate class provides a mechanism that can best be described as a large grain dataflow execution model. The goal is to relieve the application programmer or the compiler from concerns regarding locality of data, enforcement of synchronization of data constraints and scheduling.
Figure 5 shows the instance variables and the methods that the application programmer must specify. The main method is the operation that is to be performed on the data. The descriptor specifies a portion of the parallel loop accessed by the main method. The lower bound, upper bound and the stride can all be extracted from the descriptor. Each Iterate also contains an internal loop control variable and a loop terminating variable. All of these variables are initialized in the Iterate’s constructor (called Iterate101 in the diagram).
The remaining methods are used to determine the continuation of an Iterate. When an Iterate finishes execution, the scheduler determines if the completed Iterate has satisfied any precedence constraint. Figure 7 shows the scheduling engine. The scheduler calls the generateDescriptors method of the completed Iterate, which returns a list of data descriptors. Each descriptor represents an arc in the precedence graph. The descriptor is then used as a key to a table that counts the number of Iterates that have finished and the number needed to satisfy the dependence constraint. Constraints are satisfied if the count is equal to the expected value of the dependence count. The getNumDeps method returns the number of expected dependents. If the constraints are satisfied, then the makeContinuation method is used to instantiate the continuations and add them to the work heap. The runtime system performs all the synchronization required to ensure that the precedence constraints are satisfied. The application programmer or the compiler need only express the dependence information in the form of the generateDescriptors method.
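The following self-contained sketch captures that continuation logic; the descriptor type, the counter table, and the work heap here are simplified stand-ins (assumptions), not the actual DUDE data structures.

```cpp
#include <deque>
#include <map>
#include <vector>

struct Desc {                        // identifies one arc in the precedence graph
    int j, k;
    bool operator<(const Desc& o) const {
        return j < o.j || (j == o.j && k < o.k);
    }
};

struct Work {                        // stand-in for an Iterate
    virtual ~Work() = default;
    virtual void main() = 0;                             // the chunk's body
    virtual std::vector<Desc> generateDescriptors() = 0; // arcs this chunk feeds
    virtual int getNumDeps() = 0;                        // producers per arc
    virtual Work* makeContinuation(const Desc& d) = 0;   // chunk enabled by the arc
};

// Called by the scheduler when an Iterate finishes: bump the counter for each
// arc it feeds, and enqueue the continuation once all producers have finished.
void onCompleted(Work* done, std::map<Desc, int>& depCount,
                 std::deque<Work*>& workHeap) {
    for (const Desc& d : done->generateDescriptors()) {
        if (++depCount[d] == done->getNumDeps()) {
            workHeap.push_back(done->makeContinuation(d));
            depCount[d] = 0;         // reset the counter for the next sweep
        }
    }
}
```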
Note that the dependence constraints also distribute the synchronization that occurs in the program. In distributed shared memory computers, such as the KSR-1, synchronization among a large number of processors causes particular cache lines to become hot-spots [27]. By distributing the activity over a number of synchronization variables, the hardware parallelism supported by the multiple communication levels in a system such as the KSR can be exploited.
By providing the concept of dependence and use specification in the runtime system, we can also execute multiple parallel operations concurrently. The QGMG program must solve two multi-grid problems to advance a single time-step. A traditional runtime system, or even an advanced system such as the Chores model [10], must sequentially schedule the computation in each doall or loop nesting. By allowing all operations to be evaluated in parallel, we increase the scheduling opportunities, allowing the runtime system to select a better schedule.
The iteration space is initially sub-divided into fixed sized chunks, with each chunk being represented by an Iterate object.
void RedSOR::main()
{
    // Apply the five-point stencil to the red points covered by this
    // Iterate's descriptor (bounds and stride come from the descriptor).
    for (short i = getSY(); i <= getEY(); i += getST()) {
        for (short j = getSX(); j <= getEX(); j += getST()) {
            mydata[i][j] = Func(mydata[i-1][j] + mydata[i][j+1]
                              + mydata[i+1][j] + mydata[i][j-1]);
            mydata[i+1][j+1] = Func(mydata[i][j+1] + mydata[i+1][j+2]
                                  + mydata[i+2][j+1] + mydata[i+1][j]);
        }
    }
}

int RedSOR::getNumDeps()
{
    return 5;
}

BlackSOR *RedSOR::makeContinuation(DESC desc)
{
    return BlackSOR::MyAlloc(desc.J, desc.K);
}

DESC *RedSOR::generateDescriptors()
{
    // get current index to this Iterate.
    int J = getJ();
    int K = getK();
    if (getLoopIndex() == getEnd()) return NULL;
    // create dependence vector
    DESC *desc = FormDescriptor(J, K, -1, 0, +1, 0, 0, -1, 0, +1);
    return desc;
}
**Figure 6:** Some Methods from the Red/Black SOR Iterate
Figure 7: Scheduling Engine of the Proposed Runtime System
The iteration space is described using a symbolic representation, much as was done in the Chores system; however, we found that the repeated evaluation of the symbolic dependence constraint was too slow. Instead, the first time a construct is executed, the symbolic representation is used to create a series of Iterate instances that represent a sequential execution of a subset of the iteration space. These sequential sections are then dynamically scheduled across multiple processors. The decomposition of the iteration space can be cached to reduce the time to start the computation if that routine is executed repeatedly.
One of several distribution techniques, such as block distribution, may be used to initiate the decomposition. If the processors experience load imbalance, as determined by a scheduling heuristic, these fixed sized Iterates may be further subdivided during the execution of a parallel construct. When all the subdivided chunks are completed in that iteration, the original Iterate that was subdivided resumes its initial size for successive executions. Partitioning need not be concerned with the data dependence specified in the Iterate since the partitions are only in effect for the duration of one loop iteration. In other words, completion of any one of the subdivided parts is not sufficient to begin enabling continuations; the entire portion must be completed. This reduces the overhead of subdividing the computation, because the dependence information of continuations does not need to be modified.
The rationale for initially decomposing an Iterate into fixed sized parts is threefold. First, fixed size chunks simplify maintaining the dependence information, and make that process more efficient. Allowing variable size chunks implies a less efficient data descriptor that takes a range of values instead of indices. We initially implemented such a structure, similar to the Data Access Descriptor [4], but found it was too slow in practice. Secondly, and more importantly, fixed size chunks allow the scheduler to establish an affinity between a chunk and a processor, improving data locality. Each chunk has a preferred processor when it is rescheduled on the next iteration of the loop. This affinity is only compromised if there is a great load-imbalance or there is insufficient work left to be done. Thirdly, contention for a single large chunk at the beginning of the computation is avoided because each CPU can start with an Iterate from its own local queue.
Initially these chunks or Iterates are distributed to local queues of the CpuMux's. The CpuMux for each individual processor grabs an Iterate from the local queue to process. If this queue is empty, it attempts to steal work from another CpuMux. When the total number of Iterates to schedule is below a certain threshold, the CpuMux divides an Iterate, removing it from the queue only when all the partitions have completed. Upon completion of an Iterate, the scheduler marks that object with its processor number. This information will be used in the next iteration to decide which local queue should be preferred for this Iterate.
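A simplified, single-threaded sketch of that dispatch loop is shown below; the names are assumptions, and a real CpuMux would of course need locking around the queues.

```cpp
#include <deque>
#include <vector>

struct Chunk {
    int lastCpu = -1;        // affinity hint recorded after each execution
    void run() {}            // placeholder for the Iterate body
};

struct CpuMux {
    int id = 0;
    std::deque<Chunk*> local;              // this processor's queue
    std::vector<CpuMux*>* peers = nullptr;

    Chunk* takeOrSteal() {
        if (!local.empty()) {
            Chunk* c = local.front(); local.pop_front(); return c;
        }
        if (peers) {
            for (CpuMux* p : *peers) {     // local queue empty: steal from a peer
                if (p != this && !p->local.empty()) {
                    Chunk* c = p->local.back(); p->local.pop_back(); return c;
                }
            }
        }
        return nullptr;
    }

    void dispatchOnce() {
        if (Chunk* c = takeOrSteal()) {
            c->run();
            c->lastCpu = id;               // prefer this queue on the next sweep
        }
    }
};
```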
Iterates are created as the program executes and encounters parallel constructs. For example, the execution of a doall corresponds to the creation and scheduling of a collection of Iterates. Threads wait for a specific parallel construct to complete by blocking on a semaphore, and the continuation for the Iterate representing a doall releases that semaphore. The main program is represented by a Thread, and can create additional threads or iterates as needed. In fact, iterates can be used to create threads; this will be used to schedule threads to mask the latency of message passing applications.
4 Prior and Related Work
There are a number of existing runtime systems for parallel computers. Why should we build yet another runtime system? First, many existing runtime systems provide a limited set of abstractions
for concurrency. For example, the PTHREADS [25] and PRESTO [5] libraries only provide threads and traditional synchronization mechanisms such as barriers and locks. We feel these abstractions are too simple to allow a compiler to generate code that can efficiently manage the resources of a complex architecture, such as a distributed shared memory computer.
More recently, there have been a number of programming languages and runtime libraries stressing a diversity of parallelism constructs. The Chare language [18], and more recently the CHARM++ [19] system, use actor-like message semantics and a continuation computation model. In the Charm runtime system, used by the Chare language, tasks are spawned by sending initiation messages to processors. Tasks subdivide work by creating other tasks. The Chare system was originally designed to support parallel logic programming languages such as ROLOG, but has evolved into a more general programming tool. The Charm system does not implement ‘threads’ in the conventional sense - state cannot be saved and resumed at a later time. Furthermore, the runtime semantics are targeted towards message passing environments and do not provide explicit support for shared address environments.
The Chores [10] and Filaments [11] systems are the most germane runtime systems we’ve encountered. Filaments [11] are extremely fine grain stateless threads consisting of a pointer to code and a list of arguments. Engler et al. showed that filaments, due to their low overhead ($\approx 16$ bytes each), were suitable for exploiting fine grain parallelism on a variety of programs. For example, in the Red-Black SOR problem, each point in the matrix would be represented by a filament. This extremely fine granularity permits efficient load balancing, but has certain drawbacks. First, conventional compiler optimizations such as strength reduction, induction variable detection and common subexpression elimination cannot be performed because the higher level looping is used to create filaments. Thus, it is difficult for each filament to take advantage of the state of the parent process; this results in good speedup, but poor performance. Furthermore, the overhead of representing filaments is considerable for large problems. Our application group wants to process matrices containing $1024^3$ elements - in the filaments model, the programmer (or compiler using the Filaments runtime system) decomposes the problem prior to execution. Thus, for fine-grained load balancing, we might break the problem into $1024^2$ components, at a cost of 16Mbytes of memory and an outer loop that creates and initializes one million filaments.
The Filament system provides three types of threads: barrier filaments, run-to-completion filaments, and fork-join filaments. DUDE presently provides two types of parallel objects: blocking threads with stacks and Iterates, which roughly correspond to a combination of barrier and run-to-completion Filaments. The difference between Filaments and Iterates is that Iterates do not force a barrier synchronization. Instead, the system uses data dependence information to generate continuations when an Iterate completes, as described earlier. Also, Iterates are created once and rescheduled multiple times. The advantage of this is not only the reduction of time to create and destroy the objects, but also that it allows the scheduler to establish an affinity of an Iterate to a particular processor.
As mentioned above, Filaments have the problem of overly fine granularity. The Chores system eliminates these problems by allowing ‘atoms’ (a sequential unit of work) to be aggregated and split dynamically. The Chores system is an extension of the work heap model in which one worker per processor grabs work from a queue. It extends the work heaps model by providing a description of the iteration spaces of multiply-nested loops and a symbolic specification for data dependence. The Chores system also dynamically partitions portions of the iteration space if they are partitionable
(i.e. not atoms). The Chore model provides an implicit barrier when all the portions of a chore have finished.
The DUDE Iterate class is also based on the idea of work heaps, as in Chores. However, DUDE allows the concurrent scheduling of multiple loops, supporting explicit parallelism while providing better scheduling opportunities. The Chores system can only schedule loops containing a single function. In many applications, loops contain multiple operations where each operation must wait for the completion of the previous operation(s). Traditional runtime libraries using threads simply insert a barrier synchronization between any two operations. The Chores system can eliminate barriers only if the operations between the barriers are the same, as in Gaussian elimination. We have extended this to eliminate barriers even when the barriers separate disparate operations. For example, in the multigrid solver, one must first smooth the even elements, then the odd elements, approximate, and finally restrict to the next level. The Chores system was designed with little regard for locality. Eager et al. showed improved performance due to good load balance on the Sequent Symmetry, an architecture that did not severely penalize poor data locality. The trend in more recent multiprocessors, such as the KSR-1, has been to increase the ratio of processor speed to communication speed, making good locality as important as good load balance [24]. The DUDE runtime system allows an affinity between PObjects and processors.
Finally, the Chores system was intended to be used also by application programmers - to simplify programming, chores have the option of suspending execution, but the normal convention is that individual chores usually run to completion. If a chore blocks, a scheduler activation creates a new ‘worker thread’ to continue chore execution. By comparison, we are providing a library for an object-parallel language, and assume the compiler can distinguish between blocking and non-blocking operations. This simplifies the runtime system design and makes it more portable.
Graham et al. [14] extended the design of an existing compiler to overlap the execution of different loop constructs. Thus, the doall operations in each thread may be combined, reducing the execution overhead and improving load balance. Our proposed runtime system combines elements of the Chores system and the optimizations proposed by Graham et al. We extend these prior systems by combining the flexible intra-operation scheduling of the Chores system while reducing the constraints of inter-operation scheduling, such as barrier operations.
5 Performance Results
In this section we describe the performance of the Red/Black SOR and the Multigrid solver on the KSR1, a parallel cache-only memory architecture [21].
5.1 Red Black SOR Problem
The successive over-relaxation (SOR) method is an iterative technique to solve a linear system of equations. A common implementation of this technique uses the “five point stencil” to compute the \((k+1)\)st iteration from the \(k\)th iteration by traversing the SOR iteration array in row major order. The Red-Black SOR algorithm [22, 23, 1] provides parallelism by dividing the mesh into “even” and “odd” meshes, like the red and black squares of a checkerboard. All even or odd elements can be processed concurrently.
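For reference, one sequential Red-Black sweep with the five-point stencil looks roughly like the following; this is an illustration of the checkerboard split, not the benchmarked code, and `omega` is the over-relaxation parameter.

```cpp
#include <cstddef>
#include <vector>

// One Red-Black SOR sweep over an n x n grid for -laplacian(u) = f with
// spacing h and over-relaxation parameter omega.
void red_black_sweep(std::vector<std::vector<double>>& u,
                     const std::vector<std::vector<double>>& f,
                     double omega, double h) {
    const std::size_t n = u.size();
    for (std::size_t color = 0; color < 2; ++color) {   // 0 = red, 1 = black
        for (std::size_t i = 1; i + 1 < n; ++i) {
            for (std::size_t j = 1; j + 1 < n; ++j) {
                if ((i + j) % 2 != color) continue;      // checkerboard split
                double gs = 0.25 * (u[i - 1][j] + u[i + 1][j] +
                                    u[i][j - 1] + u[i][j + 1] + h * h * f[i][j]);
                u[i][j] = (1.0 - omega) * u[i][j] + omega * gs;
            }
        }
    }
}
```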
We use the Red-Black SOR algorithm for two reasons. First, Red-Black SOR does not suffer from load-imbalance. As shown in the first method of Figure 6, the body of the loop to be executed does not contain any conditional statements. This implies that if static scheduling is used, processors would arrive at the barriers at approximately the same time and therefore need not idle waiting for each other. Thus, if we can show that the DUDE runtime system can improve upon static scheduling in this application, then we can expect the performance improvement to be greater on applications that suffer from load-imbalance in the body of the doall loops. Secondly, the Red-Black SOR algorithm is a kernel in the QGMG application, and the performance of this kernel is important for that application.
Figure 3 compares the performance of the Red-Black SOR algorithm running on a KSR-1 parallel computer using four methods: DUDE Threads with static scheduling, DUDE Iterates, DUDE Threads using dynamic block scheduling and the KSR PTHREADS package with static scheduling. The results shown in Figure 3 are for a 1000x1000 matrix. For each method, the graph shows the average speedup and the 95% confidence intervals for that data point.
There is a one-to-one mapping between KSR-PTHREADS threads and processors. We used the PTHREADS bind function to prevent migration of threads during the entire execution. Each thread is statically assigned \( \frac{N}{p} \) rows of the input matrix. The PTHREADS barriers are used to ensure that all the red computations are completed before any black computation is started (and vice versa). The DUDE threads used exactly the same scheduling as the KSR PTHREADS model. In both the thread methods, dividing the work into blocks instead of rows does not improve locality because the whole matrix must be traversed before the data is re-accessed; all the red computations must complete and synchronize at the barrier before any element is re-accessed by black computation. The improved performance is largely due to a more efficient barrier [16] which takes the hierarchical interconnect topology of the KSR1 into account. With dynamically scheduled DUDE threads, threads grab a block of the matrix from a global descriptor containing information on what work remains to be done. While data or work is not bound to a particular thread, the threads themselves are bound to processors as in the previous two methods. Consequently, there is a better load balance in this method at the expense of reference locality. Another disadvantage is that processors must contend for access to the global descriptor.
For the Iterates implementation, the matrix is broken into blocks of data, each of which is the responsibility of an Iterate object. The granularity is much smaller than in the case of either DUDE Threads or KSR PTHREADS. The main difference between this method and the dynamic thread method is that completed blocks can enable new blocks, overlapping the Red and Black computations. Locality is improved because Black operations may begin immediately after a Red operation if the precedence constraints are satisfied. This re-accesses the data for that region of the matrix before it has left the processor's cache. Furthermore, each block may be further partitioned when the amount of work left is running low.
This experiment shows that dynamic scheduling can be detrimental on large shared memory multiprocessors. Notice that the performance of the dynamically scheduled computation becomes dramatically worse when more than 32 processors are used. The KSR-1 is structured as rings containing 32 processors; communication between rings is more expensive than communication within a ring. Thus, the improved load balance of dynamic scheduling comes at the cost of increased synchronization and communication overhead. It also shows that the efficiency of the native threads library, KSR-PTHREADS, can be less than that of a light-weight non-preemptive thread library. Lastly, it shows that the Iterate construct is even more efficient than the light-weight thread library. Both the Iterate and thread programs have a linear speedup, indicating they both scale well, but the Iterate method has lower scheduling and work-sharing overhead.
5.2 Multigrid Solver
Figure 2 shows the performance of combined task and data parallelism when traversing two independent loops, each independently solving a 1024x1024 matrix using the multigrid solver. The speedup for the Iterates method is superior to that of the Thread methods.
In the thread method the input matrix is divided into an equal number of rows among the threads, which are bound to physical processors. A barrier synchronization is used between each of the operations: smooth even, smooth odd, approximate, relax/prolong and the next level operations. Due to the halving of the matrix dimension at the next highest level, the number of processors participating is reduced when climbing up the pyramid. Non-participating processors simply idle at the higher levels. The performance graph shows poor speedup for both the native and DUDE thread packages.
By comparison, the Iterates implementation solves both multigrid problems concurrently, and can solve the Red-Black SOR problem in each relaxation step using the method described in the previous experiment. Although this provides better scheduling opportunities during execution, we recognize that the near-linear performance does not scale indefinitely, although it does scale to a larger number of processors. To achieve both task and data parallelism, two threads are created. Each thread starts a multigrid solver using the data parallelism offered by Iterates. An Iterate class is created for each of the 5 operations: smooth the even elements, smooth the odd elements, approximate, prolong, and restrict. Initially only the SmoothEven Iterates are created and added to the work queue. As these complete and the precedence constraints are satisfied, SmoothOdd Iterates (as specified in the makeContinuation method of the SmoothEven Iterate class) are enabled. The later operations execute in a similar fashion. After the Approximate Iterate completes, a choice of either enabling the Restrict Iterate or the Prolong Iterate is made in the makeContinuation method of the Approximate Iterate. Figure 2 shows that the multigrid solver using Iterates achieves super-linear speedup due to locality for small numbers of processors and near-linear speedup for larger numbers of processors.
This experiment shows that the overlapped computation at each level of the multigrid computation, and the opportunity to overlap the execution of different multigrid planes, results in improved speedup. This occurs both because of increased scheduling opportunities and because of improved data locality and reduced scheduling overhead.
6 Conclusions
We have described an extensible runtime system for shared address architectures that supports both task and data (or object) parallelism. Our current implementation allows applications to specify precedence constraints between tasks and between different data parallel computations. Preliminary results show that for a data parallel application, we achieve better performance using runtime representations of control and data dependence than by using conventional thread decompositions. We are currently integrating the DUDE runtime system with the pC++ object-parallel language, as part of an ARPA contract to develop a high-performance C++ infrastructure.
At the same time, our runtime system supports Threads, so we can express task or control parallelism between different sections of code that can execute in parallel. This is particularly important for "coupled" problems where we may be modeling two systems (structures & fluids,
oceans & weather) concurrently. Combined thread and object parallelism is important in programs such as adaptive mesh refinement, where data parallel operations are performed over a number of different arrays.
One feature not stressed in this paper is that the DUDE runtime system is designed to be extensible, allowing the customization of scheduling policies and the introduction of new work-sharing structures. As parallel architectures are used for increasingly complex problems, extensible runtime systems that exploit additional degrees of parallelism within programs will be needed. We believe that this paper demonstrates that object-oriented runtime systems offer excellent performance, and allow a great degree of extensibility.
6.1 Future Work: Profile-Driven Dynamic Scheduling Policies
Using information from profiling the application program, it is possible to determine the runtime behavior of programs and determine which scheduling policy is best suited for maximum load balance and parallelism in different sections of the program. For example, initialization of the elements of a huge matrix can be done in a statically scheduled parallel loop. A section of the program that has varying size code in different parallel Threads based on inputs to the program may perform best with an adaptive scheduling policy. Thus, for better load-balance and parallelism, it may be worthwhile to change the scheduling policy dynamically as the program executes. We are extending the DUDE runtime system to support dynamically changing scheduling policies, by customizing the scheduling function based on profiling information from the application program.
Acknowledgements
We are thankful to Harini Srinivasan for her comments on an early version of this paper. We are also grateful to Clive Bailie of the University of Colorado for taking the time to explain the QGMG application to us. This work was funded in part by NSF grant No. ASC-9217394, ARPA contract ARMY DABT63-94-C-0029 and an equipment grant from Digital Equipment Corporation.
References
Sleak: Automating Address Space Layout Derandomization
Christophe Hauser
Information Sciences Institute
University of Southern California
Jayakrishna Menon
Information Sciences Institute
University of Southern California
Yan Shoshitaishvili
Arizona State University
Ruoyu Wang
Arizona State University
Giovanni Vigna
University of California, Santa Barbara
Christopher Kruegel
University of California, Santa Barbara
ABSTRACT
We present a novel approach to automatically recover information about the address space layout of remote processes in the presence of Address Space Layout Randomization (ASLR). Our system, dubbed Sleak, performs static analysis and symbolic execution of binary executable programs, and identifies program paths and input parameters leading to partial (i.e., only a few bits) or complete (i.e., the whole address) information disclosure vulnerabilities, revealing addresses of known objects of the target service or application. Sleak takes, as input, the binary executable program, and generates a symbolic expression for each program output that leaks information about the addresses of objects, such as stack variables, heap structures, or function pointers. By comparing these expressions with the concrete output of a remote process executing the same binary program image, our system is able to recover from a few bits to whole addresses of objects of the target application or service. Discovering the address of a single object in the target application is often enough to guess the layout of entire sections of the address space, which can be leveraged by attackers to bypass ASLR.
CCS CONCEPTS
• Security and privacy → Logic and verification; Software reverse engineering.
KEYWORDS
Binary program analysis, vulnerability discovery, information leakage
¹ Without prior knowledge of the base address of the program, or any address such as a function pointer.
use after free, amongst others. Apart from leaking addresses, information disclosure vulnerabilities are also commonly exploited by attackers to leak sensitive data such as cryptographic keys or passwords, as in the famous case of the Heartbleed bug [17], for instance, which remained unnoticed for a long period of time before it was discovered.
Due to the complexity of the software involved, it is difficult for existing automated tools to detect deep bugs without additional knowledge of the specific target environment and specificities of the analyzed software, and, in practice, manual analysis along with fuzzing are used in most cases to discover new vulnerabilities [12].
However, the process of manual analysis is both non-exhaustive and error-prone, which results in many vulnerabilities remaining (publicly) undiscovered. In addition, despite being a very active research topic, the coverage of fuzzing techniques remains limited (e.g., program branches guarded by specific conditions such as a hash value are very unlikely to be explored), resulting in bugs being found only in shallow paths or in specific program branches.
The situation is even more critical when it comes to binary software, for which the source code is not available, as this makes the manual analysis and reverse engineering processes considerably harder. Yet, proprietary software distributed in binary-only form is ubiquitous. An example of this is Adobe’s Flash Player, which, because of its popularity and error-ridden implementation, is regularly subject to newly discovered vulnerabilities, e.g., CVE-2018-4944, CVE-2018-4934 or CVE-2018-4928.
Existing approaches to detect information leakage either focus on out-of-bound reads, type confusion or use-after-free using dynamic approaches, or require source code [16, 21, 24, 26, 33]. However, as of today, little attention has been paid in the research community to the detection of indirect information leaks in binary software. In this paper, we introduce Sleak, a novel approach to detecting and exploiting information disclosure vulnerabilities in binary software, based on static program analysis and symbolic execution. This approach targets unsafe pointer usage and manipulation leading to partial or complete address leakage.
The underlying intuition behind our approach is that, by observing outputs of a program, an attacker can infer information about randomized addresses corresponding to its internal objects (e.g., stack variables), if there exists any dependence between such an object and any output of the program. A dependence of this kind may be introduced accidentally (e.g., through a pointer manipulation error) or voluntarily (e.g., the programmer intended to use the address as a unique identifier). Either way, those information leaks are, by nature, indirect, and involve a number of operations transforming the address into a value. Without prior knowledge of the aforementioned transformations, it is very difficult for an attacker to differentiate such an output value, leaking sensitive information about the address space of the program, from a benign value, as only part of an address may be revealed. In other terms, transformations over partial pointer addresses “look” just like any other random value from an observer’s standpoint.
Our system addresses this problem by providing to the observer the ability to identify which bits of an output value correspond to transformations (or computations) over an address, or which bits of the output indirectly leak address information. Once such bits have been identified, our system builds symbolic expressions that represent the values of those bits in terms of addresses of the program. This accurate tracking of address information relies on symbolic execution of code at the binary level, by following paths from address definitions to the use of subsequent data as part of an output of the program. As a result, based on a symbolic expression and a concrete output value of a running instance of a program, our system is capable of reconstructing parts of the (indirectly) leaked address, up to the full address, depending on the nature of the transformations.
Our approach does not necessarily require the attacker to interact with the targeted remote process, and in that sense, can be done passively, as long as the attacker is able to observe its outputs (e.g., network packets). It only requires prior knowledge of the binary code of the corresponding program. In order to attack a remote process, the attacker needs to i) run our analysis locally so as to identify address leaks, and to obtain symbolic expressions of the relevant output values of the program, and ii) solve symbolic constraints on these expressions given the knowledge of a concrete output value. The approach we propose is fine-grained and detects indirect information leaks at the bit level. Depending on the “size” of the leak, that is, the number of bits of address that are revealed in a given output, the attacker may directly and precisely reconstruct the original address, or may need to brute force a range of addresses containing the original address. In the latter case, leaking bits of an address has the potential to significantly reduce the entropy of ASLR, especially since most current implementations use limited address pools due to practical constraints (e.g., user/kernel separation, stack located higher than heap, etc.).
Architecture | Stack | Heap | mmap
32-bit | 19 | 13 | 8
64-bit | 30 | 28 | 28
Figure 1: Bits of entropy per memory region (Linux 4.5.0)
Shacham et al. [36] have demonstrated that, in practice, 16 bits of entropy are not enough to prevent brute-force attacks against ASLR (the authors also estimated that 20 bits remain within the reach of practical attacks). As represented in Figure 1, current 32-bit Linux systems are exposed to derandomization attacks, while the entropy on current 64-bit Linux systems is significantly stronger. As these numbers [25] suggest, it is therefore necessary for an attacker to leak from 8 to 10 bits (roughly, a byte) of an address before considering a brute-force attack. However, the “effective” entropy may be reduced in practice, as shown by Herlands et al. [19], by leveraging lower-entropy regions\(^3\), which, even if not directly exploitable, require the attacker to learn only a few bits of an address.
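To make the brute-force cost concrete, the following back-of-the-envelope sketch (ours, not part of the original evaluation) computes the size of the search space that remains once an attacker has learned some bits of a randomized base address; the entropy figures are taken from Figure 1.

```
def guesses_remaining(entropy_bits: int, leaked_bits: int) -> int:
    """Size of the remaining search space after leaking `leaked_bits` bits
    of a region randomized with `entropy_bits` bits of entropy."""
    return 2 ** max(entropy_bits - leaked_bits, 0)

print(guesses_remaining(8, 0))     # 32-bit mmap region, nothing leaked: 256 candidates
print(guesses_remaining(30, 22))   # 64-bit stack after leaking 22 bits: 256 candidates
print(guesses_remaining(28, 0))    # 64-bit heap, nothing leaked: ~268 million candidates
```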
To the best of our knowledge, this present work is the first approach to automatically identify memory disclosures at the bit level granularity in off-the-shelf binary applications. In summary, our contributions are the following:
\(^2\)By objects, we refer to internal constructs such as stack variables, heap buffers or functions being accessed through their addresses.
\(^3\)As of today, non-PIE (Position Independent Executables) as well as backward compatibility with 32-bit applications still expose 64-bit systems to low-entropy regions.
• We leverage binary program analysis techniques in a novel way in order to detect sensitive code paths leaking address information.
• We design a fine-grained model to identify address definitions in binary code, and to track address dependency in the arguments of output functions that external attackers may be able to observe.
• We present a prototype and evaluate it on user-space applications, a commonly used general purpose library as well as in a filesystem implementation shipped with the Linux kernel.
2 APPROACH OVERVIEW
Let us consider a vulnerable server-side authentication function. It first receives an authentication token through the network, and verifies its validity. The data structure representing the token, as shown in Figure 2, embeds a union auth within a token C structure, containing two fields of different sizes. Accidentally accessing the union using the wrong field leads to a (partial) address leak since its members have different sizes. It should be emphasized that this type of construct is commonly encountered in libraries, such as libSDL's event handling code, for instance, and that such vulnerabilities are common. In fact, similar type confusion vulnerabilities have been discovered in the past in Adobe Flash Player, in the PHP interpreter and in the Chrome browser, to mention only a few.
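Figure 2 is not reproduced here; as a rough, hypothetical analogue of the union described above, the following Python/ctypes sketch shows how reading the smaller member of a union that currently holds a pointer-sized member exposes the low bits of that pointer (the field names are illustrative and not taken from the paper's code).

```
import ctypes

class Auth(ctypes.Union):
    # Hypothetical stand-in for the paper's `union auth`: two members of different sizes
    _fields_ = [("id", ctypes.c_uint32),     # 4-byte member
                ("name", ctypes.c_char_p)]   # pointer-sized member (8 bytes on x86_64)

token = Auth()
token.name = b"alice"          # store the pointer-sized member
print(hex(token.id))           # reading the wrong member exposes the low 32 bits of the pointer
```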
Analyzing code in binary form, as shown in Figure 3, is much less intuitive than the source code version of the same program, due to the lack of type information about the data structures. Figure 3 shows the relevant basic block involving the unsafe operation. In this basic block, the address of the token is first stored in the register rax, and then used to access the memory location of the auth union. The content of this memory location is then passed as a parameter to sprintf through the register rdx. It is important to note that, at the assembly level, no distinction is made between the members of the union as both correspond to the same underlying memory location. Another aspect to consider is that the generated output likely will not fall within the range of an address, since only part of the address is leaked: this bug causes the value of a char * pointer to be interpreted as an unsigned int. Such an address leak is not obvious when simply observing the output of the program.
For additional details, the source code of this network authentication example and a more detailed description of the vulnerability are provided in Appendix 8.2.
2.1 Challenges
While it is possible from the source code version of this program to determine the types of variables and the layout of data structures in memory, this information is absent from its assembly translation. This has a direct impact on the complexity of the analysis, whether it is manual or automatic. Without further information about memory and register content, it is difficult to detect a vulnerability that leaks information.
In order to retrieve information from binary programs, our approach leverages symbolic execution, and accurately tracks expressions that depend on addresses. Symbolic execution allows us to keep track of variable expressions and constraints, and makes it possible to quantify and determine leaked information at the bit level. However, one of the intrinsic limitations of symbolic execution is the problem of exponential path explosion. Even when using techniques such as Veritesting and path prioritization, it is often infeasible to analyze large programs symbolically. As a result, avoiding path explosion while keeping an acceptable coverage of the program represents a challenge.
Our approach, in response, leverages a combination of static and symbolic program analysis techniques in a novel way in order to focus on analyzing the relevant paths of the analyzed binary program, i.e., where information about addresses may leak. More precisely, our approach involves the following analysis phases.
2.2 Analysis phases
Path selection and address identification. The first phase of our analysis automatically identifies code paths and locations of interest within the binary. During this phase, Sleak operates as follows:
1) Control-flow recovery: Sleak starts by generating a control flow graph (CFG) of the analyzed binary program, in order to recover the location of output functions, which are later used as sinks in our analysis. During this step, a static control-flow graph is built, and program paths involving output functions are identified.
2) Address identification: On each identified program path, Sleak identifies addresses by using a number of inference rules described in Section 3.3. This step identifies and marks the set of program locations involving addresses on each path.
Leak detection and address reconstruction. The second phase of our analysis leverages symbolic execution and constraint solving in order to accurately detect and determine what is leaked and to reconstruct address information from the leaked program output.
3) Detecting address dependence: Sleak determines whether the arguments of output functions are data-dependent on marked addresses. The control-flow paths leading to each sink are analyzed individually. Each selected path is analyzed through a symbolic execution engine, which generates symbolic expressions and constraints on the program’s variables as the path is executed. This step allows us to precisely characterize the leaked address, as a symbolic expression, or formula.
4) De-randomization: Once Sleak reveals the set of leaking outputs for a given program along with their formulas, an attacker is able, based on the observation of a single concrete output of the program corresponding to one of these outputs, to infer the values of the leaked bits of address by using a constraint solver, and therefore to de-randomize the base address of the corresponding object (main binary or library) within the address space of the remote process. This process is described in §4.7.
6 That is, address information used as data.
2.3 Automation, scope and objectives
Our approach to detect sensitive code paths and to generate symbolic expressions of leaked addresses is entirely automated, based on the knowledge of the binary executable image of the program to analyze. In order to de-randomize addresses, it requires, as input, the value of one instance of concrete output corresponding to one of the detected leaking paths. From this knowledge, our analysis returns a set of solution addresses corresponding to the leaked object.
The process of interacting with a remote service is outside of the scope of this work, and expected to be performed manually by a human operator. Similarly, while this approach may be leveraged in order to automate control-flow hijacking attacks (which typically require such an information leak in order to bypass ASLR), this is also outside of the scope of this work. In the present work, we focus on defeating ASLR by exposing the symbolic expression and constraints over pointer addresses based on our automated binary-level approach.
The remainder of this paper presents our approach in more detail. In Section 3, we present Sleak’s static analysis phase, which selects program paths of interest in a lightweight and scalable manner. Then, in Section 4, we introduce our symbolic execution model, along with our leak detection and address recovery mechanisms. We describe our evaluation on real-world software in Section 5, followed by a discussion of our approach in Section 6 and related work in Section 7. We finally conclude in Section 8.
3 PATH SELECTION AND ADDRESS IDENTIFICATION
As previously mentioned in Section 2, adopting a purely symbolic exploration of the target program is very likely to cause path explosion due to the large number of paths encountered by our analysis system. Furthermore, a large amount of library code is involved in commodity software, which dramatically increases the amount of code to analyze. This involves analyzing complex code paths going back and forth between the main binary and the libraries. Code paths of interest involve data dependencies between an address (whether it is hardcoded or generated dynamically) and the argument of an output function. This section presents our approach to identifying the sensitive code paths of interest on which to focus our analysis. The outcome of this is a set of program paths originating from sources and terminating in sinks.
3.1 Control-flow recovery
Sleak builds on top of standard techniques for static disassembly and control-flow recovery, as provided by [1], which it augments with novel insights and heuristics. The disassembled code is lifted to an Intermediate Representation (IR) as part of the disassembly process, and our analyses operate at this level of abstraction.
Sleak’s first analysis stage consists of recovering an interprocedural static Control Flow Graph (CFG) of the binary, along with basic, coarse information about the program state at the entry point of each node. This analysis step is to be thought of as a static pre-filter: for efficiency reasons, the recovered CFG in this step is not context sensitive, which allows us to scale our analysis to larger code bases at the cost of limited accuracy. A context-sensitive control-flow recovery would take multiple possible call-site contexts into account during the analysis, i.e., analyze each basic block considering the context of each potential caller when generating the CFG. However, such a context-sensitive analysis would also come with a considerable increase in complexity. By omitting the context, Sleak reduces the complexity by an order of magnitude and allows our analysis to scale to larger binaries. On top of this context-insensitive CFG, Sleak performs lightweight data dependency tracking within the basic blocks surrounding output functions. In these blocks, constant values that are placed in the binary by the compiler (and loaded into registers or memory as immediates), as well as the results of trivial operations, are evaluated and flagged as addresses if they match one of the inference rules presented in §3.4. In the presence of such potential addresses, Sleak tracks dependencies between registers and temporaries at the Intermediate Representation (IR) level and attempts to identify any data dependence towards the arguments of one of the output functions.
In summary, this initial analysis step is used to build a CFG and to identify constants and simple cases of address-dependent outputs of the program at a small computational cost. Based on this information, Sleak proceeds with the identification of output functions, and their call sites within the binary image of the program and its libraries.
3.2 Output functions identification
When a program depends on the code of shared libraries, its binary translation is either statically or dynamically linked to the corresponding library code. In the case of dynamically-linked binaries, output functions are, in the vast majority of cases, external to the binary and imported as part of a library. It is extremely unlikely to encounter a binary reimplementing its own output functions such as printf; therefore, we assume that all output functions are part of external shared libraries. The addresses of such library functions are exposed in the global offset table of dynamically-linked binaries, and functions are called through their respective procedure linkage table (PLT) entries. In this case, extracting function information is trivial. However, in the case of statically-linked binaries, function calls are performed directly (as opposed to calling a stub for resolution), and if a binary does not contain symbol information (i.e., it
was stripped), then no information about the location of functions is available. In this case, we perform a preliminary step of function identification, as described in Appendix 8.1.
In the remainder of this section, we assume that we are working with dynamically-linked binaries, and that program outputs rely on standard library functions. We also assume that we know the prototypes of such standard functions (i.e., we know the types of their parameters). These assumptions are reasonable, since in practice, the vast majority of programs rely on the C standard library, which provides system-call wrappers and the implementation of the most common input/output functions.
Once output functions have been identified, Sleak locates their call sites by iterating through each node of the CFG while scanning for the targets of call instructions. We refer to this analysis step as SA1. At the end of this step, all reachable, known output functions as well as their call sites have been identified. Each call site represents a potential address sink in the following analysis steps.
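As a rough illustration of SA1, the sketch below uses angr (the framework our prototype builds on, see Section 5.2) to enumerate call sites whose direct target is the PLT stub of a known output function; treat it as a simplified approximation of the real analysis, with the binary path and the function list as placeholders.

```
import angr

OUTPUT_FUNCS = {"printf", "sprintf", "puts", "write", "send"}   # example list of sinks

proj = angr.Project("./target_binary", auto_load_libs=False)    # hypothetical input binary
cfg = proj.analyses.CFGFast()

# Addresses of the PLT stubs of the output functions we care about.
plt_targets = {addr for name, addr in proj.loader.main_object.plt.items()
               if name in OUTPUT_FUNCS}

call_sites = []
for node in cfg.graph.nodes():
    if not node.size:
        continue
    block = proj.factory.block(node.addr, size=node.size)
    # A basic block ending in a direct call to one of the PLT stubs is a potential sink.
    if block.vex.jumpkind == "Ijk_Call" and plt_targets & block.vex.constant_jump_targets:
        call_sites.append(node.addr)

print([hex(a) for a in call_sites])
```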
3.3 Identifying addresses
The next step towards identifying address leakage is to identify memory locations containing addresses, which we treat as sources. In binary form, addresses can be encoded in different manners: when the address of a symbol is known at compilation time, the compiler substitutes the symbol with its concrete value, e.g., the address of a Procedure Linkage Table (PLT) stub or of a global variable. If the binary is compiled to be position independent (PIC), addresses will often be encoded as offsets from the current value of the instruction pointer (i.e., rip + offset in x86-64 assembly). In other cases, addresses may be evaluated at runtime, from simple expressions (e.g., an offset from the stack pointer) to complex expressions that are either difficult to determine statically, or not statically computable without specific knowledge about the application (e.g., functions registered at runtime).
Sleak identifies addresses based on the set of inference rules described below in § 3.4. We refer to this analysis step as SA2.
3.4 Address inference
While destinations of jump targets can easily be flagged as being addresses, the distinction between addresses and data is not obvious in other situations. Consider the following assignment, for example, along with its assembly translation:
(C)   x = (char*)&printf;
(ASM) `mov QWORD PTR [rax],0x4003e0`
While it is clear that \( rax \) corresponds to an address (as its contained value is dereferenced), \( 0x4003e0 \) is an immediate value, and it may never be used as an address in the program. We may also encounter cases where the actual value of the operand is unknown by our analysis. For example, consider the following statement:
`mov rax, QWORD PTR [rdi+0x4]`
Here, the value located in memory at \( [rdi+0x4] \) is read, and stored in the register \( rax \). Without further analysis about the context, we cannot tell whether \( [rdi+0x4] \) corresponds to an address, or to some data. In order to cope with this lack of information, we use the following inference rules to determine whether a value potentially corresponds to a valid address:
1. **Semantic information:** when analyzing dynamically-linked binaries, we can generally extract information from the binary format, such as the location of Global Offset Table (GOT), or relocation information. The GOT contains addresses of external library functions that are called from within the binary, therefore we know that any memory read from these locations will contain an address.
2. **Value range:** if a value falls into the boundaries of the `.text`, `.data`, heap or stack regions of the address space, then this value is a potential address and is flagged for further analysis (a minimal sketch of this check is given after this list).
3. **IR operations:** the set of operations available at the intermediate representation level expects values of different types. If a value is used as an address as part of an operation (e.g., a memory load or a jump instruction), then it is flagged accordingly.
4. **Return values:** the return values of libc functions or system calls, known to return pointers to memory locations, such as `malloc` or `mmap`, are tracked as part of our analysis.
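The sketch promised under rule 2 above: a minimal, simplified check of whether a concrete value falls inside one of the mapped regions of the analyzed program. The region boundaries below are hypothetical placeholders for the values recovered from the binary and from the assumed runtime layout.

```
# Hypothetical region boundaries; in practice these come from the binary's sections
# and from the assumed stack/heap layout of the analyzed process.
REGIONS = {
    ".text": (0x400000, 0x480000),
    ".data": (0x600000, 0x610000),
    "heap":  (0x01000000, 0x02000000),
    "stack": (0x7ffff0000000, 0x7fffffffffff),
}

def looks_like_address(value: int) -> bool:
    """Inference rule 2: flag a value as a potential address if it falls in a known region."""
    return any(lo <= value < hi for lo, hi in REGIONS.values())

print(looks_like_address(0x4003e0))   # True: inside .text
print(looks_like_address(42))         # False: ordinary data
```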
3.5 Dynamic resolution
In some large software components such as operating system kernels or complex libraries implementing generic programmatic constructs (e.g., parsers), dynamic behavior such as runtime binding, asynchronous method invocations and polymorphism tends to challenge static analysis, and it is sometimes necessary to provide hints to static analysis methods in order to resolve part of the control flow. For instance, filesystems in the Linux kernel register an `inode_operations` structure when the initialization routines of the related modules are executed. Predicting such behavior statically, without prior knowledge of the inner mechanisms of the kernel, is not practical.
In order to reason about such programs exhibiting a highly dynamic behavior, we rely on partial concrete execution of the code in order to initialize the program to a reasonable state prior to proceeding with our analysis. In order to do so, we leverage dynamic testcases or known well-formed input to the program and collect execution traces. From these traces, we extract the execution context at the entry points of each encountered function, which we feed as initial state to our analysis. From there, our analysis proceeds with path selection and address identification in an identical manner to what is performed in a purely static setting.
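A minimal sketch of how such a captured execution context can be fed back into the analysis, using angr's state API; the addresses and register values below are hypothetical stand-ins for what the QEMU/gdb-based trace collection described in Section 5.2 would provide.

```
import angr

proj = angr.Project("./target_binary", auto_load_libs=False)    # hypothetical binary

# Start a state at the entry point of a function observed in the dynamic trace.
state = proj.factory.blank_state(addr=0x401200)

# Replay the captured context: register values and a memory dump taken from the trace.
state.regs.rdi = 0x7f30a1c04010
state.memory.store(0x7f30a1c04010, b"\x00" * 0x40)

# Continue with (symbolic) exploration from this partially concrete state.
simgr = proj.factory.simulation_manager(state)
simgr.step()
```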
In conclusion, the first phase of our analysis performs the recovery of a control flow graph of the binary program, and identifies sensitive control-flow paths between potential sources and sinks. The next section describes the next phase of our analysis, based on symbolic execution.
4 LEAK DETECTION AND ADDRESS RECONSTRUCTION
Sleak leverages symbolic execution to accurately reason about address leaks. In this section, we describe why and how we use symbolic execution on vulnerable program paths (described in Section 3).
---
1. Depending on the architecture support for instruction pointer addressing.
and summarize the techniques involved. An overview is presented in Figure 4.
4.1 Identifying vulnerable paths
The key property of program paths leaking address information is that, within such paths, a data dependence exists in the program between a source and a sink. A source corresponds to an instruction \( i_1 \) where a location, such as an instance of a register or a variable in memory containing the value of an address, is accessed. A sink is another instruction \( i_2 \) where an argument is passed to an output function. In order to detect vulnerable paths, Sleak analyzes the existence of data dependence between the sources and sinks which have been identified during the previous analysis phase presented in §3.
We now present Sleak’s data-dependence analysis in more detail. We first describe some limitations of static data-dependence tracking techniques at the binary-level, followed by our approach to overcome such limitations in the context of our analysis.
4.2 Limitations of static techniques
The generation of data dependence graphs is a common problem of data-flow analysis, and established algorithms exist in the compiler and source-level analysis literature [5, 42], to compute such graphs based on the computation of so-called def-use chains. A data dependence graph exposes the relations between statements of a program with respect to the definition and use (also sometimes called production and consumption) of data. However, when applying these principles to binary program analysis, this process is made difficult by the lack of accuracy of control flow graph techniques with respect to memory and register content, and the complexity of memory access patterns, which directly affects the accuracy of def-use chains. Each time such an access is incorrectly resolved, it breaks the chain into seemingly independent chains, and the data-dependence is lost between the corresponding statements. Therefore, the accuracy of the underlying memory model is critical in this context.
In order to cope with these limitations, we take inspiration from previous work [29] and leverage symbolic execution as a tainting engine.
4.3 Symbolic Execution
Dynamic symbolic execution is similar to dynamic emulation of a program with one main exception: instead of concrete ones and zeroes, the data in registers and memory are symbolic variables. This applies quite well to the evaluation of address leaks, since the variables in the expressions are retained through execution. For example, the x86 instruction add eax, ebx with eax containing the symbolic variable A and ebx containing the concrete value 1 will result in the expression A + 1 being stored in eax. Later, we will use this detail of symbolic execution to identify leaked program pointers.
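A tiny sketch of this behavior using claripy, the bit-vector layer underneath angr's symbolic execution engine (we assume it is available); it mirrors the add eax, ebx case above.

```
import claripy

A = claripy.BVS("A", 32)   # symbolic content of eax
ebx = claripy.BVV(1, 32)   # concrete content of ebx

eax = A + ebx              # `add eax, ebx` produces the expression A + 1
print(eax)                 # e.g. <BV32 A_0_32 + 0x1>
```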
We utilize a standard symbolic execution engine, based on common techniques in the field of symbolic execution [11, 14, 31]. Our symbolic execution engine interprets binary code and applies actions carried out by this code onto a symbolic state containing the values in memory and registers. As the analysis comes to a conditional jump, the constraint solver is queried for possible values of the jump condition. When a condition can be both true and false (because the symbolic variable included in its expression is not constrained or loosely constrained), the state splits and the engine continues the analysis of both paths, with new constraints added to the variables involved in the comparison.
4.4 Symbolic tracking
Dynamic symbolic execution suffers from path explosion. As previously mentioned, when a jump conditional cannot be shown to be explicitly true or false, then both branches must be followed. As the analysis continues through a program, more and more of these branches are spun off (in fact, each conditional has an exponential effect on the number of states) until the analysis becomes unmanageable.
To remedy this, we use the dynamic symbolic execution engine to perform selective symbolic tracking by constraining our symbolic execution engine to analyzing only certain paths. Specifically, we only trace the previously-identified potentially leaking paths (as described in Section 4.1). Tracking these paths instead of exploring the entire binary keeps our analysis from experiencing a path explosion in unrelated code, and allows us to focus the analysis on detecting leaks. Conceptually, this is similar to the symbolic component of concolic execution, when concrete inputs are symbolically traced [18].
During symbolic tracking, we represent all pointers in the program (for example, the stack pointer, any pointers returned from allocation functions such as malloc, a code pointer into the binary program and libraries, etc.) as expressions representing the addition of a fixed offset to a symbolic variable representing the base of that region. This allows us to detect leaked pointers in the next step.
4.5 Symbolic execution as a tainting engine
The rich state information extracted from symbolic execution is used by Sleak to compute data dependence in a precise manner. More precisely, Sleak keeps track of potential variables of interest (i.e., corresponding to addresses) by making these variables symbolic, and by symbolically executing the previously identified control-flow paths starting at each program point defining a variable of interest (which is determined through static analysis) and
until a sink (output function) is reached. In this context, we leverage under-constrained symbolic execution [28] in order to start execution at these arbitrary program points. We refer to this execution step as SE1. Similarly to [29], we only explore loops once during this analysis step in order to speed up the execution while retaining taint information. In addition to this, SE1 also applies the inference rules defined in §3.4 in order to identify additional sources that were missed during static analysis.
When the execution reaches a sink, each function argument is analyzed. The way arguments are passed to output functions depends on the architecture and on the convention used by the compiler: arguments may be placed on the stack or in architecture-specific registers. Figure 5 shows an example of a function call on x86_64. For each argument passed to an output function, Sleak analyzes the corresponding symbolic expression, formulated as an Abstract Syntax Tree (AST), in order to determine any dependence on the previously marked symbolic variables corresponding to addresses.
When such a dependence exists, an address leak has been detected. For the sake of precision, the corresponding path is then re-executed symbolically without the aforementioned optimizations tailored for tainting. The result of this analysis is the symbolic expression along with the path constraints for each leaking path. We refer to this step of full symbolic execution as SE2.
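A simplified sketch of the dependence check itself: with claripy, the leaf variables of an argument's AST can be inspected for the previously marked address bases (the expressions below are made-up examples of what might end up in argument registers).

```
import claripy

STACK_BASE = claripy.BVS("STACK_BASE", 64)       # marked symbolic base for the stack region
user_input = claripy.BVS("user_input", 64)

# Hypothetical expressions found in two argument registers at a sink:
arg0 = (STACK_BASE + 8) ^ 0xff                   # derived from an address: leaks
arg1 = user_input * 3                            # ordinary data: does not leak

def leaks_address(expr) -> bool:
    # claripy records the names of all symbolic leaves of `expr` in `expr.variables`
    return any(name.startswith("STACK_BASE") for name in expr.variables)

print(leaks_address(arg0), leaks_address(arg1))  # True False
```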
From there, the process of reconstructing the original address based on the output is presented in Sections 4.6 and 4.7.
4.6 Recovering Address Information
Each path that is symbolically traced terminates at an output function. At this point, Sleak analyzes the final state of that path and checks the content of the output for the presence of symbolic variables corresponding to addresses. Let us illustrate this with an example. Consider the code in Figure 6.
```
// C code
int x = 1;
printf("X is %d\n", &x);

// ASM translation
// MOV [RSP+8], x
mov rax, [rbp+8]    ; arg 1
mov rsi, rax        ; arg 2
mov rdi, 0x40095d   ; arg 3
call 0x4005b0       ; printf@PLT
```
Figure 5: Calling an output function
In this example, the bug is introduced by the incorrect referencing of \( x \): a pointer to it, rather than its value, is passed to printf. In the assembly code, this manifests as a lea (Load Effective Address) instruction, instead of the mov instruction that would be required to pass \( x \) by value. The lea instruction will, in this case, move \( \text{rsp}+8 \) into the \( \text{rsi} \) register to pass it as an argument to printf. Since Sleak initializes the stack pointer to the symbolic value \( \text{STACK\_BASE} \) at the beginning of the trace, \( \text{rsi} \) will contain some offset of that variable as well. For the sake of the example, let us suppose that the expression in \( \text{rsi} \) will be \( \text{STACK\_BASE} - 24 \).
When Sleak detects that an expression that depends on one of the base addresses is passed to an output function, it analyzes it to identify what parts of the base address can be recovered. In this simple example, it is a trivial matter of subtracting 24 from \( \text{rsi} \) to recover \( \text{STACK\_BASE} \) and defeat ASLR.
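For this simple case, the final constraint-solving step is a one-liner; the following sketch uses Z3 directly to recover STACK_BASE from the expression STACK_BASE - 24, with a hypothetical observed output value.

```
from z3 import BitVec, BitVecVal, Solver, sat

stack_base = BitVec("STACK_BASE", 64)
leaked_expr = stack_base - 24                     # expression Sleak attributes to the leaked argument
observed = BitVecVal(0x7ffd1a2b3c40, 64)          # hypothetical value read from the program's output

s = Solver()
s.add(leaked_expr == observed)
assert s.check() == sat
print(hex(s.model()[stack_base].as_long()))       # observed + 24: the derandomized stack base
```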
4.7 Addressing partial leaks
Not all arithmetic operations are directly reversible. While some operations such as adding/subtracting a constant or XORing a value with a constant are reversible, a number of other operations are not. In these cases, bits of the initial value are lost, which translates to multiple, possibly many solutions when trying to solve constraints on these expressions. Sleak leverages constraint solving to support a wide range of possible transformations on the pointer, including complex and/or irreversible arithmetic operations. By using constraint solving, our approach exercises the space of possible solutions, which constitutes the set of brute-force candidates for an attacker to use. Consider, for instance, the case of division on fixed-sized integers. This operation is not directly reversible. Let us assume that a program outputs an expression out as being \( \text{out} = x/4 \). When executing the program, if one observes that the output is 42, it is possible to obtain possible values for \( x \) using a constraint solver, e.g., Z3, as follows:
```
from z3 import BitVec, solve
x, out = BitVec("x", 64), BitVec("out", 64)
solve(out == 42, out == x / 4)
```
Which will yield the solutions: \( (x = 168, \text{out} = 42) \), \( (x = 169, \text{out} = 42) \), \( (x = 170, \text{out} = 42) \), \( (x = 171, \text{out} = 42) \).
By leveraging constraint solving, Sleak is thereby able to automatically recover address information even in the case of subtle instances of partial pointer leaks (e.g., arithmetic transformations over pointer values leading to leak only a few bytes), based on the knowledge of a concrete output value of the target running program.
4.8 Concolic tracing
Sleak supports concolic tracing, that is, the ability to feed the symbolic engine with concrete inputs obtained from the environment of a dynamic execution trace. Our concolic tracing module takes, as input, both symbolic data and concrete data obtained from the input state of the augmented CFG presented in Section 3.
At any point in the trace, the state of the program (including all of its memory and registers contents) can be obtained, and fed into our symbolic execution engine. This ability to switch from dynamic (concrete) execution to symbolic execution allows us to analyze code paths using a hybrid state composed of concrete and symbolic data. Symbolic data is introduced by the following three operations: 1) reading an address depending on a symbolic base, as presented in Figure 6, 2) reading unconstrained data from the file system, the network, or any user input, 3) unsupported system calls.
Since we cannot reason about such input values, these are represented as unconstrained symbolic variables at the time they are read from user input.
5 EVALUATION
Our evaluation of Sleak comprises a set of userspace applications and services, a complex userspace library, and a Linux kernel filesystem. In more detail, we analyzed:
- 80 binaries from Capture The Flag (CTF) competitions including Defcon’s final CTF and its qualifying events from years 2012 to 2018.
- libXSLT, a library specialized in the transformation of XML documents, which is part of many common software applications including Firefox and Chrome. The extensive size and the complexity of this library made it a good candidate for evaluating our system.
- The overlayfs filesystem from the Linux kernel (used by Docker virtualization containers).
5.1 Ground truth data
We compared our results against (1) CTF writeups and (2) existing vulnerabilities published in the Common Vulnerabilities and Exposures database available from NIST, as well as from manual verification of source and binary code, which motivated our choice for open-source projects in our evaluation.
5.1.1 CTF binaries. We gathered 80 userspace services from prior capture the flag competitions. Since an information leak is a requirement for successful exploitation in the presence of address space layout randomization, 4 of these services are vulnerable to information leakage, where a pointer address is leaked. We collected ground truth data from CTF writeups and manual reverse engineering.
5.1.2 libXSLT. Earlier versions of the libXSLT library were vulnerable to an information disclosure vulnerability, reported in CVE-2011-1202, and fixed since then. This bug was introduced 10 years earlier in commit ad1ac26f154e4ebbeb55263a2601b0e3a21e (line 3) and the name of the value copied to the local stack variable at rbp+0x4. After some error checking (omitted from Figure 8 for brevity), this value is then exposed to userland by invoking printf().
5.2 Experimental setup
Our system builds on top of the angr program analysis framework, which we extended (1500 lines of Python) with custom analyses and heuristics to detect information leakage vulnerabilities. Our dynamic trace collection mechanism is a custom implementation. We made light modifications to the Qemu emulator and leveraged its gdb stub to communicate with our analysis platform and to dump the memory of the stack, the heap, and the global data areas.
Our analysis runs on stripped binary executables. Therefore, in spite of the presence of open-source code in our evaluation dataset, Sleak operates identically regardless of whether the underlying code is open-source or closed-source.
For each binary, the number of basic blocks in the control-flow graph, the number of analyzed functions, and the number of marked sinks are logged. In the case of libXSLT and Overlayfs, we also leverage test cases to collect dynamic traces. For libXSLT, our dynamic phase consists of executing the test cases which ship with the library. For the Linux kernel filesystem, it consists of extracting a large archive containing the Linux kernel source tree while collecting dynamic traces. All our experiments were performed on a Dell Precision Tower 5810 with 6 Xeon E5-1650 v4 @ 3.60GHz CPUs and 64GB of memory. See Section 6.1 for performance information.
5.3 Analysis results
We summarize our results (i.e., we only report binaries for which a leak exists or is reported by our system) in Table 1, where we compare against the ground truth, represented in the last column, labelled GT. Sleak successfully detects all instances of leaks present in our dataset, along with two additional reports which correspond to false positives, i.e., where no leak actually exists in the corresponding binaries. After investigation, we were able to determine that these false positives are caused by intentional stack manipulations attempting to obfuscate program behavior, which do not follow standard practices (i.e., as found in benign programs compiled with standard compilers). In all cases, Sleak returns the symbolic expression of the leaked variable along with its path constraints. From this knowledge, an attacker who has observed a concrete output of the program can easily leverage constraint solving, as described in §4.7, in order to reveal the solutions corresponding to the set of possible leaked address values (which corresponds to a single value in cases where a vulnerability exposes an entire address), and therefore bypass ASLR (i.e., a single address is sufficient to recover the base address of a loaded program or library).
5.3.1 libXSLT
Sleak detects this vulnerability in two different settings involving two different inference rules: 1) in a purely static setting, without any knowledge of the dynamic behavior of the library, and 2) by starting the analysis from a concrete state, as described in §3.5. In the following, we present the results for both.
(1) Static detection
In a purely static setting, Sleak identifies 27 potential sinks during its first analysis phase (SA1), called from 7 distinct functions out of 505, as represented in Table 1, including the vulnerable call to sprintf. In terms of address detection (SA2), the GenerateId function does not present any information which would enable our analysis to determine that one of the parameters of sprintf is a pointer. The reason for this is that GenerateId gets this pointer from its parameters, whose types are unknown to our analysis. However, during the SE2 analysis phase, while the symbolic execution engine explores multiple paths of the function, the analysis hits an IR operation corresponding to a dereference (i.e., inference rule #3 from §3.4) in another function call happening in a branch parallel to the branch performing the vulnerable sprintf call. The call in question invokes the function int xmlXPathCmpNodes(), which dereferences the same xmlNodePtr pointer. Sleak propagates this information back to the uppermost branching point, one function higher in the call tree, and infers that the parameter of sprintf is indeed a pointer. This ability to propagate address type information across branches is powerful, and, combined with the address inference rules, allows us to reason about complex code.
(2) Static detection augmented with traces
When neither the analyzed code paths nor their parallel branches expose relevant IR operations, it may not be possible to infer address information from the code statically. Therefore, Sleak also leverages execution traces, as presented in Section 3.5. By doing so, Sleak obtains a concrete call context for each function invoked in the execution trace, which is then used as part of our static model. The developers of libXSLT distribute the library along with test cases, which we leverage as input data in order to generate dynamic traces. Our system successfully detects the vulnerability in the basic block represented in Figure 7. The value in register rbp is identified as a heap variable by our analysis, and the call to sprintf as a dangerous use of an address. Sleak infers the presence of an address due to inference rule #2 presented in §3.4.
Symbolic expression of leaked data
Regardless of whether a static or dynamic detection method is used, when executing the detected path symbolically as part of SE2, at the beginning of the first instruction, our symbolic execution engine represents the content of register rbp as an unconstrained symbolic variable of the form heap_addr_uid, where uid is a unique identifier. After executing instruction 3, the register rsi contains the format string id%ld. This represents the first argument to the call to sprintf. When reaching the call to sprintf, Sleak parses the format string in register rsi, and determines that the value of the next argument can be fetched directly from register rdx without dereferencing a pointer, since %ld expects a long integer. At this point, the value contained in register rdx is represented by our symbolic engine by an expression describing operations on a symbolic heap pointer. This expression is represented internally in terms of bit-vector operations by our symbolic engine, and it is equivalent to a division of the symbolic heap variable by sizeof(xmlNode). Since integer division is not a reversible operation, multiple solutions are possible (i.e., it is a partial leak). As a result, Sleak outputs the symbolic expression of the leaked address along with its associated constraints.
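To illustrate the brute-force space implied by this partial leak, the sketch below enumerates a few candidate heap addresses from a hypothetical observed output, assuming sizeof(xmlNode) is 120 bytes on the analyzed build (both the observed value and the structure size are assumptions made for the example).

```
from z3 import BitVec, BitVecVal, Solver, UDiv, sat

heap_addr = BitVec("heap_addr", 64)
observed = BitVecVal(0x2491f4, 64)    # hypothetical value printed by the "id%ld" output
node_size = BitVecVal(120, 64)        # assumption: sizeof(xmlNode) == 120 on this build

s = Solver()
s.add(UDiv(heap_addr, node_size) == observed)

candidates = []
while s.check() == sat and len(candidates) < 5:
    val = s.model()[heap_addr].as_long()
    candidates.append(val)
    s.add(heap_addr != val)           # block this model and ask the solver for another one

# 120 candidates exist in total (observed*120 .. observed*120 + 119)
print(sorted(hex(c) for c in candidates))
```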
Challenge | CFG nodes | Functions | Sinks | Leak | GT
CTF binaries | 72 | 1 | 3 | ✓ | ✓
a5afebd29d5dc867e6d507d78853c691 | 496 | 16 | 11 | ✓ | x
defcon_16_heapsfun4u | 200 | 5 | 1 | ✓ | ✓
ex_pz | 91 | 2 | 3 | ✓ | ✓
pwn1 | 318 | 1 | 1 | ✓ | x
int3rupted | 327 | 6 | 4 | ✓ | ✓
libXSLT | 76842 | 505 | 27 | ✓ | ✓
Overlayfs | 1981 | 191 | 27 | ✓ | ✓
Table 1: Analysis results (summarized)
---
13 Following the calling convention of the ELF format on x86_64.
Static analysis alone does not detect this vulnerability, but our approach of static detection augmented with traces is effective, following the same steps as the analysis of libXSLT described earlier. When reaching the call to printk at line 15 of Figure 8, our symbolic engine evaluates the content of register rsi to an unconstrained symbolic variable of the form heap_addr_uid, corresponding to the leakage of a full heap address. This expression indicates that the output value corresponds to an entire address.
6 DISCUSSION
6.1 Performance and scalability
In terms of scalability, a potential bottleneck of any approach relying on symbolic execution is peak memory usage. This is why Sleak carefully filters the set of paths to analyze based on static analysis. During our experiments, the peak memory consumption was below 20GB at all times, and the longest symbolic path was executed in under 10 minutes. The overall analysis time was under 80 minutes for the CTF binaries, and under 20 hours for libXSLT and the kernel filesystem together. We did not reach any memory bottleneck, even on kernel code, which demonstrates the effectiveness of our filtering approach.
6.2 Limitations
Sleak and its implementation rely on state-of-the-art static analysis and symbolic execution techniques. Despite this, our approach is subject to code coverage limitations and, in some cases, to state explosion.
Code coverage: as discussed in Sections 3 and 4, the coverage of the static analysis techniques that we presented has some limitations. Due to the inherent difficulties of statically recovering a full control-flow graph, some parts of the analyzed binary application may not be reachable by Sleak, which potentially yields a number of false negatives. We mitigate this problem by leveraging dynamic resolution, as presented in Section 3. As a result, our approach provides a trade-off between coverage and accuracy.
State explosion: large code paths sometimes remain to be symbolically analyzed, even after the first phase of our analysis. This happens in particular when tracked heap pointers are initialized early, and used or leaked later in the program, after executing a long code path. Complex loops can also cause state explosion, regardless of the size of the analyzed paths. We partially mitigate this issue by leveraging veritesting and path prioritization techniques.
Implementation: empirically, while the implementation of our proof-of-concept is able to successfully analyze real-world binary applications, our modeling of the environment (e.g., system calls) as well as of some complex standard library functions (e.g., string functions such as strcmp) is neither comprehensive nor completely accurate, which may impact our coverage in some cases (e.g., by introducing incorrect constraints on some symbolic variables, which may lead to unsatisfiable symbolic states). This problem is not specific to our approach, and affects binary analysis approaches in general.
7 RELATED WORK
Information leakage. In the last decade, researchers have proposed several approaches to detect information leaks from kernel-space towards user-space at the source code level. Peiro et al. recently proposed an approach based on static analysis for detecting kernel stack-based information leaks [27]. The proposed approach is able to detect particular instances of information leaks (such as leaks caused by missed memory initializations and missing checks on user reads). The authors found five new vulnerabilities in the Linux kernel by analyzing its source code with their tool. However, one of the limitations of this approach is that the analysis is limited to single functions (i.e., inter-function analysis is not supported).
Johnson et al. [22] previously introduced a pointer bug detection model based on type qualifier inference. The authors extended the open-source tool CQUAL [2] for this purpose. This approach requires manual annotations of functions, such as system calls accepting user arguments, before the analysis can be performed. While such source-level approaches address a problem similar to the focus of our study, they are not applicable as-is at the binary level, due mainly to the lack of type information in the disassembly.
On the information theory side, past research focused on quantitative information flow [6, 8, 38], as initially proposed by Denning [30], where the objective is to measure the amount of secret information leaked by a program, by observing its outputs. In particular, Backes et al. [8] proposed a model based on equivalence relations to characterize partial information flow. This model is used to identify which information is leaked and provide a quantitative interpretation of the leaked variables. While this approach is generic and may be used to reason about information leakage of any sort, it does not focus on individual program runs, but instead generalizes the behavior of a program. In comparison to our approach, this model lacks information about the mapping between actually leaked bits, and their origin within the program’s memory.
In [32], Seibert et al. present a model for remote side channel attacks that allow attackers to exploit memory corruption vulnerabilities even in the presence of code diversification. The authors claim that the assumptions behind code diversity are broken, due to the fact that executing code leaks information about the code itself, which may allow the attacker, under certain circumstances, to recover the location of code gadgets. While this model achieves goals similar to ours, this requires the attacker to actively modify the state of the program, by overwriting data or crafting specific inputs, where our approach is passive and relies solely on the observation of the outputs of the program.
Type casting verification. Existing approaches to detect information leakage either focus on out-of-bound reads, type confusion or use-after-free using dynamic approaches or require source code [16, 21, 24, 26, 33]. In particular, Hextype leverages source-code analysis along with compiler-level techniques in order to replace static checks by runtime checks. Similarly, EffectiveSan [16] enforces type and memory safety in C and C++ programs by using a combination of low-fat pointers, type metadata and type/bounds check instrumentation.
However, as of today, little attention has been paid in the research community to the detection of indirect information leaks in binary software.
Binary program analysis. At the binary level, a number of approaches based on symbolic execution have been proposed to detect memory corruption vulnerabilities in off-the-shelf applications [31, 39, 41], to analyze firmware [37, 44] in order to detect backdoors and logic bugs, and to analyze drivers [13, 23] for reverse engineering purposes and for detecting undesired behavior. Among these existing binary-level approaches, the BORG [26] focused on detecting buffer overreads in binary software using guided symbolic execution. Our approach draws on similar concepts, but focuses on addressing different challenges.
8 CONCLUSION
We presented Sleak, a system designed to recover information about the memory layout of applications, even in the presence of address space layout randomization (ASLR). Our system analyzes applications at the binary level, and detects information disclosure vulnerabilities, regardless of how many bits of pointer addresses are leaked. Sleak leverages symbolic execution to craft precise symbolic expressions representing the addresses of known objects of the target application, such as stack or heap variables, or function pointers. As a result, even in the case of partial information disclosure, Sleak is able to recover useful information about the leaked address, and defeat ASLR.
ACKNOWLEDGEMENTS
We would like to thank the anonymous reviewers for their valuable comments and input to improve our paper. This material is based on research sponsored by DARPA under agreement numbers HR001118C0060, FA8750-19-C-0003 and the NSF under Award number CNS-1704253. The U.S. Government is authorized to reproduce and distribute reprints for Governmental purposes notwithstanding any copyright notation thereon. The views and conclusions con-
8.1 Function identification
Binaries not exposing symbol information (i.e., stripped binaries) do not provide information about the location of functions, which prevents Sleak from directly identifying output functions. However, the general problem of function identification in binaries remains an open research challenge, and the accuracy of existing solutions is limited [9, 20]. Our approach to function identification proceeds in different steps, and focuses on the particular case of detecting output functions. We first identify function boundaries by scanning the binary for function prologues and epilogues. Once the location of functions has been identified, we use the following heuristic to recognize output functions: if one of the identified functions invokes the write system call, and if one of the function’s arguments is passed to this system call, we consider it an output function. This allows us to detect the majority of output functions, given the fact that most of them are, in practice, implemented as writes to an underlying file descriptor.
8.2 Network authentication example
In this example, the value of a char * pointer is interpreted as being an int value. The amount of leaked bits depends on the architecture. Looking at the C code in Figure 9, it is possible for an analyst to manually determine that the unguarded call to sprintf may leak an address. However, analyzing code in binary form, as shown in Figure 3, is much less intuitive than the source code version of the same program, due to the lack of type information about the data structures.
9 GROUND TRUTH/SOURCE CODE
9.1 libXSLT
Let us consider the code in Figure 10. This code snippet shows the vulnerable version of the function providing the XSLT transformation generate-id(). For the sake of brevity, we only represent the parts of the function that are relevant to the aforementioned vulnerability in Figure 10. The purpose of this function is to generate a unique identifier for a given XML node. In this function, the variable val represents the unique identifier. In order to generate a unique value, the value of cur is assigned to val, which is then divided by sizeof(xmlNode). Note that while cur is a stack variable, its content, ctxt->context->node, is the address of a heap object. The identifier contained in val is then appended to a string, whose value is later exposed in the output document. Exploiting this bug reveals a heap location of the process; on x86_64 machines, this corresponds to a leak of 32 bits out of a 64-bit address.
Many applications rely on this library: among others, Chrome, Safari and Firefox were affected. This vulnerability was introduced by an intentional, but unsafe, pointer manipulation. Without security in mind, an address may seem like a good candidate, for its uniqueness, to generate an identifier. Looking at the comments in the vulnerable version of the code, we can also confirm that the programmer intentionally used the address of the XML node to compute a unique identifier. However, by multiplying the output value by sizeof(xmlNode), it is possible to recover the leaked heap address.
```
void GenerateId(ContextPtr ctxt, int nargs)
{
xmlNodePtr cur = NULL;
unsigned long val;
xmlChar str[20];
cur = ctxt->context->node;
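/* cur holds a heap pointer; the cast below turns it into the value that leaks */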
val = (unsigned long)(char*)cur;
val /= sizeof(xmlNode);
sprintf((char*)str, "id%d", val);
}
```
Figure 10: Simplified version of CVE-2011-1202 in libXSLT.
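The attacker-side arithmetic implied by the text above can be sketched as follows. The struct size and the leaked string in this snippet are assumptions for illustration, not values taken from the paper; the actual sizeof(xmlNode) depends on the libxml2 build.

```
#include <stdio.h>
#include <stdlib.h>

/*
 * Sketch of the recovery step: the output document contains "id<val>",
 * where val is the node address divided by sizeof(xmlNode).  Multiplying
 * by the same struct size recovers the leaked portion of the heap
 * address (the "%d" format truncates val to 32 bits on x86_64, and the
 * division discards the low-order bits).
 */
#define SIZEOF_XMLNODE 120UL   /* typical for libxml2 on x86_64; build-dependent */

int main(void)
{
    const char *leaked = "id35791394";                    /* hypothetical output */
    unsigned long val  = strtoul(leaked + 2, NULL, 10);   /* skip the "id" prefix */
    unsigned long addr = val * SIZEOF_XMLNODE;            /* approximate node address */

    printf("recovered (partial) heap address: 0x%lx\n", addr);
    return 0;
}
```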
9.2 OverlayFS
In recent years, because of the escalation of user-space security mechanisms and protections against memory corruption attacks, it has become more and more difficult for attackers to reliably execute their exploits on remote systems. In contrast, the Linux kernel started to implement support for address space layout randomization later, and even when it is enabled, it provides less entropy than its user-space counterpart. As a result, attackers’ attention partially shifted towards kernel exploits, which have become more common.
```
struct dentry *ovl_lookup_temp(
struct dentry *workdir,
struct dentry *dentry)
{
struct dentry *temp;
char name[20];
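/* the kernel address of dentry is embedded in a name visible to userspace */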
snprintf(name, sizeof(name), "#%lx", (unsigned long)dentry);
temp = lookup_one_len(name, workdir, strlen(name));
if (!IS_ERR(temp) && temp->d_inode)
{
pr_err("overlayfs: workdir/%s already exists\n", name);
dput(temp);
temp = ERR_PTR(-EIO);
}
return temp;
}
```
Figure 11: Vulnerable function in the overlayfs filesystem.
9.2.1 Pointer disclosure. The implementation of the overlayfs filesystem in the Linux kernel (up to version 4.4.5) leaks the kernel memory address of a struct dentry pointer to userspace, as shown in Figure 11. This pointer address was meant to be used as a unique identifier (and to that extent, this bug is similar to the bug in libXSLT that we presented earlier).
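As with the libXSLT case, turning the disclosed value back into a pointer is trivial once the output format is known. The sketch below is a hypothetical userspace illustration (the file name shown is made up): the temporary name embeds the dentry pointer in hexadecimal, so parsing the digits after the '#' recovers the full kernel address.

```
#include <stdio.h>
#include <stdlib.h>

/*
 * Illustration only: ovl_lookup_temp() names its temporary dentry
 * "#%lx" of the dentry pointer, so an observer of that name recovers
 * the kernel heap address by parsing the hex digits after '#'.
 * The example name below is hypothetical.
 */
int main(void)
{
    const char *leaked_name = "#ffff8800a1b2c3d0";
    unsigned long dentry_addr = strtoul(leaked_name + 1, NULL, 16);

    printf("leaked struct dentry at 0x%lx\n", dentry_addr);
    return 0;
}
```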
Virtually Eliminating Router Bugs
Eric Keller* Minlan Yu* Matthew Caesar† Jennifer Rexford*
* Princeton University, Princeton, NJ, USA † UIUC, Urbana, IL, USA
ekeller@princeton.edu {minlanyu, jrex}@cs.princeton.edu caesar@cs.uiuc.edu
ABSTRACT
Software bugs in routers lead to network outages, security vulnerabilities, and other unexpected behavior. Rather than simply crashing the router, bugs can violate protocol semantics, rendering traditional failure detection and recovery techniques ineffective. Handling router bugs is an increasingly important problem as new applications demand higher availability, and networks become better at dealing with traditional failures. In this paper, we tailor software and data diversity (SDD) to the unique properties of routing protocols, so as to avoid buggy behavior at run time. Our bug-tolerant router executes multiple diverse instances of routing software, and uses voting to determine the output to publish to the forwarding table, or to advertise to neighbors. We design and implement a router hypervisor that makes this parallelism transparent to other routers, handles fault detection and booting of new router instances, and performs voting in the presence of routing-protocol dynamics, without needing to modify software of the diverse instances. Experiments with BGP message traces and open-source software running on our Linux-based router hypervisor demonstrate that our solution scales to large networks and efficiently masks buggy behavior.
Categories and Subject Descriptors
C.2.6 [Computer-Communication Networks]: Internetworking—Routers; C.4 [Performance of Systems]: [Fault tolerance, Reliability, availability and serviceability]
General Terms
Design, Reliability
Keywords
Routers, Bugs, Reliability, BGP
1. INTRODUCTION
The Internet is an extremely large and complicated distributed system. Selecting routes involves computations across millions of routers spread over vast distances, multiple routing protocols, and highly customizable routing policies. Most of the complexity in Internet routing exists in protocols implemented as software running on routers. These routers typically run an operating system, and a collection of protocol daemons which implement the various tasks associated with protocol operation. Like any complex software, routing software is prone to implementation errors, or bugs.
1.1 Challenges in dealing with router bugs
The fact that bugs can produce incorrect and unpredictable behavior, coupled with the mission-critical nature of Internet routers, can produce disastrous results. This can be seen from the recent spate of high-profile vulnerabilities, outages, and huge spikes in global routing instability [35, 33, 27, 21, 13, 31]. Making matters worse, ISPs often run the same protocols and use equipment from the same vendor worldwide, increasing the probability that a bug causes simultaneous failures or a network-wide crash. While automated systems can prevent misconfigurations from occurring [23, 24], these techniques do not work for router bugs, and in fact the state-of-the-art solution today for dealing with router bugs involves heavy manual labor—testing, debugging, and fixing code. Unfortunately operators must wait for vendors to implement and release a patch for the bug, or find an intermediate workaround on their own, leaving their networks vulnerable in the meantime.
Worse still, bugs are often discovered only after they cause serious outages. While there has been work on dealing with failures in networks [35, 33, 27], router bugs differ from traditional “fail-stop” failures (failures that cause the router to halt in some easily-detectable way) in that they violate the semantics of protocol operation. Hence a router can keep running, but behave incorrectly—by advertising incorrect information in routing updates, or by distributing the wrong forwarding-table entries to the data plane, which can trigger persistent loops, oscillations, packet loss, session failure, as well as new kinds of anomalies that can’t happen in correctly behaving protocols. This fact, coupled with the high complexity and distributed nature of Internet routing, makes router bugs notoriously difficult to detect, localize, and contain.
As networks become better at dealing with traditional failures, and as systems that automate configuration become more widely deployed, we expect bugs to become a major roadblock in improving network availability. While we acknowledge the long-standing debate in the software engineering community on whether it is possible to completely prevent software errors, we believe unforeseen interactions across protocols, the potential to misinterpret RFCs, the increasing functionality of Internet routing, and the ossification of legacy code and protocols will make router bugs a “fact-of-life” for the foreseeable future and we proceed under that assumption.
1.2 The case for diverse replication in routers
Unlike fail-stop failures, router bugs can cause Byzantine faults, i.e., they cause routers to not only behave incorrectly, but violate protocol specification. Hence, we are forced to take a somewhat heavy-handed approach in dealing with them (yet as we will find, one that appears to be necessary, and one that our results indicate is practical). In particular, our design uses a simple replication-based approach: instead of running one instance of routing software, our design uses a router hypervisor\(^1\) to run multiple virtual instances of routing software in parallel. The instances are made diverse to decrease the likelihood they all simultaneously fail due to a bug. We leverage data diversity (to manipulate the inputs to the router, for example by jittering arrival time of updates, or changing the layout of the executable in memory) and software diversity (given multiple implementations of routing protocols already exist, running several of them in parallel). We then rely on Byzantine-fault tolerant (BFT) techniques to select the “correct” route to send to the forwarding table (FIB), or advertise to a neighbor.\(^2\)
The use of BFT combined with diverse replication (running multiple diverse instances) has proven to be a great success in the context of traditional software, for example in terms of building robust operating systems and runtime environments [18, 28, 36, 44, 12]. These techniques are widely used since heterogeneous replicas are unlikely to share the same set of bugs [18, 28, 44]. In this paper, we adapt diverse replication to build router software that is tolerant of bugs.
A common objection to this approach is its performance overhead, as running multiple replicas requires more processing capacity. However, BFT-based techniques provide a simple (and low-cost) way to leverage the increasingly parallel nature of multicore router processors to improve availability without requiring changes to router code. Network operators also commonly run separate hardware instances for resilience, across multiple network paths (e.g., multihoming), or multiple routers (e.g., VRRP [27]). Some vendors also protect against fail-stop failures by running a hot-standby redundant control plane either on multiple blades within a single router or even on a single processor with the use of virtual machines [19], in which case little or no additional router resources are required. Since router workloads have long periods with low load [9], redundant copies may be run during idle cycles. Recent breakthroughs vastly reduce compute overhead [45] and memory usage [26], by skipping redundancy across instances.
1.3 Designing a Bug-Tolerant Router
In this paper, we describe how to eliminate router bugs “virtually” (with use of virtualization technologies). We design a bug-tolerant router (BTR), which masks buggy behavior, and avoids letting it affect correctness of the network layer, by applying software and data diversity to routing. Doing so, however, presents new challenges that are not present in traditional software. For example, (i) wide-area routing protocols undergo a rich array of dynamics, and hence we develop BFT-based techniques that react quickly to buggy behavior without over-reacting to transient inconsistencies arising from routing convergence, and (ii) our design must interoperate with existing routers, and not require extra configuration efforts from operators, and hence we develop a router hypervisor that masks parallelism and churn (e.g., killing a faulty instance and bootstrapping a new instance).
At the same time we leverage new opportunities made available by the nature of routing to build custom solutions and extend techniques previously developed for traditional software. For example, (i) routers are typically built in a modular fashion with well-defined interfaces, allowing us to adapt BFT with relatively low complexity, and implement it in the hypervisor with just a few hundred lines of code, (ii) using mechanisms that change transient behavior without changing steady-state outcomes are acceptable in routing, which we leverage to achieve diversity across instances, and (iii) routing has limited dependence on past history, as the effects of a bad FIB update or BGP message can be undone simply by overwriting the FIB or announcing a new route, which we leverage to speed reaction by selecting a route early, when only a subset of instances have responded, and updating the route as more instances finish computing. Moreover, router outputs are independent of the precise ordering and timing of updates, which simplifies recovery and bootstrapping new instances.
The next section discusses how diversity can be achieved and how effective it is, followed by a description of our design (Section 3) and implementation (Section 4). We then give performance results in Section 5, consider possible deployment scenarios in Section 6, contrast with related work in Section 7, and conclude in Section 8.
2. SOFTWARE AND DATA DIVERSITY IN ROUTERS
The ability to achieve diverse instances is essential for our bug-tolerant router architecture. Additionally, for performance reasons, it is important that the number of instances that need to be run concurrently is minimal. Fortunately, the nature of routing and the current state of routing software lead to a situation where we are able to achieve enough diversity and that it is effective enough that only a small number of instances are needed (e.g., 3–5, as discussed below). In this section we discuss the various types of diversity mechanisms, in what deployment scenario they are likely to be used, and how effective they can be in avoiding bugs.
Unfortunately, directly evaluating the benefits of diversity across large numbers of bugs is extremely challenging, as it requires substantial manual labor to reproduce bugs. Hence, to gain some rough insights, we studied the bug reports from the XORP and Quagga Bugzilla databases [8, 5], taxonomized each into what type of diversity would likely avoid the bug, and experimented with a small subset, some of which are described in Table 1.\(^3\)
### 2.1 Diversity in the software environment
**Code base diversity**: The most effective, and commonly thought of, type of diversity is where the routing software comes from different code bases. While often dismissed as being impractical because a company would never deploy multiple teams to develop the same software, we argue that diverse software bases are already available and that router vendors do not need to start from scratch and deploy multiple teams.
First, consider that there are already several open-source router software packages available (e.g., XORP, Quagga, BIRD). Their availability has spawned the formation of a new type of router vendor based on building a router around open-source software [7, 8].
Additionally, the traditional (closed-source) vendors can make use of open-source software, something they have done in the past (e.g., Cisco IOS is based on BSD Unix), and hence may run existing open-source software as a “fallback” in case their main routing code crashes or begins behaving improperly. Router vendors that do not wish to use open-source software have other alternatives for code diversity, for example, router vendors commonly maintain code acquired from the purchase of other companies [38].
As a final possibility, consider that ISPs often deploy routers from multiple vendors. While it is possible to run our bug-tolerant router across physical instances, it is most practical to run in a single, virtualized, device. Even without access to the source code, this is still a possibility with the use of publicly available router emulators [1, 3]. This way, network operators can run commercial code along with our hypervisor directly on routers or server infrastructure without direct support from vendors. While intellectual property restrictions arising from their intense competition makes vendors reticent to share source code with one another, this also makes it likely that different code bases from different vendors are unlikely to share code (and hence unlikely to share bugs).
We base our claim that this is the most effective approach partially from previous results which found that software implementations written by different programmers are unlikely to share the vast majority of implementation errors in code [30]. This result can be clearly seen in two popular open-source router software packages: Quagga and XORP differ in terms of update processing (timer-driven vs. event-driven), programming language (C vs. C++), and configuration language, leading to different sorts of bugs, which are triggered on differing inputs. As such, code-base diversity is very effective and requires only three instances to be run concurrently.
However, effectively evaluating this is challenging, as bug reports typically do not contain information about whether inputs triggering the bug would cause other code bases to fail. Hence we only performed a simple sanity-check: we selected 9 bugs from the XORP Bugzilla database, determined the router inputs which triggered the bug, verified that the bug occurred in the appropriate branch of XORP code, and then replayed the same inputs to Quagga to see if it would simultaneously fail. We then repeated this process to see if Quagga’s bugs existed in XORP. In this small check, we did not find any cases where a bug in one code base existed in the other, mirroring the previous findings.
**Version diversity**: Another source of diversity lies in the different versions of the same router software itself. One main reason for releasing a new version of software is to fix bugs. Unfortunately, operators are hesitant to upgrade to the latest version until it has been well tested, as it is unknown whether their particular configuration, which has worked so far (possibly by chance), will work in the latest version. This hesitation comes with good reason, as often times when fixing bugs or adding features, new bugs are introduced into code that was previously working (i.e., not just in new features). This can be seen in some of the example bugs described in Table 1. With our bug-tolerant router, we can capitalize on this diversity.
For router vendors that fully rely on open-source software, version diversity will add little over the effectiveness of code-base diversity (assuming they use routers from three code bases). Instead, version diversity makes the most sense for router vendors that do not fully utilize code-base diversity. In this case, running the old version in parallel is protection against any newly introduced bugs, while still being able to take advantage of the bug fixes that were applied.
Evaluating this is also a challenge, as bug reports rarely contain the necessary information. Because of this, to evaluate the fraction of bugs shared across versions (and thus, the effectiveness), we ran static analysis tools (splint, uno, and its4) over several versions of Quagga, and investigated overlap across versions. For each tool, we ran it against each of the earlier versions, and then manually checked to see how many bugs appear in both the earlier version as well as the most recent version. We found that overlap decreases quickly, with 30% of newly-introduced bugs in 0.99.9 avoided by using 0.99.1, and only 25% of bugs shared across the two versions. As it is not 100% effective, this will most likely be used in combination with other forms of diversity (e.g., diversity in the execution environment, described next).
### 2.2 Execution environment diversity
Data diversity through manipulation of the execution environment has been shown to automatically recover from a wide variety of faults [12]. In addition, routing software specific techniques exist, two of which are discussed below. As closed-source vendors do not get the full benefit from running from multiple code bases, they will need to rely on data diversity, most likely as a complement to version diversity. In that case, around five instances will be needed depending on the amount of difference between the different versions. This comes from the result of our study which showed version diversity to be 75% effective, so we assume that two versions will be run, each with two or three instances of that version (each diversified in terms of execution environment, which as we discuss below can be fairly effective).
**Update timing diversity**: Router code is heavily concurrent, with multiple threads of execution and multiple processes on a single router, as well as multiple routers simultaneously running, and hence it is not surprising that this creates the potential for concurrency problems. Luckily, this timing can be perturbed from outside the routing software, for example by randomly delaying the delivery of updates to each instance (the “Update” diversity listed in Table 1), without changing steady-state routing outcomes.
\(^3\)To compare with closed-source software, we also studied publicly available Cisco IOS bug reports, though since we do not have access to IOS source code we did not run our system on them.
### 2.3 Protocol diversity
As network operators have the power to perform configuration modifications, something the router vendors have limited ability to do, there are additional forms of diversity that they can make use of. Here, we discuss one in particular. The process of routing can be accomplished by a variety of different techniques, leading to multiple different routing protocols and algorithms, including IS-IS, OSPF, RIP, etc. While these implementations differ in terms of the precise mechanisms they use to compute routes, they all perform a functionally-equivalent procedure of determining a FIB that can be used to forward packets along a shortest path to a destination. Hence router vendors may run multiple different routing protocols in parallel, voting on their outputs as they reach the FIB. To get some rough sense of this approach, we manually checked bugs in the Quagga and XORP Bugzilla databases to determine the fraction that resided in code that was protocol independent. From our analysis, we estimate that at least 60% of bugs could be avoided by switching to a different protocol.
### 3. BUG TOLERANT ROUTER (BTR)
Our design works by running multiple diverse router instances in parallel. To do this, we need some way of allowing multiple router software instances to simultaneously execute on the same router hardware. This problem has been widely studied in the context of operating systems, through the use of virtual machine (VM) technologies, which provide isolation and arbitrate sharing of the underlying physical machine resources. However, our design must deal with two new key challenges: (i) replication should be transparent and hidden from network operators and neighboring routers (Section 3.1), and (ii) reaching consensus must handle the transient behavior of routing protocols, yet must happen quickly enough to avoid slowing reaction to failures (Section 3.2).
### 3.1 Making replication transparent
First, our design should hide replication from neighboring routers. This is necessary to ensure deployability (to maintain sessions with legacy routers), efficiency (to avoid requiring multiple sessions and streams of updates between
<table>
<thead>
<tr>
<th>Bug</th>
<th>Description</th>
<th>Effective Diversity</th>
</tr>
</thead>
<tbody>
<tr>
<td>XORP 814</td>
<td>The asynchronous event handler did not fairly allocate its resources when processing events from the various file descriptors. Because of this, a single peer sending a long burst of updates could cause other sessions to time out due to missed keepalives.</td>
<td>Version (worked in 1.5, but not 1.6)</td>
</tr>
<tr>
<td>Quagga 370</td>
<td>The BGP default-originate command in the configuration file does not work properly, preventing some policies from being correctly realized.</td>
<td>Version (worked in 0.99.5, but not 0.99.7)</td>
</tr>
<tr>
<td>XORP 814</td>
<td>(See above)</td>
<td>Update (randomly delay delivery)</td>
</tr>
<tr>
<td>Quagga XX</td>
<td>A race condition exists such that when a prefix that is withdrawn and immediately re-advertised, the router only propagates to peers the withdraw message, and not the subsequent advertisement.</td>
<td>Update (randomly delay delivery)</td>
</tr>
<tr>
<td>Quagga XX</td>
<td>A peer that initiates a TCP connection and then immediately disconnects causes the BGP process to stop listening for incoming connections.</td>
<td>Connection (can delay disconnect)</td>
</tr>
<tr>
<td>Quagga 418</td>
<td>Static routes that have an unreachable next hop are correctly considered inactive. However, the route remains inactive even when the address of the network device is changed to something that would make the next hop reachable (e.g., a next hop of 10.0.0.1 and a device address that changed from 9.0.0.2/24 to 10.0.0.2/24).</td>
<td>Connection (can interpret change as reset as well)</td>
</tr>
</tbody>
</table>
Table 1: Example bugs and the diversity that can be used to avoid them. Note for the bug listed as Quagga XX, it was reported on the mailing list titled “quick route flap gets mistaken for duplicate, route is then ignored,” but never filed in Bugzilla.
peers), and ease of maintenance (to avoid the need for operators to perform additional configuration work). To achieve this, our design consists of a router hypervisor, as shown in Figure 1. The router hypervisor performs four key functions:

**Sharing network state amongst replicas:** Traditional routing software receives routing updates from neighbors, and uses information contained within those updates to select and compute paths to destinations. In our design, multiple instances of router software run in parallel, and somehow all these multiple router instances need to learn about routes advertised by neighbors. To compute routes, each internal instance needs to be aware of routing information received on peering sessions. However, this must happen without having instances directly maintain sessions with neighboring routers. To achieve this, we use a replicator component, which acts as a replica coordinator to send a copy of all received data on the session to each router instance within the system. Note that there may be multiple sessions with a given peer router (e.g., in the case of protocol diversity), in which case the replicator sends received data to the appropriate subset of instances (e.g., those running the same protocol). The replicator does not need to parse update messages, as it simply forwards all data it receives at the transport layer to each instance.
**Advertising a single route per prefix:** To protect against buggy results, which may allow the router to keep running but may cause it to output an incorrect route, we should select the majority result when deciding what information to publish to the FIB, or to advertise to neighbors. To do this, we run a voter module that monitors advertisements from the router instances, and determines the route the router should use (e.g., the majority result). Our design contains two instances of the voter: an update voter that determines which routing updates should be sent to neighbors, and a FIB voter that determines which updates should be sent to the router’s FIB (forwarding table). As with the replicator, the update voter may vote among a subset of instances, for example, those belonging to the same protocol. The FIB voter will vote among all instances, as all instances must come to the same decisions with regard to the FIB. To ensure advertisements are consistent with FIB contents, the update voter and FIB voter must select the same routes. To handle this, the same voting algorithm must be used on both updates and FIB changes.
To avoid introducing bugs, the voter should be as simple as possible (our voter implementation, containing multiple alternative voting strategies, is 514 lines of code). We assume the voter is trusted (since it is much simpler than router code, we expect it to have significantly fewer bugs and therefore the fact that it is a single point-of-failure is only a slight concern), and that replication is asynchronous (we do not assume all instances respond equally fast, as instances may be slow or mute due to bugs), and transparent (external routers do not interact directly with the multiple instances, so as to simplify deployment).
**Maintaining a set of running replicas:** BFT-based techniques rely on having a sufficient number of correctly-behaving replicas in order to achieve consensus. Hence, if an instance crashes or begins producing buggy output, we may wish to replace it with a new copy. To achieve this, our hypervisor is responsible for bootstrapping the new instance when it begins running. For traditional routers, bootstrapping involves establishing a session with a neighboring router, which causes the neighboring router to send out update messages for each of the prefixes it has an entry for in its RIB. To avoid introducing externally visible churn, the hypervisor keeps a history of the last update peers have sent for each prefix, and replays this for any new instance upon startup of that instance.
**Presenting a common configuration interface:** As there is no standardization of the configuration interface in routers, each router has ended up with its own interface. In the case where instances from different code bases are used, to keep the network operator from needing to configure each instance separately, a mechanism is needed to hide the differences in each configuration interface. Fortunately, this is not unlike today’s situation where ISPs use routers from multiple vendors. To cope with this, ISPs often run configuration management tools which automate the process of driving each vendor-specific interface from a common one. As such, we can rely on these same techniques to hide the configuration differences.
### 3.2 Dealing with the transient and real-time nature of routers
The voter’s job is to arbitrate amongst the “outputs” (modifications to the FIB, outbound updates sent to neighbors) of individual router instances. This is more complex than simply selecting the majority result – during convergence, the different instances may temporarily have different outputs without violating correctness. At the same time, routers must react quickly enough to avoid slowing convergence. Here, we investigate several alternative voting strategies to address this problem, along with their tradeoffs.
**Handling transience with wait-for-consensus:** The extreme size of the Internet, coupled with the fact that routing events are propagated globally and individual events trigger multiple routing updates, results in very high update rates at routers. With the use of replication, this problem is potentially worsened, as different instances may respond at different times, and during convergence they may temporarily (and legitimately) produce different outputs. To deal with this, we use wait-for-consensus voting, in which the voter waits for all instances to compute their results before determining the majority vote. Because all non-buggy routers output the same correct result in steady-state, this approach can guarantee that if k or fewer instances are faulty with at least 2k + 1 instances running, no buggy result will reach the FIB or be propagated to a peer.
Note that in practice, waiting for consensus may also reduce instability, as it has an effect similar to the MRAI (Minimum Route Advertisement Interval) timer (routers with MRAI send updates to their neighbors only when a timer expires, which eliminates multiple updates to a prefix that occur between timer expiries). Namely, forcing the voter to wait for all instances to agree eliminates the need to advertise changes that happen multiple times while it is waiting (e.g., in the presence of unstable prefixes). However, the downside of this is that reaction to events may be slowed in some cases, as the voter must wait for the \( k + 1 \)th slowest instance to finish computing the result before making a decision.
**Speeding reaction time with master/slave:** Routers must react quickly to failures (including non-buggy events) to ensure fast convergence and avoid outages. At the same time, the effects of a bad FIB update or BGP message can be undone simply by overwriting the FIB or announcing a new route. To speed reaction time, we hence consider an approach where we allow outputs to temporarily be faulty. Here, we mark one instance as the master, and the other instances as slaves. The voter operates by always outputting the master’s result. The slaves’ results are used to cross-check against the master after the update is sent or during idle cycles. The benefit of this approach is that it speeds convergence to the running time of the master’s computation. In addition, convergence is no worse than the convergence of the master, and hence at most one routing update is sent for each received update. However, the downside of this approach is that if the master becomes buggy, we may temporarily output an incorrect route. To address this, when failing over to a slave, the voter re-advertises any differences between the slaves’ routing tables and the routing table computed by the master. Hence, temporarily outputting an incorrect route may not be a problem, as it only leads to a transient problem that is fixed when the slaves overthrow the master.
Finally, we consider a hybrid scheme which we refer to as **continuous-majority.** This approach is similar to wait-for-consensus in that the majority result is selected to be used for advertisement or for population into the FIB. However, it is also similar to master/slave in that it does not wait for all instances to compute results before selecting the result. Instead, every time an instance sends an update, the voter reruns its voting procedure, and updates are only sent when the majority result changes. The benefit of this approach is that it may speed reaction to failure, and the majority result may be reached before the slowest instance finishes computing. The downside of this approach is that convergence may be worsened, as the majority result may change several times for a single advertised update. Another downside of this approach is that voting needs to be performed more often, though, as we show in our experiments (Section 5) this overhead is negligible under typical workloads.
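As a concrete illustration of the wait-for-consensus strategy, the sketch below picks the route advertised by a strict majority of R instances, comparing routes as opaque strings in the spirit of the hypervisor's voter. The instance outputs and the value of R are hypothetical; this is a sketch of the idea, not the paper's voter code.

```
#include <stdio.h>
#include <string.h>

#define R 3  /* number of replicas running (2k+1 with k = 1) */

/*
 * Minimal sketch of wait-for-consensus voting: once all R instances
 * have produced a route for a prefix, pick the value advertised by a
 * strict majority (> R/2) of them, comparing routes as opaque strings.
 * Returns NULL if no majority exists.
 */
static const char *vote(const char *routes[R])
{
    for (int i = 0; i < R; i++) {
        int agree = 0;
        for (int j = 0; j < R; j++)
            if (routes[i] && routes[j] && strcmp(routes[i], routes[j]) == 0)
                agree++;
        if (agree > R / 2)
            return routes[i];
    }
    return NULL;  /* no majority: keep waiting, or flag instances as faulty */
}

int main(void)
{
    /* Hypothetical next-hop outputs from three diverse instances. */
    const char *routes[R] = { "10.0.0.1", "10.0.0.1", "192.168.1.7" };
    const char *winner = vote(routes);

    printf("voted route: %s\n", winner ? winner : "(no majority)");
    return 0;
}
```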
### 4. ROUTER HYPERVISOR PROTOTYPE
Our implementation had three key design goals: (i) not requiring modifications to routing software, (ii) being able to automatically detect and recover from faults, and (iii) low complexity, to not be a source of new bugs. Most of our design is agnostic to the particular routing protocol being used. For locations where protocol-specific logic was needed, we were able to treat messages mostly as opaque strings. This section describes our implementation, which consists of a set of extensions built on top of Linux. Our implementation was tested with XORP versions 1.5 and 1.6, Quagga versions 0.98.6 and 0.99.10, and BIRD version 1.0.14. We focused our efforts on supporting BGP, due to its complexity and propensity for bugs. Section 4.1 describes how we provide a wrapper around the routing software, in order for unmodified routing software to be used, and Section 4.2 describes the various faults that can occur and how our prototype detects and recovers from them.
#### 4.1 Wrapping the routing software
To eliminate the need to modify existing router software, our hypervisor acts as a wrapper to hide from the routing software the fact that it is a part of a bug-tolerant router, and allows the routing instances to share resources such as ports, and access to the FIB. Our design (Figure 2) takes advantage of the fact that sockets are used for communicating with peer routers, and for communicating forwarding table (FIB) updates to the kernel. Hence, our implementation intercepts socket calls from the router instances using the LD_PRELOAD environment variable and uses a modified libc library, called hv-libc, to redirect messages to a user-space module, called virtd, which manages all communication.

The two key functions the hypervisor then needs to manage are discussed below:
**Socket-based communications:** To connect to peer routers (with TCP) and for writing to the common FIB (with Netlink), the multiple routers need to share access to a common identifier space (e.g., port 179 in BGP). We handle this by intercepting socket system calls in hv-libc, performing address translation in hv-libc, and using virtd as a proxy (e.g., when a router instance listens on port 179, it is instead made to listen on a random port, and virtd will listen on 179 and connect to each of the random ports when receiving an incoming connection).
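The sketch below illustrates this interception technique on Linux with an LD_PRELOAD shim that overrides bind(): a router instance asking for BGP's port 179 is silently rebound to an instance-specific port so that the hypervisor's proxy can own 179 itself. This is a minimal illustration of the mechanism, not the hv-libc code; the HV_INSTANCE_PORT environment variable is a hypothetical knob.

```
/* Build as a shared object and preload it, e.g.:
 *   gcc -shared -fPIC -o hv_shim.so hv_shim.c -ldl
 *   LD_PRELOAD=./hv_shim.so HV_INSTANCE_PORT=20179 bgpd ...
 *
 * Minimal illustration of the interception idea: rewrite bind() on
 * BGP's well-known port 179 to an instance-specific port, so that the
 * hypervisor's proxy can listen on 179 and relay connections.
 */
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <dlfcn.h>
#include <netinet/in.h>
#include <stdint.h>
#include <stdlib.h>
#include <sys/socket.h>

int bind(int fd, const struct sockaddr *addr, socklen_t len)
{
    static int (*real_bind)(int, const struct sockaddr *, socklen_t);
    if (!real_bind)
        real_bind = (int (*)(int, const struct sockaddr *, socklen_t))
                        dlsym(RTLD_NEXT, "bind");

    if (addr && addr->sa_family == AF_INET) {
        struct sockaddr_in sin = *(const struct sockaddr_in *)addr;
        if (ntohs(sin.sin_port) == 179) {
            /* HV_INSTANCE_PORT is a hypothetical per-instance setting;
             * falling back to 0 requests an ephemeral port. */
            const char *p = getenv("HV_INSTANCE_PORT");
            sin.sin_port = htons(p ? (uint16_t)atoi(p) : 0);
            return real_bind(fd, (const struct sockaddr *)&sin, sizeof(sin));
        }
    }
    return real_bind(fd, addr, len);
}
```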
**Bootstrapping new connections:** When the BTR initially starts up, the routing instances start with empty routing tables. In BGP, a session with a peer is established by creating a TCP connection, exchanging OPEN messages, and acknowledging the OPEN message with a KEEPALIVE message. After the session is established, the peers exchange routing information. However, when replacing a failed instance, we need to bootstrap it locally, to prevent the failure from being externally visible (e.g., sending a route-refresh to a peer). Additionally, we need to bootstrap it independently, to prevent the new instance starting in a faulty state (e.g., bootstrapping off another router instance). Since a router’s state only depends on the last received RIB advertised by its neighbors, we add some additional logic to the hypervisor to store the last-received update for each (prefix, neighbor) pair. Then when a new instance is started, the hypervisor replays its stored updates. To lower complexity, the hypervisor treats the (prefix, neighbor) fields and other attributes in the packets as opaque strings, and does not implement protocol logic such as route selection.
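A minimal sketch of such a bootstrap cache is shown below, storing the last update per (prefix, neighbor) pair as opaque bytes and replaying all entries to a fresh instance. The fixed-size arrays and limits are placeholders standing in for whatever structure the real hypervisor uses.

```
#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 1024

/* One cached "last update" per (prefix, neighbor) pair, kept as opaque bytes. */
struct cache_entry {
    char prefix[64];        /* e.g. "203.0.113.0/24" */
    char neighbor[64];      /* e.g. "192.0.2.1" */
    char last_update[4096]; /* raw BGP UPDATE bytes, never parsed here */
    size_t update_len;
    int used;
};

static struct cache_entry cache[MAX_ENTRIES];

/* Overwrite (or create) the entry for this (prefix, neighbor) pair. */
void cache_store(const char *prefix, const char *neighbor,
                 const void *update, size_t len)
{
    int slot = -1;
    for (int i = 0; i < MAX_ENTRIES; i++) {
        if (cache[i].used &&
            strcmp(cache[i].prefix, prefix) == 0 &&
            strcmp(cache[i].neighbor, neighbor) == 0) {
            slot = i;          /* existing entry: overwrite it */
            break;
        }
        if (!cache[i].used && slot < 0)
            slot = i;          /* remember the first free slot */
    }
    if (slot < 0 || len > sizeof(cache[0].last_update))
        return;                /* cache full or update too large: skipped here */

    struct cache_entry *e = &cache[slot];
    snprintf(e->prefix, sizeof(e->prefix), "%s", prefix);
    snprintf(e->neighbor, sizeof(e->neighbor), "%s", neighbor);
    memcpy(e->last_update, update, len);
    e->update_len = len;
    e->used = 1;
}

/* Replay every cached update to a newly started instance via a callback. */
void cache_replay(void (*send_to_instance)(const void *, size_t))
{
    for (int i = 0; i < MAX_ENTRIES; i++)
        if (cache[i].used)
            send_to_instance(cache[i].last_update, cache[i].update_len);
}
```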
#### 4.2 Detecting and recovering from faults
To deal with bugs, our hypervisor must detect which outputs are buggy (e.g., with voting), and recover from the buggy output (by advertising the voting result, and if necessary restarting/replacing the buggy instance).
Detection: One of our main goals is that the BTR should be able to automatically detect and recover from bugs affecting correctness of the router’s control or data planes. Since our design fundamentally relies on detecting differences in outputs of different instances, we need to handle every possible way their outputs could differ. All faults can be generalized to four categories: (i) an instance sending a message when it should not, (ii) an instance not sending a message when it should, (iii) an instance sending a message with incorrect contents, and (iv) bugs that cause a detectable faulty system event, such as process crashing or socket error. The first three categories are detected by using voting (the fourth category is easily detectable, so no further discussion is given). If an instance has a different output from the majority, we consider it a fault. For example, in case (i) above, the winning update will be the NULL update, in cases (ii) and (iii) the winning update will be the most-commonly advertised one. To avoid reacting to transient changes, voting is only performed across steady-state instance outputs, which have been stable for a threshold period of time. We then mark instances whose steady-state outputs differ from those of the majority or those that are not yet stable as being faulty (including in schemes like master/slave, which perform this step after advertising).
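The stability check that gates voting can be sketched as follows; the threshold value and data layout are assumptions for illustration rather than the paper's actual parameters.

```
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

/*
 * Sketch of the steady-state check used before voting: an instance's
 * output for a prefix participates in voting only once it has been
 * unchanged for at least STABLE_THRESHOLD seconds; stable instances
 * whose output differs from the majority are marked faulty.
 */
#define STABLE_THRESHOLD 5  /* seconds; placeholder value */

struct instance_output {
    char route[256];        /* last advertised route, as an opaque string */
    time_t last_change;     /* when it last changed */
    bool faulty;
};

/* Record a new output from one instance. */
void record_output(struct instance_output *o, const char *route, time_t now)
{
    if (strcmp(o->route, route) != 0) {
        snprintf(o->route, sizeof(o->route), "%s", route);
        o->last_change = now;
    }
}

/* An output is eligible for voting only once it has stopped changing. */
bool is_stable(const struct instance_output *o, time_t now)
{
    return (now - o->last_change) >= STABLE_THRESHOLD;
}

/* After voting, flag stable instances that disagree with the winner. */
void mark_faulty(struct instance_output *o, const char *majority, time_t now)
{
    if (is_stable(o, now) && strcmp(o->route, majority) != 0)
        o->faulty = true;
}
```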
Recovery: In the common case, recovering from a buggy router simply involves using the output from the voting procedure. However, to deal with cases where the router is persistently buggy, or crashes, we need some way to kill and restart the router. As a heuristic, we modified our hypervisor with a fault threshold timeout. If an instance continues to produce buggy output for longer than the threshold, or if the router undergoes a faulty system event, the router is killed. To maintain a quorum of instances on which voting can be performed, the BTR can restart the failed instance, or replace it with an alternate diverse copy. In addition, to support the master/slave voting scheme, we need some way to overwrite previously advertised buggy updates. To deal with this, our implementation maintains a history of previously advertised updates when running this voting scheme. When the hypervisor switches to a new master, all updates in that history that differ from the currently advertised routes are sent out immediately.
#### 4.3 Reducing complexity
It is worth discussing here the role the hypervisor plays in the overall reliability of the system. As we are adding software, this can increase the possibility of bugs in the overall system. In particular, our goals for the design are that (i) the design is simple, implementing only a minimal set of functionality, reducing the set of components that may contain bugs, and (ii) the design is small, opening the possibility of formal verification of the hypervisor – a more realistic task than verifying an entire routing software implementation. To achieve these goals, our design only requires the hypervisor to perform two functions: (i) acting as a TCP proxy, and (ii) bootstrapping new instances. Below, we describe how these functions are performed with low complexity.
Acting as a TCP proxy: To act as a TCP proxy simply involves accepting connections from one end point (remote or local) and connecting to the other. When there is a TCP connection already, the hypervisor simply needs to accept the connection. Then, upon any exchange of messages (in or out) the hypervisor simply passes data from one port to another. In addition, our design uses voting to make replication transparent to neighboring routers. Here, the update messages are voted upon before being sent to the adjacent router. However, this is simply comparing opaque strings (the attributes) and does not involve understanding the values in the strings.
Overall, our implementation included multiple algorithms and still was only 514 lines of code. These code changes occur only in the hypervisor, reducing potential for new bugs by increasing modularity and reducing need to understand and work with existing router code. From this, we can see that the hypervisor design is simple in terms of functionality and much of the functionality is not in the critical section of code that will act as a single point of failure.
Bootstrapping new instances: To bootstrap new instances requires maintaining some additional state. However, bugs in this part of the code only affect the ability to bootstrap new instances, and do not affect the “critical path” of voting code. One can think of this code as a parallel routing instance which is used to initialize the state of a new instance. Of course, if this instance’s RIB is faulty, the new instance will be started in an incorrect state. However, this faulty state would either be automatically corrected (e.g., if the adjacent router sends a new route update that overwrites the local faulty copy) or it would be determined to be faulty (e.g., when the faulty route is advertised), in which case a new instance is started. Additionally, the RIB that needs to be kept is simply a history of messages received from the adjacent router and therefore is simple. Bootstrapping a new instance also requires intercepting BGP session establishment. Here, the hypervisor simply needs to observe the first instance starting a session (an OPEN message followed by a KEEPALIVE) and subsequent instances simply get the two received messages replayed.
5. EVALUATION
We evaluate the three key assumptions in our work:
It is possible to perform voting in the presence of dynamic churn (Section 5.1): Voting is simple to do on fixed inputs, but Internet routes are transient by nature. To distinguish instances that are still converging to the correct output from those that are sending buggy outputs, our system delays voting until routes become stable, introducing a tradeoff between false positives (incorrectly believing an unstable route is buggy) and detection time (during which time a buggy route may be used). Since these factors are independent of the precise nature of bugs but depend on update dynamics, we inject synthetic faults, and replay real BGP routing traces.
---
\(^5\) We do not address, for example, faults in logging.
\(^6\) We consider legitimate route-flapping due to persistent failures and protocol oscillations to be rare. However, we can detect this is occurring as the majority of instances will not be stable and we can act accordingly.
It is possible for routers to handle the additional overhead of running multiple instances (Section 5.2): Internet routers face stringent performance requirements, and hence our design must have low processing overhead. We evaluate this by measuring the pass-through time for routing updates to reach the FIB or neighboring routers after traversing our system. To characterize performance under different operating conditions, we vary the routing update playback rate, the source of updates (edge vs. tier-1 ISP), and the number of peers.
Running multiple router replicas does not substantially worsen convergence (Section 5.3): Routing dynamics are highly dependent on the particular sequence of steps taken to arrive at the correct route – choosing the wrong sequence can vastly increase processing time and control overhead. To ensure our design does not harm convergence, we simulate update propagation in a network of BTRs, and measure convergence time and overhead. For completeness, we also cross-validate these against our implementation.
5.1 Voting in the presence of churn
To evaluate the ability to perform voting in the presence of routing churn, we replayed BGP routing updates collected from Route Views [6] against our implementation. In particular, we configure a BGP trace replayer to play back a 100-hour-long trace starting on March 1st 2007 at 12:02am UTC. The replayer plays back multiple streams of updates, each from a single vantage point, and we collect information on the amount of time it takes the system to select a route. Since performance is dependent only on whether the bug is detected by voting or not, and independent of the particular characteristics of the bug being injected, here we use a simplified model of bugs (based on the model presented in Section 4.2), where bugs add/remove updates and change the next-hop attribute for a randomly-selected prefix, and have two parameters: (i) duration, or the length of time an instance’s output for a particular prefix is buggy, and (ii) interarrival time, or the length of time between buggy outputs. As a starting point for our baseline experiments, we assume the length of time a bug affects a router, and their interarrival times, are similar to traditional failures, with a duration of 600 seconds and an interarrival time of 1.2 million seconds [34].
5.1.1 Comparison of voting strategies
There is a very wide space of voting strategies that could be used in our system. To explore tradeoffs in this space, we investigated performance under a variety of alternative voting strategies and parameter settings. We focus on several metrics: the fault rate (the fraction of time the voter outputs a buggy route), waiting time (the amount of time the voter waits before outputting the correct route), and update overhead (the number of updates the voter outputs).
Fault rate: We investigate the fault rate of the voting strategies by injecting synthetic faults and varying their properties. First, we varied the mean duration and interarrival times of synthetic faults (Figures 3 and 4). We found that for very high bug rates, wait-3 (waiting for $K = 3$ out of $R = 3$ copies to agree before selecting the majority result) outperformed master/slave. This happened because wait-3 is more robust to simultaneous bugs than master/slave, as master/slave takes some short time to detect the fault, potentially outputting an incorrect route in the meantime. In addition, unless the bug rate is extremely high, continuous-majority performs nearly as well as wait-3, with similar robustness and update overhead.
Overall, we found that recovery almost always took place within one second. Increasing the number of instances running in parallel ($R$) makes the router even more tolerant of faults, but incurs additional overheads. Also, wait-for-consensus and continuous-majority gain more from larger values of $R$ than the master/slave strategy. For example, when moving from $R = 3$ to $R = 4$ instances, the fault rate decreases from 0.088% to 0.003% with wait-for-consensus, while with master/slave the fault rate only decreases from 0.089% to 0.06%.
However, there may be practical limits on the amount of diversity achievable (for example, if there is a limited number of diverse code instances, or a bound on the ability to randomize update timings). This leads to the question—if we have a fixed number of diverse instances, how many should be run, and how many should be kept as standbys (not running, but started up on demand)? We found that standby routers were less effective than increasing $R$, but only for small values of $R$, indicating that for large numbers of diverse instances, most instances could be set aside as standbys to decrease runtime overhead. For example, if $R = 3$, under the continuous-majority strategy we attain a fault rate of 0.02%. Increasing $R$ to 4 reduced the fault rate to 0.006%, while instead using a standby router with $R = 3$ reduced the fault rate to 0.0008%. This happens because buggy outputs are detected quickly enough that failing over to a standby is nearly as effective as having it participate in voting at every time step. Because of this, operators can achieve much of the benefit of a larger number of instances, even if these additional instances are run as lower-priority (e.g., only updated during idle periods) standbys.
**Waiting time:** Different voting algorithms provide different tradeoffs between waiting time (time from when a new best-route arrives, to when it is output by the voter) and the fault rate. The master/slave strategy provides the smallest waiting time (0.02 sec on average), but incurs a higher fault rate (0.0006% on average), as incorrect routes are advertised for a short period whenever the master becomes buggy. Continuous-majority has longer wait times (0.035 sec on average), but lower fault rate (less than 0.00001% on average), as routes are not output until multiple instances converge to the same result. The wait-for-consensus strategy’s performance is a function of the parameter $K$—larger values of $K$ increase wait time but decrease fault rate. However, we found that increasing $K$ to moderate sizes incurred less delay than the pass-through time for a single instance, and hence setting $K = R$ offered a low fault rate with only minor increases in waiting time.
**Update overhead:** Finally, we compare the voting strategies in terms of their effect on update overhead (number of routing updates they generate), and compare them against a standard router (*std. router*). Intuitively, running multiple voters within a router might seem to increase update overhead, as the voter may change its result multiple times for a single routing update. However, in practice, we find no substantial increase, as shown in Figure 5, which plots a CDF of the number of updates (measured over one second intervals). For the master/slave strategy this is expected, since a single master almost always drives computation. In wait-for-consensus, no updates are generated until all instances arrive at an answer, and hence no more than one outbound update is generated per inbound update, as in a standard router. Interestingly, the continuous-majority strategy also does not significantly increase update overhead. This happens because when an update enters the system, the voter’s output will only change when the majority result changes, which can only happen once per update.
**5.1.2 Performance of fault detection**
Protocols today often incorporate thresholds (such as BGP’s MRAI timer) to rate-limit updates. To evaluate the level of protection our scheme provides against unstable instances, as well as the ability to distinguish steady-state from transient behavior, we incorporated a configurable timeout parameter ($T$) in fault detection to identify when a route becomes stable. Figure 6 shows the tradeoff as this parameter varies between the *false negative rate* (the number of times a non-buggy instance is treated as buggy), and the *fault rate* (i.e., the false positive rate of the voter, or the fraction of time a buggy route is treated as non-buggy). We found that as $T$ increases, the false negative rate decreases, as larger values of $T$ reduce the probability that transient changes will be considered when voting. The false negative rate does not vary among different voting strategies, as fault detection is only performed on steady-state outputs, and the algorithmic differences between the strategies disappear when performed on outputs that are not dynamically changing. The fault rate increases with $T$, as when a bug does occur, it takes longer to detect it. Interestingly, the fault rate initially decreases with $T$; this happens because for low values of $T$, more instances are treated as buggy, giving fewer inputs to the voter and increasing the probability of an incorrect decision. Overall, we found that it was possible to tune $T$ to simultaneously achieve a low fault rate, low false negative, and low detection time.
**5.2 Processing overhead**
We evaluate the overhead of running multiple instances using our hypervisor with both XORP- and Quagga-based instances running on single-core 3 GHz Intel Xeon machines with 2 GB RAM. We measure the *update pass-through time* as the amount of time from when the BGP replayer sends a routing update to when a resulting routing update is received at the monitor. However, some updates may not trigger routing updates to be sent to neighbors, if the router decides to continue using the same route. To deal with this case, we instrument the software router’s source code to determine the point in time when it decides to retain the same route. We also instrument the kernel to measure the FIB *pass-through time*, as the amount of time from when the BGP replayer sends an update to the time the new route is reflected in the router’s FIB (which is stored as the routing table in the Linux kernel).
Figure 7 shows the pass-through time required for a routing change to reach the FIB. We replayed a Routeviews update trace and varied the number of Quagga instances from 1 to 31, running atop our router hypervisor on a single-core machine. We found the router hypervisor increases FIB pass-through time by 0.08% on average, to 0.06 seconds. Our router hypervisor implementation runs in user space, instead of directly in the kernel, and with a kernel-based implementation this overhead would be further reduced. Increasing the number of instances to 3 incurred an additional 1.7% increase, and to 5 incurred a 4.6% increase. This happens because the multiple instances contend for CPU resources (we found that with multicore CPUs this overhead was substantially lower under heavy loads).
To evaluate performance under heavier loads, we increased the rate at which the replayer played back routing updates by a factor of 3000. Under this heavy load, FIB pass-through times slow for both the standard router and the BTR due to increased queuing delays. However, even under these heavy loads, the BTR incurs a delay penalty of less than 23%. To estimate effects on convergence, we also measured the update pass-through time as the time required for a received routing change to be sent to neighboring routers. We found this time to be nearly identical to the FIB pass-through time when the MRAI timer was disabled, as updates are sent immediately after the FIB is updated. When MRAI was enabled (even when set to 1 second, the lowest possible setting for Quagga), the variation in delay across instances was dwarfed by the delay incurred by MRAI. Finally, we found that switching to the master/slave voting strategy reduces pass-through delay, though it slightly increases the fault rate, as discussed previously in Section 5.1.
**5.3 Effect on convergence**
Next, we study the effect of our design on network-wide convergence. We do this by simulating a network of BTRs (each with eight virtual router instances) across three network-level graphs: the entire AS-level topology (labeled AS in Figure 8) sampled on Jan 20, 2008; AS 3967's internal network topology as collected from Rocketfuel (labeled 3967); and cliques (labeled CQ) of varying sizes (a clique represents the "worst case" for routing, with the potential to explore all n! possible paths in a clique of size n). To determine the ordering of when BTRs respond, we run our implementation over routing updates, record pass-through times, and replay them within our simulation framework. Since the master/slave approach has no effect on network operation unless a bug is triggered (the slaves only operate as standbys), we focus our evaluation on the other strategies.
We found several key results. First, as shown in Figure 8, the voting schemes do not produce any significant change in convergence beyond the delay penalty described in previous sections, as compared to a network only containing standard routers. We found this delay penalty to be much smaller than propagation delays across the network, and to be reduced further when MRAI is activated. As the number of instances increases (up to the number of processor cores), continuous-majority’s delay decreases, because it becomes increasingly likely that one will finish early. The opposite is true for wait-for-consensus, as the delay of the slowest instances becomes increasingly large. Next, while we have thus far considered a virtual router level deployment, where voting is performed at each router, we also considered a virtual network deployment, where voting is performed at the edges of the network. In our experiments we ran eight virtual networks and found that this speeds up convergence, as routers do not have to wait for multiple instances to complete processing before forwarding updates. Hence, for small numbers of diverse instances, voting per-router has smaller convergence delay. However, virtual-network approaches require substantially more control overhead than the virtual-router voting schemes. To address this, we found that simple compression schemes [11] that eliminate redundancy across updates could reduce the vast majority of this overhead. Finally, to validate our simulations, we set up small topologies on Emulab [2], injected routing events, and compared with simulations of the same topology. We found no statistically significant difference.
6. DISCUSSION
For simplicity, this paper discusses the one particular design point. However, our architecture is amenable to deployment on varying levels of granularity:
**Server-based operation:** Instead of running the diverse instances within a single router, their computations may be offloaded to a set of dedicated servers running in the network (e.g., an RCP-like platform [15]). These servers run the router software in virtualized environments, and cross-check the results of routers running within the network. When a buggy result is detected, virtual router instances may be migrated into the network to replace the buggy instance. Alternatively, the servers may be configured to operate in read-only mode, such that they may signal alarms to network operators, rather than participate directly in routing.
**Network-wide deployment:** Instead of running instances of individual router software in parallel, ensembles of routers may collectively run entire virtual networks in parallel. Here, the outputs of a router are not merged into a single FIB, or as a single stream of updates sent to its neighbors. Instead, each router maintains a separate FIB for each virtual network, and voting is used at border routers to determine which virtual network data packets should be sent on. The advantage of this approach is it allows different routing protocols to be used within each virtual network, making it simpler to achieve diversity. For example, OSPF may be run in one network and IS-IS in another. In addition, convergence speed may be improved, as individual physical routers do not have to wait for their instances to reach a majority before sending a routing update.
**Process-level deployment:** Our design runs multiple instances of routing software in parallel, and hence incurs some memory overhead. On many Internet routers this is not an issue, due to low DRAM costs and the fact that DRAM capacity growth has far exceeded that of routing table growth. That said, if it is still desirable to decrease memory usage, router software may be modified to vote on a shared RIB instead of a FIB. We found the RIB is by far the largest source of memory usage in both Quagga and XORP, incurring 99.3% of total memory usage. Voting on a shared RIB would reduce this overhead by eliminating the need to store separate copies of the RIB across router instances. Here, voting could be performed across multiple routing daemons (e.g., multiple BGP processes within a single instance of Cisco IOS) to construct a single shared RIB. In addition to reducing memory usage, finer-grained diversity may speed reaction (by only cloning and restarting individual processes or threads) and allow finer-grained control (during times of load, only mission-critical components may be cloned to reduce resource usage). However, code development may become more challenging, since this approach relies on knowing which parts of code are functionally equivalent. To address this, router software could be written to a common API, to allow replication and composition of modules from different code bases while sharing state.
**Leveraging existing redundancy:** Instead of running multiple instances in parallel, a router may be able to leverage redundant executions taking place at other routers in the network. Networks often provision redundant equipment to protect against physical failures; for example, the VRRP protocol allows multiple routers to act collectively as a single router. Our architecture is amenable to leveraging this physical redundancy, as the multiple instances may be deployed across the redundant routers. In addition, all routers in an ISP compute the same egress set of BGP routes that are "equal" according to the first few steps of the decision process that deal with BGP attributes. To leverage this redundancy, it may be possible to extend our architecture to support voting across multiple routers' egress sets.
7. RELATED WORK
Software and data diversity has been widely applied in other areas of computing, including increasing server reliability [18], improving resilience to worm propagation [36], building survivable Internet services [28], making systems secure against vulnerabilities [20], building survivable overlay networks [44], building fault tolerant networked file systems [17], protecting private information [43], and recovering from memory errors [12]. Techniques have also been developed to minimize computational overhead by eliminating redundant executions and redundant memory usage across parallel instances [45, 26].
However, as discussed in Section 1.3, routing software presents new challenges for SDD (e.g., routers must react quickly to network changes, have vast configuration spaces and execution paths, and rely on distributed operations), as well as new opportunities to customize SDD (routers have little dependence on past history, can achieve the same objectives in different ways, and have well-defined interfaces). We address these challenges and opportunities in our design. There has also been work studying router bugs and their effects [42, 32], and our design is inspired by these measurement studies. Also, [44] used a graph-theoretic treatment to study the potential benefits of diversity across physical routers (as opposed to diversity within a router). As work dealing with misconfigurations [23, 24] and traditional fail-stop failures [10, 35, 33, 27] becomes deployed, we envision that router bugs will become an increasingly significant roadblock to improving network availability.
Our work can be contrasted to techniques which attempt to prevent bugs by formally verifying the code. These techniques are typically limited to small codebases, and often require manual efforts to create models of program behavior. For example, with manual intervention, a small operating system kernel was formally verified [29]. For routing, work has been done on languages to model protocol behavior (e.g., [25]), however the focus of this work is on algorithmic behaviors of the protocol, as opposed to other possible places where a bug can be introduced. In contrast, our approach leverages a small and low-complexity hypervisor, which we envision being possible to formally verify.
Our design leverages router virtualization to maintain multiple diverse instances. Router virtualization is an emerging trend gaining increased attention, as well as support in commercial routers. Our design builds on the high-level ideas outlined in [16] by providing a complete design, several algorithms for detecting and recovering from bugs, and an implementation and evaluation. In addition, our design is complementary to use of models of router behavior [23, 24] and control-plane consistency checks [41, 37], as these models/checks can be run in place of one or more of the router virtual instances. Finally, systems such as MARE (Multiple Almost-Redundant Executions) [45] and the Difference Engine [26] focus on reducing overheads from replication. MARE runs a single instruction stream most of the time, and only runs redundant instruction streams when necessary. The Difference Engine attains substantial savings in memory usage across VMs, through use of sub-page level sharing and in-core memory compression. These techniques may be used to further reduce overheads of our design.
8. CONCLUSIONS
Implementation errors in routing software harm availability, security, and correctness of network operation. In this paper, we described how to improve resilience of networks to bugs by applying Software and Data Diversity (SDD) techniques to router design. Although these techniques have been widely used in other areas of computing, applying them to routing introduces new challenges and opportunities, which we address in our design. This paper takes an important first step towards addressing these problems by demonstrating diverse replication is both viable and effective in building robust Internet routers. An implementation of our design shows improved robustness to router bugs with some tolerable additional delay.
9. REFERENCES
[1] Cisco 7200 simulator. (software to run Cisco IOS images on desktop PCs) [www.ipflow.utc.fr/index.php/Cisco_7200_Simulator].
Project 2: Tapestry
Due: 11:59 PM, Mar 15, 2020
Contents
1 Introduction 2
2 Tapestry Protocol 2
2.1 Identifying Nodes and Objects . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 2
2.2 Root Nodes . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 2
2.2.1 Selecting Root Nodes . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 2
2.2.2 Example: A Tapestry Network’s Objects and their Roots . . . . . . . . . . . 3
2.3 Tapestry Node State . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 3
2.3.1 Routing Tables . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 3
2.3.2 Backpointer Tables . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 4
2.4 Prefix Routing . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 4
2.5 Publishing and Retrieving Objects . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 5
2.6 Adding Tapestry Nodes . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 5
2.6.1 Acknowledged Multicast . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 5
2.6.2 Backpointer Traversal . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 6
2.7 Graceful Exits . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 7
2.8 Fault Tolerance . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 7
2.8.1 Errors While Routing . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 7
2.8.2 Loss of Root Node . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 7
2.8.3 Loss of Replicas . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 8
2.8.4 Miscellaneous . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 8
3 The Assignment 8
3.1 Function Stubs . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 8
3.2 Provided Functions . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 9
3.3 Remote Procedure Call (RPC) . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 10
3.4 A Note About Context . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . . 11
4 Demo 11
1 Introduction
The final project for CS1380, PuddleStore, uses an underlying distributed object location and retrieval system (DOLR) called Tapestry to store and locate objects. This distributed system is similar to Chord in that it provides an interface for storing and retrieving key-value pairs. From an application’s perspective, the difference between Chord and Tapestry is that in Tapestry the application chooses where to store data, rather than allowing the system to choose a node to store the object at.
Tapestry is a decentralized distributed system. It is an overlay network that implements simple key-based routing. Each node serves as both an object store and a router that applications can contact to obtain objects. In a Tapestry network, objects are “published” at nodes, and once an object has been successfully published, it is possible for any other node in the network to find the location at which that object is published.
2 Tapestry Protocol
2.1 Identifying Nodes and Objects
Much like in other distributed systems, nodes and objects in the Tapestry network are each assigned their own globally unique identifier. In Tapestry, an ID is a fixed-length sequence of base-16 digits.
2.2 Root Nodes
In order to make it possible for any node in the network to find the location of an object, a single node is appointed as the “root” node for that object. The root node stores the reference to the node that actually stores the object.
Because Tapestry is decentralized, and no single node has a global perspective on the network, the root node for an object must be chosen in a globally consistent and deterministic fashion. The simplest choice of root node is the one which shares the same hash value as the object. However, it is common for there to be fewer nodes in the network than possible values in the space of hash values.
2.2.1 Selecting Root Nodes
For this reason, a “root” node for an object is chosen to be the one with a hash value that shares the most prefix digits with the object’s hash value.
Specifically, two hash values share a prefix of length \( n \) if, from left to right, \( n \) sequential digits starting from the leftmost digit are the same. For example, in a network with nodes 1a9c, 28ac, 2d39, and ae4f, the root node for an object with the hash 280c is 28ac and the hashes share a prefix of length 2, because the other nodes share a prefix of length 1 or 0. However, given this definition, the choice of root node (from the same set of nodes as is in the previous example) would be ill-defined for an object with the hash 2c4f because it shares a prefix of length one with both 28ac and 2d39. Therefore, we need a more general way of choosing the root node when a single match is unavailable.
Starting with the value \( v \) of the leftmost digit, we take the set of nodes that have this value as the leftmost digit of their hashes as well. If no such set of nodes exists, it is necessary to deterministically choose another set. To do this, we can try to find a set of nodes that share the value \( v + 1 \) as their hash’s leftmost value. Until a non-empty set of nodes is found, the value of the digit we are searching with increases (modulo the base of the hash-value). Once a set has been found, the same logic can be applied for the next digit in the hash, choosing from the set of nodes we identified with the previous digit. When this algorithm has been applied for every digit, only one node will be left and that node is the root.
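The digit-by-digit selection above can be sketched in Go as follows. This is an illustration only: it assumes a non-empty network, a global view of all node IDs, and a hypothetical `ID` representation (one base-16 digit per element), whereas a real Tapestry node only ever applies this logic to the IDs it knows about.

```go
type ID []uint8 // hypothetical representation: one base-16 digit per element

// chooseRoot applies the digit-by-digit rule: at each position, keep the
// candidates matching the object's digit, or the next digit value (mod 16)
// for which any candidate matches.
func chooseRoot(nodes []ID, object ID) ID {
	candidates := nodes
	for pos := 0; pos < len(object); pos++ {
		for offset := 0; offset < 16; offset++ {
			d := uint8((int(object[pos]) + offset) % 16)
			var next []ID
			for _, n := range candidates {
				if n[pos] == d {
					next = append(next, n)
				}
			}
			if len(next) > 0 {
				candidates = next
				break
			}
		}
	}
	return candidates[0] // IDs are unique, so exactly one candidate remains
}
```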
### 2.2.2 Example: A Tapestry Network’s Objects and their Roots
To clarify, suppose a Tapestry network contains only the nodes 583f, 70d1, 70f5, and 70fa.
To find the root node for an object with a hash of 60f4, we first consider the leftmost digit’s value, 6. None of the network nodes share this leftmost value, so we check if any network nodes have the leftmost value 6 + 1 = 7. 70d1, 70f5 and 70fa do, so we take this set and go to the next digit. The object hash’s next digit, 0, is shared with all the network nodes in the current set, so we go to the next digit. The third digit of the object’s hash, f, is shared with only 70f5 and 70fa so we take this smaller set and go to the last digit. The object’s hash has a final digit of 4, which doesn’t match either 5 or a, so we try with 4 + 1 = 5. This matches the network node with a hash of 70f5, so this node is the object’s root node. If the object’s hash had been 60f6, its root node would be the network node with the hash 70fa.
The table below lists hypothetical object hashes and their corresponding root nodes within this network.
<table>
<thead>
<tr>
<th>Object Hash</th>
<th>3f8a</th>
<th>520c</th>
<th>58ff</th>
<th>70c3</th>
<th>60f4</th>
<th>70a2</th>
<th>6395</th>
<th>683f</th>
<th>63e5</th>
<th>63e9</th>
<th>beef</th>
</tr>
</thead>
<tbody>
<tr>
<td>Root Node</td>
<td>583f</td>
<td>583f</td>
<td>583f</td>
<td>70d1</td>
<td>70f5</td>
<td>70d1</td>
<td>70d1</td>
<td>70d1</td>
<td>70f5</td>
<td>70fa</td>
<td>583f</td>
</tr>
</tbody>
</table>
### 2.3 Tapestry Node State
Some state is maintained on each Tapestry node to carry out its ability to route to nodes and lookup objects.
#### 2.3.1 Routing Tables
In order to allow nodes to locate objects stored at other nodes, each node maintains a routing table that stores references to a subset of the nodes in the network.
A routing table has several levels; one level for each digit of the node’s ID. In a Tapestry mesh that uses 40-digit IDs, the routing table would thus have 40 levels. The level represents the size of the shared prefix with the local node; that is, a node on level $n$ of the routing table shares a prefix of length $n$ with the local node.
Each level of the table consists of several slots; one for each unique digit. In a tapestry mesh that uses base-16 digits, each level of the routing table would therefore have 16 slots. A node in the $d$th slot of the $n$th level has $d$ as its $n$th digit (keep in mind that $n$ is zero-indexed!). For example, in the table given, the entry at level 1 in slot 6 (362d) shares a prefix of length 1 (because it’s on level 1) and has 6 as its first digit (because it’s in slot 6). If there had been an ID of 3782, then on level 1 in slot 7 we would see this ID.
In summary, a routing table entry is defined by two numbers: its level and slot. The level represents the length of the shared prefix with the local node, and the slot represents the first digit of the remote node after the shared prefix.
An example routing table for a node with the hash 3f93 is shown below:
<table>
<thead>
<tr>
<th>Level</th>
<th>0</th>
<th>1</th>
<th>2</th>
<th>3</th>
<th>4</th>
<th>5</th>
<th>6</th>
<th>7</th>
<th>8</th>
<th>9</th>
<th>a</th>
<th>b</th>
<th>c</th>
<th>d</th>
<th>e</th>
<th>f</th>
</tr>
</thead>
<tbody>
<tr>
<td>0</td>
<td>1c42</td>
<td>2fe4</td>
<td>3f93</td>
<td>437e</td>
<td>5c2a</td>
<td>65bb</td>
<td>705b</td>
<td>8887</td>
<td>93cb</td>
<td>c3ca</td>
<td>d340</td>
<td>e9ce</td>
<td>f0d7</td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td>1</td>
<td>309c</td>
<td>362d</td>
<td>3c6f</td>
<td>3f93</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td>2</td>
<td>3f93</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td>3</td>
<td>3f93</td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
<td></td>
</tr>
</tbody>
</table>
If the local node knows about more than one node that fits into a cell, the one that is stored at each entry in the routing table is the closest one to the local node. In a production implementation, distance between nodes is measured by the network latency between them, but for this project, we arbitrarily define distance as the absolute value of the difference between hashes.
In addition, for robustness and redundancy, each slot of the routing table actually stores multiple references, typically three. The first one is the closest node, and the others are backups in case the first one fails to respond to requests. These are sorted by distance to the local node.
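As a small illustration of where a remote node belongs in the table (a sketch reusing the hypothetical `ID` representation from the earlier root-selection sketch, not the stencil's actual types): the level is the shared prefix length and the slot is the remote node's next digit.

```go
// SharedPrefixLength counts how many leading digits a and b have in common.
func SharedPrefixLength(a, b ID) int {
	n := 0
	for n < len(a) && n < len(b) && a[n] == b[n] {
		n++
	}
	return n
}

// placement reports the (level, slot) at which a remote node would be stored
// in local's routing table. It assumes remote is different from local, so the
// shared prefix is strictly shorter than the ID length.
func placement(local, remote ID) (level, slot int) {
	level = SharedPrefixLength(local, remote)
	slot = int(remote[level]) // remote's first digit after the shared prefix
	return level, slot
}
```

For example, with a local node of 3f93 and a remote node of 362d, the shared prefix length is 1 and the next digit is 6, which matches the (level 1, slot 6) placement shown in the table above.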
### 2.3.2 Backpointer Tables
For additional connectivity, each node also stores backpointers in addition to its routing table. Backpointers are references to every node in the network which refers to the local node in their own routing tables. These will become useful in maintaining routing tables in a dynamic network. When the local node adds or removes a remote node from its routing table, it notifies the remote node, who will then update its backpointer table.
### 2.4 Prefix Routing
The routing table at any given node does not store a reference to every other node in the network. Therefore, in order to find the root node for a particular ID, several nodes may be traversed until one is found that can definitively identify itself as the root node. The search for a root node may begin anywhere.
Using the same logic that is used to choose a root node globally from the network, a node that matches some number of digits from the object's hash may be chosen from the routing table. In turn, the selected node's routing table is inspected and the next node in the route to the root is chosen. At each successive node in the route, the number of digits that match the object's hash value increases until the last digit has been matched and the root node has been reached. This type of routing is called "prefix routing", and the maximum number of hops required to reach the destination node is equal to the number of digits required to represent a hash value.
```go
// Simplified sketch of prefix routing; the stencil's FindRoot also tracks the
// routing level and any failed nodes to remove.
func (node *Node) FindRoot(id ID) RemoteNode {
	nextHop := node.table.getNextHop(id)
	if nextHop == node.RemoteSelf() { // hypothetical accessor for the local node's RemoteNode
		return nextHop // no closer node exists, so the local node is the root
	}
	return nextHop.FindRoot(id) // recursive call on the next hop
}
```
In the version of Tapestry presented in the paper, when the location of an object is published to the object's root node, the nodes encountered along the path to the root node also have the location information for that object cached at them. This allows object lookups to finish in fewer hops from many starting locations in the network. Your implementation is not required to have this feature, but it might be the starting point for an A-level extension to PuddleStore.
### 2.5 Publishing and Retrieving Objects
When an object is "published" by a node, that node routes towards the root node for the key, then registers itself on that node as a location of the key. Multiple nodes can publish the same object. A Tapestry client wishing to look up the object will first route to the root node of the object. The root node then informs the client of which Tapestry nodes have published the object. The client then directly contacts one or more of those publishers to retrieve the actual object data.
### 2.6 Adding Tapestry Nodes
To accommodate an increased workload, it is possible to add nodes to a Tapestry network. To perform this operation, the new node is assigned its ID and then routes towards the root node for that ID. The root node initiates the transfer of all keys that should now be stored on the new node. The new node then iteratively traverses backpointers, starting from the root node, to populate its own routing table.
#### 2.6.1 Acknowledged Multicast
In Tapestry, when a new node joins the network, other nodes transfer object references to it, i.e. it takes over and becomes the root for objects whose IDs now closely match its ID. It is possible for multiple different nodes to be storing references that should now be transferred to the new node.

For example, suppose our Tapestry currently has nodes a23b, 285b and 289a, and our new node is 221f. The root for the new node is thus 285b. However, both 285b and 289a could be storing references that should be transferred to the new node. For example, 225f would be stored on 285b, and object 229f would be stored on 289a.
In general, if the new node has a shared prefix of length \( n \) with the current root for its ID, then any other node that also has a shared prefix of length \( n \) with the new node could have relevant references. Such nodes are called need-to-know nodes.
To deal with this, the root node performs an acknowledged multicast when it is contacted by the new node. The multicast eventually returns the full set of need-to-know nodes from the Tapestry.
The multicast is a recursive call — the root node contacts all nodes on levels $\geq n$ of its routing table; those nodes contact all nodes on levels $\geq n+1$ of their routing tables; and so on. A node that is contacted during the multicast will initiate a background transfer of relevant object references to the new node, trigger a multicast to the next level of its routing table, then merge and return the resulting lists of nodes (removing duplicates) while adding the new node to its routing table.
```python
def AddNodeMulticast(self, newNode, level):
    targets = self.routingtable.get(level)  # must include the local node
    results = []
    for target in targets:
        results.append(target.AddNodeMulticast(newNode, level + 1))
    self.addRoute(newNode)
    transferRelevantObjects(newNode)  # background transfer of relevant object references
    return merge(results, targets)
```
`AddNodeMulticast` should be called with all levels $\geq$ the level provided. The pseudocode above relies on RPC calls over the local node (ourselves) to continue executing `local.AddNodeMulticast(newNode, level+1)`. This leads to unnecessary network messages and makes the system slower and less robust. You could try to use recursive function calls instead of gRPC calls on the local node.
### 2.6.2 Backpointer Traversal
Backpointer traversal is used to find the best/closest set of nodes to fill the routing table with. This algorithm is different from the one in lecture, but as we already require you to use backpointers for the graceful exit, we also require you to use the backpointer based algorithm to fill the routing table while adding a node.
Once the multicast has completed, the root node returns the list of need-to-know nodes to the new node. The new node uses this list as an initial *neighbor set* to populate its routing table. The node iteratively contacts the need-to-know nodes, asking for their backpointers. Once the node has compiled backpointers from each of its need-to-know nodes, it is necessary to remove duplicate nodes, and trim the list of nodes to visit to $K$, as the number of nodes we search can get very large, but we only care about the closest few nodes. We give you the constant $K$ in `node_init.go`.
```python
def TraverseBackpointers(self, neighbors, level):
    while level >= 0:
        removeDuplicatesAndTrimToK(neighbors)  # de-duplicate and keep only the K closest nodes
        nextNeighbors = list(neighbors)  # copy, so we do not grow the list we are iterating over
        for neighbor in neighbors:
            nextNeighbors.extend(neighbor.GetBackpointers(level))
        self.AddAllToRoutingTable(nextNeighbors)
        neighbors = nextNeighbors
        level -= 1
```
2.7 Graceful Exits
A good implementation of Tapestry is extremely fault tolerant, so a node could leave without notifying any other nodes. However, a node can gracefully exit the Tapestry, too. When a node gracefully exits, it notifies all of the nodes in its backpointer table of the leave. As part of this notification, it consults its own routing table to find a suitable replacement for the routing tables of all the other nodes.
Objects stored at exiting nodes will be lost and no objects are transferred.
2.8 Fault Tolerance
The Tapestry network is designed to be extremely fault tolerant. As with any distributed system, some nodes may become unavailable unexpectedly. The mechanisms described in this section ensure that there is no single point of failure in the system. **You are expected to cleanly handle errors in this project, including the sudden crashing of nodes without cleanup.**
In this project, any time you make a remote method call you must check if an error is returned, and handle the error appropriately.
Note that when a node crashes, the objects stored at that node will be lost. However, it is the responsibility of the client application that uses the Tapestry network to put duplicate objects across the network. You don’t need to worry about preventing data loss in this case.
2.8.1 Errors While Routing
When routing towards a root node, it is possible that a communication failure with any of the intermediate nodes could impede the search. For this reason, routing tables store lists of nodes rather than a single node at each slot. If a failed node is encountered, the node that is searching can request that the failed node be removed from any routing tables it encounters, and resume its search at the last node it communicated with successfully. If the last node it communicated with successfully is no longer responding, it should communicate with the last successful node before that.
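A rough sketch of this retry behavior is shown below (names are illustrative; in the stencil, the nodes to remove are reported through the `toRemove *NodeSet` result of `FindRoot`):

```go
// tryNextHops walks the candidate nodes stored in a routing-table slot in
// order, recording unreachable nodes so they can be removed from routing
// tables the search passes through. isReachable stands in for whatever
// liveness check the implementation uses (e.g., the error result of an RPC).
func tryNextHops(candidates []RemoteNode, badNodes *NodeSet, isReachable func(RemoteNode) bool) (RemoteNode, bool) {
	for _, n := range candidates {
		if !isReachable(n) {
			badNodes.Add(n) // hypothetical method: mark n for removal
			continue
		}
		return n, true
	}
	return RemoteNode{}, false // fall back to the last node we reached successfully
}
```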
2.8.2 Loss of Root Node
Another potential loss from failure is the root node data. Two measures are taken to minimize the impact of failed root nodes.
First, published objects continually republish themselves at regular intervals. This ensures that if a root node goes down, a new root node will eventually take its place. Unfortunately, there will still be a period of time in which the location information for these objects is unavailable.
Second, applications built on top of Tapestry typically store each key multiple times with different salts, thereby offering backup locations when searching for an object. You do not have to implement salting in this assignment.
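A sketch of how the first measure (periodic republishing) might be structured follows; the interval constant and the helper names are assumptions for illustration, not the stencil's exact names.

```go
import "time"

// republishLoop periodically re-registers key with its current root, so that a
// replacement root eventually learns about the object if the old root fails.
// RepublishInterval, findRootForKey, RegisterRPC, and local.self are assumed
// names used only for this sketch.
func (local *Node) republishLoop(key string, done chan bool) {
	ticker := time.NewTicker(RepublishInterval)
	defer ticker.Stop()
	for {
		select {
		case <-done: // the application stopped advertising this key
			return
		case <-ticker.C:
			root, err := local.findRootForKey(key)
			if err != nil {
				continue // try again at the next tick
			}
			root.RegisterRPC(key, local.self)
		}
	}
}
```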
2.8.3 Loss of Replicas
Finally, applications built on top of Tapestry might wish to ensure that an object remains available at all times, even if the node that published it fails.
In the “Publishing and Retrieving Objects” section, it was mentioned that multiple tapestry nodes can publish the same object. This means that client applications can learn of multiple nodes storing a particular object. Thus, if the object becomes unavailable at one of these nodes, the client can simply contact another one of the nodes. On the root node for a key, when a long enough period of time has elapsed without receiving an object republish notification from a publishing node, the object expires and is removed.
2.8.4 Miscellaneous
The cases listed above are the common issues which can arise due to network errors. There are other more obscure ways in which roots may become unreachable for a short time when nodes join or fail in a certain order. Tapestry’s method for dealing with this is to assume that there are enough salted hash values for a given object that not all salts will become unreachable due to such errors, and those which do become unreachable will be corrected when the replica performs its periodic republishing.
3 The Assignment
A large amount of support code has been given to you for this assignment. All of the required data structures are implemented in the support code. The code you will write is related to routing in the network, storing and retrieving object location data, and coping with failures. Please become very familiar with all of the support code before beginning to implement any of the features. The comments for each method that you will fill in should give you a good idea of how to proceed.
3.1 Function Stubs
The code you must write is marked with `// TODO students should implement this` comments and is spread across the Go files in the `tapestry` directory. Feel free to add helper functions. You must implement the following functions:
- `id.go`
  - `func SharedPrefixLength(a ID, b ID) int`
  - `func (id ID) BetterChoice(first ID, second ID) bool`
  - `func (id ID) Closer(first ID, second ID) bool`
- `routing_table.go`
  - `func (t *RoutingTable) Add(node RemoteNode) (added bool, previous *RemoteNode)`
  - `func (t *RoutingTable) Remove(node RemoteNode) (wasRemoved bool)`
  - `func (t *RoutingTable) GetLevel(level int) (nodes []RemoteNode)`
  - `func (t *RoutingTable) FindNextHop(id ID, level int) (node RemoteNode)`
- `node_init.go`: functions for creating a Tapestry node and joining an existing network
  - `func (local *Node) Join(otherNode RemoteNode) (err error)`
  - `func (local *Node) AddNodeMulticast(newnode RemoteNode, level int) (neighbors []RemoteNode, err error)`
  - `func (local *Node) addRoute(node RemoteNode) (err error)`
- `node_core.go`: functions for publishing and looking up objects in the network
  - `func (local *Node) Publish(key string) (done chan bool, err error)`
  - `func (local *Node) Lookup(key string) (nodes []RemoteNode, err error)`
  - `func (local *Node) FindRoot(id ID, level int) (root RemoteNode, toRemove *NodeSet, err error)`
  - `func (local *Node) Register(key string, replica RemoteNode) (isRoot bool, err error)`
  - `func (local *Node) Fetch(key string) (isRoot bool, replicas []RemoteNode, err error)`
  - `func (local *Node) Transfer(from RemoteNode, replicamap map[string][]RemoteNode) (err error)`
  - `func (local *Node) findRootOnRemoteNode(start RemoteNode, id ID) (RemoteNode, error)`
- `node_exit.go`: functions for notified leave
  - `func (local *Node) Leave() (err error)`
  - `func (local *Node) NotifyLeave(from RemoteNode, replacement *RemoteNode) (err error)`
A partial implementation of Join in node_init.go is provided to demonstrate invocation of local and remote methods, and error handling.
3.2 Provided Functions
The TAs have provided you with a number of supporting data structures. Some of them are listed below.
<table>
<thead>
<tr>
<th>struct</th>
<th>Backpointers</th>
<th>BlobStore</th>
<th>LocationMap</th>
</tr>
</thead>
<tbody>
<tr>
<td>Functions</td>
<td>- Add</td>
<td>- Get</td>
<td>- Register/RegisterAll</td>
</tr>
<tr>
<td></td>
<td>- Remove</td>
<td>- Put</td>
<td>- Unregister/UnregisterAll</td>
</tr>
<tr>
<td></td>
<td>- Get</td>
<td>- Delete</td>
<td>- Get</td>
</tr>
<tr>
<td></td>
<td></td>
<td>- DeleteAll</td>
<td>- GetTransferRegistrations</td>
</tr>
</tbody>
</table>
3.3 Remote Procedure Call (RPC)
RPC is a technique that allows programs to call procedures on other machines. When one machine calls a procedure on another machine using RPC, the execution is suspended on the first machine until the call on the second machine returns and its return value is received by the original machine. In the stencil code, tapestry_rpc_client.go contains the functions that handle calling procedures on other nodes. tapestry_rpc_server.go contains the local implementations of the functions being called on a machine.
RPCs for Tapestry are handled by the gRPC library which runs on top of Protocol Buffers, a way of generating communication files from a .proto file. You will find this code pre-generated for you in tapestry_rpc.pb.go, and the source file it was generated from in tapestry_rpc.proto. In future projects, you will be asked to do more of this yourself, so it is worth taking a glance at both these files.
We divide RPCs into two categories, client functions and server functions. Server functions expose local methods to other nodes, and are listed in tapestry_rpc_server.go. Client functions handle connection to a remote node and calling a function on it, and are listed in tapestry_rpc_client.go. To expose local functions as RPCs, your Node object needs to implement the TapestryRPCServer interface generated by gRPC. These methods follow a very particular signature:
```go
func (local *Node) XxxCaller(ctx context.Context, req *Request) (*Reply, error)
```
We’ve adopted a convention of using the suffix “Caller” to differentiate these methods from the other methods of Node. You are responsible for implementing around half of them in tapestry_rpc_server.go, but they have all been outlined for you there. Each of these Caller functions acts on the local node after receiving a request from a remote node. So each needs to:
1. Unpack the arguments from its request struct.
2. Call the corresponding local method.
3. Pack the results into a reply struct.
4. Return the reply struct and any error.
Client functions are methods of the RemoteNode struct, and handle invoking a method over a network connection. We use the “RPC” suffix to denote these methods, and you will also be implementing about half of them in tapestry_rpc_client.go. Each client function needs to do the following:
1. Pack its arguments into the appropriate request struct.
2. Obtain a network connection to the remote node.
3. Invoke the method over the network connection, and receive a reply struct and an error.
4. Unpack the reply struct, return these values and any error and if there was an error, close the network connection.
We’ve given you a ClientConn method of RemoteNode that will establish or reuse a connection to a remote node, and return a TapestryRPCClient that will let you call the “Caller” functions, as
well as several other utility functions in `tapestry_rpc_client.go` and `tapestry_rpc_server.go` to convert between message types and to error check RPCs. Feel free to use these in your implementations, and to copy the patterns from the other RPC client and server functions.
These `XxxRPC` and `XxxCaller` functions shouldn’t contain any application logic inside them; all they should need to handle is unpacking arguments and passing them to a different function. For instance: one node invokes `AddNodeRPC`, which obtains a client connection and calls `AddNodeCaller`. On the remote node, a new Go routine begins `AddNodeCaller`, which calls `AddNode` on its local node. In general, `<function>RPC` calls `<function>Caller`, which calls `<function>`.
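As a concrete sketch of this division of labor, here is what a Caller/RPC pair for `Fetch` might look like. The request and reply message types and the converter helpers are assumptions made for illustration (the real ones are generated in `tapestry_rpc.pb.go` and provided in the stencil); only the overall pack/call/unpack shape is the point.

```go
// Server side: unpack the request, call the local method, pack the reply.
func (local *Node) FetchCaller(ctx context.Context, req *FetchRequest) (*FetchReply, error) {
	isRoot, replicas, err := local.Fetch(req.Key)
	if err != nil {
		return nil, err
	}
	return &FetchReply{IsRoot: isRoot, Replicas: toNodeMsgs(replicas)}, nil
}

// Client side: pack arguments, obtain (or reuse) a connection, invoke the
// remote Caller, and unpack the reply, dropping the connection on error.
func (remote *RemoteNode) FetchRPC(key string) (bool, []RemoteNode, error) {
	client, err := remote.ClientConn()
	if err != nil {
		return false, nil, err
	}
	reply, err := client.FetchCaller(context.Background(), &FetchRequest{Key: key})
	if err != nil {
		remote.closeConnection() // hypothetical helper: discard the bad connection
		return false, nil, err
	}
	return reply.IsRoot, toRemoteNodes(reply.Replicas), nil
}
```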
### 3.4 A Note About Context
All of the functions generated by gRPC take a `context` parameter, which we aren’t using for this project. Feel free to use `context.Background()` whenever you need to provide one.
### 4 Demo
A TA implementation of Tapestry is available at
```
/course/cs1380/pub/tapestry/{linux,darwin,windows}/tapestry
```
Once your implementation is sufficiently functional, you should test with the TA implementation for interoperability.
### 5 Testing
We expect to see several good test cases. This is going to be worth a portion of your grade. Exhaustive Go tests are sufficient. You can check your test coverage by using Go’s coverage tool.
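For example, a unit test for `SharedPrefixLength` might look like the following. This is only a sketch: the way an `ID` is built from a hex string here (`idFromHex`) is assumed, so adapt it to whatever constructors the stencil provides.

```go
import "testing"

func TestSharedPrefixLength(t *testing.T) {
	a := idFromHex("3f93") // hypothetical helper that builds an ID from hex digits
	b := idFromHex("3f40")
	if got := SharedPrefixLength(a, b); got != 2 {
		t.Errorf("SharedPrefixLength(3f93, 3f40) = %d, want 2", got)
	}
}
```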
A number of Tapestry constants are defined in `node_init.go`. You can change these constants during development to simplify debugging. For your own unit tests, you may assume we will use the default values specified in `node_init.go`. However, our own testing suite may use different values for these parameters, so do not hard-code values in your implementation.
When writing your unit tests, you may run into an error from gRPC along the lines of `socket: too many open files`. Each network connection your computer maintains uses up a file descriptor, as if you had opened a file for reading or writing, and there’s a limit to how many you are allowed to have open at once. On macOS, this limit is relatively low, at 256 per terminal window. If your tests give you the `Too many open files` error, try increasing the limit with
```
ulimit -n <amount>
```
and document this need in your README.
The `tapestry` CLI is a Go program that serves as a console for interacting with Tapestry, creating nodes, and querying state on the local node. We have kept the CLI simple, but you are welcome to improve it as you see fit.
You can build and run the CLI as follows:
```
$ cd $GOPATH/src/github.com/brown-csci1380-s20/<your-repo>/cmd/tapestry
$ go install
$ tapestry
```
Note: if running `tapestry` doesn’t run the CLI, you can run `$GOPATH/bin/tapestry`.
You can pass the following arguments:
- `-p(ort) <int>`: The port to start the server on. By default selects a random port.
- `-c(onnect) "host:port"`: Address of an existing Tapestry node to connect to
- `-d(ebug)=true`: Enable or disable debug
You have the following set of commands built into the CLI:
- `table`
- Print this node’s routing table
- `backpointers`
- Print this node’s backpointers
- `objects`
- Print the object replicas stored on this node
- `put <key> <value>`
- Stores the provided key-value pair on the local node and advertises the key to the Tapestry
- `lookup <key>`
- Looks up the specified key in the tapestry and prints its location
- `get <key>`
- Looks up the specified key in the tapestry, then fetches the value from one of the returned replicas
- `remove <key>`
- Remove the value stored locally for the provided key and stops advertising the key to the Tapestry
- `list`
- List the keys currently being advertised by the local node
- `debug on|off`
- Turn debug messages on or off
- `leave`
- Instructs the local node to gracefully leave the Tapestry
- `kill`
- Leaves the tapestry without graceful exit
- `exit`
- Quit the CLI
If you are confused about the behavior of any of these commands, feel free to refer to the demo at /course/cs1380/pub/tapestry.
You are encouraged, but not required, to write client applications (that is, applications that use your Tapestry implementation to store objects), using the tapestry/client package and its provided methods, to test your implementation.
6 Getting Started
Remember, if you write code on a department machine, you must use `go1.13` instead of just `go` (i.e., `go1.13 install` or `go1.13 test`). Add `alias go=go1.13` to your `~/.bashrc` for convenience.
go1.13 get -u -d github.com/brown-csci1380-s20/tapestry-<TeamName>
Use the command above to get your repo from GitHub Classroom. Fix the import path in `cmd/tapestry/main.go`. Use `go1.13 get -u ./...` to fetch all dependencies. Before you get started, please make sure you have read over, set up, and understood all the support code.
We highly encourage you to work in groups of two, but we understand that in some situations a group of three may be necessary. If you work in a group of three, you must implement an additional feature. Additional features include publishing path caching, hotspot caching, hash salting, object re-replication, and erasure coding. Stop by TA hours to learn more about what these are! If you work in a group of three, you must contact the TAs and let them know you intend to work in a group of three, and if you will be implementing additional features.
Working alone is not allowed for this project. If you do not have a partner for any reason, please attempt to find one through the Piazza partner search functionality, and if this is not successful please email the TAs for assistance.
7 Handing in
You need to write a README documenting any bugs in your code, any extra features you added, and anything else you think the TAs should know about your project. Document the test cases you created and briefly describe the scenarios you covered.
When you are done, push to your GitHub repo. We will pull your latest commit in the master branch for grading.
Please let us know if you find any mistakes, inconsistencies, or confusing language in this or any other CS1380 document by filling out the anonymous feedback form:
http://cs.brown.edu/courses/cs138/s20/feedback.html
Heuristic-driven Techniques for Test Case Selection
J.C. Burguillo, M. Llamas, M.J. Fernández 1
Departamento de Ingeniería Telemática
Universidad de Vigo
Vigo, Spain
T. Robles 2
Departamento de Ingeniería Telemática
Universidad Politécnica de Madrid
Madrid, Spain
Abstract
We propose an approach to testing that combines formal methods with practical criteria, close to the testing engineer’s experience. It can be seen as a framework to evaluate and select test suites using formal methods, assisted by informal heuristics. We also introduce the formalism of enriched transition systems to store information obtained during the testing phase, and to adapt classical test generation techniques to take advantage of the possibilities of the new formalism.
1 Introduction
In the context of Protocol Engineering, test generation algorithms are used to obtain a set of test cases from a given specification, intended to detect errors in non-conforming implementations. However, the number of test cases needed to guarantee an exhaustive coverage may be too large, even infinite. Therefore, execution of all potential test cases may be infeasible. As a consequence, in practical cases it is necessary to select a subset of all possible test cases prior the test execution phase. The reduction of the initially generated test case set is known in the literature as test case selection.
Test case selection should not be performed at random. An appropriate strategy should be applied to obtain a valuable test case collection, in the sense that it should detect as many non-conforming implementations as possible. For software testing, some criteria are available, like the division into equivalence partitions [12] or the test proposal selection in protocol testing [9].
1 Email: [jrial,martin,manolo]@det.uvigo.es
2 Email: robles@dit.upm.es
©2002 Published by Elsevier Science B. V.
On the other hand, test case selection should not be based only on the system's formal specification. To select the most valuable test cases, additional information, external to the corresponding specification formalism, should also be used. Such information may include the most frequent errors committed by implementors, the most harmful errors, features that are difficult to implement, critical basic functionality, etc.
In the field of Formal Description Techniques some proposals have been made to address the test case selection problem, key results may be found in [6,15,16,17]. T. Robles [13] introduced concepts for risk, cost and efficiency for a test case collection, which are revisited in this paper. This approach is based on the estimation, from the testing engineer’s experience, of the risk involved when testing a system implementation. It formalises and simplifies the selection of test cases, and can be applied to most practical problems. This approach is similar to that presented in [15].
Thus, this paper proposes a method to evaluate and select test cases based on practical criteria, close to the testing engineer's experience. Our aim is to provide implementable and computationally feasible criteria. Additionally, we want the proposed methodology to be easily configurable by testing engineers, who can contribute their experience through heuristics that focus testing on key aspects of a system, or on specific parts of a system that are more prone to errors.
The next two sections discuss the theoretical background that serves as the foundation of our experience. Section 2 presents some general definitions and notation about the supporting representation framework and formal testing, and Section 3 presents our approach to test case selection. Finally, Section 4 offers a summary of the work described and some conclusions.
2 General Definitions and Notation
Along the next paragraphs we discuss basic theoretical concepts and notation related to testing and test case selection. First, we briefly introduce Labelled Transition Systems. Then, we provide some basic concepts from formal testing. After this, we introduce risk, coverage, cost and efficiency as the supporting heuristics to assist the testing engineer along test case selection.
2.1 Labelled Transition Systems
Labelled Transition Systems (LTS) will be the basic model to describe the behaviour of processes, such as specifications, implementations and tests.
Definition 1 A labelled transition system is a 4-tuple $<\text{Stat},L,T,s_0>$ where $\text{Stat}$ is a countable, non-empty set of states; $L$ is a countable set of
labels; $T \subseteq \text{Stat} \times (L \cup \{i\}) \times \text{Stat}$ is the countable set of transitions, where $i$ denotes a special internal action, referred to as $\tau$ in some models [11]; and $s_0 \in \text{Stat}$ is the initial state.
An element $(s, \mu, s') \in T$ is represented as $s-\mu \rightarrow s'$. We use the following notations (sets) derived (constructed) from the transition relation:
- $s = \epsilon \Rightarrow s'$: $s = s'$ or $s -i\rightarrow \cdots -i\rightarrow s'$
- $s = a \Rightarrow s'$: $\exists s_1, s_2 \in \text{Stat}$ such that $s = \epsilon \Rightarrow s_1 -a\rightarrow s_2 = \epsilon \Rightarrow s'$
- $s = \sigma \Rightarrow s'$: $\exists s_1, \ldots, s_{n-1} \in \text{Stat}$ and a trace $\sigma = a_1 \ldots a_n$ such that $s = a_1 \Rightarrow s_1 = a_2 \Rightarrow \cdots \Rightarrow s_{n-1} = a_n \Rightarrow s'$
- $s = \sigma \Rightarrow$: $\exists s' \in \text{Stat}$ such that $s = \sigma \Rightarrow s'$
- $s \neq \sigma \Rightarrow$: there is no $s' \in \text{Stat}$ such that $s = \sigma \Rightarrow s'$
- $\text{Tr}(P) = \{\sigma \in L^* \mid P = \sigma \Rightarrow\}$
- $\text{Init}(P) = \{a \in L \mid P = a \Rightarrow\}$
- $P \text{ after } \sigma = \{s' \mid P = \sigma \Rightarrow s'\}$
- $\text{Ref}(P, \sigma) = \{A \subseteq L \mid \exists s' \in (P \text{ after } \sigma) \text{ such that } \forall a \in A,\ s' \neq a \Rightarrow\}$
- $\text{Path}(P) = \{\varphi \in T^* \mid P -\varphi\rightarrow s',\ s' \in \text{Stat}\}$
The symbol $L^*$ (respectively $T^*$) denotes the set of strings (sequences, traces) constructed using elements from $L$ (respectively $T$). A trace $\sigma \in L^*$ is a finite sequence of observable actions over $L$, where $\epsilon$ denotes the empty sequence. The special label $i \notin L$ represents an unobservable, internal action, used to model non-determinism. Thus $= \epsilon \Rightarrow$ represents a null transition or a sequence of transitions including only internal actions (i.e. traces do not have internal actions). We use $t \ll \varphi$ to denote that transition $t$ appears in the path $\varphi$.
We represent an LTS by a tree or a graph, where nodes represent states and edges represent transitions. Given an LTS $P =< \text{Stat}, L, T, s_0 >$, we write $P = \sigma \Rightarrow$ to represent transitions from the initial state of $P$; this should be read as syntactic sugar. When a given state does not accept further actions (i.e. a deadlock state), we label it $\text{stop}$.
$\text{Tr}(P)$ is the set of traces accepted by process $P$, $\text{Init}(P)$ the set of labels from $L$ accepted by $P$, and $\text{Ref}(P, \sigma)$ is the set of refusals of $P$ after trace $\sigma$. Finally, $\text{Path}(P)$ is the set of transition sequences accepted by $P$. We denote the class of all labelled transition systems over $L$ by $\text{LTS}(L)$. LTS model the semantics of languages used to describe distributed and concurrent systems like LOTOS [8], CSP [1] or CCS [11], among others.
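To make the notation above concrete, the following is a minimal Python sketch of a labelled transition system with the derived sets computed by exhaustive search. It assumes a finite state space and uses the string "i" for the internal action; the class and method names are our own illustrative choices, not part of any existing library.

```python
class LTS:
    """Labelled transition system <Stat, L, T, s0>; "i" marks the internal action."""
    def __init__(self, states, labels, transitions, s0):
        self.states = set(states)
        self.labels = set(labels)            # observable labels only
        self.transitions = set(transitions)  # triples (state, action, state)
        self.s0 = s0

    def _step(self, state, action):
        return {t for (s, a, t) in self.transitions if s == state and a == action}

    def eps_closure(self, state):
        """States reachable via internal actions only (s =eps=> s')."""
        seen, todo = {state}, [state]
        while todo:
            for t in self._step(todo.pop(), "i"):
                if t not in seen:
                    seen.add(t)
                    todo.append(t)
        return seen

    def after(self, trace):
        """P after sigma: states reachable from the initial state via trace sigma."""
        current = self.eps_closure(self.s0)
        for a in trace:
            current = {c for s in current for t in self._step(s, a)
                         for c in self.eps_closure(t)}
        return current

    def init(self, state):
        """Init(s): observable actions offered, possibly after internal moves."""
        return {a for s2 in self.eps_closure(state)
                  for (src, a, _) in self.transitions if src == s2 and a != "i"}

    def refuses(self, trace, actions):
        """True iff `actions` is in Ref(P, trace): some reachable state refuses them all."""
        return any(set(actions).isdisjoint(self.init(s)) for s in self.after(trace))

# A system that may internally move to a state refusing "a", so {"a"} is a refusal after <>.
p = LTS(states={0, 1, 2}, labels={"a"},
        transitions={(0, "a", 1), (0, "i", 2)}, s0=0)
print(p.refuses((), {"a"}))   # True
```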
2.2 Formal Testing Concepts
Concerning testing, it is important to define a relation to model the conformance of an implementation with its specification. There are several relations in the literature that may be selected [14]. As we want to compare our framework with other approaches and reuse the existing theory, we selected the conformance relation $\text{conf}$ described in [2,14]. It has the advantage that only the behaviour contained in the specification must be tested, reducing the test space. The relation $\text{conf}$ is defined as follows:
**Definition 2 (Conformance: $\text{conf}$)** Let $I, S \in \text{LTS}(L)$, we say that $I \text{conf} S$ if and only if for every trace $\sigma \in \text{Tr}(S)$ and for every subset $A \subseteq L$ the following proposition holds: If $A \in \text{Ref}(I, \sigma)$ then $A \in \text{Ref}(S, \sigma)$
In case $\sigma \notin \text{Tr}(I)$ we assume $\text{Ref}(I, \sigma)$ is empty.
To decide about the success of a test case we use *verdicts*. Reference [10] proposes three possible verdicts: **Pass** ($\text{pass}$, when the observed behaviour satisfies the test), **Fail** ($\text{fail}$, when the observed behaviour is an invalid specification behaviour) and **Inconclusive** ($\text{inc}$, the observed behaviour is valid so far, but it has not been possible to complete the test). These concepts are formalised below [14]:
**Definition 3 (Test case)** A test case $tc$ is a 5-tuple $< \text{Stat}, L, T, v, s_0 >$, such that $< \text{Stat}, L, T, s_0 >$ is a deterministic transition system with finite behaviour, and $v : \text{Stat} \rightarrow \{\text{fail, pass, inc}\}$ is a function to assign verdicts.
**Definition 4 (Test suite)** A test suite or test collection $ts$ is a set of test cases: $ts \in \text{PowerSet}(\text{LTS}(L))$
The execution of a test case is modelled by the parallel synchronous execution of the test case with the implementation under test (IUT). Such execution continues until there are no more interactions, i.e. a deadlock is reached. Such deadlock may appear because the test case $tc$ reaches a final state, or when the combination of $tc$ and the IUT reaches a state where the actions offered by $tc$ are not accepted.
An implementation passes the execution of a test case if and only if the verdict of the test case is **pass** when reaching a deadlock. As the implementation may have nondeterministic behaviour, different executions of the same test case with the same IUT may reach different final states, and as a consequence different verdicts. An implementation passes a test case $tc$ if and only if all executions of $tc$ produce a **pass** verdict. This means that we should execute every test case several times to obtain a final verdict, ideally an infinite number of times.
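The execution model just described can be sketched as follows. The encoding of a test case as a tree of offered actions, and the stub IUT that sometimes refuses an offered event, are illustrative assumptions; real test execution would interact with an actual implementation.

```python
import random

class TestNode:
    """A state of a deterministic, tree-like test case."""
    def __init__(self, verdict, offers=None):
        self.verdict = verdict        # "pass", "fail" or "inc"
        self.offers = offers or {}    # action -> next TestNode

def run_once(test_root, iut_step):
    """Synchronous execution: stop at the first deadlock and return the verdict there."""
    node = test_root
    while node.offers:
        accepted = iut_step(set(node.offers))   # IUT picks one offered action, or None
        if accepted is None:                    # IUT refuses everything offered: deadlock
            return node.verdict
        node = node.offers[accepted]
    return node.verdict                         # test case has no further interactions

def passes(test_root, iut_step, runs=20):
    """A nondeterministic IUT passes only if *every* observed run yields pass."""
    return all(run_once(test_root, iut_step) == "pass" for _ in range(runs))

def flaky_iut(offered):
    """Illustrative IUT: occasionally refuses the offered event."""
    if random.random() < 0.3:
        return None
    return next(iter(offered))

tc = TestNode("fail", {"a": TestNode("pass")})  # offer "a"; a refusal here means fail
print(passes(tc, flaky_iut))                    # almost always False for this IUT
```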
Test generation algorithms provide test suites from specifications. Ideally, an implementation should pass a test suite if and only if it conforms. Unfortunately, such a test suite would in general have infinitely many test cases. As a consequence, in practice we have to restrict ourselves to finite test suites, which can only detect non-conformance but cannot establish conformance.
Table 1
Error Weighting
<table>
<thead>
<tr>
<th>Target</th>
<th>Parameter</th>
<th>Range</th>
</tr>
</thead>
<tbody>
<tr>
<td>Event</td>
<td>$R_I(e) = E_I(e) \times I_I(e)$</td>
<td>$(0, \infty)$</td>
</tr>
<tr>
<td>Implementation</td>
<td>$R_I(S)$</td>
<td>$(0, \infty)$</td>
</tr>
<tr>
<td>Measurement, Event</td>
<td>$MR_I(e, ts)$</td>
<td>$[0, \infty)$</td>
</tr>
<tr>
<td>Measurement, Implementation</td>
<td>$MR_I(S, ts)$</td>
<td>$[0, \infty)$</td>
</tr>
</tbody>
</table>
**Legend.** $I$: implementation under test; $e$: event in $I$; $ts$: test suite; $S$: specification corresponding to $I$.
Such test suites are called *sound*.
2.3 Risk, Coverage, Cost and Efficiency
In the next few paragraphs we introduce the concepts of error weight (risk), coverage, cost and efficiency, which will support the comparison and selection of test cases to be executed against an implementation.
To analyse the coverage obtained after testing an implementation we have to take into account several factors. On one side, test cases are derived from a formal object, i.e. the formal specification. As a consequence, after testing an implementation we get a specific coverage level for the behaviours in the specification. On the other side, coverage depends on the implementation itself because, given a formal specification, the selected implementation technology (i.e. programming language or programming tools) will be more or less prone to errors.
Table 1 proposes some heuristics to *a priori* evaluate the influence of errors in a given implementation, which will be used to select an adequate test suite. $R_I(e)$ assigns a weight to a (possible) error, i.e. estimates the risk involved in committing errors when implementing event $e$. It is calculated from two values: an estimation of the chances of event $e$ being erroneously implemented ($E_I(e)$), and an estimation of the impact of the corresponding error in the rest of the system ($I_I(e)$). $R_I(S)$ estimates the chances for the implementation not to conform to the corresponding specification, and measures the risk of erroneously implementing $S$.
$MR_I(e, ts)$ represents the amount of risk for event $e$ that can be detected through a testing process using test suite $ts$, and $MR_I(S, ts)$ represents the amount of risk for implementation $I$ that can be detected using test suite $ts$. Risk measurement for a single test case is the particular case where suite $ts$ is composed of a single test case. Note that, from the definitions above, $MR_I(e, ts) \leq R_I(e)$ and $MR_I(S, ts) \leq R_I(S)$.
The underlying mathematical model we need is considerably simplified through the assumption of independence among errors.
Table 2
Coverage Parameters
<table>
<thead>
<tr>
<th>Target</th>
<th>Parameter</th>
<th>Range</th>
</tr>
</thead>
<tbody>
<tr>
<td>Event</td>
<td>$K_I(e, ts) = \frac{MR_I(e, ts)}{R_I(e)}$</td>
<td>$[0, 1]$</td>
</tr>
<tr>
<td>Implementation</td>
<td>$K_I(S, ts) = \frac{MR_I(S, ts)}{R_I(S)}$</td>
<td>$[0, 1]$</td>
</tr>
</tbody>
</table>
Table 3
Cost Parameters
<table>
<thead>
<tr>
<th>Target</th>
<th>Parameter</th>
<th>Range</th>
</tr>
</thead>
<tbody>
<tr>
<td>Event</td>
<td>$C_I(e) = P_I(e) + X_I(e)$</td>
<td>$(0, \infty)$</td>
</tr>
<tr>
<td>Implementation</td>
<td>$C_I(S, ts)$</td>
<td>$(0, \infty)$</td>
</tr>
</tbody>
</table>
However, in practice, errors are not independent of each other, as erroneous statements in a program may affect the evolution of other parts of the program. As a solution, correlation among errors is reflected in our model through error weight values; that is, we model such interdependence through parameter $I_I(e)$. Testing engineers then estimate the correlation among errors, using available error statistics and their own expertise, and define $I_I(e)$ accordingly.
This can be seen as a compromise between a convenient mathematical foundation and the need to consider error correlation in real cases. Note that, whether supported by the underlying mathematical model or through explicit parameters, getting the correlations between failures right is crucial to get the most out of the approach discussed in this paper.
From the parameters above, we define coverage as the quotient between a measurement of the detection power of a test suite and a measurement of the risk (cf. Table 2). $K_I(e, ts)$ represents the coverage for event $e$ using test suite $ts$, whereas $K_I(S, ts)$ represents the coverage for implementation $I$, corresponding to specification $S$, using test suite $ts$.
When executing a test suite $ts$ on an IUT we are checking whether some of the estimated error possibilities have materialised into actual errors. If errors appear, they should be corrected. Conversely, if errors are not found, our confidence increases. Given two test suites $ts_1$ and $ts_2$, using the parameters above we can compare their coverage, and therefore their ability to detect errors in an IUT. However, there is another factor that should be taken into account when comparing test suites: the resources needed. To estimate this aspect, we introduce a new parameter: the cost (cf. Table 3). $C_I(e)$ estimates the cost of testing event $e$ as the sum of the cost of implementing it in a test case ($P_I(e)$) and the cost of executing that event on the implementation ($X_I(e)$). $C_I(S, ts)$ represents the cost of testing an implementation $I$ using test suite $ts$ generated from specification $S$.
Using cost values we can better discriminate among several test suites. Therefore, the next step will be to relate the parameters defined above to
obtain another reference to facilitate the selection of test cases. For this, we define the efficiency of a test suite $ts$ obtained from $S$, denoted $F_I(S,ts)$, as the quotient between the coverage of that suite and the cost associated with its use to test $I$:
\[
F_I(S,ts) = \frac{K_I(S,ts)}{C_I(S,ts)}
\]
The values of this new parameter lie in the range $[0, \infty)$. Efficiency increases when coverage increases and when cost decreases.
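As a purely numerical illustration of coverage, cost and efficiency, the following sketch compares two hypothetical test suites; all risk and cost figures are invented for the example, and the simplistic assumption that each exercised event contributes its full risk once is ours, not the paper's.

```python
# Invented per-event risk R_I(e) and per-event cost C_I(e) = P_I(e) + X_I(e).
risk = {"connect": 5.0, "send": 3.0, "abort": 1.0}
cost = {"connect": 2.0, "send": 1.0, "abort": 1.0}

def evaluate(suite_events):
    """Coverage K, total cost C and efficiency F for the events a suite exercises."""
    measured = sum(risk[e] for e in set(suite_events))   # crude MR_I(S, ts) <= R_I(S)
    coverage = measured / sum(risk.values())             # K_I(S, ts) in [0, 1]
    total_cost = sum(cost[e] for e in suite_events)      # cost is paid per execution
    return coverage, total_cost, coverage / total_cost   # last value is F_I(S, ts)

print(evaluate(["connect", "send", "send"]))    # (0.888..., 4.0, 0.222...)
print(evaluate(["connect", "send", "abort"]))   # (1.0, 4.0, 0.25): better efficiency
```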
We need a procedure to calculate values for the heuristics above taking into account our representation formalism, namely Labelled Transition Systems. We try to assess conformance of a system implementation with respect to its formal specification. Thus, we take as a reference the risk involved when implementing all events in the specification. In this way, we can formulate the risk for an IUT as the sum of the risk values for its events.

On the other side, due to nondeterminism, practical test cases should be executed several times to gain confidence on the testing process. For example, consider the specification \( S \) in figure 1 and its implementations \( I_1 \) and \( I_2 \). While the implementation \( I_1 \) is equal to \( S \) and will always accept event \( a \) as stated by \( S \), implementation \( I_2 \) sometimes executes an internal action and then refuses event \( a \). Obviously, this latter implementation does not conform with \( S \).
If we are testing a physical implementation, which may behave as \( I_1 \) or \( I_2 \), we will need to execute event \( a \) several times from the initial state in order to discover whether it conforms to \( S \). Each time event \( a \) is accepted we increase our confidence in the implementation. Conversely, if we obtain a single refusal we
can guarantee that the IUT does not conform. In other words, measurement risk values vary along the testing process.
Additionally, the presence of recursive behaviours makes testing dependent on the level of recursion at which the test is passed. We call recursive behaviours those that are self-instantiating. Consequently, the recursion level is the number of times a behaviour has been instantiated. For instance, specification $S$ in Figure 2 contains a recursive behaviour and never stops. Again, to check a physical implementation of $S$ that behaves as $I$ in Figure 2, we might need to execute event $send$ many times to detect that such event is sometimes refused. As a consequence, the risk measurement involved when testing an event is spread along the successive levels of recursion (i.e. successive event instantiations).
Taking into account both aspects, we can decompose the risk of every event in an LTS (i.e. the weight assigned to errors in events) as:
$$R_I(e) = \sum_{r=1}^{\infty} \sum_{n=1}^{\infty} R_{r,n}^I(e) < \infty$$
where $R_{r,n}^I(e)$ represents the risk of event $e$ when being tested for the $n$-th time at recursion level $r$ using a given test case. Then, the risk detection power of a test suite $ts$ becomes:
$$MR_I(S,ts) = \sum_{tc \in ts} \sum_{e \in E(tc)} \sum_{r=1}^{Rc_e} \sum_{n=0}^{N_e(r)} R_{r,n}^I(e)$$
where $Rc_e$ and $N_e(r)$ are, respectively, the deepest recursion level at which event $e$ has been tested and the number of times we tested that event at each recursion level. If the test cases $tc \in ts$ have a tree structure, we can obtain several possible values for every successful run of a test case, so we may measure the risk a priori using available statistics.
2.4 A Priori and a Posteriori Values
As the IUT is an entity whose behaviour is unknown, there may be differences between what we desire to test and what we really test in practice. These differences may be due to:
- **Nondeterminism**: due to nondeterministic behaviour in the implementation, it is possible that, on a first try, we cannot test the behaviours we are interested in. Because of this, it may be necessary to execute test cases several times until we reach an appropriate result. New executions modify coverage values.
- **Failures**: if we detect a non-conforming implementation, it may not be possible to achieve the expected coverage because some test cases may not be executable due to errors in the implementation.
As a consequence we can identify [7] two classes of cost and coverage values:
• **A priori values**, which are obtained when we estimate the risk measurement and the cost to execute a test case $tc$ assuming all possible implementation responses, as defined by the corresponding specification.
• **A posteriori values**, which are obtained after executing the test case $tc$.
3 Test Case Selection
Now, we discuss our approach to test case selection, which builds on a classical approach, as discussed below. But first we introduce Enriched Transition Systems as a way to keep track of the structural information needed to identify those parts of the specification that have already been tested.
3.1 Enriched Transition Systems
When we try to execute several test cases on an implementation, it would be desirable to have access to the values of risk, cost and coverage obtained during the process. For this, as discussed above, we need information about recursion levels and testing iterations. Besides, if these values were available, we could select new test cases depending on the results obtained from the ones already executed.
To maintain the information gathered after the execution of test cases we define a new type of transition systems [5]:
**Definition 5 (Enriched Transition System)** An enriched transition system (ETS) is a 5-tuple denoted by $S = \langle \text{Stat}, L, T, N(t, r), s_0 \rangle$, such that $\langle \text{Stat}, L, T, s_0 \rangle$ is a labelled transition system and $N(t, r)$ is the number of times transition $t \in T$ is executed at recursion level $r \in [1, \infty)$.
The set of enriched transition systems over the label set $L$ is denoted by $ETS(L)$. Available notation and definitions for $LTS(L)$ are extended to $ETS(L)$ by defining them over the underlying transition system. Unlike a classical LTS, an ETS is dynamic, i.e. for every transition $t \in T$, the function $N(t, r)$ changes its values during the testing process.
When we execute a test case on an implementation $I$ generated from a specification $S$, events in the enriched specification $S_E \in ETS(L)$ are updated with the number of executions in every recursion level. In this way, we maintain information concerning which behaviours or specification parts have not been sufficiently tested. Note that from the specifications described as ETS we can easily obtain risk and coverage values.
We assume that every transition has its own risk value. We also assume the existence of a heuristic risk measurement function $f_{MR}(e, r, n) \rightarrow [0, R_I(e)]$ provided by the testing engineer. This function provides the risk measured for an individual execution at a given recursion level. The function must be convergent: the sum over $r$ and $n$ of all risk measurements for a single event $e$ must be less than or equal to the risk of that event.
**Example 1** A suitable risk measurement function can be defined as
\[ MR_r^n(e) = \frac{R_I(e)}{2^{r+n}} \text{ for } r, n \geq 1 \]
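A quick numerical check, under the assumptions of Example 1, that this measurement function spreads the risk of an event over recursion levels and repetitions without ever exceeding it:

```python
def f_mr(risk, r, n):
    """Risk measured the n-th time an event is tested at recursion level r."""
    return risk / 2 ** (r + n)

risk_e = 4.0
# The double geometric series sum_{r>=1} sum_{n>=1} 1/2^(r+n) converges to 1,
# so summing over a large finite grid approaches (and never exceeds) risk_e.
total = sum(f_mr(risk_e, r, n) for r in range(1, 40) for n in range(1, 40))
print(total)             # ~= 4.0
assert total <= risk_e
```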
Up to now, we have been considering transition systems without any additional information about which parts may be recursively called, which parts correspond to the main process, etc. In other words, when we traverse a plain LTS we do not know which states are recursively accessed from other states. With ETS, we consider every transition as a potential process (i.e. as a potential destination for a recursive call). Every time we reach a previously visited state, we assume that the recursion level for the next transition has increased by one. In this way, we just need to check how many times we have visited a state to obtain the level of recursion.

**Example 2** Suppose that we have the recursive specification \( S_E \in ETS(L) \) appearing in Figure 3. Function \( N(t,r) \) appears next to the corresponding label for every transition. We have represented the function \( N(t,r) \) as a sequence where the first element is the number of times we executed the transition in the first recursion level, the second element corresponds to the second level of recursion and so on. Initially, all values in the sequence are zero because we did not execute any test yet. Suppose also that we have a physical object \( I \) that implements correctly the behaviour described in the specification, i.e. \( I = S_E \), and that we want to execute test cases \( tc_1 \) and \( tc_2 \) described in Figure 3.
\( S_{bis} \) represents a snapshot of \( S_E \in ETS(L) \) after the execution of both test cases. Event \( a \) has been tested twice in the first level of recursion, once for each test case. Besides, this event has also been tested in the second level of recursion, which corresponds to the last transition of \( tc_1 \). The rest of the events have been executed only once in the initial recursion level.
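The bookkeeping added by an enriched transition system can be sketched as follows; the representation of transitions as triples and the rule for deriving the recursion level from repeated visits to the source state follow the informal description above, but the data structures are illustrative only.

```python
from collections import defaultdict

class ETS:
    """LTS transitions plus N(t, r): executions of transition t at recursion level r."""
    def __init__(self, transitions, s0):
        self.transitions = set(transitions)   # triples (state, action, state)
        self.s0 = s0
        self.counts = defaultdict(int)        # (transition, recursion level) -> executions

    def record_run(self, path):
        """Record one executed path (a sequence of transitions starting at s0)."""
        visits = defaultdict(int)
        visits[self.s0] += 1
        for (src, act, tgt) in path:
            level = visits[src]                         # r-th visit of the source state
            self.counts[((src, act, tgt), level)] += 1
            visits[tgt] += 1

    def n(self, transition, level):
        return self.counts[(transition, level)]

# A recursive toy specification: after "a" we may loop back through "b".
ets = ETS({("s0", "a", "s1"), ("s1", "b", "s0")}, s0="s0")
ets.record_run([("s0", "a", "s1"), ("s1", "b", "s0"), ("s0", "a", "s1")])
print(ets.n(("s0", "a", "s1"), 1), ets.n(("s0", "a", "s1"), 2))   # 1 1
```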
3.2 Algorithms for Risk-driven Test Case Selection
For test generation and selection, we first adopted a classical testing algorithm and modified it to take risk and coverage values into account. The classical approach selected was Tretmans’ [14], which constructs tree-like deterministic test cases by recursively selecting at random a subset of all possible specification transitions from a given state.
Table 4
Generating test cases for $S$
Given $S \in ETS(L)$, we construct a test case $tc := \sum\{a; tc_a \mid a \in A_{MR}\}$ recursively as follows:
(i) Construct the set $C_S := \{Init(S') \mid S = \epsilon \Rightarrow S'\}$
(ii) Among all possible sets $A \subseteq Init(S)$, select the set $A_{MR}$ with the maximum mean risk measurement $\frac{\sum_{a \in A} f_{MR}(a, r, n)}{\text{Card}(A)}$ (evaluated with the current a priori values of $r$ and $n$ for each $a$) and satisfying one of the following:
(a) $\forall C \in C_S : A_{MR} \cap C \neq \emptyset$ and $v(tc) = \text{fail}$, or
(b) $\emptyset \in C_S$ and $A_{MR} = Init(S)$ and $v(tc) = \text{pass}$, or
(c) $A_{MR} = \emptyset$ and $v(tc) = \text{pass}$
(iii) Construct recursively $tc_a$ as a test case for $\sum\{i; S' \mid S = a \Rightarrow S'\}$
(*) When representing a test case, $\sum$ represents branching and $a; s$ is short notation for transitions (i.e. $-a \rightarrow s$).
In our case (cf. Table 4), we modified Tretmans’ algorithm to select (under the conditions expressed in [14]) the set $A_{MR} \subseteq Init(S)$ that maximizes the mean risk measurement.
Concerning the test generation process and the ETS formalism, before we generate any test case, we make a copy of $S_E \in ETS(L)$ and name it $S_{bkp}^E$. During the generation process we work with $S_{bkp}^E$ instead of $S_E$. Then, each time a new set $A_{MR}$ is selected, the values of $N(t, r)$ in the copy $S_{bkp}^E$ are updated as if the corresponding transitions had been executed. For example, if due to recursion the same transition is selected a second time in the test case being generated, the corresponding value of $N(t, r)$ will reflect that we are now in the second level of recursion. These values are updated in $S_{bkp}^E$ and are considered a priori values (cf. Section 2.4). In other words, a priori values are updated along the generation of a test case over the copy, and they guide the construction of the test case in a dynamic fashion.
Once a test case has been completely generated, we recover the original ETS specification, formerly $S_E$, and execute the test case. After the execution of the test case, values of $N(t, r)$ in $S_E$ are updated according to the execution sequence obtained a posteriori.
This cycle (i.e. test generation using a priori values, then test execution to obtain a posteriori values, which are used as the initial values for the next iteration) is repeated until test cases with the desired coverage or cost are
obtained. In this way, we dynamically construct test cases to cover those parts less adequately tested so far. This approach has been illustrated recently with a case study [4] and described extensively in [5].
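The overall cycle can be summarised by a small driver loop; the generator, executor and coverage functions are placeholders for the algorithm of Table 4 and for interaction with a real IUT, and the sketch reuses the `record_run` bookkeeping idea shown earlier, so it is only an assumed skeleton rather than the authors' implementation.

```python
import copy

def testing_cycle(spec_ets, generate, execute, coverage, target=0.9, budget=50):
    """Generate with a priori bookkeeping, execute, then record a posteriori values."""
    for _ in range(budget):
        if coverage(spec_ets) >= target:
            return "coverage target reached", None
        working = copy.deepcopy(spec_ets)    # a priori N(t, r) updates go to the copy only
        tc = generate(working)               # e.g. the risk-driven algorithm of Table 4
        verdict, executed_path = execute(tc) # run against the IUT
        if verdict == "fail":
            return "non-conforming implementation", tc
        spec_ets.record_run(executed_path)   # a posteriori N(t, r) update on the real ETS
    return "budget exhausted", None
```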
Nevertheless, the algorithm in table 4 has two drawbacks:
(i) **Unnecessary cost increments**: the algorithm generates test cases with a tree structure, introducing additional branches to cover nondeterministic behaviours. When executed, such test cases might exercise parts of the implementation that have already been tested, while other parts might not be covered enough, incurring extra cost and decreasing effectiveness.
(ii) **Partial selection versus global selection**: the selection of $A_{MR}$ during test case generation does not consider any prediction level. This means that the chosen transitions may not have been tested before, yet lead to behaviours with a reduced impact on the global risk.
Table 5
Generating test cases using prediction.
Given $S \in ETS(L)$, $i_p$, $l_{max}$ and $s_x = s_0$. A test case $tc$ of $S$ is:
$$tc := \{a; tc_a \mid \text{PathTr}(\varphi_{opt}) = a \cdot \sigma'\}$$
with $\varphi_{opt} \in \Gamma : \Gamma = \{\varphi \in \text{Path}(s_x) : |\varphi| \leq i_p\}$ that satisfy:
1. $MR_I(S, \varphi_{opt}) \geq MR_I(S, \varphi), \forall \varphi \in \Gamma$.
2. $|tc| \leq l_{max}$
3. Using $\text{PathTr}(tc) = \sigma.a$ we assign verdicts with:
a) if $L \in \text{Ref}(S, \sigma)$ then $v(tc) = \text{pass}$;
b) if $\{a\} \in \text{Ref}(S, \sigma)$ then $v(tc) = \text{inc}$;
c) if $\{a\} \notin \text{Ref}(S, \sigma)$ then $v(tc) = \text{fail}$;
where $MR_I(S, \varphi) = MR_{ini}(S, \varphi) + \frac{MR_{end}(S, \varphi)}{1+\alpha \cdot N_{inc}}$, with $\varphi$ divided as $\varphi = \varphi_{ini} \cdot \varphi_{end}$, where $\varphi_{ini}$ is the initial subpath of $\varphi$ without inc verdicts and $\varphi_{end}$ is the ending subpath from the first inc verdict. $\alpha \in [0, 1]$ is a parameter we may select and $N_{inc}$ is the number of inc verdicts that have appeared. We calculate:
$$MR_{ini}(S, \varphi) = \sum_{t \in \varphi_{ini}} R_{r,n}^I(t)$$
$$MR_{end}(S, \varphi) = \sum_{t \in \varphi_{end}} R_{r,n}^I(t)$$
$tc_a$ is the test case generated from the state $s_y$ such that $s_x - a \rightarrow s_y$. Therefore, we want to complement the possibility of generating test cases
with a tree structure, e.g. the ones produced by the algorithm in Table 4, with the generation of test cases oriented to check behaviours poorly tested so far. The algorithm presented in Table 5 can be used in the later test phases, when some specification parts still have a low level of coverage. In that table, function \texttt{PathTr}(\(\varphi\)) returns the trace \(\sigma \in \text{Tr}(P)\) obtained by following path \(\varphi\). Again, during test case generation we must use a copy \(S_{bkp}^E\) and modify its a priori \(N(t, r)\) values. The main properties of this new algorithm are:
(i) We introduce a \textit{prediction parameter} \((i_p)\) and a \textit{maximum length} \((l_{\text{max}})\).
(ii) From state \(s' \in \text{Stat}(S)\) we evaluate the risk of all possible transition paths \(\varphi \in \text{Path}(s')\) such that \(|\varphi| \leq i_p\), i.e., paths with less length than the prediction parameter.
(iii) We choose the path \(\varphi_{opt}\) that, a priori, measures the most risk. Concerning risk measurement, we take care of the presence of \texttt{inc} verdicts using the parameter \(\alpha \in [0, 1]\). This parameter allows us to penalize test cases that may end without a conclusive verdict while still generating cost. If \(\alpha = 0\), the presence of inconclusive verdicts is not considered. If \(\alpha = 1\), we reduce the \textit{a priori risk measurement} by taking the risk contained in the nondeterministic sequence and dividing its value by \((1 + N_{\text{inc}})\). A typical initial value is \(\alpha = 0.5\).
(iv) Once \(\varphi_{opt}\) has been chosen, we take its first transition \(t\) and update the value of \(N(t, r)\) in \(S\) to model its execution, advance to the next state and repeat step 2 until the test case \(tc\) reaches the desired length.
By changing the prediction parameter \(i_p\) we may tune the precision of test case generation. With \(i_p = 1\) we have the same information as in the algorithm presented in Table 4. With \(i_p = \infty\) we choose the \textit{(a priori) best test case}. The price we pay for increasing \(i_p\) is the computational cost of evaluating all possible paths from a given state and the associated risk measurement computations. Our experience shows that \(i_p\) values around 3 to 5 are quite feasible for realistic specifications.
\textbf{Example 3} Figure 4 shows the specification $S$. The risk values estimated for its events are: $R_a = 2$, $R_b = 1$, $R_c = 5$, $R_d = 3$ and $R_e = 1$. Considering there is no recursion, we select the following function to measure the risk:
\[ f_{MR}(e,n) = \frac{R_I(e)}{2^n} \]
This function satisfies:
\[ R_I(e) = \sum_{n=1}^{\infty} R_n^I(e) = \sum_{n=1}^{\infty} f_{MR}(e,n) = \sum_{n=1}^{\infty} \frac{R_I(e)}{2^n} \]
Therefore, in every execution we measure part of the risk for an event, and the global risk we may measure is equal to the risk of failure for the event.
Using the algorithm that appears in table 4 we may select for the set \(A\) one of the sets \(\{a\}\), \(\{d\}\) or \(\{a,d\}\). Their respective values for risk measurement are:
• $f_{MR}(a, 1) = 2/2 = 1$
• $f_{MR}(d, 1) = 3/2 = 1.5$
• $\frac{f_{MR}(a, 1) + f_{MR}(d, 1)}{\text{Card}(\{a, d\})} = \frac{1+1.5}{2} = 1.25$
Hence, using this algorithm, we would choose $A = \{d\}$. Following the steps described in table 4, we obtain the test case $tc_1$, which appears in figure 4.
On the other hand, we now use the predictive algorithm of Table 5 with a prediction parameter $i_p = 2$. In the initial state of $S_{bkp}^E$ we must calculate all transition sequences of length $i_p = 2$ and determine their risk measurement. There are three cases:
(i) $a; b$: with the risk measurement $f_{MR}(a, 1) + f_{MR}(b, 1) = 2/2 + 1/2 = 1.5$
(ii) $a; c$: with the risk measurement $f_{MR}(a, 1) + f_{MR}(c, 1) = 2/2 + 5/2 = 3.5$
(iii) $d; e$: with the risk measurement $f_{MR}(d, 1) + f_{MR}(e, 1) = 3/2 + 1/2 = 2$
As the larger risk measurement corresponds to the second option, we take its first transition, modify a priori the values of $N(t, r)$ in $S_{bkp}^E$ for that transition, advance to the next state and repeat the procedure.
After the first transition there are only two possibilities, $b$ or $c$, both of length 1. We proceed to determine their risk measurements, which are $f_{MR}(b, 1) = 1/2 = 0.5$ and $f_{MR}(c, 1) = 5/2 = 2.5$. Therefore, we choose the transition labelled $c$, obtaining the test case $tc_2$ in Figure 4.
The a priori global risk measurement for $tc_1$ is $MR_I(S, tc_1) = 2$ and for $tc_2$ it is $MR_I(S, tc_2) = 3.5$. The second test case is clearly better than the first in terms of risk measurement. Thus, if the selection of transitions is done with a certain level of prediction, we can take advantage of the information that an enriched transition system offers.
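The choice made in this example can be reproduced with a few lines of code; the path enumeration below is a simplified reading of the algorithm in Table 5 (no recursion handling, no $\alpha$ penalty), and the state names are invented since Figure 4 is not reproduced here.

```python
def paths_up_to(transitions, state, depth):
    """All transition paths of length <= depth starting in `state`."""
    if depth == 0:
        return [[]]
    result = [[]]
    for (src, act, tgt) in transitions:
        if src == state:
            for tail in paths_up_to(transitions, tgt, depth - 1):
                result.append([(src, act, tgt)] + tail)
    return result

def best_first_transition(transitions, state, measure, i_p):
    """First transition of the path of length <= i_p with maximal a priori risk."""
    candidates = [p for p in paths_up_to(transitions, state, i_p) if p]
    best = max(candidates, key=lambda p: sum(measure(t) for t in p))
    return best[0]

# Example 3 without recursion: traces a;b, a;c and d;e, with invented state names.
trans = {("s0", "a", "s1"), ("s1", "b", "s2"), ("s1", "c", "s3"),
         ("s0", "d", "s4"), ("s4", "e", "s5")}
risk = {"a": 2, "b": 1, "c": 5, "d": 3, "e": 1}
first_time = lambda t: risk[t[1]] / 2          # f_MR(e, 1) = R_I(e) / 2
print(best_first_transition(trans, "s0", first_time, i_p=2))   # ('s0', 'a', 's1')
```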
4 Conclusions
We have presented in this paper an approach to testing supported by formal methods, which also includes non-formal heuristics so that the experience of the testing engineer can be used to evaluate the costs of the testing process.
Our experience shows that this approach, based on error weighting and cost values, provides a way to assign values to different test cases, which
makes it possible to classify them according to different criteria, taking into account the desired coverage and the affordable cost. Test generation can be directed by these heuristics to obtain context-adapted test suites.
This proposal has recently been evaluated on a practical case study: the testing of a protocol for mobile auctions in a distributed, wireless environment [4]. LOTOS was selected as the supporting formal language. Nevertheless, the ideas discussed here are not specific to LOTOS, but applicable to a wide range of formal techniques with comparable expressive power.
B-Tropos
Agent-Oriented Requirements Engineering Meets Computational Logic for Declarative Business Process Modeling and Verification*
Volha Bryl¹, Paola Mello², Marco Montali², Paolo Torroni², and Nicola Zannone¹
¹ DISI, University of Trento – Via Sommarive 14, 38100 Povo (TN), Italy
{bryl,zannone}@disi.unitn.it
² DEIS, University of Bologna – V.le Risorgimento 2, 40136 Bologna, Italy
{pmello,mmontali,ptorroni}@deis.unibo.it
Abstract. The work presented in this paper stands at the intersection of three diverse research areas: agent-oriented early requirements engineering, business process requirements elicitation and specification, and computational logic-based specification and verification. The analysis of business requirements and the specification of business processes are fundamental steps in the development of information systems. The first part of this paper presents B-Tropos as a way to combine business goals and requirements with the business process model. B-Tropos enhances a well-known agent-oriented early requirements engineering framework with declarative business process-oriented constructs, inspired by the DecSerFlow and ConDec languages. In the second part of the paper, we show a mapping of B-Tropos onto SCIFF, a computational logic-based framework for properties and conformance verification.
1 Introduction
This work proposes an integration of different techniques for information systems engineering, with the aim to reconcile requirements elicitation with declarative specification, prototyping, and analysis inside a single unified framework.
In tackling the requirements elicitation part, we take an agent-oriented perspective. Modeling and analyzing requirements of IT systems in terms of agents and their goals is an increasingly popular approach [20], which helps in understanding the organizational setting where a system operates, in modeling the stakeholders’ strategic interests, and in documenting the rationale behind the design choices made. After system requirements elicitation is complete, one must define a corresponding business process. A very important issue that must be addressed at this stage is how to link the “strategic” business goals and
* This work has been partially funded by the EU SENSORIA and SERENITY projects, by the national MIUR-FIRB TOCAI.IT and MIUR-PRIN 2005-011293 projects, and by the PAT MOSTRO project.
requirements with the business process model [23]. Many problems arise from organizational theory and strategic management perspectives due to limits on particular resources (e.g., cost, time, etc.). Business strategies have a fundamental impact on the structure of enterprises, leading to efficiency in coordination and cooperation within economic activities.
For our purpose, we have chosen Tropos [8], an agent-oriented software engineering methodology which uses the concepts of agent and goal from the early phases of system development. Tropos has a number of interesting features, such as its goal- and agent-orientation and its intuitive and expressive modeling notation, which have made it popular. However, a drawback of Tropos and a number of similar methodologies is that they do not clearly define how to move from a requirements model to a business process model. For example, Tropos does not allow the modeling of temporal and data constraints between tasks assigned to agents: this means that when developing a business process, the corresponding Tropos model does not have enough information to define a temporal ordering between activities. Likewise, start and completion times, triggering events, deadlines, and many other aspects not necessarily related to the temporal dimension are essential elements in the description of a business process model, but they are not represented in Tropos models.
How to enhance Tropos with information that can be automatically used in the generation of a business process model is one of the aspects we address in this work. In particular, we have extended Tropos with declarative business process-oriented constructs, inspired by two recent graphical languages: DecSerFlow [34] and ConDec [33]. We enhance the characteristic goal-oriented approach of Tropos agents by introducing a high-level reactive, process-oriented dimension. We refer to the extended framework as $B$-Tropos. Furthermore, we show how both these complementary aspects can be mapped onto the SCIFF language [4], which sits at the basis of a computational logic-based framework for the specification and verification of interaction protocols in open multi-agent systems. In the presentation of this work, we discuss the issue of time (ordering, deadlines, etc.) because it is an essential part of business process modeling, and because it is easy to explain by intuitive examples. However, $B$-Tropos is not only a temporal extension of Tropos: it also covers the treatment of conditions on process input/output data and other constraints.
The marriage of $B$-Tropos with SCIFF sets a link between specification, prototyping and analysis: in fact, SCIFF specifications can be used to implement and animate logic-based agents [1], as well as to perform different verification tasks, such as properties verification [2] and conformance verification of a given execution trace [4]. Prototyping (animation) and analysis (properties and conformance verification) add value to $B$-Tropos and can make it appealing to a large set of potential users. Early requirements engineers and process engineers will be able to test their models directly and get an immediate picture of the system being developed. Engineers testing the properties of the models will not have to resort to ad-hoc, error-prone translations of high-level models into the languages used to feed specifications into model checkers, since $B$-Tropos can
directly generate SCIFF programs. Managers who need to monitor the correct behavior of a running system will have a SCIFF specification of the system generated out of a B-Tropos model automatically, and based on this specification they will be able to automatically check the compliance of the system using the SOCS-SI runtime and offline checking facilities.
In this work, we focus on specific aspects of this global vision. We define B-Tropos and the mapping of B-Tropos constructs onto the SCIFF framework. To make the discussion more concrete, the proposed approach is applied to modeling and analyzing an intra-enterprise organizational model, focusing on the coordination of economic activities among different units of an enterprise collaborating to produce a specific product. The organizational model is an excerpt of a large case study under consideration within the national FIRB TOCAI.IT project.
The structure of the paper is as follows. Section 2 briefly presents the Tropos methodology. Section 3 describes B-Tropos. The SCIFF framework is presented in Section 4 whereas Section 5 defines the mapping of B-Tropos concepts to SCIFF specifications. The paper ends with the overview of related work in Section 6 and conclusive remarks in Section 7.
2 The Tropos Methodology
Tropos is an agent-oriented software engineering methodology tailored to describe and analyze socio-technical systems along the whole development process, from requirements analysis up to implementation. One of its main advantages is the importance given to early requirements analysis. This allows one to capture why a piece of software is developed, beyond what or how.
The methodology is founded on models that use the concepts of actors (i.e., agent and roles), goals, tasks, resources, and social dependencies between two actors. An actor is an active entity that has strategic goals and performs actions to achieve them. A goal represents a strategic interest of an actor. A task represents a particular course of actions that produce a desired effect. A resource represents a physical or an informational entity without intentionality. A dependency between two actors indicates that one actor depends on another in order to achieve some goal, execute some task, or deliver some resource. The former actor is called the depender, while the latter is called the dependee. The object around which the dependency centers, which can be a goal, a task, or a resource, is called the dependum. In the graphical representation, actors are represented as circles; goals, tasks and resources are respectively represented as ovals, hexagons and rectangles; and dependencies have the form depender $\rightarrow$ dependum $\rightarrow$ dependee.
From a methodological perspective, Tropos is based on the idea of building a model of a system that is incrementally refined and extended.
---
1 The TOCAI.IT project (RBNE05BFRK, [http://www.dis.uniroma1.it/~tocai/](http://www.dis.uniroma1.it/~tocai/)) is a three-year, 4.5 Ml euro project on “Knowledge-oriented technologies for enterprise aggregation in Internet.” It involves a consortium of 11 Italian universities, the National Research Council, and three industrial partners in the ICT, engineering, and manufacturing sectors.
Specifically, goal analysis consists of refining goals and eliciting new social relationships among actors. It is conducted from the perspective of single actors using means-end analysis and AND/OR decomposition. Means-end analysis aims at identifying tasks to be executed in order to achieve a goal. Means-end relations are graphically represented as arrows without any label on them. AND/OR decomposition combines AND and OR refinements of a root goal or a root task into subparts. In essence, AND-decomposition is used to define the high-level process for achieving a goal or a task, whereas OR-decomposition defines alternatives for achieving a goal or executing a task. Fig. 1 presents the Tropos diagram representing an excerpt of the product development process studied in the course of the TOCAI project.
**Example 1.** Different divisions of a company have to cooperate in order to produce a specific product. The Customer Care division is responsible for deploying products to customers, which it refines into subgoals manufacture product, for which Customer Care depends on the Manufacturing division, and present product, for which it depends on the Sales division. In turn, Manufacturing
decomposes the appointed goal into subgoals define solution for product, for which it depends on the Research & Development (R&D) division, and make product, which it achieves through task execute production line. To achieve goal define solution for product, R&D has to achieve goals provide solution, which it achieves by executing task design solution, evaluate solution, and deploy solution, which it achieves through task define production plan. The evaluation of the solution is performed in terms of costs and available resources. In order to evaluate costs, R&D executes task assess costs, which consists of calculate bill of quantities and evaluate bill of quantities. Moreover, this division depends on the Warehouse for the goal evaluate available resources. The Warehouse either queries the databases to find available resources or asks the Purchases division to buy resources from an external Supplier. The Purchases division searches the company’s databases for possible Suppliers and selects the one that provides the best offer.
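The goal structures used in this example can be captured by a very small data model; the AND/OR evaluation rule follows the informal reading given above, and the miniature model below encodes only the Warehouse fragment of Example 1.

```python
from dataclasses import dataclass, field

@dataclass
class Goal:
    name: str
    decomposition: str = "AND"               # "AND" or "OR"
    subgoals: list = field(default_factory=list)

    def achievable(self, achieved_leaves):
        """Leaf goals must be listed as achieved; otherwise apply AND/OR semantics."""
        if not self.subgoals:
            return self.name in achieved_leaves
        results = [g.achievable(achieved_leaves) for g in self.subgoals]
        return all(results) if self.decomposition == "AND" else any(results)

# The Warehouse achieves "evaluate available resources" either by querying its
# databases or by asking Purchases to buy resources (an OR-decomposition).
evaluate_resources = Goal("evaluate available resources", "OR", [
    Goal("query databases"),
    Goal("buy resources"),
])
print(evaluate_resources.achievable({"query databases"}))   # True: one OR branch suffices
```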
3 Towards Declarative Process-Oriented Annotations
How business processes can be obtained from requirements analysis is a pressing issue in the development of a system. Unfortunately, Tropos is not able to cope with this issue, mainly due to its lack of temporal constructs. In this section we discuss how Tropos can be extended to deal with high-level process-oriented aspects. The proposed extensions are intended to support designers in defining durations, absolute time and domain-based constraints for goals and tasks, as well as in declaratively specifying relations between them. These extensions are based on the DecSerFlow and ConDec graphical languages for the declarative representation of service flows and flexible business processes. The enhanced Tropos is called B-Tropos.
3.1 Some Definitions
For the sake of clarity, we now give some informal definitions, which will be used to describe the Tropos extensions introduced in this section.
Definition 1 (Time interval). A time interval is a definite length of time marked off by two (non-negative) instants $T_{\min}$ and $T_{\max}$, each of which can be treated as exclusive or inclusive. As usual, we use parentheses $(\ldots)$ to indicate exclusion and square brackets $[\ldots]$ to indicate inclusion.
Definition 2 (Relative time interval). A time interval is relative if its initial and final instants are defined as a function of another instant. Given a time interval $TI$ marked off by $T_{\min}$ and $T_{\max}$ and a time instant $T$, two relative time intervals can be defined w.r.t. $T$:
- $TI^{-T}$ denotes the time interval marked off by $T - T_{\max}$ and $T - T_{\min}$;
- $TI^{+T}$ denotes the time interval marked off by $T + T_{\min}$ and $T + T_{\max}$.
For example, \([10, 15]^{+T_1}\equiv[T_1 + 10, T_1 + 15]\) and \((0, 7)^{-T_2}\equiv(T_2 - 7, T_2)\).
Definition 3 (Absolute time constraint). An absolute time constraint is a constraint of the form \( T \ OP \ Date \), where \( T \) is a time variable, \( Date \) is a date and \( OP \in \{ at, after, after\ or\ at, before, before\ or\ at \} \) (with their intuitive meaning).
Definition 4 (Domain-based constraint). A domain-based constraint formalizes specific application domain requirements and is specified using CLP constraints \( [21] \) (e.g., \( >, <, =, \ etc. \)) or Prolog predicates.
Definition 5 (Condition). A condition is a conjunction of domain-based and absolute time constraints.
For example, the condition $T$ before or at 11.26.2007 $\land$ workingDay($T$) states that $T$ has 26 November 2007 as a deadline and must be a working day.
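Definitions 1-5 can be exercised with a short sketch that treats time instants as integers (e.g. days since some origin); the toy working-day predicate and the function names are illustrative assumptions.

```python
def relative_interval(t, t_min, t_max, direction="+"):
    """TI^{+T} or TI^{-T}: shift the base interval [t_min, t_max] by the instant t."""
    if direction == "+":
        return (t + t_min, t + t_max)
    return (t - t_max, t - t_min)

def satisfies(t, deadline, domain_pred):
    """A condition: absolute constraint (before or at `deadline`) AND a domain constraint."""
    return t <= deadline and domain_pred(t)

working_day = lambda t: t % 7 not in (5, 6)        # toy calendar: days 5 and 6 are weekend
print(relative_interval(100, 10, 15, "+"))         # (110, 115), i.e. [10, 15]^{+T} with T = 100
print(relative_interval(100, 0, 7, "-"))           # (93, 100),  i.e. (0, 7)^{-T}  with T = 100
print(satisfies(3, deadline=10, domain_pred=working_day))   # True
```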
3.2 Tasks/Goals Extension
In order to support the modeling and analysis of process-oriented aspects of systems, we have annotated goals and tasks with temporal information such as start and completion times (the notation is shown in Fig. 2). Each task/goal can also be described in terms of its allowed duration ($[D_{min}, D_{max}]$ in Fig. 2). This allows one to constrain, for instance, the completion time with respect to the start time, i.e., completion time $\in [D_{min}, D_{max}]^{+\text{start time}}$. Additionally, absolute temporal constraints can be used to define start and completion times of goals and tasks.
A goal/task can also be described in terms of the resources needed and produced by the goal/task itself. We represent the resources needed by a goal/task through attribute input and the resources produced by a goal/task through attribute output. Finally, tasks can be annotated with a fulfillment condition, which defines when they are successfully executed.
3.3 Process-Oriented Constraints
To refine a requirements model into a high-level and declarative process-oriented view, we have introduced different connections between goals and tasks, namely relation, weak relation, and negation (see Table 1).
Table 1. Tropos extensions to capture process-oriented constraints (grouped negation connections share the same intended meaning, as described in [34]).
<table>
<thead>
<tr>
<th>relation</th>
<th>weak relation</th>
<th>negation</th>
</tr>
</thead>
<tbody>
<tr>
<td>responded presence</td>
<td><img src="image1" alt="Diagram" /></td>
<td><img src="image2" alt="Diagram" /></td>
</tr>
<tr>
<td>co-existence</td>
<td><img src="image4" alt="Diagram" /></td>
<td><img src="image5" alt="Diagram" /></td>
</tr>
<tr>
<td>response</td>
<td><img src="image7" alt="Diagram" /></td>
<td><img src="image8" alt="Diagram" /></td>
</tr>
<tr>
<td>precedence</td>
<td><img src="image10" alt="Diagram" /></td>
<td><img src="image11" alt="Diagram" /></td>
</tr>
<tr>
<td>succession</td>
<td><img src="image13" alt="Diagram" /></td>
<td><img src="image14" alt="Diagram" /></td>
</tr>
</tbody>
</table>
These connections allow designers to specify partial orderings between tasks under both temporal and domain-based constraints. To make the framework more flexible, connections are not directly linked to tasks but to their start and completion times. This solution, for instance, enables the representation of interleaving concurrency. A small circle is used to denote the connection source, which determines when the triggering condition is satisfied (co-existence and succession connections attach the circle to both end-points, since they are bi-directional).
Relation and negation connections are based on DecSerFlow [34] and ConDec [33] template formulas, extended with constraints on execution times (e.g., deadlines) as well as domain-based and absolute time constraints. Conditions can be specified on both start and completion times and are delimited by curly braces (\(\{c\}\), \(\{r\}\) and \(\{cr\}\) in Table 1); the source condition acts as a triggering condition, whereas the target condition imposes restrictions on time and/or data.
The intended meaning of a responded presence relation is: if the source happens such that \( c \) is satisfied, then the target should happen and satisfy \( r \). The co-existence relation applies the responded presence relation in both directions, by imposing that the two involved tasks, when satisfying \( cr_1 \) and \( cr_2 \), should co-exist (namely either none or both are executed).
Other relation connections extend the responded presence relation by specifying a temporal ordering between source and target events; optionally, a relative time interval (denoted by \( T_b \) in Table 1) can be attached to these connections, bounding when the target is expected to happen with respect to the time at which the source happened; if \( T_b \) is not specified, the default interval is \((0, \infty)\). In particular, the response relation constrains the target to happen after the source. If \( T_b \) is specified, the minimum and maximum times are treated as a delay and a deadline, respectively, that is, the target should occur between the minimum and the maximum time after the source (target time \( \in T_b^{+\mathit{source\ time}} \)). The precedence relation is the opposite of the response relation, in the sense that it constrains the target to happen before the
source. A *succession* relation is used to mutually specify that two tasks are the response and precedence of each other. By mixing different relation connections, we can express complex temporal dependencies and orderings, such as Allen's interval relations [5] (Fig. 3). For example, Allen's *during* relation is formalized by imposing that A's start should happen after B's start and A's completion should happen before B's completion (Fig. 3(a)), whereas the *meets* relation is formalized by imposing that A's completion should be equal to B's start (Fig. 3(b)).
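As a minimal illustration (an encoding of our own, not part of B-Tropos), these two Allen relations reduce to simple ordering checks on the start and completion times of the two tasks:

```python
def during(a_start, a_end, b_start, b_end):
    """Allen's 'during': A starts after B starts and completes before B completes."""
    return b_start < a_start and a_end < b_end

def meets(a_start, a_end, b_start, b_end):
    """Allen's 'meets': A's completion coincides with B's start."""
    return a_end == b_start

print(during(3, 7, 1, 10))   # True
print(meets(1, 5, 5, 9))     # True
```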
As in DecSerFlow and ConDec, we assume an open approach. Therefore, we have to explicitly specify not only what is expected, but also what is forbidden. These "negative" dependencies are represented by negation connections, the counterpart of relation connections. For example, a negation co-existence between two tasks states that when one task is executed, the other task shall never be executed, neither before nor after the source.
Summarizing, through relation and negation connections designers can add a horizontal, declarative, and high-level process-oriented dimension to the vertical goal-directed decomposition of goals and tasks. It is worth noting that, in the presence of OR-decompositions, adding connections may affect the semantics of the requirements model. The decomposition of task A in Fig. 4(a) shows that its subtask C can be satisfied by satisfying D or E. However, the response relation between B's completion and D's start makes D mandatory (B has to be performed because of the AND-decomposition, hence D is expected to be performed after B). This kind of interaction is not always desirable. Therefore, we have introduced weak relation connections with the intent of relaxing relation connections. Their intended meaning is: whenever both the source and the target happen and the trigger condition is satisfied, the target must satisfy the restriction condition. The main difference between relations and weak relations is that in weak relations the execution is constrained a posteriori, after both source and target have happened. Differently from Fig. 4(a), in Fig. 4(b) the response constraint between B and D should be satisfied only if D is executed.
Finally, B-Tropos permits constraining non-leaf tasks, leading to the possibility of expressing some process-oriented patterns [35]. For instance, a relation connection whose source is the completion of a task that is AND-decomposed into two subtasks triggers when both subtasks have been executed. Therefore, the connection resembles the concept of a synchronizing merge on the leaf tasks.
To show how process-oriented constraints can be added to a Tropos model, we extend a fragment of the diagram represented in Fig. 1; the result is shown in Fig. 5. The first extension concerns the decomposition of task **assess costs**: the bill of quantities can be evaluated only after having been calculated. Such a constraint can be modeled in B-Tropos by (1) indicating that the calculation produces a bill of quantities, whereas the evaluation takes a bill as an input, and (2) attaching a response relation connection between the completion of task **calculate bill of quantities** and the start of task **evaluate bill of quantities**. The second extension has the purpose of better detailing task **find resources in Warehouse**, namely representing that (1) the task duration is at least 10 time units, (2) the task produces as an output a datum (called **Found**), which describes whether or not resources have been found in the Warehouse, and (3) the task is considered fulfilled only if resources have actually been found, that is, **Found** is equal to **yes**. Finally, one can notice the absence of constraints between goals **evaluate costs** and **evaluate resources**. Such an absence enables the two sets of activities aimed at achieving those goals to be executed concurrently.
### 4 SCIFF
SCIFF \[34\] is a formal framework based on abductive logic programming \[22\], developed in the context of the SOCS project\[3\] for specifying and verifying interaction protocols in an open multi-agent setting. SCIFF introduces the concept of event as an atomic observable and relevant occurrence triggered at execution.
time. The designer can decide what has to be considered as an event; this generality allows the designer to model the target domain at the desired abstraction level, and to exploit SCIFF for representing any evolving process where activities are performed and information is exchanged.
We distinguish between the description of an event, and the fact that an event has happened. Happened events are represented as atoms \( H(Ev, T) \), where \( Ev \) is a term and \( T \) is an integer, representing the discrete time point at which the event happened. The set of all the events happened during a protocol execution constitutes its log (or execution trace). Furthermore, the SCIFF language supports the concept of expectation as a first-class object, pushing the user to think of an evolving process in terms of reactive rules of the form "if \( A \) happened, then \( B \) is expected to happen". Expectations about events come in the form \( E(Ev, T) \), where \( Ev \) and \( T \) are variables, possibly grounded to a particular term/value.
The binding between happened events and expectations is given by means of Social Integrity Constraints (ICs). Such constraints are forward rules of the form \( Body \rightarrow Head \), where \( Body \) can contain literals and (conjunctions of) happened and expected events, and \( Head \) can contain (disjunctions of) conjunctions of expectations. CLP constraints and Prolog predicates can be used to impose relations or restrictions on any of the variables, for instance on time (e.g., by expressing orderings or deadlines). Intuitively, ICs allow the designer to define how an interaction should evolve, given some previous situation represented in terms of happened events; the static knowledge of the target domain is instead formalized inside the SCIFF Knowledge Base. Here we find pieces of knowledge on the interaction model as well as the global organizational goal and/or objectives of single participants. Indeed, SCIFF considers interaction as goal-directed, i.e., it envisages environments in which each actor as well as the overall organization could have some objective achievable only through interaction; by adopting such a vision, the same interaction protocol could be seamlessly exploited for achieving different strategic goals. This knowledge is expressed in the form of clauses (i.e., a logic program); a clause body may contain expectations about the behavior of participants, defined literals, and constraints, while its head is an atom. As advocated in [17], this vision reconciles in a unique framework forward reactive reasoning with backward, goal-oriented deliberative reasoning.
In SCIff an interaction model is interpreted in terms of an Abductive Logic Program (ALP) [22]. In general, an ALP is a triple \( \langle P, A, IC \rangle \), where \( P \) is a logic program, \( A \) is a set of predicates named abducibles, and \( IC \) is a set of Integrity Constraints. Roughly speaking, the role of \( P \) is to define predicates, the role of \( A \) is to fill in the parts of \( P \) that are unknown, and the role of \( IC \) is to control the way elements of \( A \) are hypothesized, or “abducted”. Reasoning in abductive logic programming is usually goal-directed, and accounts for finding a set of abducted hypotheses \( \Delta \) built from predicates in \( A \) such that \( P \cup \Delta \models G \) (being \( G \) a goal) and \( P \cup \Delta \models IC \). The idea underlying SCIff is to adopt abduction to dynamically generate the expectations and to perform the conformance checking between expectations and happened events (to ensure that they are following the interaction model). Expectations are
defined as abducibles: the framework makes hypotheses about how participants should behave. Conformance is verified by trying to confirm the hypothesized expectations: a concrete running interaction is evaluated as conformant if it fulfills the specification. Operationally, expectations are generated and verified by the SCIFF proof procedure, a transition system which has been proved sound and complete with respect to the declarative semantics [4]. The proof procedure is embedded within SOCS-SI [3], a Java-based tool capable of accepting different event sources (or previously collected execution traces) and checking if the actual behavior is conformant with respect to a given SCIFF specification.
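As a rough, self-contained illustration of the expectation mechanism (not the actual SCIFF proof procedure; the rule, event names, and time window below are of our own choosing), the following Python sketch generates expectations from a single response-style rule and checks a trace against them:

```python
# Toy SCIFF-style check for one rule: "if the source event happens at T,
# the target event is expected to happen within (T, T + window]".
def expectations_from_trace(trace, source, target, window=10):
    """trace: list of (event, time) pairs; returns (target, lower, upper) expectations."""
    return [(target, t, t + window) for (ev, t) in trace if ev == source]

def conformant(trace, expectations):
    """An expectation (ev, lo, hi) is fulfilled if ev happens at some t with lo < t <= hi."""
    happened = set(trace)
    return all(any(ev == e and lo < t <= hi for (e, t) in happened)
               for (ev, lo, hi) in expectations)

trace = [("pay", 3), ("ship", 9)]
exps = expectations_from_trace(trace, "pay", "ship")
print(conformant(trace, exps))   # True: "ship" occurs within 10 time units of "pay"
```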
5 Mapping B-Tropos Concepts to the SCIFF Framework
In this section we present the mapping of B-Tropos concepts into SCIFF specifications, briefly describing how the obtained formalization is used to implement the skeleton of logic-based agents. The idea behind the mapping is to define a formal statement in SCIFF for each B-Tropos graphical element. This allows for the automatic generation of SCIFF specifications from B-Tropos models.
Table 2 summarizes the formalization of the goal-oriented part of B-Tropos in SCIFF. This part represents the static knowledge of the application domain, so it is modeled inside the SCIFF knowledge base. Two fundamental concepts are goal achievement and task execution. These concepts are modeled in SCIFF using the 6-ary predicates achieve and execute. Intuitively, achieve($x, g, t_i, t_f, i, o$) is true if actor $x$ achieves goal $g$, where $t_i$ is the start time and $t_f$ is the completion time. execute($x, a, t_i, t_f, i, o$) holds if actor $x$ executes task $a$, where $t_i$ and $t_f$ are the start and completion time, respectively. Parameters $i$ and $o$ represent the resources respectively needed and produced by the execution of the task or the achievement of the goal. Start and completion times should satisfy both the duration and the absolute time constraints (ac in Table 2) possibly associated with a goal/task.
The execution of tasks is also determined by the satisfaction of fulfillment conditions and the generation of task start and completion events. These events are represented using literals of the form event($ev, x, a, r$) where $ev \in \{start, end\}$, $a$ is the task that has generated the event, $x$ is the actor who has executed the task, and $r$ is a list of resources. In particular, resources associated with start events represent the input of the task, whereas resources associated with completion events refer to the output.
In some cases the designer may prefer to keep the model at an abstract level, so goals can be neither refined nor associated with tasks. Abduction allows us to cope with such a lack of information by reasoning on goal achievement in a hypothetical way. In particular, we have introduced a new abducible called achieved to hypothesize that the actor has actually reached the goal.
Tropos relations are then formalized in SCIFF as rules on the basis of the following concepts:
Table 2. Mapping of the goal-oriented proactive part of $\mathcal{B}$-Tropos onto SCIFF
<table>
<thead>
<tr>
<th>B-Tropos element</th>
<th>SCIFF formalization</th>
</tr>
</thead>
<tbody>
<tr>
<td>Leaf goal</td>
<td>achieve($X, G, T_i, T_f, I, O$) ← achieved($X, G, T_i, T_f, I, O$), $T_f \in [D_{\text{min}}, D_{\text{max}}] + T_i$, $ac_1$, $ac_2$.</td>
</tr>
<tr>
<td>Leaf task</td>
<td>execute($X, A, T_i, T_f, I, O$) ← $E$(event(start, $X, A, I$), $T_i$), $E$(event(end, $X, A, O$), $T_f$), $T_f \in [D_{\text{min}}, D_{\text{max}}] + T_i$, $ac_1$, $ac_2$, fulfillment condition.</td>
</tr>
<tr>
<td>AND decomposition</td>
<td>achieve($X, G, T_i, T_f, I, O$) ← achieve($X, G_1, T_{i1}, T_{f1}, I_1, O_1$), ..., achieve($X, G_n, T_{in}, T_{fn}, I_n, O_n$), $T_i = \min(T_{i1}, ..., T_{in})$, $T_f = \max(T_{f1}, ..., T_{fn})$, $I = I_1 \cup ... \cup I_n$, $O = O_1 \cup ... \cup O_n$.</td>
</tr>
<tr>
<td></td>
<td>execute($X, A, T_i, T_f, I, O$) ← execute($X, A_1, T_{i1}, T_{f1}, I_1, O_1$), ..., execute($X, A_n, T_{in}, T_{fn}, I_n, O_n$), $T_i = \min(T_{i1}, ..., T_{in})$, $T_f = \max(T_{f1}, ..., T_{fn})$, $I = I_1 \cup ... \cup I_n$, $O = O_1 \cup ... \cup O_n$.</td>
</tr>
<tr>
<td>OR decomposition</td>
<td>achieve($X, G, T_i, T_f, I, O$) ← achieve($X, G_1, T_i, T_f, I, O$), ... achieve($X, G_n, T_i, T_f, I, O$).</td>
</tr>
<tr>
<td></td>
<td>execute($X, A, T_i, T_f, I, O$) ← execute($X, A_1, T_i, T_f, I, O$), ... execute($X, A_n, T_i, T_f, I, O$).</td>
</tr>
<tr>
<td>Means-end</td>
<td>achieve($X, G, T_i, T_f, I, O$) ← execute($X, A, T_i, T_f, I, O$).</td>
</tr>
<tr>
<td>Goal dependency</td>
<td>achieve($X, G, T_i, T_f, I, O$) ← $E$(delegate($X, Y, G, T_f$), $T_d$), $T_d > T_i$, $T_d < T_f$.</td>
</tr>
<tr>
<td>Task dependency</td>
<td>execute($X, A, T_i, T_f, I, O$) ← $E$(delegate($X, Y, A, T_f$), $T_d$), $T_d > T_i$, $T_d < T_f$.</td>
</tr>
</tbody>
</table>
- AND/OR-decompositions and means-end are trivially translated to SCIFF.
- In goal (task) dependencies, it is expected that the depender appoints the dependee to achieve a goal (execute a task) before a certain time instant. To this end, we have introduced the event $\text{delegate}(x, y, g, t)$ to indicate that actor $x$ delegates the achievement of goal $g$ to actor $y$, and that $y$ has to achieve $g$ by time $t$. A delegation is observable, and it is therefore recorded in the execution trace.
Table 3. Mapping of B-Tropos response connections onto SCIFF
<table>
<thead>
<tr>
<th>Connection</th>
<th>Integrity constraint</th>
</tr>
</thead>
<tbody>
<tr>
<td>Response</td>
<td>\( \text{hap}(\text{event}(Ev, X_1, A_1, R_1), T_1) \land c \rightarrow \text{exp}(\text{event}(Ev, X_2, A_2, R_2), T_2) \land r \land T_2 \in T_b^{+T_1}. \)</td>
</tr>
<tr>
<td>Weak response</td>
<td>\( \text{hap}(\text{event}(Ev, X_1, A_1, R_1), T_1) \land c \land \text{hap}(\text{event}(Ev, X_2, A_2, R_2), T_2) \rightarrow r \land T_2 \in T_b^{+T_1}. \)</td>
</tr>
</tbody>
</table>
The reactive part of B-Tropos encompasses both the reaction to a dependency and process-oriented constraints. As already pointed out, process-oriented constraints are inspired by DecSerFlow/ConDec template formulas, for which a preliminary mapping to SCIFF has already been established [11]. Connections are translated using ICs. For the sake of space, we refer to [4] for a detailed description of how SCIFF handles constraints. Here we present some examples of how process-oriented constraints are formalized (Table 3). Such formulas specify the informal description given in Section 3. The response connection constraint states that if the source, \( \text{event}(Ev, X_1, A_1, R_1) \), happens and the trigger condition, \( c \), is satisfied, then the target, \( \text{event}(Ev, X_2, A_2, R_2) \), is expected to happen and the restriction condition imposed on the target, \( r \), must be satisfied. In addition, the target is expected to occur within \( T_b^{+T_1} \). Weak response constraints are verified a posteriori. In particular, when the connected events happen and the triggering condition is satisfied, the restriction imposed on the target must be satisfied. Similarly to response connections, the constraint is verified if the target event occurs within \( T_b^{+T_1} \). Negative response constraints spot an inconsistency when the connected events happen and all conditions are satisfied.
We remark that the framework allows one to constrain non-leaf tasks and goals, but only start and completion events of leaf tasks are considered as observable events. To address this issue, we have introduced intensional predicates hap and exp to represent the happening and expectation of (possibly) composite events. For instance, a leaf task starts (or is completed) only if there is evidence for it (i.e., the corresponding event happened). Accordingly, for a leaf-task \( A \):
\[
\text{hap}(\text{event}(Ev, X, A, R), T) \leftarrow H(\text{event}(Ev, X, A, R), T).
\]
\[
\text{exp}(\text{event}(Ev, X, A, R), T) \leftarrow E(\text{event}(Ev, X, A, R), T).
\]
Composite events recursively follow the goal analysis approach (a minimal sketch of this recursion is given after the list):
- the start/completion of an OR-decomposed task happen (resp. is expected to happen) when one of its (sub)tasks start/completion happens (resp. is expected to happen);
- the start of an AND-decomposed task happens (resp. is expected to happen) when its first (sub)task starts (resp. is expected to start);
- the completion of an AND-decomposed task happens (resp. is expected to happen) when its last (sub)task is completed (resp. is expected to be completed).
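The following Python sketch (an encoding of our own, not SCIFF syntax; task names are illustrative) shows one way to realize this recursion over AND/OR-decomposed tasks, given the observed start/completion times of leaf tasks:

```python
def times(task, leaf_times):
    """Return the (start, end) times of a (possibly composite) task, or None if not executed.
    task is ('leaf', name), ('and', [subtasks]) or ('or', [subtasks]);
    leaf_times maps a leaf name to its observed (start, end) pair."""
    kind, body = task
    if kind == "leaf":
        return leaf_times.get(body)                 # observed start/completion of a leaf
    sub = [times(t, leaf_times) for t in body]
    if kind == "or":
        executed = [s for s in sub if s is not None]
        return executed[0] if executed else None    # any executed alternative suffices
    if kind == "and":
        if any(s is None for s in sub):
            return None                             # all subtasks must have happened
        # start of the first subtask, completion of the last one
        return (min(s[0] for s in sub), max(s[1] for s in sub))

plan = ("and", [("leaf", "calc_bill"),
                ("or", [("leaf", "eval_bill"), ("leaf", "eval_alt")])])
print(times(plan, {"calc_bill": (1, 4), "eval_bill": (5, 8)}))   # (1, 8)
```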
To model the reaction to a dependency, we assume that when a dependee $Y$ receives from a depender $X$ a request for achieving a goal $G$, $Y$ reacts by undertaking the commitment of achieving $G$:
$$H(\text{delegate}(X,Y,G,T_f), T_d) \rightarrow \text{achieve}(Y,G,T_i,T_f,I,O) \land T_i > T_d.$$
The provided formalization can be used to directly implement the skeleton of logic-based agents, as for example the ones described in [1]. Such agents follow the Kowalski-Sadri cycle for intelligent agents, by realizing the think phase with the SCIFF proof procedure and the observe and act phases in JADE. The proof procedure embedded in SCIFF agents is equipped with the possibility to transform expectations about the agent into happened events, and with a selection rule for choosing a behavior when several choices are available. In particular, each actor represented in a $B$-Tropos model can be mapped into a SCIFF agent whose deliberative pro-active part (formalized in the agent's knowledge base) is driven by the goal/task decomposition of its root goal, and whose reactive behavior (formalized as a set of ICs) is determined by the delegation mechanism and the process-oriented constraints. The agent that wants to achieve the global goal (e.g., Customer Care in Fig. 1) starts by decomposing it, whereas other agents wait until an incoming request is observed. In this case, the dependency reactive rule of the agent is triggered, and the agent attempts to achieve the assigned goal. This goal may be either decomposed or delegated to other agents until expectations proving its achievement are generated. Such expectations are then transformed into happened events, that is, actions performed by the agent.
Figure 6 presents the SCIFF formalization corresponding to the $B$-Tropos diagram of Fig. 5. Here Research & Development and Warehouse are respectively represented as r&d and wh, and the symbol $\equiv$ is used to denote unification. In that figure one can see how the formalized SCIFF specification is assigned to the Warehouse and R&D units. To get an intuition about how the two agents act and interact, let us consider the case in which the R&D unit intends to achieve the goal assigned by the Manufacturing division. The unit decomposes goal evaluate solution into its subparts until a set of expectations, which lead to the achievement of the goal, is determined. Below we list a possible set of expectations:
- $E(\text{event}(\text{start}, r&d, \text{calc bill}, []), T_{scb})$,
- $E(\text{event}(\text{end}, r&d, \text{calc bill}, [\text{Bill}]), T_{ccb})$, $T_{ccb} > T_{scb}$,
- $E(\text{event}(\text{start}, r&d, \text{eval bill}, [\text{Bill}]), T_{seb}), T_{seb} > T_{ccb}$,
- $E(\text{event}(\text{end}, r&d, \text{eval bill}, []), T_{ceb}), T_{ceb} > T_{seb}$,
- $E(\text{delegate}(r&d, wh, \text{eval resources}, T_{cer}), T_{ser})$.
This set of expectations can be read as an execution plan, consisting of two concurrent parts: (1) a sequence of events related to start/completion of leaf tasks, ordered by the response relation which constrains the bill calculation and evaluation; (2) the delegation of resources evaluation, which should be communicated
to the Warehouse (for the sake of brevity we do not present here the reaction rule for task dependency, which has the same intuition as the one for goal dependency). In particular, when the expectation about the delegation is transformed into a happened event by the R&D agent, the Warehouse agent is committed to achieve the delegated goal inside the time interval \( (T_{ser}, T_{cer}) \). It is worth noting that the framework can identify inconsistencies in temporal and/or data requirements specifications by means of unsatisfiable constraints. This is, for instance, the case in which the R&D unit requires an evaluation of the availability of resources, e.g., in 5 time units, whereas the Warehouse needs at least 10 time units to verify the presence of resources. In these situations, the designer needs either to relax constraints (e.g., extending the time) or to adopt new solutions for increasing the performance of the system (e.g., providing the Warehouse with a more efficient search application).
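In SCIFF this kind of inconsistency surfaces as an unsatisfiable constraint store; the following toy Python check (a simplification of our own, not the actual CLP solver) captures the essence of the example above:

```python
def compatible(min_duration, deadline):
    """The completion time must satisfy both T_end >= T_start + min_duration and
    T_end <= T_start + deadline, which is satisfiable iff min_duration <= deadline."""
    return min_duration <= deadline

# The Warehouse needs at least 10 time units, but R&D allows only 5: inconsistent.
print(compatible(10, 5))   # False
```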
Besides the implementation of logic-based agents, SCIFF can also be used to perform different kinds of verification, namely performance verification and conformance verification. Performance verification aims at proving that stakeholders can achieve their strategic goals in a given time. Such verification can also be used to evaluate different design alternatives in terms of system performance. For example, one could ask SCIFF to verify whether an execution exists such that the top goal of one of the stakeholders is achieved within a given deadline. SCIFF will then try to generate such an execution, by means of an intensional (i.e., partially specified) execution trace; generally speaking, this is achieved by transforming expectations into happened events. Conformance verification [4] is related to the auditing measures that can be adopted for monitoring the activities performed by actors within the system. The idea underlying conformance verification is to analyze system logs and compare them with the design of the system, to verify whether the actual behavior of a system effectively complies with model expectations. This allows system administrators to understand whether or not stakeholders have achieved their goals and, if it is not the case, to predict future actions.
6 Related Work
While the literature on single aspects of the framework is huge (many references can be found to the papers describing Tropos, SCIFF, and DecSerFlow/ConDec), not much work has been done at the intersection of the corresponding domains. Several formal frameworks have been developed to support the Tropos methodology. For instance, Giorgini et al. [19] proposed a formal framework based on logic programming for the analysis of security requirements. However, the framework does not take into account temporal aspects of the system. In [9] a planning approach has been proposed to analyze and evaluate design alternatives. Though this framework explores the space of alternatives and determines a (sub-)optimal plan, that is, a sequence of actions, to achieve the goals of stakeholders, it is limited in defining temporal constraints among tasks. Fuxman et al. [18] proposed Formal Tropos, which extends Tropos with annotations that characterize the temporal evolution of the system, describing, for instance, how the network of relationships evolves over time. Formal Tropos provides a temporal logic-based specification language for representing Tropos concepts together with temporal constructs, which are verified using a model-checking technique such as the one implemented in NuSMV. This framework has been used to verify the consistency of requirements models [18] as well as business processes against business requirements and strategic goal models [23]. However, Formal Tropos does not support abduction, and thus it is not able to generate expectations and perform conformance checking between expectations and happened events. Finally, we mention the work by Cares et al. [10], who proposed to implement software agents in Prolog starting from Tropos models. In particular, they proposed to specify the programming activation time through four implementation attributes, namely at begin, at end, at call, and always. The difference with our proposal lies in the generation of the implementation, besides the employed temporal constructs: they do not provide an automatic encoding of Tropos models into Prolog, so the implementation is manual.
Recent years have seen the need for bridging the gap between requirements engineering and business process design by providing support for developing business processes on top of requirements models and for verifying whether a business process actually meets its business goals. For instance, Lapouchnian et al. [25] proposed a systematic requirements-driven approach for business process design and configuration management, which adopts goal models to capture alternative process configurations. Differently from our work, they do not consider the relationships between agents, so that their framework is inadequate to describe business processes spanning across multi-agent systems. Frankova et al. [16] have used the SI* modeling language [27], an extension of Tropos addressing security and privacy issues, as a basis for the definition of Secure BPEL, a specification language that extends WS-BPEL [6] for modeling secure business processes. The objective of this framework is to assist business process analysts in deriving the skeleton of secure business processes from early requirements analysis. Finally, López et al. [26] presented a reasoning method for verifying the consistency between SI* models and BPMN specifications [7]. In particular, the authors have investigated the connection between business processes and requirements models, introducing the notion of goal equivalence based on trace semantics.
Several works also attempt to define a formal semantics underlying graphical business process models and to design agent systems. In the business process domain, Wong et al. [37] provided a formal semantics for a subset of BPMN in terms of the process algebra CSP [30], whereas Dijkman et al. [15] used Petri Nets [28]. Their objective is to formally analyze and compare business process models. We differ from these proposals since the objective of our work is to provide a requirements-driven framework for business process and agent system design. The use of computational logic for the flexible specification and rigorous verification of agent interaction is adopted by many proposals. While other works (e.g., [36]) use temporal logic to model the temporal dimension of interaction, SCIFF exploits a constraint solver and adopts an explicit representation of time.
Event Calculus [24] was introduced by Kowalski and Sergot as a logic programming formalism for representing events and their effects. This formalism explicitly reasons upon properties (fluents) holding during time intervals. Differently from Event Calculus, our framework treats time like other variables, in association with domains, which makes it possible to express constraints (e.g., deadlines) and to exploit an underlying constraint solver. Among the works based on Event Calculus, we cite the work by Shanahan [32], who proposed the abductive event calculus that includes the concept of expectation, and the work by Cicekli et al. [12], who formalized workflows using Event Calculus. In Shanahan's work events and expectations are of the same nature and both are abduced, while our expectations should match the actual events. This is due to the different underlying assumptions and, consequently, the different focus: while we assume that the history is known, Shanahan proposes to abduce events. Similarly to [15,37], Cicekli et al. focus on the execution of business processes, whereas the reconciliation between a business process and the business goals that have motivated the process definition is completely ignored.
Finally we mention that a mapping of DecSerFlow into Linear Temporal Logic (LTL) [29] has been proposed in [34]. It can be used to verify or enforce conformance of service flows and also to directly enact their execution. The advantage of using
SCIFF instead of LTL is that SCIFF can handle time and data in an explicit and quantitative way, exploiting CLP to define temporal and data-related constraints.
7 Conclusions
In this work we have proposed to integrate a number of techniques for information systems engineering, with the aim of reconciling requirements elicitation with specification, prototyping and analysis, inside a single unified framework. We have presented $B$-Tropos, an extension of Tropos with declarative process-oriented constraints, and its mapping into the SCIFF language. We have mainly focused on the modeling and mapping of aspects related to declarative business processes, using connections inspired by the DecSerFlow and ConDec languages. Augmenting a Tropos model with such constraints has the effect that both the proactive and the reactive, process-oriented agent behavior can be captured within the same diagram.
The mapping of $B$-Tropos onto SCIFF makes it possible to directly implement logic-based agents starting from the enhanced Tropos model, as well as to perform different kinds of verification, namely to check if the model satisfies a given property and to monitor if the execution trace of a real system is actually compliant with the model.
The work presented here is a first step towards the integration of a business process in the requirements model. We are currently running experiments on prototyping as well as on property and conformance verification. Some results are presented in [13], where $B$-Tropos models are also used to generate possible execution traces, and to animate agents in the context of the CLIMA Contest Food Collection problem [14], in line with the aforementioned work by Cares and colleagues [10]. We are also investigating in depth the formal properties of our proposed mapping, and are trying to understand how to better exploit the underlying SCIFF constraint solver by introducing more complex scheduling and resource constraints so as to capture more detailed business requirements and agent interactions. As a future activity, we plan to investigate the generation of executable business process specifications (such as WS-BPEL) from $B$-Tropos models. Another direction under investigation concerns business process compliance. In particular, we are interested in the problem of the interplay between business and control objectives during business process design [31]. Finally, we intend to conduct empirical studies on large-scale, industrial-size case studies for a practical evaluation of the framework.
References
Methods and Problems in Coding Natural Language Survey Data
Rodger Knaus, American University
1. Summary
Experiments with computerized coding of natural language data indicate that agreement of 80% or better can be achieved between computerized coding and the codes assigned by experts; such performance is comparable to that achieved by entry-level coding clerks. This paper presents a class of algorithms for computerized coding based on representing semantic information in natural language constructs as vectors over the set of codes to be assigned.
2. Overview
2.1 The Natural Language Coding Problem
Natural language is useful as a response medium in surveys on subject areas such as employment, household expenditures, or health and safety. In areas such as these, the set of possible responses for the variable being investigated (e.g., what kind of job the respondent has) is very large. For such large domains, natural language has the following advantages:
ECONOMY IN THE QUESTION SET: No reasonably sized set of multiple choice or other artificial response medium questions can solicit such complete information as simple natural language questions such as "Where do you work?" and "What do you do?";
OBJECTIVITY OF THE SURVEY QUESTIONS: Artificially structured questions also impose the surveyor's view of the subject upon the respondent to a greater degree than natural language questions, because natural language questions allow the respondent to answer in a way that reflects the respondent's perception of the subject matter rather than the surveyor's;
GENERALITY OF THE RESPONSE DATA: Should one wish to reanalyze the survey data, using, for example, a different partition of the set of possible responses, natural language responses contain the information for such a reexamination, while artificial response data usually does not;
SIMPLICITY FOR THE RESPONDENT: Natural language reduces the effort for a respondent in completing a questionnaire, because both the question and response are in a medium already familiar to the respondent. This allows instructions on how to complete the survey to be simplified. Because a single natural language question captures a large amount of information, the use of natural language responses probably also shortens a survey.
Against the advantages of NL data listed above must be weighed the difficulty of analyzing natural language data. For responses to multiple choice or other questions with a small fixed set of possible responses, there is a simple correspondence between the form of a response and its meaning. No such simple relation between form and meaning exists for natural language. Natural language (NL) data is often coded as a first step in the statistical analysis of such data. In the coding process, each response is assigned a value from some finite set (call it C) of codes. Each value, or code, in this code set represents a distinct response for the purpose of analyzing the NL data; conversely, all NL responses mapped to the same member of C are considered identical for subsequent analysis of the survey data. Coding is thus the process of filtering out the variations of linguistic expression from responses which, for the purposes of a survey, represent the same response.
In the past and in most current surveys, natural language responses have been coded by persons who assigned codes to the responses. For large surveys, such as the U.S. monthly Current Population Survey (about 70,000 responses) or the decennial population census (17 million responses), this hand coding is expensive, time-consuming and error-prone. These problems with human coding have motivated research in computerized coding at statistical agencies in the U.S., Sweden and elsewhere.
2.2 The Coding Algorithm
Computerized coding of natural language data can be carried out in the following way: If C is the set of codes which are to be assigned to survey responses, each recognizable linguistic construct L is represented for the purposes of coding as a vector over C in which the cth component (for c ∈ C) is the expectation that c should be assigned when L occurs. Linguistic constructs which can be recognized by computer include word roots, words, phrases and kernel sentence deep structures. There is a natural part-whole relation on these linguistic constructs, allowing one to find a set of most inclusive linguistic constructs for a given natural language response. The code assignment algorithm given below first examines the vectors of the most inclusive structures. If these are sufficient for coding, a code is assigned. If not, then the vectors of smaller linguistic structures are used to construct a vector for the given NL response. If this constructed vector implies a code, that code is assigned. Otherwise the process is continued through all vector combination methods in the current implementation of the coding algorithm, and through all linguistic structures down to word roots, until a
code is assigned, or until all possible code assignment strategies are exhausted. In pseudocode, this is expressed as follows:
function code(nlr: natural language response): code;
var
  s:    set of linguistic constructs;
  v:    vector over codes;
  cmax: set of codes;
  fn:   function from a set of C-vectors to a C-vector;
  fns:  set of such combining functions;
  c:    code;
begin
  s   := set of maximal linguistic constructs occurring in nlr;
  c   := undefined;
  fns := set of functions which combine s-vectors
         into a vector for the response as a whole;
  fn  := first member of fns;
  repeat
    v    := fn applied to the vectors of the members of s
            which occur in nlr (e.g., their sum);
    cmax := set of codes with a maximal v-component;
    if cmax is a singleton {c0} and
       probability(c0 is the correct code) > the predetermined
       desired coding reliability
    then c := c0
    else if there is another way of combining s-vectors
    then fn := the next member of fns
    else begin
      s   := set of linguistic components of the current members of s;
      fns := set of functions which combine s-vectors
             into a vector for the response as a whole;
      fn  := first member of fns
    end
  until c <> undefined or s = nil;
  code := c;
end;
Geometrically, we can think of each of the codes as a unit vector and that all of these code vectors are mutually perpendicular. When given a response, we construct vectors representing the response in the space spanned by these code vectors. When the resulting response vector has a direction sufficiently similar to one of the code vectors, the code for that vector is assigned. Since the code vectors are an orthonormal basis for the response vectors, the most similar code vector under the cosine measure of similarity is that code vector having the largest component in the response vector. The vector-combining procedure template presented so far represents a variety of actual procedures depending on what linguistic structures are recognized, and how the vectors are computed and combined. A series of such particular coding procedures can be combined into one larger overall coding program by trying the particular procedures in sequence on each data record, until some particular procedure in the sequence assigns a code.
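A minimal Python sketch of this argmax view (the word vectors, industry codes, and the margin threshold below are purely illustrative) sums the component vectors and assigns the code with the largest component only when it clearly dominates the runner-up:

```python
from collections import Counter

def assign_code(response_words, word_vectors, margin=0.2):
    """word_vectors: dict word -> {code: weight}, i.e., a vector over the code set C.
    Returns the dominant code, or None if no component stands out by `margin`."""
    total = Counter()
    for w in response_words:
        for code, weight in word_vectors.get(w, {}).items():
            total[code] += weight                    # add component vectors
    if not total:
        return None
    ranked = total.most_common(2)
    best_code, best_score = ranked[0]
    runner_up = ranked[1][1] if len(ranked) > 1 else 0.0
    return best_code if best_score - runner_up >= margin else None

vectors = {"iron": {"370": 0.9, "371": 0.1}, "foundry": {"370": 0.8}}
print(assign_code(["iron", "foundry"], vectors))   # "370"
```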
One plausible criterion for choosing and ordering procedures in the sequence of particular procedures is to start with procedures that use a great deal of linguistic knowledge. The records which remain unprocessed after applying this linguistic knowledge may reasonably be considered to be composed of statistically independent words, or other small linguistic units. Therefore the particular vector-processing procedures which occur late in the sequence can use statistical techniques which rely on the assumption of statistical independence of the small linguistic units.
2.3 Experiments with Industry Data
Work on automated coding was performed at the U.S. Bureau of the Census Programming Research Staff by Eli Hellerman and the author during the period 1976-79, as part of the Census Bureau's ongoing effort to automate the coding of industry and occupation data. As an example of automatic coding on actual survey data, results of an experiment from this work are presented.
<table>
<thead>
<tr>
<th>strategy</th>
<th>% coded</th>
<th>% agreement</th>
</tr>
</thead>
<tbody>
<tr>
<td>exact match on IA</td>
<td>32.0</td>
<td>100</td>
</tr>
<tr>
<td>exact match on IB</td>
<td>9.4</td>
<td>93.7</td>
</tr>
<tr>
<td>almost exact match on IA & IB</td>
<td>16.9</td>
<td>84.6</td>
</tr>
<tr>
<td>sum heuristic weights on IB</td>
<td>40.6</td>
<td>82.7</td>
</tr>
<tr>
<td>product conditional probabilities on IB</td>
<td>48.7</td>
<td>62.1</td>
</tr>
<tr>
<td>sum heuristic weights on IA & IB</td>
<td>35.9</td>
<td>50.0</td>
</tr>
</tbody>
</table>
Overall results: 96.4% coded, 82.2% agreement with expert-assigned code on the set where a code was assigned; the s.d. of this agreement percentage is about 2.6%.
In the experiment, each strategy was tried on all the records which remained uncoded by previously tried strategies. Therefore while this table presents an ordering of strategies which was experimentally found to produce good results on the data, it should not be used to compare different strategies, because later strategies get the generally harder records which earlier strategies fail to code. The terms used in the above table are defined as follows:
strategy: the method used to combine
vectors;
% coded: the part of the sample for which a strategy assigned a code;
% agreement: on the sample on which a strategy assigned a code, the fraction where the automatic code agrees with that assigned by an expert coder;
2.4 Experimental Coding Strategies
The strategies used in this experiment are described briefly below; a more detailed discussion of coding strategies appears later in the paper.
exact match: a code is assigned iff the response exactly matches a phrase in the coding handbook; the code of that handbook phrase is then assigned. Although not implemented on the computer in this manner, we may think of this as a vector method in the spirit of the overall algorithm, in which the linguistic units are phrases whose vectors have only one non-zero component. When only one of these vectors appears, that code is assigned. However the combination of two of these vectors is the null vector unless the codes are the same.
almost exact matching: A code is assigned if there is a unique phrase in the coding handbook which has the most words in common with the response. This is a matching strategy which has been found useful in information retrieval, and can be thought of as a vector method in which the vectors represent word occurrences in phrases in the coding handbook; the set of vectors to be combined are all such occurrence vectors for words in the response. These vectors have a 1 for the code of the phrase and 0 elsewhere. They are combined by adding the vectors belonging to the same coding manual phrase. If a single phrase achieves the uniquely highest score, the code of that phrase is assigned.
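A small Python sketch of this strategy (the handbook phrases and codes are invented for illustration) counts word overlap between the response and each handbook phrase, and assigns a code only when a single phrase wins:

```python
def almost_exact_match(response, handbook):
    """handbook: list of (phrase, code) pairs.
    Assign the code of the unique phrase sharing the most words with the response."""
    resp_words = set(response.lower().split())
    scores = [(len(resp_words & set(phrase.lower().split())), code)
              for phrase, code in handbook]
    best = max((s for s, _ in scores), default=0)
    winners = [code for s, code in scores if s == best]
    return winners[0] if best > 0 and len(winners) == 1 else None

handbook = [("iron foundry", "370"), ("retail shoe store", "662")]
print(almost_exact_match("foundry for iron castings", handbook))   # "370"
```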
sum of heuristic weights: The linguistic units are word roots. The vector for each word occurrence in the response is of the form \(H \cdot V\), where \(H\) is a scalar called the heuristic weight of the word, defined in the section on weighting, and \(V\) is the vector computed from the conditional probabilities of codes given the word. If \( p(c_i/w) \) is the conditional probability of code \( c_i \) given word \( w \), the \( c_i \)th component of \( V \) is \( p(c_i/w)^k \), where \( k \) is around 0.05. The effect of raising \( p \) to this power is to make \( V \) into a filter which adds weights close to \( H \) to all codes with \( p(c_i/w) \) bounded away from 0, while adding weights close to 0 for codes with very small conditional probabilities. \( H \cdot V \), in other words, defines a fuzzy set of codes which are acceptable when given the word \( w \). The fuzziness filters out codes for
which the non-zero probabilities are probably the result of human coding errors in the sample used to construct the conditional probabilities.
The \( H\*V \) vectors are combined by addition. A code is assigned if the best code is better than the next-best by an amount determined by a linear function of the best code score; (a possibly better, more statistically motivated criterion for this 'when to code' decision is presented below).
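The following Python sketch is our own rendering of this strategy; the margin coefficients `alpha` and `beta` stand in for the unspecified linear "when to code" rule. It combines the per-word \(H \cdot V\) filters by addition and assigns the top-scoring code only when it clearly beats the runner-up:

```python
from collections import defaultdict

def heuristic_sum_code(words, cond_prob, hweight, k=0.05, alpha=0.1, beta=0.0):
    """cond_prob[w][c] = p(c | w) estimated from hand-coded data;
    hweight[w] = heuristic weight H of word w.
    Each word adds H * p(c|w)**k to the score of every code c with p(c|w) > 0."""
    score = defaultdict(float)
    for w in words:
        for c, p in cond_prob.get(w, {}).items():
            if p > 0:
                score[c] += hweight.get(w, 0.0) * p ** k
    if not score:
        return None
    ranked = sorted(score.items(), key=lambda kv: kv[1], reverse=True)
    best_c, best_s = ranked[0]
    second = ranked[1][1] if len(ranked) > 1 else 0.0
    # assign only if the margin exceeds a linear function of the best score
    return best_c if best_s - second > alpha * best_s + beta else None
```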
The ordering of these algorithms conforms to the principle of increasing statistical independence in the data record. The exact match strategy is one which assumes that the response is an idiom, the most specific sort of linguistic knowledge. The almost-exact matching is a procedure in which lexical and semantic rules and transformations are applied to the data record in an attempt to match the kernel sentence level case grammar deep structure of the response with that of one of the phrases in a coding dictionary. In the sum-of-heuristic weights algorithm, each word may be considered as an independent weighted filter which adds its weight to the total score of the codes which the word filter accepts. Finally, the product-of-probabilities procedure assigns each code a score proportional to its probability under the assumption that all the words occurred independently of each other, and that only one code can be assigned per data record. A more detailed description of the algorithms used in the experiment cited above appears in the 1981 paper by Knaus. More recent experiments with better results have been conducted at both the U.S. Census Bureau and the Central Statistical Office of Sweden, and are described in the papers of Appel and Lyberg.
3. Semantic Representations for Coding
One of the major problems in building an automated coding system is to find a representation of a linguistic construct adequate for assigning the right code. Ideally such a semantic representation should be largely free from the particular surface structure choices of vocabulary and grammar with which particular respondents encode their
responses; the same meaning, no matter how expressed, should have the same semantic representation. Once such a representation has been selected, the coding of a response can be broken down into the following steps:
1. Map the natural language response into its semantic representation;
2. Assign a code using the semantic representation;
3. Construct a vector.
3.1 Construction by Machine: A Requirement
Automatic coding proceeds by comparing the information contained in a NL survey response with a database of such knowledge for the subject area of the survey (called the knowledge base of the system in artificial intelligence terminology). While many semantic representations of NL information have been proposed in artificial intelligence and linguistics, for automatic coding a representation of knowledge must be chosen which makes the knowledge base largely constructible by machine. This is because the domain of discourse of many surveys is large (e.g., all economic activity for the census industry data) and because the range of linguistic expression over different respondents (e.g., a large random sample of the general population) is also great. Knowledge representations requiring hand work on each different word, word sense, or meaning are not generally suitable where these extensive knowledge bases are required.
3.2 Vectors over Codes
The semantic representation used in our experiments with automatic coding is a real vector over the coding set C. The meaning of each word, phrase, or other linguistic construct, is, for the purposes of coding, represented by a vector over C; the value of the ci-th component represents the tendency of the construct to represent a response which should be coded to ci. For example, a phrase which always is coded to a particular code c0 might have a c0 component of 1 and all other components 0. The remainder of this paper discusses how to build these vectors for words from hand-coded data, how to combine the word vectors into vectors representing the entire NL response, and how to extract the code from the constructed vector.
3.3 Building the Database
The semantic representation vectors, called semantic vectors, or simply vectors in the following, can be built from a hand-coded sample H of data. Let L be a linguistic construct for which a vector is to be constructed. L must have the property that a computer can be programmed to recognize it reliably, although 100% reliability is not necessary. Examples of such linguistic constructs are words, word roots, phrases and kernel sentence level deep structures.
For each such construct L a count vector is constructed from the sample H of hand-coded responses. This count vector is a vector over C in which the ci component is the number of times an occurrence of L was observed in responses in H which were hand-coded to ci. These count vectors are built by passing H through a program which recognizes the linguistic constructs which occur in each record, and then increments the count for the hand-assigned code in a current count vector for each construct occurring in the record.
From this count vector a normalization process can be used to construct the corresponding semantic vector. One useful normalization process is
\[
\text{semantic}(c_i) = \frac{\text{count}(c_i)}{\sum_j \text{count}(c_j)},
\]
which makes \(\text{semantic}(c_i)\) the conditional probability of assigning code \(c_i\) when linguistic feature L occurs. This probability, which we will write \( p(c_i/L) \), will be used extensively in our attempts at automatic code assignment. Besides hand-coded data, another source of information about how to code are the coding handbooks used by human coders. These consist of NL phrases and associated codes; the U.S. Census industry coding handbook, for example, contains about 15,000 such phrases. This information can be recast into the above form of a vector over the codes in the following way: If the code for phrase P is c, then the cth component of the vector for P is 1 and all other components are 0.
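A small Python sketch of this construction (word-level constructs only; the sample records and codes are invented, and each word is counted once per record as a simplification) builds the count vectors and normalizes them into conditional probabilities \(p(c_i/L)\):

```python
from collections import defaultdict

def build_semantic_vectors(sample):
    """sample: list of (response_text, hand_assigned_code) pairs.
    Returns word -> {code: p(code | word)} built from co-occurrence counts."""
    counts = defaultdict(lambda: defaultdict(int))
    for text, code in sample:
        for word in set(text.lower().split()):      # count each word once per record
            counts[word][code] += 1
    vectors = {}
    for word, by_code in counts.items():
        total = sum(by_code.values())
        vectors[word] = {c: n / total for c, n in by_code.items()}
    return vectors

sample = [("iron foundry", "370"), ("iron works", "370"), ("retail shoes", "662")]
print(build_semantic_vectors(sample)["iron"])   # {'370': 1.0}
```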
3.4 Weighting of Vectors
The count and conditional probability vectors defined in the previous section have a direction which expresses the tendency of the construct they represent to cause a particular code to be assigned. In this section we discuss a technique for varying the length of these vectors according to their usefulness in coding. One of the ways in which the vectors of a response's components can be combined into a vector for the response as a whole is to add the component vectors. By giving a greater weight to those components which have been found to be most useful in coding, one reduces coding errors caused by random variation in the vectors of components, such as the word 'company', which have little usefulness in coding.
3.4.1 The Heuristic Weight. One way of weighting vectors, which we call
the heuristic weight, is based on the entropy of the distribution of codes for a given component; (in the Census experiments, the linguistic components for which the heuristic weight was computed were word roots, but the computation works for all vectors over C, regardless of what the vectors represent.) Let $C_0$ be the count vector for the feature 'is a survey response', i.e. the $c_i$ component is the number of responses in our hand-coded sample which have code $c_i$; let $V_0$ be the corresponding vector of probabilities of assigning $c_i$. Now if $V_c$ is the probability vector for a construct $c$, $V_c$ represents a construct useful for coding if $V_c$ is not similar to $V_0$; however, if $V_c$ is nearly codirectional with $V_0$, then $V_c$ is of little use in coding, because the probability of $c$ occurring is independent of what code is assigned. If the $c_i$ components of $V_0$, $V_c$ are $v_{0i}$, $v_{ci}$, then we form probabilities $p_i'$ as follows:
Let $v_{ci}' := v_{ci} / v_{0i}$;
$p_i' := v_{ci}' / \sum (v_{ci}')$;
The $p_i'$ represent the probabilities of assigning code $c_i$ when given construct $c$ under the conditions that the conditional probabilities of assigning $c_i$ when given $c$ remained the same, but the distribution of codes in the sample as a whole is made uniform. As the next step in computing the heuristic weight, we compute the entropy $E'$ using the uniformized probabilities $p_i'$:
$$E' = \sum p_i' \ln(p_i')$$
over all $p_i'$.
This is the entropy of the distribution of the vector $V_c$ when the non-uniformity of the overall distribution $V_0$ has been normalized out. $E'$ is a minimum when $V_c$ is similar to $V_0$ and approaches 0 when $V_c$ has a large probability for a code which is rare in the sample as a whole.
The final step in the heuristic weight computation is to transform $E'$ in such a way that constructs that are useful in coding have a large positive weight, while those which are useless have a weight near 0. This means that we want a transformation which maps normalized entropies $E'$ near 0 into large heuristic weights; conversely, entropies near the minimum of $E'$ (which is the constant $\ln(1/n)$, where $n$ is the number of codes) are to be mapped into a small interval around 0. The function
$$H = (E_u - E') / E'$$
has this property, where
$E_u$ is the entropy of the uniform distribution over $C$;
$E'$ is the entropy $E'$, modified slightly if necessary to ensure that $E'$ is non-zero.
In the case where $E'$ is zero, a small random error is added to the distribution $V_c$. The added error is an estimate of the probability that $c$ would be encountered in a response hand-coded to a code different than that encountered so far. This addition of a random error is justified by the nature of the data, because words sometimes appear in unusual contexts as proper names, as used by persons with limited English, or as transcription errors; therefore a very large sample would contain few if any distributions with zero entropy.
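A minimal Python sketch of this computation, assuming the weighting formula $H = (E_u - E')/E'$ given above; the mixing step for zero-entropy vectors is a simple stand-in for the small random error described in the text.

```python
import math

def heuristic_weight(v_c, v_0, eps=1e-6):
    """Heuristic weight for a conditional-probability vector v_c, given the
    overall code distribution v_0 of the hand-coded sample."""
    # Uniformize: divide out the overall code distribution, then renormalize.
    vprime = [vc / v0 if v0 > 0 else 0.0 for vc, v0 in zip(v_c, v_0)]
    total = sum(vprime)
    p = [v / total for v in vprime]
    # Signed entropy as defined in the text: 0 when concentrated on one code,
    # ln(1/n) when uniform over the n codes.
    e = sum(pi * math.log(pi) for pi in p if pi > 0)
    if abs(e) < 1e-12:
        # Zero entropy: mix in a tiny uniform component, standing in for the
        # chance of an as-yet-unseen code (a modeling choice; see text).
        n = len(p)
        p = [(1 - eps) * pi + eps / n for pi in p]
        e = sum(pi * math.log(pi) for pi in p if pi > 0)
    e_u = math.log(1.0 / len(v_c))   # entropy value for the uniform distribution
    return (e_u - e) / e
```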
The heuristic weight computed by this method appears to agree with our intuitive notion of how specific a word is for coding. Where the coding is over a set of about 250 (U.S. Census-defined) industry classes, some heuristic weights defined from a hand-coded sample of around 100,000 are given in the following table:
<table>
<thead>
<tr>
<th>Word</th>
<th>Weight</th>
<th>Word</th>
<th>Weight</th>
</tr>
</thead>
<tbody>
<tr><td>Co.</td><td>1.78</td><td>Citrus</td><td></td></tr>
<tr><td>Plant</td><td>1.85</td><td>Shoes</td><td></td></tr>
<tr><td>Service</td><td>1.98</td><td>Hospital</td><td></td></tr>
<tr><td>Metal</td><td>3.80</td><td>Liquor</td><td></td></tr>
<tr><td>Medical</td><td>4.10</td><td>Beer</td><td></td></tr>
<tr><td>Iron</td><td>4.35</td><td>Airline</td><td></td></tr>
<tr><td>Farm</td><td>4.60</td><td>Turbine</td><td></td></tr>
</tbody>
</table>
The heuristic weight can be viewed as a generalization of the inverse document frequency weight for terms in information retrieval. In the information retrieval case, there are just 2 categories, relevant and irrelevant, into which documents are to be assigned. In the case of just 2 categories, the inverse document frequency and heuristic weight are approximately proportional for the case where one of the categories (e.g. the relevant documents) has a very low probability.
3.4.2 Other Methods of Weighting Vectors. Alternatively, one might weight each vector by the correlation, over all responses in the hand-coded sample, between the hand-assigned weight for a code and the weight assigned by the vector. More particularly, for each response $r$ and code $c$ let $h(r,c) = 1$ if the hand-assigned code of $r$ is $c$, and $h(r,c) = 0$ otherwise. Let $v(r,c)$ be the $c$th component of the vector $V$, i.e. the probability of code $c$ given the construct represented by $V$. Then if there are $\#C$ codes and $\#R$ responses, there are $\#R \times \#C$ $(h,v)$ pairs, over which we compute the correlation coefficient. Starting with the usual formula for the correlation coefficient and applying some algebra, we get
$$r = \frac{\left(\#C \sum_{c} v_c^2 - 1\right)^{1/2}}{\left(\#C - 1\right)^{1/2}}$$
This correlation coefficient is seen to be 0 for the uniform distribution and 1 for a vector with just one non-zero component. For vectors between the uniform and single-valued extremes, the correlation coefficient is between 0 and 1.
Just as for the heuristic weights of the previous section, the correlation coefficient should be computed with a vector of probabilities which have been uniformized with respect to the distribution of codes in the sample as a whole, so that a vector with probabilities similar to the sample as a whole has a computed correlation coefficient of 0. This normalization is important because some codes may occur much more frequently than others.
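A sketch of the closed-form correlation weight; it assumes the input vector has already been uniformized as described above and sums to 1.

```python
import math

def correlation_weight(v):
    """Correlation-coefficient weight for a probability vector v over #C codes,
    using the closed form derived in the text:
        r = sqrt(#C * sum(v_c^2) - 1) / sqrt(#C - 1).
    Returns 0 for the uniform vector and 1 for a one-hot vector."""
    n = len(v)
    num = n * sum(vc * vc for vc in v) - 1.0
    return math.sqrt(max(num, 0.0) / (n - 1))

print(correlation_weight([0.25, 0.25, 0.25, 0.25]))  # 0.0
print(correlation_weight([1.0, 0.0, 0.0, 0.0]))      # 1.0
```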
4. Constructing Vector Representations
In the overview section of the paper, the vector representations and the combination methods actually used in experiments were described. In this section the relation between the methods is discussed and some refinements are suggested.
4.1 Relative Coding Effectiveness
4.1.1 Phrase Matching. There is a saying in the advertising industry that a smart dime never beat a stupid dollar. The same applies here: nothing beats phrase matching, in which the entire response, up to trivial variations, is matched exactly against an entry in a coding lexicon. This method is fast and reliable. It was not used to maximum effectiveness in the Census experiments, because of an unwillingness to add to the combinatorial methods. In a production system, one should constantly add new phrases to the online coding handbook because new phrases are constantly coming into use; the coding handbook used in the experiment reported above, for example, contained 'tourist cottage' but not 'computer store'. One method of identifying new phrases to add to the coding handbook is to write out to a special file the complete text of responses which are not phrase-coded. One may sort these to identify common phrases not in the coding handbook and present the list to experts, who can identify phrases and associated codes that should be added to the lexicon.
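A hypothetical sketch of such a production phrase matcher, including the side file of unmatched responses proposed above; the normalization rules, handbook entries, and code numbers are illustrative only.

```python
import re
from collections import Counter

def normalize(text):
    """Collapse trivial variation: case, punctuation, extra whitespace."""
    return re.sub(r"[^a-z0-9 ]", " ", text.lower()).split()

class PhraseCoder:
    """Exact phrase matcher over a coding handbook, with a tally of
    unmatched responses used to propose new handbook entries."""
    def __init__(self, handbook):
        # handbook: dict mapping phrase text -> code
        self.table = {tuple(normalize(p)): c for p, c in handbook.items()}
        self.unmatched = Counter()

    def code(self, response):
        key = tuple(normalize(response))
        if key in self.table:
            return self.table[key]
        self.unmatched[" ".join(key)] += 1   # candidate for expert review
        return None

    def candidates_for_review(self, k=20):
        return self.unmatched.most_common(k)

coder = PhraseCoder({"tourist cottage": 721})
print(coder.code("Tourist Cottage"))   # 721
print(coder.code("computer store"))    # None; logged for review
```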
Phrase matching as an initial coding method also increases the statistical independence of words in the residue of records not coded by phrase matching. This independence is an assumption behind the product scoring and the error estimation for linear scoring.
4.1.2 Addition versus Multiplication.
In the experiment, both addition and multiplication were used to combine vectors for words into vectors for phrases. Weighted sums generally were good at identifying possible codes but were more error prone, being swayed by a single word which was strongly associated with a particular code. The product of conditional probabilities method, on the other hand, avoided these errors, but sometimes failed to identify codes that should be assigned, because the ratio between the best and next-best codes failed the coding criterion. Although not tried in the above experiment, it would be reasonable to try these two methods together in a generate-and-test algorithm similar to that used in many artificial intelligence programs. The weighted sum would be used to suggest one or a small set of possible codes. If one code were so suggested, it would be assigned only if the product score was sufficiently good relative to the product scores of other codes. This would eliminate weighted-sum assignments which were based on only part of the response which was highly related to a particular code. If several codes were suggested, some function of the sum and product scores would be required to pass a criterion function before coding occurs.
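A sketch of the proposed (and untried) generate-and-test combination; the margin and ratio thresholds are illustrative placeholders, not values from the experiment.

```python
def generate_and_test(sum_scores, product_scores, sum_margin=0.5, ratio=10.0):
    """Generate candidate codes with the weighted sum, then test each one
    with the product-of-conditional-probabilities score. sum_scores and
    product_scores map code -> score. Returns a code or None (hand-code)."""
    ranked = sorted(sum_scores, key=sum_scores.get, reverse=True)
    best = ranked[0]
    # Generate: codes whose sum score is close to the best sum score.
    candidates = [c for c in ranked
                  if sum_scores[best] - sum_scores[c] <= sum_margin]
    # Test: accept a candidate only if its product score dominates the
    # product scores of all other codes by the required ratio.
    for c in candidates:
        others = max(p for code, p in product_scores.items() if code != c)
        if product_scores[c] > 0 and (others == 0 or product_scores[c] / others >= ratio):
            return c
    return None
```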
4.2 Linguistic Refinements
4.2.1 Case Grammar. In a semantic theory that has become widely used in computerized language processing, Fillmore noted that simple sentences consisted of a verb and a set of arguments, expressed as noun phrases, which stand in fixed semantic relations to the verb. Viewed in this way, a simple sentence has the following semantic parts:
- action: the action that takes place, described by the sentence verb;
- object: the thing which is affected or changed by the action;
- source: the environment or state, particularly of the object, before the action;
- location: the environment or state, particularly of the object, during the action;
- destination: the environment or state, particularly of the object, after the action;
- agent: the thing which causes the action;
- instrument: the thing which is used in carrying out the action.
4.2.2 Word Uses. Case grammar can be applied to sentence fragments as well as sentences. The grammar of such fragments, however, is a function of the question which the fragments answer. As is true of most linguistic data, respondents choose a grammatical form which eliminates redundant information and which places information known to the questioner and present in the response before information in the response new to the questioner; (this is the "given-new" principle in linguistics.) For example, in response to the question 'Where do you work?', the answer is often a free-standing noun phrase which is a location in the sentence which would describe the activity of the worksite. Another common response to this question is a nominalized verb plus object. Inspection of the data confirms the general linguistic observation that the form of the question very tightly constrains the grammatical form of the response; in the case of the industry census questions, a few grammatical forms cover all but a few records.
Survey responses are typically very short sentence fragments; 4 words or fewer was typical in the industry data of the experiment. Computer programs which make use of endings, word order and other syntactic features can identify the case-grammar function of most words in the response. Furthermore these programs can decide if a word in a noun phrase is a head noun or a modifying word. We will define a word use as a triple \((w,c,hb)\), where \(w\) is a word, \(c\) a case grammar function and \(hb\) either head-noun, modifying word, or not applicable (for verbs). After processing with the appropriate linguistic analysis programs, a response may be considered to be a set of word uses.
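One way to represent word uses in code, assuming the output of some (hypothetical) linguistic analyzer that supplies the case function and head/modifier status for each word:

```python
from dataclasses import dataclass
from enum import Enum
from typing import FrozenSet, Optional

class CaseRole(Enum):
    ACTION = "action"
    OBJECT = "object"
    SOURCE = "source"
    LOCATION = "location"
    DESTINATION = "destination"
    AGENT = "agent"
    INSTRUMENT = "instrument"

@dataclass(frozen=True)
class WordUse:
    """The (word, case function, head/modifier) triple defined in the text."""
    word: str
    case: CaseRole
    head_or_mod: Optional[str]   # "head", "modifier", or None for verbs

def response_as_word_uses(tagged_words) -> FrozenSet[WordUse]:
    """tagged_words: list of (word, case, head_or_mod) triples produced by
    the (hypothetical) linguistic analysis programs."""
    return frozenset(WordUse(w, c, h) for (w, c, h) in tagged_words)

# 'farm' as a free-standing location head noun and 'farm' as a modifier would
# then index two different conditional-probability vectors.
uses = response_as_word_uses([("farm", CaseRole.LOCATION, "head")])
print(uses)
```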
Using such marking of word occurrences with case function and head-noun or modifying word for noun phrase words, we can build and use conditional probability vectors for word uses in the same way in which such vectors were built and used for word stems. However, for many words, informal hand inspection of the data suggests that such vectors for the same word but different uses would vary considerably in direction from one another. For example 'farm' as a head noun in a free-standing noun phrase answering 'Where do you work' is for industry coding very heavily associated with the agriculture code. On the other hand, 'farm' as a modifying noun is much more commonly used in responses which code to machinery and supplies used by farms, i.e. responses with codes other than agriculture. It would appear, from this and other similar examples, that word uses are better predictors of codes than word roots.
4.2.3 Features Based on Word Uses.
The basic principle of the coding algorithm presented in the 'overview' section is that when a large constituent determines a code, that code is assigned before proceeding to smaller constituents. This principle can be employed for word uses by building a coding dictionary in which the entries are sets of cooccurring word uses and an associated code. Such a dictionary would be consulted before attempting to code using sums or products of word use vectors. This dictionary would be similar to the ordinary coding dictionary but would allow for more variation in linguistic surface structure in responses which can be successfully matched against the dictionary. Another refinement based on word uses is to subdivide the head-noun versus modifying word distinction. One might classify modifying words in noun phrases according to the sum of the weights of the words which come after them in the noun phrase. Alternatively, one might define an inclusion relation on vectors such that \(V1 \subseteq V2\) if every code that is plausible given \(V1\) is also plausible given \(V2\); then one might distinguish between modifying words which precede a more inclusive word and those which do not. The motivation for this distinction is found in phrases like 'grocery store', in which 'grocery' functions as if it were a head noun; generally, those words followed by more inclusive words have a behavior which is more like that of head words than is the behavior of words which precede words that do not include them.
5. Assigning Codes to Survey Responses
If \(V\) is a vector which represents the response \(r\) using a set \(s\) of linguistic constructs, and \(c\) is a code, then the \(c\)th component of \(V\) is a real number which we will call the score of \(c\) in \(V\) for \(r\) using \(s\). The vector \(V\) will sometimes be called a scoring vector. A code with the highest score among the codes in \(C\) will be called the best code. Our basic code assignment algorithm assigns the best code to a survey response represented by \(V\) if this best code has a score sufficiently better than the other scores. In this section we consider some methods for making the decision about whether the best code is sufficiently better-scoring than the rest.
In deciding whether an automated coder has assigned the right code, our
only available criterion is agreement with a hand-assigned code. This is in fact a correct criterion only when the hand-assigned code is correct. In research on automatic coding, it is important to have a sample of hand-coded responses containing as few errors as possible. By having the sample hand-coded by experts, or even a panel of experts, the number of wrong hand codes (perhaps definable as codes later rejected by the same or other experts), can be reduced but not eliminated for an area as complex as industry and occupation coding; indeed, for some responses, there is more than one acceptable code. In assessing the actual accuracy of an automated coder, one must decide in cases of disagreement with the hand code, which code is correct or better, or if both are acceptable. Preferably this evaluation should be done by experts who are blind to which code is automatically assigned, to eliminate any prejudice against the automatic codes.
While keeping the above limitations of hand coding in mind, we will use right code as a convenient shorthand for 'the hand-assigned code' and wrong code as a shorthand for 'a code not equal to the hand-assigned code'.
5.1 When to Assign a Code
5.1.1 Kinds of Scoring Errors. One problem in automatic coding is deciding when the score of the best code is sufficiently better than that of the others to justify assigning a code. In general it is observed that as the spread between the scores for the best code and next-best code increases, the probability of the best code agreeing with the hand code increases, but there are still a few wrong codes with high scores. These disagreements with the hand codes are of several types. In some cases the hand codes are in error. Another source of code disagreement is statistical scoring error, the probability that in our particular hand-coded sample, the true best code appears as the next-best. Stated another way, the statistical scoring error is the probability that the various sample probabilities are such that the sample score of the true best code (i.e. the one that would score best were the probabilities computed over the entire population of responses) is not the highest score.
In the early experiments performed while the author was at the Census Bureau, a fixed linear function involving the best and next-best codes was used as a coding criterion for all records in a given sample. There was no clear relation between these when-to-code functions and the resulting fraction of coding errors. However, some statistical and computational techniques allow one to get a record-dependent estimate for that part of the chance of miscoding that related to statistical errors in the best and next-best scores. We can use these estimates in the code assignment process to control the level of errors due to statistical scoring errors; this is done by assigning a code when and only when the probability of an error due to a statistical scoring error is below some preset level of errors.
There is, however, an additional component of miscoding error which is not included in these estimates: the case where the highest-scoring code truly is the highest-scoring code but is still wrong. This component of the coding error can be estimated experimentally by comparing the observed coding error after running an automated coder on a large sample of data with the expected level of statistical scoring errors. In estimating the statistical error in coding, we will reduce the problem to that of estimating the error in assigning the best instead of the next-best code. In the case where there are more than 2 close contenders, the pairwise error estimates can be used to get an error estimate for one of a small set of next-best codes, and in the case of a large set of such next-best codes, coding is obviously very risky.
5.1.2 Estimating Errors as a Linear Sum of Random Variables. In the case where the scoring vector is a weighted sum of conditional probability (of codes given constructs) vectors, a particular code is highest-scoring when and only when some weighted sum of differences of conditional probabilities is > 0. In particular, let \(b, n\) be the best and next-best codes, \(V = \sum_i a_i V_i\), and \(v_{ib}, v_{in}\) the conditional probabilities of \(b\) and \(n\) in \(V_i\). Then \(b\) is assigned when
\[
\sum_i a_i v_{ib} - \sum_i a_i v_{in} > 0.
\]
When none of the probabilities above is close to 1, the \(a_i\) coefficients, whether computed as heuristic weights or as correlation coefficients, are stable under changes in the probabilities \((v_{ib}, v_{in})\), provided such changes keep the probabilities out of some interval around 1. Therefore we can approximate the statistical coding error in the above inequality by treating the \(a_i\) as constants and viewing the expression on the left as a linear sum of random variables \((v_{ib}, v_{in})\). The variance of the linear sum is computable in terms of the \(a_i\) and the variances and covariances of the random variables. In the region of stability we may assume that the covariances are 0. The variance of the left side of the inequality is then
\[
\mathrm{var}(b \text{ over } n) = \sum_i a_i^2 \bigl(\mathrm{var}(v_{ib}) + \mathrm{var}(v_{in})\bigr)
\]
The variances of the \(vib, vin\) can be computed using formulas for the variances of proportions in a binomial or approximating normal distribution, so that the variance of the inequality expression is computable from available information. This variance allows us to estimate the probability that the score of \(n\) would be \(\geq\) that of \(b\), which is an estimate of the statistical coding error in assigning the code \(b\) rather than \(n\). While the above variance and associated probability estimate is often best left to a computer, the method is illustrated in this simple example: Suppose the response is 'auto repair', both words have a heuristic weight of 4 computed from a sample of 1000 occurrences, and the probabilities of various codes are given by
<table>
<thead>
<tr>
<th>Code</th>
<th>'Auto'</th>
<th>'Repair'</th>
</tr>
</thead>
<tbody>
<tr>
<td>Auto Mfg.</td>
<td>0.3</td>
<td>0</td>
</tr>
<tr>
<td>Auto Service</td>
<td>0.3</td>
<td>0.15</td>
</tr>
</tbody>
</table>
Then the best code is 'auto service' when
\[
wt(\text{auto})\,p(\text{auto service}/\text{auto}) + wt(\text{repair})\,p(\text{auto service}/\text{repair}) - wt(\text{repair})\,p(\text{electrical repair}/\text{repair}) > 0
\]
The variance of the value of that expression is
\[
\begin{aligned}
& wt(\text{auto})^2\,\mathrm{var}(\text{auto service}/\text{auto}) + wt(\text{repair})^2\,\mathrm{var}(\text{auto service}/\text{repair}) + wt(\text{repair})^2\,\mathrm{var}(\text{electrical repair}/\text{repair}) \\
&\quad = 3.16 \times (2.2 \times 10^{-4} + 1.3 \times 10^{-4}) \\
&\quad = 2.64 \times 10^{-2}
\end{aligned}
\]
so the standard deviation of the value of this expression is 0.163. The value 0 is about 6.7 s.d.'s from the observed value of 1.1, so that the statistical error in this code assignment is very small.
However, if one had a similar set of probabilities but the number of observations per word was only 25 (for example with a word pair like 'canvas awnings'), then the s.d. is increased by a factor of \(\sqrt{40}\), and becomes 1.03. Then the value 0 is about 1.07 s.d.'s from the observed value of the expression, and the statistical coding error is about 14%.
While this example is a made-up one, the numbers are typical of the sample sizes and probabilities which arise when constructing conditional probability vectors from a large sample of actual responses. The example illustrates the extreme variation in statistical coding error based on the sample probabilities used in coding. By illustrating this variation in statistical error between records, the example strongly suggests that coding performance can be improved by using a boolean "when-to-code" function in the computerized coder which estimates the statistical coding error.
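A sketch of such a record-dependent when-to-code rule, combining the variance formula of this section with a normal-approximation error estimate; the tuple layout and the error threshold are our own illustrative choices.

```python
import math

def proportion_var(p, n):
    """Variance of a sample proportion estimated from n observations."""
    return p * (1.0 - p) / n

def statistical_coding_error(terms):
    """terms: one entry per construct i in the response, each a tuple
    (a_i, p_i_best, p_i_next, n_i): the weight, the sample conditional
    probabilities of the best and next-best codes, and the sample size
    behind those probabilities. Returns (margin, sd, error probability),
    treating the weights as constants as in the stable-region argument."""
    margin = sum(a * (pb - pn) for a, pb, pn, n in terms)
    var = sum(a * a * (proportion_var(pb, n) + proportion_var(pn, n))
              for a, pb, pn, n in terms)
    sd = math.sqrt(var)
    z = margin / sd if sd > 0 else float("inf")
    # Normal approximation to the chance that the next-best code would in
    # fact outscore the best code over the whole population.
    return margin, sd, 0.5 * math.erfc(z / math.sqrt(2.0))

def when_to_code(terms, max_error=0.01):
    """Boolean when-to-code rule: assign the best code only if the estimated
    statistical coding error is below a preset level."""
    _, _, err = statistical_coding_error(terms)
    return err <= max_error
```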
### 5.1.3 Error Estimates in the Unstable Region
When the distribution of probabilities in a vector is such that one probability is near 1 while the others are very small, the coefficient (heuristic weight or correlation coefficient, for example) may be significantly affected by small changes in the large probability. In this case, the above method, which assumed that the coefficients were for practical purposes constant under changes in the probabilities, does not apply. As an alternative method for the unstable case one may
- Estimate from the given vector \(V\) the probability \(p\) that the next occurrence of the construct represented by \(V\) will not have the single high-probability code.
- If \(p\) is sufficiently small, ignore the possibility of a change in the large probability.
- Otherwise compute the statistical error as the probability-weighted average of the case where the large probability remains unchanged and the case where the greatest other probability is incremented by the addition of a single occurrence to the sample with that next-highest code in \(V\). In the two subcases, the statistical error is computed with \(V\) assumed constant, but vectors other than \(V\) which have not been assumed constant at some previous subdivision of the computation are allowed to vary.
### 6. Computer Implementation
At the time these experiments were conducted, hardware was considerably more expensive and the hardware options confined to large computers. Processing time was expensive, and the concern with processing time per record, about 1 sec. for the algorithm in the experiment, prevented more elaborate coding experiments. In addition, development of and experimentation with the program were hindered by the long waits and down time associated with a heavily loaded time-shared computer.
#### 6.1 Coding on a Local Area Network
Today, all of these limitations can be overcome. 16-bit micros in a local area network could be used to code during the data entry task. For example we might use the following system:
shared hard disk: The storage requirements would be around 40 million bytes for word-use vectors for industry coding, plus another few million bytes for the coding
handbooks. A 70 mb. hard disk for $7000 has recently been announced.
Work station micros: These are 16-bit micros; if used only for coding, floppy disk drives would not be needed, so that the cost per micro would be around $2500.
Additional ram for the micros: A large electronic disk is added to the workstation micros so that common phrases and words can be stored locally to reduce network traffic and improve response time. Currently 1 mb. of ram is available for $1600, and prices should continue to drop.
Communications hardware: For each workstation and for the disk controller, a network interface board, about $1000, is needed.
Mainframe link: A link with a mainframe is needed to get data to be coded and to offload coded records. This link might consist of a high speed data line and interface hardware; a very rough cost estimate might be several thousand dollars.
A local area network like the above should be able to support 20 users with a response time that would appear instantaneous, assuming that as each word was entered, some processing occurred while the user typed the rest of the record. The per-user cost would be around $6000. The fast response time and sophisticated programming language software available in this environment would also be ideal for developing coding software.
6.2 Advantages of Coding during Data Entry
6.2.1 Elimination of Errors.
There appear to be several advantages to coding during data entry. For one thing, the entry clerk can use some judgement in the order in which parts of the response are entered, entering parts which appear highly descriptive before usually meaningless parts, such as proper names. If coding occurs before the whole record is entered, the rest never has to be keyboarded. This discretionary entry also prevents the machine from being distracted by proper names which it, in its limited ability to understand language, does not recognize as such. Additionally, the computer can catch many probable spelling errors at a time when the entry clerk can correct them. In the experimental data, some records were not coded correctly by the machine because of such spelling errors.
7. References
COATCheck: Verifying Memory Ordering at the Hardware-OS Interface
Daniel Lustig ∗
Princeton University
dlustig@nvidia.com
Geet Sethi ∗
Rutgers University
geet.sethi@rutgers.edu
Margaret Martonosi
Princeton University
mrm@princeton.edu
Abhishek Bhattacharjee
Rutgers University
abhib@cs.rutgers.edu
Abstract
Modern computer systems include numerous compute elements, from CPUs to GPUs to accelerators. Harnessing their full potential requires well-defined, properly-implemented memory consistency models (MCMs), and low-level system functionality such as virtual memory and address translation (AT). Unfortunately, it is difficult to specify and implement hardware-OS interactions correctly; in the past, many hardware and OS specification mismatches have resulted in implementation bugs in commercial processors.
In an effort to resolve this verification gap, this paper makes the following contributions. First, we present COATCheck†, an address translation-aware framework for specifying and statically verifying memory ordering enforcement at the microarchitecture and operating system levels. We develop a domain-specific language for specifying ordering enforcement, for including ordering-related OS events and hardware micro-operations, and for programmatically enumerating happens-before graphs. Using a fast and automated static constraint solver, COATCheck can efficiently analyze interesting and important memory ordering scenarios for modern, high-performance, out-of-order processors. Second, we show that previous work on Virtual Address Memory Consistency (VAMC) does not capture every translation-related ordering scenario of interest, and that some such cases even fall outside the traditional scope of consistency. We therefore introduce the term transistency model to describe the superset of consistency which captures all translation-aware sets of ordering rules.
1. Introduction
Computer systems are becoming increasingly complex, with multiple processing elements (e.g., multicore CPUs, GPUs, and other accelerators) running multiple layers of system software (user code, libraries, operating systems, hypervisors, etc.). These heterogeneous systems frequently enable inter-element communication by presenting the user with an abstraction of shared virtual memory, even when the underlying hardware may contain discrete physical memory blocks [11, 42]. Harnessing these systems’ full potential requires careful coordination between the hardware and the OS to ensure that the memory consistency model(s) (MCMs) and address translation (AT) mechanisms are properly implemented. Unfortunately, the ability to rigorously verify these subsystems and their interactions with each other remains a vexing problem.
Recent years have seen increased attention being paid to the need for well-defined memory models and analysis techniques at each layer of the hardware-software stack. Many architectures and programming languages have recently developed formal consistency model specifications [10, 13, 30] and tools to help analyze them [3, 40]. However, most (but not all [38, 39]) of these models ignore the implications of virtual-to-physical address translation, such as synonyms and page permission updates, on memory ordering. Furthermore, these tools cannot verify the underlying implementations of these models, leaving a verification gap within which bugs often arise.
A key challenge is that microarchitectural events (i.e., those which are not architecturally visible) and OS behavior can affect memory ordering in ways for which standard (i.e., non-translation-aware) memory consistency analysis can be fundamentally insufficient [38, 39]. Proper implementation of a memory model requires correctness to be maintained through library calls, through system calls, and through the varying and/or unpredictable behavior of the microarchitecture. Events within each of these layers interact with and affect the state of memory, and, crucially, events within these low-level layers may behave differently from the “normal” accesses described by the formal memory model. For example, on the x86-64 architecture, which implements the relatively strong total store ordering (TSO) memory model [32], events such as page table accesses may be inherently racy: page table walks are automatically issued by hardware, can happen at any time, and are often not ordered with respect to most fences [16, 20].
No existing notion of memory consistency captures the strictest possible translation-aware set of orderings. As we show in this paper, even data-race-free programs \[11\], sequentially consistent machines \[23\], and systems obeying sequential consistency for virtual address memory consistency (SC-for-VAMC) \[38, 39\] can nevertheless be prone to (perhaps surprising) ordering bugs. These bugs relate to the checking of metadata which is not directly associated with the virtual or the physical address being accessed; this places it outside the scope of memory consistency, including VAMC. We therefore use the term memory transistency model to refer to any set of memory ordering rules which explicitly takes virtual-to-physical address translation issues into account, even through the extra layers of indirection needed above.
To aid in the analysis of transistency models and their implementations, this work develops techniques and tools—collectively called COATCheck—for verifying memory ordering enforcement in the context of virtual-to-physical address translation. COATCheck extends existing tools and techniques \[26, 29\] to allow users to reason about system calls, interrupts, microcode, and so on. The goal of COATCheck is to improve the ability to specify and verify system behaviors at an already bug-prone interface \[38, 39\] whose complexity is worsening with heterogeneous parallelism. Our contributions are as follows.
First, we demonstrate a comprehensive yet tractable methodology for specifying and statically verifying memory ordering enforcement at the hardware-OS interface. We develop a Domain-Specific Language (DSL) called \(\mu\)spec within which each component in a system (e.g., each pipeline stage, each cache, each TLB) can independently specify its own contribution to memory ordering using the languages of first-order logic and \(\mu\)hb graphs [26, 27, 29]. \(\mu\)spec extends the constraint-based approach of previous work [29] to support modeling of TLB occupancy, page table walk microcode, activities that emit memory references despite not being part of the user-level instruction stream, system calls for memory allocation (e.g., \texttt{malloc/mmap}), and interrupts (e.g., inter-processor interrupts to maintain TLB coherence). The \(\mu\)spec approach allows components to be swapped in and out without affecting others, thereby providing a more modular approach to memory ordering verification.
Second, we develop a fast and general-purpose constraint solver which automates the analysis of \(\mu\)spec specifications, thereby allowing interactive exploration of memory ordering scenarios more complex than previous tools have handled. We demonstrate the use of COATCheck (the methodology and the tool) on several case studies that highlight interesting challenges at the hardware-OS boundary: a sophisticated model of an Intel Sandy Bridge-like processor running Linux, as well as classes of translation-related bugs recently identified by processor vendors. The full toolset (the DSL, models, litmus tests, and analysis tool) is open-source and publicly available.
Finally, we use COATCheck to identify cases in which transistency goes beyond the traditional scope of consistency. We demonstrate cases where even sequentially consistent (or, following recent work, SC for VAMC \[38, 39\]) code may be buggy due to improper handling of page table entry status bits for virtual address synonyms. Overall, our work offers formal, yet practical tools for memory ordering checking, and it broadens the very scope of memory consistency.
2. Overview
2.1 Background and Motivation
As motivation, consider the litmus test in Figure 1b. As written, \(x\) and \(y\) appear to be distinct addresses. Under that assumption, Figure 1b shows that even a strong MCM such as sequential consistency (SC) \[23\] considers the proposed final values to be observable, because an event interleaving exists to achieve that value outcome. If instead, as in Figure 1c \(x\) and \(y\) are actually synonyms (i.e., both map to the same physical address), the test is forbidden by SC, because
Figure 1: The litmus test and its proposed outcome (r1=2, r2=1). The outcome is permitted if TSO is considered to apply only to virtual addresses or if \(x\) and \(y\) are not synonyms, and forbidden otherwise.
---
1. \texttt{http://github.com/daniellustig/coatcheck}
2. Litmus tests are small programs testing some aspect of a MCM. Each proposes a particular outcome (i.e., the value returned by each load) and then specifies/tests whether that outcome is permitted or forbidden by the MCM’s rules.
if the addresses are the same, no interleaving of the threads produces the proposed outcome. While simple, this example highlights how memory ordering verification is fundamentally incomplete unless it explicitly accounts for address translation when determining expected behaviors and verifying correctness.
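To make the example concrete, the following brute-force checker (our own illustration, not part of the COATCheck toolflow) enumerates SC interleavings while resolving virtual addresses through an explicit mapping, so the same test flips from permitted to forbidden when x and y become synonyms.

```python
def sc_observable(threads, vmap, proposed):
    """threads: two lists of instructions, each ('ST', vaddr, value) or
    ('LD', vaddr, reg). vmap maps virtual to physical addresses, so synonyms
    can be modeled. proposed maps reg -> expected loaded value. Returns True
    if some SC interleaving produces the proposed outcome."""
    def interleavings(a, b):
        if not a: yield list(b); return
        if not b: yield list(a); return
        for rest in interleavings(a[1:], b): yield [a[0]] + rest
        for rest in interleavings(a, b[1:]): yield [b[0]] + rest

    for order in interleavings(threads[0], threads[1]):
        mem, regs = {}, {}
        for op, vaddr, arg in order:
            paddr = vmap[vaddr]          # address translation step
            if op == 'ST':
                mem[paddr] = arg
            else:
                regs[arg] = mem.get(paddr, 0)
        if all(regs.get(r) == v for r, v in proposed.items()):
            return True
    return False

# Figure 1-style test: permitted with distinct addresses, forbidden as synonyms.
t0 = [('ST', 'x', 1), ('LD', 'y', 'r1')]
t1 = [('ST', 'y', 2), ('LD', 'x', 'r2')]
print(sc_observable([t0, t1], {'x': 'pa1', 'y': 'pa2'}, {'r1': 2, 'r2': 1}))  # True
print(sc_observable([t0, t1], {'x': 'pa1', 'y': 'pa1'}, {'r1': 2, 'r2': 1}))  # False
```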
**Relationship to Past Work:** The bulk of prior MCM work has focused on the high-level programming language and hardware layers [2, 10, 13, 30, 32, 40]. However, abstractions effective at these levels (e.g., SC-for-DRF [11], TSO [33]) are often ineffective at the hardware level, as high-performance data structures and low-level hardware operations are often inherently weakly ordered and racy. For example, although x86 processors implement the Total Store Ordering (TSO) MCM, page table walks are TSO-ordered with respect to neither normal memory accesses nor mfence instructions (which enforce orderings between all normal loads and stores) [16, 20]. Likewise, many parallel data structures bypass software memory models in favor of higher-performance (but less portable) assembly implementations that interface more directly with the non-SC hardware memory model [10, 24, 25, 31].
Romanescu et al. were the first to distinguish between MCMs meant for virtual addresses (VAMC) and those for physical addresses (PAMC) [38, 39]. They considered hardware to be responsible for enforcing the latter, and a combination of hardware and OS for the former. Accordingly, traditional hardware models such as TSO would fall under PAMC, while synonyms, page mapping changes, and side effects (i.e., page status bits) would be added to form VAMC. COATCheck provides a rigorous means of specifying and verifying the interaction between the two models. This paper also goes beyond the VAMC-PAMC distinction to identify cases in which ordering bugs can be found even when both VAMC and PAMC are made sequentially consistent.
PipeCheck and CCICheck verify memory ordering enforcement through the use of microarchitecture-level happens-before (µhb) graphs [20, 27, 29]. PipeCheck performs MCM verification by enumerating a complete family of µhb graphs for any given litmus test. CCICheck extends PipeCheck to handle coherence-consistency interface issues. COATCheck extends both of these by providing a fully-general DSL for specifying ordering enforcement, and by demonstrating how PAMC, VAMC, and transistency in general can be analyzed using µhb graphs.
### 2.2 The COATCheck Approach
Figure 2 shows layers at which memory ordering issues might be considered. Our work enables the building and verification of detailed models for hardware-OS memory ordering implications, particularly focusing on layers 3, 4, and 5 in Figure 2. This allows us to analyze memory ordering in an execution stream that includes library and kernel code, as well as microcode-level events such as the hardware page table walks executed on behalf of the program at TLB misses. Previously, incomplete specifications, incorrect implementations, or poor coordination between the layers could (and did) cause bugs. These include forbidden multithreaded outcomes becoming observable, legal data disappearing due to incorrect updates of page table entry dirty bits, processors experiencing deadlock/livelock, and any number of other undesirable outcomes.
COATCheck overcomes these problems by building modular models of memory ordering enforcement at the hardware-OS interface. In particular, each component (a pipeline stage, a page table walker, an OS mechanism, etc.) can specify its own independent contribution to memory ordering enforcement. Prior to verification, the independent contributions of the components which form the system under test are merged into a single overall specification. The COATCheck tool then uses this combined specification to generate families of µhb graphs to statically verify the overall correctness of the system.
Figure 3 shows the overall COATCheck toolflow. First, traditional (i.e., address translation-unaware) litmus tests are converted into enhanced litmus tests which include all microcode relevant to memory ordering. Second, the enhanced litmus tests are analyzed according to the rules of the µspec specification of the orderings enforced at various parts of the system. This produces a set of constraints describing the conditions under which the outcome proposed by the litmus test would be observable. Third, the constraints are analyzed by the COATCheck constraint solver tool to determine whether any observable execution (in the form of a µhb graph) can be found. We analyze each step in detail in the sections that follow.
After describing the overall flow, Section 6 gives the concrete example of how a system consisting of Intel Sandy Bridge-like hardware, a Linux-like OS, and interesting litmus tests as software can be modeled and analyzed using COATCheck. This is followed by an analysis of the performance of the COATCheck constraint solver, using the above system as a case study.
3. Beyond Traditional Litmus Tests
Litmus tests have become a standard tool in memory model analysis. This section describes how we derive Enhanced Litmus Tests (ELTs) which account for address translation operations, page table walks, remappings, memory-accessing microcode, and other operations relevant to memory ordering and other system-level operations that execute concurrently with, and on behalf of, user-level code.
3.1 Enhanced Litmus Tests: An Overview
ELTs use three key constructs in moving beyond traditional, user-level litmus tests. First, ELTs describe virtual and physical addresses as distinct and simultaneous entities. In the same way that traditional litmus tests propose an outcome consisting of a set of values returned by the loads in the program, ELTs likewise propose as part of the outcome the physical addresses used by each access. In this way, the analysis of later sections can directly test whether a proposed set of address translation outcomes is legal.
Second, ELTs incorporate relevant chunks of OS activity, such as map/remap functions (MRFs) or inter-processor interrupts (IPIs) as used in software TLB coherence [37, 46]. They similarly include events such as the sending of IPIs, register reads/writes which cause the enabling/disabling of interrupts, and so on.
Third, ELTs include “ghost instructions” which model lower-than-ISA operations (e.g., page table walks) executed by the hardware on behalf of user-level or system-level code, even if these instructions are not fetched, decoded or issued as part of the normal ISA-level instruction stream. The combined power of address tracking, ghost instructions and OS module insertions gives ELTs sufficient expressive power to test all aspects of memory ordering enforcement as it relates to address translation.
Figure 4 presents an example flow from user-level litmus test to ELT. Our example shows the store buffering (store→load reordering) outcome to be visible. We form a more system-oriented example by inserting an extra system call to mprotect at the beginning of thread 0 in Figure 4a.
3.2 Litmus Test Expansion Synopses
As Figure 4 shows, each ELT comes about as the result of a sequence of modification passes. We systematize this transformation in the form of litmus test expansion synopses, or simply synopses for short. Each synopsis is a “recipe” for expanding each instruction in an input litmus test into one or more instructions in an output litmus test. As described below, there is some flexibility in what the recipes look like: the original instruction may or may not be maintained, and instruction expansions may insert new code into other threads as well. Furthermore, each expanded instruction may be inserted either before or after the original instruction. In this way, synopses serve as succinct representations of behaviors that we hope might one day become more rigorously and precisely formalized.
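A toy sketch of the synopsis "recipe" structure; the instruction encodings and the mprotect expansion shown are illustrative assumptions, not the actual synopses used in COATCheck.

```python
def expand(litmus, synopses):
    """Apply expansion synopses to a litmus test. litmus: list of per-thread
    instruction lists. Each synopsis maps an instruction either to None (does
    not apply) or to (replacement instructions, extra handler threads).
    Handler threads are appended as new threads, since their interleaving
    with existing threads is constrained only later by the uspec model."""
    out = [[] for _ in litmus]
    handlers = []
    for tid, thread in enumerate(litmus):
        for inst in thread:
            for syn in synopses:
                expansion = syn(inst)
                if expansion is not None:
                    local, extra_threads = expansion
                    out[tid].extend(local)
                    handlers.extend(extra_threads)
                    break
            else:
                out[tid].append(inst)   # no synopsis applies; keep as-is
    return out + handlers

def mprotect_synopsis(inst):
    """Hypothetical OS synopsis for an mprotect-style call: update the page
    table, invalidate the local TLB entry, and raise an IPI; the matching
    interrupt handler is added as a separate thread."""
    if inst != ("syscall", "mprotect"):
        return None
    local = [("st", "pte"), ("invlpg",), ("send_ipi",), ("wait_acks",)]
    handler = [("recv_ipi",), ("invlpg",), ("ack_ipi",)]
    return local, [handler]

elt = expand([[("syscall", "mprotect"), ("st", "x")], [("ld", "x")]],
             [mprotect_synopsis])
print(elt)
```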
3.3 From User-Level to Kernel-Aware Litmus Tests
Incorporating system calls and kernel-level code into a user-level litmus test requires multiple changes. First, in the invoking thread (e.g., Thread 0 in Figure 4a), we must be able to substitute the system call with a module of memory reference operations that correctly encapsulate the system call behavior relevant to memory ordering verification. Figure 4b continues the example of Figure 4a by expanding the system call into a set of four instructions on the invoking Thread 0. The expanded regions are shaded in blue.
It is worth noting that this expansion can affect multiple threads at once. Because one of the instructions expanded in Thread 0 triggers an inter-processor interrupt (IPI), we also add a three instruction interrupt handler to all of the other threads (in this case, Thread 1a). Notably, since the relative timing of Thread 0 and Thread 1a has not yet been established, it is also impossible (at this point) to determine the relative ordering of Threads 1a and 1b. For example, while it is clear that the interrupt handler must happen sometime after the interrupt is originally triggered, there is little other synchronization between the threads. Any such orderings will be filled in later by the µspec specification of IPI behavior in the microarchitecture in question.
OS Synopses: On the invoking core, an OS synopsis specifies a mapping from each system call into a simple pre-defined sequence of microops capturing the effects of that system call on address translation and consistency. When the system call contains an inter-processor interrupt (IPI), the OS synopsis also instantiates pre-defined interrupt handler threads on those cores, again using a sequence of statically-determined instructions. Each interrupt handler may be arbitrarily interleaved with the other thread(s) assigned to that core, subject to µspec constraints (Section 4.2).
We do not currently model the complexities of OS decision making or data structures; our OS synopses currently include only memory accesses which update the paging structures and any synchronization used to enforce orderings with respect to these updates. However, these synopses could be made more sophisticated as needed; we believe that attempting to formalize the relationship between these OS synopses and the full OS code would make for interesting follow-up work.
3.4 Memory-Accessing Microcode: Ghost Instructions
During V-P translation, there are many microcode operations which are not fetched as ISA-level instructions (either user or kernel) but which still play a key role in enforcing consistency. These microcode operations are used to support hardware page table walks, TLB refills, accessed/dirty bit updates, and so on. We refer to these operations as ghost instructions, as they are present but not visible to the user or to the OS kernel.
The presence and behavior of ghost instructions depends heavily both on the architecture and on the microarchitecture in question. At an architecture level, operations such as page table walks may be specified as being enforced entirely by hardware, entirely by software, or anywhere in between. COATCheck is flexible enough to cover any point on this spectrum. We break this problem into two parts: the specification of the instructions (and ghost instructions) which are emitted to cause ordering to be enforced (this section), and the specification of the orderings enforced between these instructions at different points in their executions (Section 4.2).
Figure 4c depicts an ELT derived from Figure 4b. Darker red-shaded regions are microcode operations that have been expanded at this phase; lighter blue regions remain those expanded in the previous step. For this test scenario, thread 0’s access to [x] requires a page table walk, because the TLB entry for that virtual address would have been invalidated by the invlpg instruction. Also, since the initial condition states that the page containing [x] is clean, hardware would also mark the page as dirty prior to the write (specified to occur on x86 using a LOCKed atomic operation). Other accesses may also take TLB misses and trigger page table walks themselves, although (for space reasons) the figure does not show all of them. Finally, the ELT includes hardware operations for receiving the interrupt, saving state, and disabling nested interrupts via the microcode preamble to thread 1b. In this example, hardware is responsible for saving state, but software is responsible for restoring it. This again highlights the degree of collective responsibility between hardware and OS for ensuring ordering correctness.
Microarchitecture Synopses: As with the OS synopses, our microarchitecture synopses currently consist of rela-
4. μspec: A μhb Graph-Centric Ordering Specification Language
4.1 Background: μhb Graphs
Microarchitecture-level happens-before (μhb) graphs capture consistency model enforcement at the implementation level [20, 27]. Figure 5 gives an example. Nodes in a μhb graph represent events corresponding to a particular instruction (column) at a particular physical location (row). For example, a node may represent a particular instruction passing through a particular pipeline stage. Edges represent happens-before orderings guaranteed by some property of the microarchitecture: an instruction flowing through a pipeline, a FIFO-ordered structure, the passage of a message, and so on.
All edges in a μhb graph are equivalent, regardless of their label. This allows the transitive closure of two μhb graphs to itself always be a legal μhb graph, and it implies that any cycle in a μhb graph indicates that the execution is infeasible. This places μhb graphs in contrast to some other existing approaches in which only specific subsets of a happens-before graph are analyzed for cycles [3].
Figure 5: μhb graph for the test of Fig. 4a without the syscall.
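A small sketch of the acyclicity check that underlies μhb-graph analysis; the node and edge encodings are our own, and edge labels are carried along only for readability.

```python
def has_cycle(nodes, edges):
    """A uhb graph is infeasible exactly when it contains a cycle; all edges
    are treated alike, regardless of label. nodes are (instruction, location)
    pairs; edges are (src, dst, label) triples. Simple DFS with coloring."""
    adj = {n: [] for n in nodes}
    for src, dst, _label in edges:
        adj[src].append(dst)
    WHITE, GRAY, BLACK = 0, 1, 2
    color = {n: WHITE for n in nodes}

    def dfs(n):
        color[n] = GRAY
        for m in adj[n]:
            if color[m] == GRAY or (color[m] == WHITE and dfs(m)):
                return True
        color[n] = BLACK
        return False

    return any(color[n] == WHITE and dfs(n) for n in nodes)

# Two instructions passing through Fetch and Execute, with a cyclic ordering:
nodes = [("i0", "Fetch"), ("i0", "Execute"), ("i1", "Fetch"), ("i1", "Execute")]
edges = [(("i0", "Fetch"), ("i0", "Execute"), "path"),
         (("i1", "Fetch"), ("i1", "Execute"), "path"),
         (("i0", "Execute"), ("i1", "Fetch"), "hb"),
         (("i1", "Execute"), ("i0", "Fetch"), "hb")]
print(has_cycle(nodes, edges))   # True -> this execution is infeasible
```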
4.2 μspec: A DSL for Specifying μhb Graph Orderings
μspec is a domain specific language in which hardware or software component designers express the ordering relationships enforced by that component. Through the μspec specification, designers provide a set of μhb graph-focused axioms which must all be satisfied for a given test to be considered observable [29]. This axiomatic approach is widely used in memory model analysis [2, 3, 28, 32], but μspec extends this down to a lower level of abstraction.
μspec axioms are first order logic (i.e., AND, OR, NOT, EXISTS, FORALL) statements built on top of instruction- and μhb graph-related predicates. The instruction-related predicates are used to constrain an axiom to a relevant subset of instructions. For example, an axiom that states a property relating two reads to the same physical address would use the two predicates IsRead and SamePhysicalAddress.
The core μspec axiom is the predicate EdgeExists (or, in plural form, EdgesExist). This predicate takes three arguments: a source μhb node, a destination μhb node, and a label. The nodes are in turn formed of two components: an instruction, and a microarchitectural location or event. These correspond to the column and row of the node in the μhb graph, respectively. The edge label is purely cosmetic, as all edges in a μhb graph are equivalent (Section 4.1).
Figure 6 gives an example of the μspec syntax: the axiom "ReadsFrom", which requires every read to read from some write to the same physical address carrying the same data.
Axiom "ReadsFrom":
forall microops "r", IsRead r => exists microop "w", IsWrite w /
SamePhysicalAddress w r /
SameData w r /
EdgeExists ((w, AccessMemory), (r, AccessMemory), "rf").
Figure 6: μspec syntax example.
In other words, this approach allows microarchitecture-specific axioms to be easily swapped out for another set while OS- and architecture-level axioms are kept unchanged, to give just one example. While µspec cannot (yet) automate the extraction of a specification from Verilog or HDLs, it does greatly reduce the effort required to express ordering requirements and expectations, and we hope to elaborate on this aspect in future work.
5. Constraint Solver and Software Implementation
As described in Section 4.2, the µspec specification of a system produces a first-order logic formula whose satisfiability corresponds to the feasibility of a particular execution. The COATCheck constraint solver accepts this formula and some litmus test as inputs, and it searches to find any execution of that test which satisfies all of the constraints of the model. If one can be found, then the proposed outcome is observable. If not, then the proposed outcome is forbidden. In PipeCheck, which introduced µhb graphs, the graphs were generated using naive exhaustive enumeration [26, 27]. However, the PipeCheck approach does not scale to the sizes and numbers of graphs needed to handle ELTs.
5.1 Constraint Solver Algorithm
The preliminary step in the solving process is to eliminate the quantifiers in the formula. Doing so produces a quantifier-free propositional logic formula that is more directly amenable to being solved. Since the domain of each quantifier is concrete in the context of some particular litmus test, the quantifiers are removed by simply converting each forall into a conjunction (AND) over its domain (i.e., cores, threads, or instructions). Likewise, each exists is converted into a disjunction (OR).
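A toy sketch of the quantifier-elimination step; the formula representation is our own miniature AST, not real μspec syntax.

```python
def eliminate_quantifiers(formula, domain):
    """Convert a quantified uspec-style axiom into quantifier-free
    propositional logic over a concrete litmus test: each forall becomes a
    conjunction over its (finite) domain and each exists becomes a
    disjunction. ('forall', f) and ('exists', f) hold a Python function f
    that, given a domain element, returns the instantiated sub-formula."""
    kind = formula[0]
    if kind == "forall":
        return ("and", [eliminate_quantifiers(formula[1](x), domain) for x in domain])
    if kind == "exists":
        return ("or", [eliminate_quantifiers(formula[1](x), domain) for x in domain])
    if kind in ("and", "or"):
        return (kind, [eliminate_quantifiers(f, domain) for f in formula[1]])
    return formula   # atomic predicate, e.g. ("EdgeExists", src, dst, label)

# "Every read reads from some write", instantiated over a two-instruction domain:
reads_from = ("forall", lambda r:
              ("exists", lambda w: ("EdgeExists", w, r, "rf")))
print(eliminate_quantifiers(reads_from, ["i0", "i1"]))
```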
The solver algorithm itself resembles and is inspired by the Davis-Putnam-Logemann-Loveland (DPLL) algorithm widely used in SAT solvers [14]. We apply it to µhb graphs, making our solver resemble a primitive but effective SMT solver for the theory of acyclic directed graphs. At a high level, the solver uses a backtracking approach: given a starting point, it generates a list of subcases of “either-or” edge additions, and it then recursively descends into each subcase, abandoning those which cannot satisfy the given conditions and stopping when it reaches a leaf node (i.e., an acyclic graph). Although we could have used an off-the-shelf SAT or SMT solver, our custom solver provided significantly better debugging ability and status visualization (e.g., of partially-completed µhb graphs and decision trees) than would have been possible with a black-box solver. We found this empirically to be very useful during our work.
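A sketch of the backtracking "either-or" search over edge additions described here, with a simple Kahn-style acyclicity check standing in for the solver's internal graph machinery.

```python
def is_acyclic(edges):
    """Kahn-style check: repeatedly remove nodes with no incoming edges."""
    nodes = {n for e in edges for n in e[:2]}
    indeg = {n: 0 for n in nodes}
    for _src, dst, _label in edges:
        indeg[dst] += 1
    ready = [n for n in nodes if indeg[n] == 0]
    removed = 0
    while ready:
        n = ready.pop()
        removed += 1
        for src, dst, _label in edges:
            if src == n:
                indeg[dst] -= 1
                if indeg[dst] == 0:
                    ready.append(dst)
    return removed == len(nodes)

def solve(required_edges, choices):
    """Backtracking search in the spirit described above: start from the edges
    that must exist, then work through each 'either-or' list of alternative
    edge sets, abandoning any branch whose graph becomes cyclic. Returns a
    satisfying acyclic edge set (an observable execution) or None."""
    def recurse(edges, remaining):
        if not is_acyclic(edges):
            return None                     # prune: cyclic uhb graph
        if not remaining:
            return edges                    # leaf: acyclic graph found
        for alternative in remaining[0]:    # either-or: try each alternative
            result = recurse(edges + alternative, remaining[1:])
            if result is not None:
                return result
        return None
    return recurse(list(required_edges), list(choices))
```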
5.2 Software Toolchain
We implemented the COATCheck methodology into the complete working toolflow shown in Figure 3. In normal operation, the tool takes as input a user-level litmus test, HW/OS synopses (Section 3.1), and µspec models (Section 4.2). For low-level debugging, the tool also allows the direct input of a manually-written ELT (bypassing the synopses). The ELT and component µspec models feed into the model analyzer, which applies the axioms of the models to the ELT to generate a tree of µhb graph constraints. The constraints are then passed to the constraint solver to determine if the outcome is feasible.
Our core analysis infrastructure is written in Coq to allow for formal verification [44]. However, we have not yet completed any formal proofs of correctness; this remains an open problem, and in any case, the specifications of expected behavior often simply do not yet exist. We therefore leave this to future work, and we instead extract the Coq code to OCaml to build COATCheck as a standalone tool.
Because we aim for backwards compatibility with existing MCM analysis frameworks, we interface our tool with the widely used litmus test format [3]. This allows us to draw from a large existing body of litmus tests. Although these tests do not distinguish virtual and physical addresses, they serve as valuable sanity checks for the basic correctness of a pipeline model. Furthermore, hardware models from previous work [26, 27, 38, 39] can be easily adapted into µspec and our OS models, and hence into COATCheck as well.
6. Detailed Processor+OS Model Case Study
This section presents an in-depth case study of how hardware and software designers might use COATCheck and µspec to model a high-performance out-of-order processor and OS, respectively. The resulting µspec model is the one used for Section 7’s litmus test case studies, and Section 8’s performance results.
6.1 Basic Overview
Our case study model has three main parts, two of which are provided by the hardware designer and one of which is provided by someone familiar with the OS. The first component is a µspec model which describes a given processor microarchitecture. This model provides a set of µspec axioms representing the ordering constraints enforced by the hardware. In this case study, this hardware component is inspired by the Intel Sandy Bridge microarchitecture, and was developed in detail using public documentation [19, 20], information gleaned from patents [16], and some educated guesses used to fill in gaps. Many of the low-level details remain proprietary, so it cannot be an exact match, but the paper nevertheless refers to this as our SandyBridge model. Table 1 gives an overall enumeration of the µspec axioms in this model.
The second component is a SandyBridge hardware synopsis (Section 3.4), which specifies how litmus tests might be expanded by hardware when executed on SandyBridge. These expansions pertain to hardware page table walks and other hardware-initiated events (i.e., ghost instructions).
Table 1: Axioms for SandyBridge model. Some axioms include macros which expand to address other orderings not listed.
| Axiom | Description |
|---|---|
| Reads | Path (pipeline stage sequence) and µhb orderings for read instructions |
| Writes | Path/orderings for write instructions |
| mfence | Path/orderings for mfence |
| invlpg | Path/orderings for invlpg |
| iret | Path for iret instruction |
| RMW | Atomicity of LOCKed RMW operations |
| FetchPO | Program order enforced at Fetch |
| DispatchPO | Fetch order maintained at Dispatch |
| CommitPO | Dispatch order maintained at Commit |
| SButFPPO | Commit order maintained at Store Buffer |
| Write Serialization | Per-physical-address total order on all writes reaching Access Cache |
| SLR | Speculative Load Reordering |
| IPIInsertions | IPI handlers embedded within user thread |
| IPIOrdering | Enumeration of all nestings of IPI handlers |
| IPIReceive | Paths for OS code modeling receiving of IPI |
| IPIRecvAtomicity | OS code modeling receiving of IPI is atomic |
| IPIAffs | IPI handlers must complete before issuing thread allowed to proceed |
| TLBEntries | Paths for PT walks and TLB entry µhb nodes |
| TLBEntriesNoDups | No concurrent duplicate TLB entries |
6.2 Memory Dependency Prediction and Disambiguation
The first type of functionality we consider is a sophisticated, high-performance store buffer (SB) forwarding mechanism. We focus on capturing the key role address translation plays in the store buffer. If the implementation operated using only virtual addresses, for example, then it would be unable to detect virtual address synonyms, leading to problems such as the one in Figure 7. Placing the TLB on the critical path would avoid this problem; however, this would come with a performance cost. This realistic example reveals the perhaps-surprising complexity involved in the seemingly straightforward process of store buffer forwarding. We start by describing the mechanism, and we then describe its implementation in µspec. Hardware or OS synopses are not used in modeling this feature, so they are not discussed.
**Mechanism:** High-performance forwarding in our SandyBridge model consists of memory dependency prediction and memory disambiguation [19]. The prediction stage anticipates dynamic same-physical address dependencies between stores and loads to try to preemptively prevent consistency violations that might arise. The disambiguation stage later ensures that all predictions were correct. This pairing ensures that synonyms can be detected while keeping the TLB off of the forwarding critical path.
The mechanism we model works as follows. All stores write their virtual address and data into the SB in parallel with accessing the TLB. The physical address is later written into the SB as well, once the TLB provides it. Loads, in parallel with accessing the TLB, write their lower 12 bits (“index bits”) into a CAM-based load buffer that holds all loads that have not yet committed. Due to the minimum 4KB page size, these lower 12 bits will always be identical between virtual and physical addresses.
Initially, each load compares its index bits against the index bits of all older stores present in the store buffer. If no index match is found among the filled-in entries, then clearly no match exists. If an older entry is allocated but not filled in (because its address was not yet generated), it is predicted to cause no dependencies. If an index match is found, the load will then compare high-order bits. If the load’s virtual tag matches the virtual tag of the store, the store will forward its value to the load. If not, the load compares its physical tag against the store’s physical tag, stalling if either instruction’s TLB access has not yet returned a translation. If the physical tags match, the store will forward its data to the load. Otherwise, the load will have determined that no dependency exists between the load and that particular store.
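A compact way to see this decision procedure is the hypothetical sketch below; the function and field names are assumptions made for illustration, not the modeled hardware's actual structures. It compares index bits first, then virtual tags, then physical tags, stalling when a needed translation is not yet available.

```python
# Illustrative sketch of the store buffer forwarding decision described above.
# With a minimum 4KB page size, the low 12 "index" bits are identical for the
# virtual and physical versions of an address.

PAGE_OFFSET_BITS = 12

def index_bits(addr):
    return addr & ((1 << PAGE_OFFSET_BITS) - 1)

def tag(addr):
    return addr >> PAGE_OFFSET_BITS

def forwarding_decision(load_va, load_pa, sb_entry):
    """sb_entry: dict with 'va', 'pa' (None until the TLB fills it in), 'data'."""
    if index_bits(load_va) != index_bits(sb_entry["va"]):
        return ("no-match", None)                # different index bits: no dependency
    if tag(load_va) == tag(sb_entry["va"]):
        return ("forward", sb_entry["data"])     # virtual tags match
    if sb_entry["pa"] is None or load_pa is None:
        return ("stall", None)                   # must wait for translation(s)
    if tag(load_pa) == tag(sb_entry["pa"]):
        return ("forward", sb_entry["data"])     # physical tags match (synonym)
    return ("no-match", None)

# A synonym: different virtual tags, same physical page.
store = {"va": 0x2000_1234, "pa": 0x5000_1234, "data": 42}
print(forwarding_decision(0x3000_1234, 0x5000_1234, store))  # ('forward', 42)
```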
In addition to the above, our model must also reflect disambiguation: when a store's address resolves, it checks whether any younger load speculatively executed with a matching address was mispredicted, squashing it if so, thereby ensuring that each load receives its value from the latest store to the same physical address.
Homonyms are handled on Linux/x86 by flushing non-global TLB entries on each context switch.
**µspec Axioms:** Figure 8 shows a representative portion of the µspec encoding of this mechanism: the macro "StoreBufferForwardPTag" captures the case in which a load forwards data from a store buffer entry after an index match and a physical tag match (the virtual tags differ, as for synonyms).
```
DefineMacro "StoreBufferForwardPTag":
exists microop "w", (
  SameCore w i /\ IsAnyWrite w /\ ProgramOrder w i /\
  SameIndex w i /\ ~(SameVirtualTag w i) /\
  SamePhysicalTag w i /\ SameData w i /\
  EdgesExist [
    ((w, SB-VTag/Index/Data), (i, LB-SB-IndexCompare),   "SBEntryIndexPresent");
    ((w, SB-PTag),            (i, LB-SB-PTagCompare),    "SBEntryPTagPresent");
    ((i, LB-SB-DataForward),  (w, (0, MemoryHierarchy)), "BeforeSBEntryLeaves");
    ((i, LB-SB-IndexCompare), (i, LB-SB-VTagCompare),    "path");
    ((i, LB-SB-VTagCompare),  (i, LB-SB-PTagCompare),    "path");
    ((i, LB-PTag),            (i, LB-SB-PTagCompare),    "path");
    ((i, LB-SB-PTagCompare),  (i, LB-SB-DataForward),    "path");
    ((i, LB-SB-DataForward),  (i, WriteBack),            "path")
  ]) /\
ExpandMacro STBNoOtherMatchesBetweenSrcAndRead.
```
Figure 8: µspec Macro describing instance of SB-Forwarding
Other possibilities exist for memory disambiguation. An advantage of µspec is that the axioms representing the approach we describe above can be easily replaced by those representing other solutions (e.g., load reexecution [15]).
6.3 Memory Mapping/Remapping Functions, System Calls, and Interrupts

A second component of our Linux+SandyBridge model reflects the functionality of system calls and interrupts as they relate to memory mapping/remapping functions. This section describes the mechanisms by which these work, noting which parts are implemented in hardware and which in the OS. The model also serves as an example of how key hardware-OS functionality is reflected in our SandyBridge µspec model and in the hardware and OS synopses.
**Mechanism:** Although x86 TLB lookups and page table walks are performed by the hardware, x86 TLB coherence is OS-managed. To support this, x86 provides the privileged `invlpg` instruction, as well as support for cores to communicate via inter-processor interrupts (IPIs). `invlpg` serializes the instruction stream and invalidates the TLB entry containing the mapping for the specified virtual address. As a serializing instruction, `invlpg` causes all previous instructions to commit and drains all pending writes in the store buffer to memory before the following instruction is fetched. `invlpg` also ensures that the next access to the invalidated virtual page will be a TLB miss, thus forcing the latest version of the corresponding page table entry to be brought into the TLB.
**Linux Synopsis:** Our Linux synopsis expands the system call mprotect into code snippets which 1) update the PTE appropriately, 2) invalidate the now-stale TLB entry on the current core, and 3) send TLB shootdowns to other cores via IPIs and interrupt handlers. The interrupt handler performs its own `invlpg` operation before sending an acknowledgment to the sender and returning from the interrupt.
**SandyBridge Synopsis:** The SandyBridge synopsis adds ghost instructions to reflect hardware-initiated events. In this case, the hardware synopsis first adds a ghost instruction representing the reception of the interrupt. This allows the µspec model to draw µhb edges reflecting that operation. Second, it adds a ghost instruction representing a write to the FLAGS register bit that determines whether interrupts are enabled: on x86, after receiving an interrupt, subsequent interrupts are disabled until re-enabled by the handler or the OS. Interrupt re-enabling is handled by the OS code expanding to sequences containing either iret or cli instructions; both are modeled as implicit writes to the FLAGS register as well.
**µspec Axioms:** Figure 9 shows how the orderings between these events are specified within our SandyBridge µspec model. This snippet shows a macro EmbeddedIn and an axiom IPIOrdering that makes use of it. The user simply specifies the set of possibilities, and the solver (Section 5) automatically and efficiently enumerates all ways the axiom can be satisfied. A separate macro (not shown) adds pipeline- and store buffer-draining µhb edges representing the fact that interrupts are precise [19].
6.4 Page Table Walks
**Mechanism:** On the x86 architecture, the page table walker is built into the hardware [20]. This means that page table walk loads are not issued by the user or by the OS; instead, they are contained entirely within hardware and are invisible to the user.
**SandyBridge Synopsis:** Every potential page table walk is instantiated by the microarchitecture synopsis as a set of ghost instruction loads of the page table entry. (The TLB itself is modeled as described in Section 6.6.)

**µspec Axioms:** Our model handles these ghost loads as follows. First, it does not draw Fetch, Dispatch, etc. nodes in the paths (i.e., columns) for these special loads, as they do not pass through the pipeline. Second, the axioms never use predicates such as SameVirtualTag with them, since page table walks are done using only physical addresses. Third, since the loads are not TSO-ordered, they do not search the load buffer as in Section 6.2. Lastly, the model adds exceptions to, e.g., the mfence axioms to omit µhb edges touching these instructions. The model does ensure, however, that page table walks are ordered with respect to invlpg [16].
6.5 Status Bit Updates
**Mechanism:** When a page table accessed or dirty bit needs to be updated due to a memory access, our SandyBridge pipeline waits until the triggering instruction reaches the head of the reorder buffer. At that point, the processor injects microcode implementing the update into the buffer. It also must ensure that the update is ordered against younger instructions to prevent later loads from reading the now-stale state of the PTE from before the update.
**SandyBridge Synopsis:** For each store, the hardware synopsis instantiates dirty bit updates as ghost instructions performing a LOCKed RMW operation on the bits in memory. These instructions are inserted just before the triggering instruction. At a low level, we represent the read and the write of the RMW as separate microops, and atomicity is guaranteed by the µspec axioms described below.
**µspec Axioms:** The ghost instructions in a status bit update do traverse the Dispatch, Issue, and Commit stages, unlike the ghost page table walks, as the status bit updates do propagate through most of the pipeline and affect architectural state. They are not fetched, however. The orderings enforced for these ghost instructions are modeled in µspec by adding µhb edges enforcing that 1) all previous accesses must have committed, reflecting the condition that the triggering instruction has reached the head of the reorder buffer, and 2) the update is ordered before younger instructions, so that later loads cannot read the now-stale state of the PTE.
Initial: [x] = 0, [y] = 0, VA x → PA a (R/W, acc, dirty), VA y → PA a (R/W, acc, dirty)
| Core 0 | Core 1 |
|---|---|
| (i0.0) St [x/a] ← 1 | (i2.0) St [y/a] ← 2 |
| (i0.1) Ld PTE(x) | (i2.1) Ld PTE(y) |
| (i1.0) Ld [y/a] → r1 | (i3.0) Ld [x/a] → r2 |
| (i1.1) Ld PTE(y) | (i3.1) Ld PTE(x) |
Outcome: Forbidden; \( r1 = 2, r2 = 1 \)
Figure 10: Litmus test n5. Cycle shown with thicker edges.
6.6 Modeling TLB Occupancy
**µspec Axioms:** To model TLB occupancy in µhb graphs, we add two special new nodes to the paths of page table walk and status bit update ghost instructions. After the last memory access of the walk (or update) completes, the insertion of the entry into the TLB corresponds to a TLB entry creation event. Some time later, a TLB entry invalidation event takes place for the same entry. Note that this invalidation, in general, occurs long after the page table walk has completed. This approach, which resembles the value-in-cache-line (ViCL) mechanism developed previously [29], tracks the ordering of TLB state changes and applies equally well to microarchitectural variants such as allowing in-place updating of TLB status bits.
We then add a constraint stating that each instruction must read from a TLB entry which exists and which contains matching data (translation and status). More precisely, for a given instruction \( i \) accessing virtual address \( v \) and physical address \( p \), this means that there must exist a ghost instruction \( g \) such that the data returned (for walks) or written (for updates) by \( g \) is a PTE mapping the tag of \( v \) to the tag of \( p \). The relevant model axiom also checks for status and permission bits in the same way. Once the entry is found,
the axiom adds two $\mu$hb edges representing that 1) the TLB entry must be created before the instruction can access it, and 2) the TLB entry must be accessed before it is invalidated.
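A minimal sketch of this check, under assumed data structures (not the paper's axioms verbatim), is shown below: for a given access, each ghost walk whose entry matches the access's translation yields one way to satisfy the axiom, together with the two ordering edges just described.

```python
# Illustrative sketch: pairing an access with a TLB-entry lifetime created by a
# ghost page table walk, and the two ordering edges the axiom would then add.

def tlb_edges_for(access, walks):
    """access: dict with 'id', 'vtag', 'ptag'.
    walks: ghost instructions, each a dict with 'id', 'vtag', 'ptag'."""
    options = []
    for g in walks:
        if g["vtag"] == access["vtag"] and g["ptag"] == access["ptag"]:
            create_before_use = ((g["id"], "TLBEntryCreate"), (access["id"], "AccessTLB"))
            use_before_inval  = ((access["id"], "AccessTLB"), (g["id"], "TLBEntryInvalidate"))
            options.append([create_before_use, use_before_inval])
    return options  # the solver picks at least one option ("exists" over ghost walks)

walks = [{"id": "g0", "vtag": 0x300, "ptag": 0x500}]
print(tlb_edges_for({"id": "i8", "vtag": 0x300, "ptag": 0x500}, walks))
```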
Note that each page table walk may be but need not be associated with a particular instruction. A TLB prefetch could trigger a page table walk at any point in an execution, and so there may be “floating” walks. The TLB occupancy model operates in exactly the same way in both cases.
7. Case Study Litmus Tests
In this section, we study three interesting test cases in the context of the system modeled in Section 6.
7.1 Dependence Checks and Store Buffer Forwarding
As described in Section 2, n5_synonym tests whether the store buffer forwarding mechanism takes physical addresses into account. If either core’s store buffer does not realize that the two accesses from that core are synonyms, either load may be allowed to bypass the store before it, leading to an illegal outcome. Figure 10 shows one of the $\mu$hb graphs by which COATCheck verifies that such an (erroneous) outcome is impossible on our SandyBridge model.
The particular execution of Figure 10 shows the scenario in which the processor speculates that the store and load access different addresses and hence (under TSO rules) may be reordered. As in previous litmus tests, the red shading denotes hardware microcode operations (PT walks) executed on behalf of the user-level code. The columns of the $\mu$hb graph are annotated with instruction labels (user-level or microcode) corresponding to those in the litmus test.
When the load (i3.0) executes, it finds that the store buffer contains no previous entries with the same index (because in this scenario, the store (i2.0) has not yet issued). This leads to a happens-before edge (shown in thick blue in the figure) between the LB-SB-IndexCompare stage of (i3.0) and the SB-VTag/Index/Data stage of (i2.0). However, when the store (i2.0) does eventually execute, the condition under which it would not squash the speculatively-executed load is if the load buffer has no entries matching the same index. This could occur only if the load had not yet entered the load buffer. This situation would cause there to be a happens-before edge (also shown in thick blue) from the LBSearch stage of the (i2.0) store back to the LB-Index stage of the load (i3.0), thereby completing a happens-before cycle indicating that the scenario cannot occur in practice.
7.2 Page Remappings and TLB Shootdowns
Figure 11 shows the $\mu$hb graph for the Figure 12 scenario involving address remappings and TLB shootdowns. This scenario is derived from previous work [38, 39]; however, we fill in some underspecified page mappings and initial conditions and adapt it to x86-TSO. The original test also considered two outcomes, one of which was observable and the other of which was forbidden. Here, we split this into two tests; we treat the permitted variant as a separate test and a sanity check that the legal outcome remains observable.
At a high level, thread 0 changes the mapping for $x$ (i0.0), triggers a TLB shootdown (i2.0), and sends a message to thread 1 (i4.0). Thread 1 receives the message (i7.0) and writes to $x$ (i8.0), whose mapping was just modified.
Initially: [x] = 0, VA x → PA a (R/W, acc, dirty), (other mappings omitted for space reasons)
| Core 0 | Core 1 |
|---|---|
| **Thread 0** | **Thread 1a** |
| (i0.0) St [z/PTE(x)] ← (VA x → PA b) | (i7.0) Ld [y/c] → 2 |
| (i0.1) Ld PTE(x) | (i8.0) St [x/a] ← 3 |
| (i1.0) invlpg [x] | (i8.1) Ld PTE(x) → TLB |
| (i2.0) St [v/APIC] ← mrf | (i9.0) St [y/c] ← 4 |
| (i2.1) Ld PTE(v) → TLB | **Thread 1b** |
| (i3.0) Ld [v/d] → ack | (i10.0) Ld [v/APIC] → mrf |
| (i4.0) St [y/c] ← 2 | (i11.0) Ld EFLAGS → [IF] |
| (i5.0) Ld [y/c] → 4 | (i12.0) St EFLAGS ← [IF] |
| (i6.0) Ld [x/b] → 1 | (i13.0) invlpg |
| (i6.1) Ld PTE(x) → TLB | (i14.0) St [v/d] ← ack |
| | (i15.0) iret |
Outcome: Forbidden
Figure 12: Code for litmus test ipi8
Correctness dictates that this write use the new rather than the old mapping; however, this particular test has the load using the old, stale translation in an effort to have COATCheck verify that such a situation is unobservable. Thread 1 (i9.0) sends a message back to thread 0 (via i5.0), which checks (i6.0) that the value at x (according to the new mapping) was not overwritten by the thread 1 store (i8.0) (as it used the old mapping). These orderings (plus the remaining low-level details) cause COATCheck to find µhb cycles in all cases.
This graph combines many COATCheck features: IPIs and their handlers, microcode which does not pass through the entire pipeline, orderings enforced (or not) by different types of fences, and so on. The scale and complexity of these analyses emphasize the need for tools like COATCheck to automate the enumeration of such graphs and to identify the cycle (thicker highlighted lines) that demonstrates a non-observable outcome.
7.3 OS Responsibility for Synonym Tracking
A third case study discusses maintaining coherence among the status bits in a synonym set. Our SandyBridge hardware does not guarantee that dirty bit update ghost instructions will also update all synonym pages [20]; the OS is responsible for identifying and updating dirty bits for any synonym PT entries. This can lead to a scenario in which data may be lost if this coordination is not implemented correctly.
Suppose x and y are synonyms mapped to PA a whose PTEs are both marked as clean. When a store is done to x, hardware will mark its PTE as dirty. Suppose the OS intends to swap out the physical page holding PA a and hence needs to check if the page is dirty. If it does so by only checking the PTE for y (and not checking the PTE for its synonym x), then a naive OS may incorrectly think the physical page is clean. We have implemented the model for this scenario (µhb graph not shown due to space reasons), and while the analysis accounts for all known happens-before orderings, an acyclic µhb graph can be found, indicating the event may be observable.
Fixing this case requires that at page eviction time, a less-naive OS such as Linux checks whether the PTEs for all members of a synonym set are marked clean. The ELT would therefore contain an extra load of PTE(x) and a proposed outcome indicating that this load returned a clean PTE. COATCheck would then detect a violation of the SandyBridge Reads axiom, which requires that each load return the value of the latest store to that physical address (since the dirty bit update would be ordered before the load), and conclude that the bad outcome is no longer observable.
This example highlights the fact that memory transistency models are broader in scope than memory consistency models, including SC-for-VAMC [38, 39]. Note that in the example, the bug may be observable even when there is no reordering of any kind taking place. In other words, the bug is observable even on a sequentially consistent system. Furthermore, since the two non-ghost instructions access different virtual and physical addresses, and the status bit updates target a different PTE than the non-ghost load uses, the necessary ordering requirement (i.e., to check the state of the synonym pages) is also outside the scope of VAMC. Hence, there may be a translation-related memory ordering bug even on an SC-for-VAMC system. COATCheck is the first MCM analysis framework which can capture and reason about this additional level of indirection, and it can do so regardless of which layer of Figure 2 is used to solve it.
8. Automated Verification Software
8.1 Test Characteristics
We have performed tests of COATCheck on a wide-ranging set of 118 litmus tests. We include a number of tests from Intel and AMD manuals and other prior work to sanity check that our SandyBridge model (Section 6) behaves as expected [7, 17, 32, 45].
Another category of litmus tests consists of modifications of the "standard" tests above that directly test address translation and OS interface issues. We either derived these from previous work [38, 39] or wrote them ourselves to test new functionality or scenarios. In some cases these originate as user-level code and follow the full transformation path shown in Figure 3. In a few cases, we wrote the tests directly as ELTs to expediently achieve the desired test scenario. In general, these litmus tests lead to larger graphs, because they employ more of the ghost instructions and system call modules that lead to test size growth en route to an ELT.
Lastly, we also include the case studies of Section 7 plus numerous variants thereof. In all, this gives us a wide-ranging suite of tests on which we perform our analysis.
8.2 Performance Results
Figure 13 shows the full runtimes for our COATCheck implementation. Performance measurements were taken on a server with a 3.2GHz Intel Xeon E5-2667 v3 CPU. The runtimes shown in this graph are for the path from an ELT through the constraint solver of Section 5.
Figure 13: Execution times for full litmus test suite, sorted from smallest to largest run time.
Recall that a cyclic µhb graph means that the outcome cannot be observed, and an acyclic µhb graph means that it can. Therefore, when performing cycle checks, the cycle-checker can stop its analysis for a given graph as soon as a single cycle has been found in it. Likewise, when checking a litmus test whose outcome is intended to be forbidden, one can stop checking the whole test as soon as a single acyclic graph has been found; one acyclic graph already indicates that the supposedly forbidden outcome might be observable.
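The decision rule itself is simple; a one-line sketch (assuming a per-graph acyclicity check such as the one outlined in Section 5.1) is:

```python
# An outcome proposed by a litmus test might be observable iff at least one of its
# candidate uhb graphs is acyclic; any() short-circuits at the first acyclic graph.
def outcome_observable(graphs, is_acyclic):
    return any(is_acyclic(g) for g in graphs)
```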
Figure 13 shows the runtimes of the tests, sorted in increasing order. All 118 tests complete in less than 100 seconds, and many are much faster. All tests taking more than 30 seconds were created for this paper to test address translation, IPIs, TLB behavior, and so on. Although these µhb graphs are the largest for which such enumerate-and-check approaches have been used (they are often an order of magnitude larger than those from the original PipeCheck work), our runtimes are similar to or less than those of PipeCheck. This shows that even though µspec is more general than previous work on µhb graphs [25, 27], the COATCheck constraint solver algorithm and its implementation are orders of magnitude more efficient. This allows COATCheck to be suitable even for interactive verification, thereby dramatically improving ease of use and debugging in practice.
9. Related Work
Computer architects and verification experts have long studied MCMs, and in recent years researchers have made significant progress in formalizing MCM specifications and analysis [3, 13, 23, 28, 30, 32, 40].
10. Conclusions
Memory consistency models have long been difficult to define, implement, and analyze. The need to properly handle hardware-OS interface issues such as address translation only adds new complexity. This paper provides methods and a full, efficient toolflow for automatically specifying and verifying memory ordering at the hardware-OS interface. Through many detailed case studies, our work also pushes beyond existing definitions of "consistency" to the more general notion of "transistency", because the ordering requirements in some of our tests cannot be expressed by consistency models alone. The COATCheck toolset facilitates further exploration of hardware-OS memory ordering issues, both in support of system verification itself, and also in the context of forward-looking definitions and explorations of consistency and transistency.
Acknowledgments
We thank Guilherme Cox, Yatin Manerkar, Caroline Trippel, Jan Vesely, and the anonymous reviewers for their helpful feedback. This work was supported in part by C-FAR (under the grant HR0011-13-3-0002), one of the six SRC STARNet Centers, sponsored by MARCO and DARPA, and in part by the National Science Foundation (under grants CCF-1117147 and CCF-1253700).
Package ‘genefilter’
January 9, 2024
Title genefilter: methods for filtering genes from high-throughput experiments
Version 1.84.0
Description Some basic functions for filtering genes.
Suggests class, hgu95av2.db, tkWidgets, ALL, ROC, RColorBrewer, BiocStyle, knitr
Imports MatrixGenerics (>= 1.11.1), AnnotationDbi, annotate, Biobase, graphics, methods, stats, survival, grDevices
License Artistic-2.0
LazyLoad yes
LazyData yes
biocViews Microarray
VignetteBuilder knitr
git_url https://git.bioconductor.org/packages/genefilter
git_branch RELEASE_3_18
git_last_commit 94eca03
git_last_commit_date 2023-10-24
Repository Bioconductor 3.18
Date/Publication 2024-01-09
Author Robert Gentleman [aut], Vincent J. Carey [aut], Wolfgang Huber [aut], Florian Hahne [aut], Emmanuel Taiwo [ctb] (howtogenefilter vignette translation from Sweave to RMarkdown / HTML.), Khadijah Amusat [ctb] (Converted genefilter vignette from Sweave to RMarkdown / HTML.), Bioconductor Package Maintainer [cre]
**Anova**
*A filter function for Analysis of Variance.*
**Description**
Anova returns a function of one argument with bindings for `cov` and `p`. The function, when evaluated, performs an ANOVA using `cov` as the covariate. It returns `TRUE` if the p value for a difference in means is less than `p`.
**Usage**
```r
Anova(cov, p=0.05, na.rm=TRUE)
```
**Arguments**
- **cov**: The covariate. It must have length equal to the number of columns of the array that `Anova` will be applied to.
- **p**: The p-value for the test.
- **na.rm**: If set to `TRUE` any `NA`'s will be removed.
**Details**
The function returned by `Anova` uses `lm` to fit a linear model of the form `lm(x ~ cov)`, where `x` is the set of gene expressions. The F statistic for an overall effect is computed and if it has a `p`-value less than `p` the function returns `TRUE`, otherwise it returns `FALSE` for that gene.
**Value**
`Anova` returns a function with bindings for `cov` and `p` that will perform a one-way ANOVA.
The covariate can be continuous, in which case the test is for a linear effect for the covariate.
**Author(s)**
R. Gentleman
**See Also**
`kOverA`, `lm`
**Examples**
```r
set.seed(123)
af <- Anova(c(rep(1,5),rep(2,5)), .01)
af(rnorm(10))
```
---
**coxfilter**
*A filter function for univariate Cox regression.*
**Description**
A function that performs Cox regression with bindings for `surt`, `cens`, and `p` is returned. This function filters genes according to the attained p-value from a Cox regression using `surt` as the survival times, and `cens` as the censoring indicator. It requires `survival`.
**Usage**
```r
coxfilter(surt, cens, p)
```
Arguments
surt Survival times.
cens Censoring indicator.
p The p-value to use in filtering.
Value
Calls to the `coxph` function in the `survival` library are used to fit a Cox model. The filter function returns `TRUE` if the p-value in the fit is less than `p`.
Author(s)
R. Gentleman
See Also
`Anova`
Examples
```r
set.seed(-5)
sfun <- coxfilter(rexp(10), ifelse(runif(10) < .7, 1, 0), .05)
ffun <- filterfun(sfun)
dat <- matrix(rnorm(1000), ncol=10)
out <- genefilter(dat, ffun)
```
---
**cv**
*A filter function for the coefficient of variation.*
**Description**
`cv` returns a function with values for `a` and `b` bound. This function takes a single argument. It computes the coefficient of variation for the input vector and returns `TRUE` if the coefficient of variation is between `a` and `b`. Otherwise it returns `FALSE`.
**Usage**
`cv(a=1, b=Inf, na.rm=TRUE)`
**Arguments**
- `a` The lower bound for the cv.
- `b` The upper bound for the cv.
- `na.rm` If set to `TRUE` any NA’s will be removed.
**Details**
The coefficient of variation is the standard deviation divided by the absolute value of the mean.
Value
It returns a function of one argument. The function has an environment with bindings for \( a \) and \( b \).
Author(s)
R. Gentleman
See Also
pOverA, kOverA
Examples
```r
set.seed(-3)
cvfun <- cv(1, 10)
cvfun(rnorm(10, 10))
cvfun(rnorm(10))
```
dist2
Calculate an \( n \)-by-\( n \) matrix by applying a function to all pairs of columns of an \( m \)-by-\( n \) matrix.
Description
Calculate an \( n \)-by-\( n \) matrix by applying a function to all pairs of columns of an \( m \)-by-\( n \) matrix.
Usage
```r
dist2(x, fun, diagonal=0)
```
Arguments
- \( x \): A matrix.
- \( \text{fun} \): A symmetric function of two arguments that may be columns of \( x \).
- \( \text{diagonal} \): The value to be used for the diagonal elements of the resulting matrix.
Details
With the default value of \( \text{fun} \), this function calculates for each pair of columns of \( x \) the mean of the absolute values of their differences (which is proportional to the L1-norm of their difference). This is a distance metric.
The implementation assumes that \( \text{fun}(x[,i], x[,j]) \) can be evaluated for all pairs of \( i \) and \( j \) (see examples), and that \( \text{fun} \) is symmetric, i.e. \( \text{fun}(a, b) = \text{fun}(b, a) \). \( \text{fun}(a, a) \) is not actually evaluated, instead the value of \( \text{diagonal} \) is used to fill the diagonal elements of the returned matrix.
Note that `dist` computes distances between rows of \( x \), while this function computes relations between columns of \( x \) (see examples).
Value
A symmetric matrix of size $n \times n$.
Author(s)
Wolfgang Huber, James Reid
Examples
```r
# example matrix
z = matrix(1:15693, ncol=3)
matL1 = dist2(z)
matL2 = dist2(z, fun=function(a,b) sqrt(sum((a-b)^2, na.rm=TRUE)))
euc = as.matrix(dist(t(z)));
stopifnot(identical(dim(matL2), dim(euc)),
all(euc==matL2))
```
---
**eSetFilter**
*A function to filter an eSet object*
**Description**
Given a Bioconductor’s ExpressionSet object, this function filters genes using a set of selected filters.
**Usage**
```r
eSetFilter(eSet)
getFilterNames()
getFuncDesc(lib = "genefilter", funcs = getFilterNames())
getRdAsText(lib)
parseDesc(text)
parseArgs(text)
showESet(eSet)
setESetArgs(filter)
isESet(eSet)
```
**Arguments**
- **eSet**: an ExpressionSet object
- **lib**: a character string for the name of an R library where functions of interests reside
- **funcs**: a vector of character strings for names of functions of interest
- **text**: a character string from a field (e.g. description, arguments, ...) of an Rd file for a function
- **filter**: a character string for the name of a filter function
Details
These functions are deprecated. Please use the 'iSEE' package instead.
A set of filters may be selected; genes are then filtered through each of the filters in the order the filters were selected.
Value
A logical vector of length equal to the number of rows of 'expr'. The values in that vector indicate whether the corresponding row of 'expr' passed the set of filter functions.
Author(s)
Jianhua Zhang
See Also
genefilter
Examples
if( interactive() ) {
data(sample.ExpressionSet)
res <- eSetFilter(sample.ExpressionSet)
}
filtered_p
Compute and adjust p-values, with filtering
Description
Given filter and test statistics in the form of unadjusted p-values, or functions able to compute these statistics from the data, filter and then correct the p-values across a range of filtering stringencies.
Usage
filtered_p(filter, test, theta, data, method = "none")
filtered_R(alpha, filter, test, theta, data, method = "none")
Arguments
alpha A cutoff to which p-values, possibly adjusted for multiple testing, will be compared.
filter A vector of stage-one filter statistics, or a function which is able to compute this vector from data, if data is supplied.
test A vector of unadjusted p-values, or a function which is able to compute this vector from the filtered portion of data, if data is supplied. The option to supply a function is useful when the value of the test statistic depends on which hypotheses are filtered out at stage one. (The limma t-statistic is an example.)
theta A vector with one or more filtering fractions to consider. Actual cutoffs are then computed internally by applying `quantile` to the filter statistics contained in (or produced by) the filter argument.
data If filter and/or test are functions rather than vectors of statistics, they will be applied to data. The functions will be passed the whole data object, and must work over rows, etc. themselves as appropriate.
method The unadjusted p-values contained in (or produced by) test will be adjusted for multiple testing after filtering, using the `p.adjust` function in the `stats` package. See the method argument there for options.
Value
For `filtered_p`, a matrix of p-values, possibly adjusted for multiple testing, with one row per null hypothesis and one column per filtering fraction given in theta. For a given column, entries which have been filtered out are NA.
For `filtered_R`, a count of the entries in the `filtered_p` result which are less than alpha.
Author(s)
Richard Bourgon <bourgon@ebi.ac.uk>
See Also
See `rejection_plot` for visualization of `filtered_p` results.
Examples
# See the vignette: Diagnostic plots for independent filtering
---
**filterfun**
*Creates a first FALSE exiting function from the list of filter functions it is given.*
Description
This function creates a function that takes a single argument. The filtering functions are bound in the environment of the returned function and are applied sequentially to the argument of the returned function. When the first filter function evaluates to FALSE the function returns FALSE otherwise it returns TRUE.
Usage
`filterfun(...)`
Arguments
... Filtering functions.
Value
filterfun returns a function that takes a single argument. It binds the filter functions given to it in the environment of the returned function. These functions are applied sequentially (in the order they were given to filterfun). The function returns FALSE (and exits) when the first filter function returns FALSE otherwise it returns TRUE.
Author(s)
R. Gentleman
See Also
genefilter
Examples
```r
set.seed(333)
x <- matrix(rnorm(100, 2, 1), nc=10)
cvfun <- cv(.5, 2.5)
ffun <- filterfun(cvfun)
which <- genefilter(x, ffun)
```
---
filter_volcano
Volcano plot for overall variance filtering
Description
Generate a volcano plot contrasting p-value with fold change (on the log scale), in order to visualize the effect of filtering on overall variance and also assign significance via p-value.
Usage
filter_volcano(
d, p, S,
n1, n2,
alpha, S_cutoff,
cex = 0.5, pch = 19,
xlab = expression(paste(log[2], " fold change")),
ylab = expression(paste("-", log[10], " p")),
cols = c("grey80", "grey50", "black"),
ltys = c(1, 3),
use_legend = TRUE,
... )
Arguments
d Fold changes, typically on the log scale, base 2.
p The p-values
S The overall standard deviation filter statistics, i.e., the square roots of the overall variance filter statistics.
n1 Sample size for group 1.
n2 Sample size for group 2.
alpha Significance cutoff used for p-values.
S_cutoff Filter cutoff used for the overall standard deviation in S.
cex Point size for plotting.
pch Point character for plotting.
xlab Label for x-axis.
ylab Label for y-axis.
cols A vector of three colors used for plotting. These correspond to filtered data, data which pass the filter but are insignificant, and data pass the filter and are also statistically significant.
ltys The induced bound on log-scale fold change is plotted, as is the significance cutoff for data passing the filter. The ltys argument gives line styles for drawing these two thresholds on the plot.
use_legend Should a legend for point color be produced?
... Other arguments for plot.
Author(s)
Richard Bourgon <bourgon@ebi.ac.uk>
Examples
# See the vignette: Diagnostic plots for independent filtering
**findLargest**
*Find the Entrez Gene ID corresponding to the largest statistic.*
Description
Most microarrays have multiple probes per gene (Entrez). This function finds all replicates, and then selects the one with the largest value of the test statistic.
Usage
findLargest(gN, testStat, data = "hgu133plus2")
**Arguments**
- **gN**: A vector of probe identifiers for the chip.
- **testStat**: A vector of test statistics, of the same length as gN with the per probe test statistics.
- **data**: The character string identifying the chip.
**Details**
All the probe identifiers, gN, are mapped to Entrez Gene IDs and the duplicates determined. For any set of probes that map to the same Gene ID, the one with the largest test statistic is found. The return vector is the named vector of selected probe identifiers. The names are the Entrez Gene IDs. This could be extended in different ways, such as allowing the user to use a different selection criterion. Also, matching on different identifiers seems like another alternative.
**Value**
A named vector of probe IDs. The names are Entrez Gene IDs.
**Author(s)**
R. Gentleman
**See Also**
- sapply
**Examples**
```r
library("hgu95av2.db")
set.seed(124)
gN <- sample(ls(hgu95av2ENTREZID), 200)
stats <- rnorm(200)
findLargest(gN, stats, "hgu95av2")
```
---
**gapFilter**
*A filter to select genes based on there being a gap.*
**Description**
The `gapFilter` looks for genes that might usefully discriminate between two groups (possibly unknown at the time of filtering). To do this we look for a gap in the ordered expression values. The gap must come in the central portion (we exclude jumps in the initial Prop values or the final Prop values). Alternatively, if the IQR for the gene is large that will also pass our test and the gene will be selected.
Usage
gapFilter(Gap, IQR, Prop, na.rm=TRUE, neg.rm=TRUE)
Arguments
- **Gap**: The size of the gap required to pass the test.
- **IQR**: The size of the IQR required to pass the test.
- **Prop**: The proportion (or number) of samples to exclude at either end.
- **na.rm**: If TRUE then NA’s will be removed before processing.
- **neg.rm**: If TRUE then negative values in x will be removed before processing.
Details
As stated above, we are interested in genes whose ordered expression values show either a sufficiently large gap in the central portion or a sufficiently large IQR.
Value
A function that returns either TRUE or FALSE depending on whether the vector supplied has a gap larger than Gap or an IQR (inter quartile range) larger than IQR. For computing the gap we want to exclude a proportion, Prop from either end of the sorted values. The reason for this requirement is that genes which differ in expression levels only for a few samples are not likely to be interesting.
Author(s)
R. Gentleman
See Also
ttest, genefilter
Examples
```r
set.seed(256)
x <- c(rnorm(10, 100, 3), rnorm(10, 100, 10))
y <- x + c(rep(0, 10), rep(100, 10))
tmp <- rbind(x, y)
Gfilter <- gapFilter(200, 100, 5)
ffun <- filterfun(Gfilter)
genefilter(tmp, ffun)
```
**genefilter**
*A function to filter genes.*
Description
genefilter filters genes in the array expr using the filter functions in flist. It returns an array of logical values (suitable for subscripting) of the same length as there are rows in expr. For each row of expr the returned value is TRUE if the row passed all the filter functions. Otherwise it is set to FALSE.
Usage
genefilter(expr, flist)
Arguments
expr A matrix or ExpressionSet that the filter functions will be applied to.
flist A list of filter functions to apply to the array.
Details
This package uses a very simple but powerful protocol for filtering genes. The user simply constructs any number of tests that they want to apply. A test is simply a function (as constructed using one of the many helper functions in this package) that returns TRUE if the gene of interest passes the test (or filter) and FALSE if the gene of interest fails.
The benefit of this approach is that each test is constructed individually (and can be tested individually). The tests are then applied sequentially to each gene. The function returns a logical vector indicating whether the gene passed all tests functions or failed at least one of them.
Users can construct their own filters. These filters should accept a vector of values, corresponding to a row of the expr object. The user defined function should return a length 1 logical vector, with value TRUE or FALSE. User-defined functions can be combined with filterfun, just as built-in filters.
Value
A logical vector of length equal to the number of rows of expr. The values in that vector indicate whether the corresponding row of expr passed the set of filter functions.
Author(s)
R. Gentleman
See Also
genefilter, kOverA
Examples
```r
set.seed(-1)
f1 <- kOverA(5, 10)
flist <- filterfun(f1)
exprA <- matrix(rnorm(1000, 10), ncol = 10)
ans <- genefilter(exprA, flist)
```
**genefilter-deprecated**
*Deprecated functions in package 'genefilter'*
Description
These functions are provided for compatibility with older versions of ‘genefilter’ only, and will be defunct at the next release.
Details
The following functions are deprecated and will be made defunct; use the replacement indicated below:
- eSetFilter
- getFilterNames
- getFuncDesc
- getRdAsText
- parseDesc
- parseArgs
- showESet
- setESetArgs
- isESet
**genefinder**
*Finds genes that have similar patterns of expression.*
Description
Finds genes that have similar patterns of expression.
Usage
```r
genefinder(X, ilist, numResults=25, scale="none", weights, method="euclidean")
```
Arguments
- **X**: A numeric matrix where columns represent patients and rows represent genes.
- **ilist**: A vector of genes of interest. Contains indices of genes in matrix X.
- **numResults**: Number of results to display, starting from the least distance to the greatest.
- **scale**: One of "none", "range", or "zscore". Scaling is carried out separately on each row.
- **weights**: A vector of weights applied across the columns of X. If no weights are supplied, no weights are applied.
- **method**: One of "euclidean", "maximum", "manhattan", "canberra", "correlation", "binary".
Details
If the scale option is "range", then the input matrix is scaled using genescale(). If it is "zscore", then the input matrix is scaled using the scale builtin with no arguments.
The method option specifies the metric used for gene comparisons. The metric is applied, row by row, for each gene specified in ilist.
The "correlation" option for the distance method will return a value equal to 1-correlation(x).
See **dist** for a more detailed description of the distances.
Value
The returned value is a list containing an entry for each gene specified in ilist. Each list entry contains an array of distances for that gene of interest.
Author(s)
J. Gentry and M. Kajen
See Also
- **genescale**
Examples
```
set.seed(12345)
# create some fake expression profiles
m1 <- matrix(1:12, 4, 3)
v1 <- 1
nr <- 2
# find the 2 rows of m1 that are closest to row 1
genefinder(m1, v1, nr, method="euc")
v2 <- c(1,3)
genefinder(m1, v2, nr)
genefinder(m1, v2, nr, scale="range")
genefinder(m1, v2, nr, method="manhattan")
m2 <- matrix(rnorm(100), 10, 10)
v3 <- c(2, 5, 6, 8)
nr2 <- 6
genefinder(m2, v3, nr2, scale="zscore")
```
genescale Scales a matrix or vector.
Description
genescale returns a scaled version of the input matrix m by applying the following formula to each column of the matrix:
\[ y[i] = (x[i] - \min(x))/(\max(x) - \min(x)) \]
Usage
genescale(m, axis=2, method=c("Z", "R"), na.rm=TRUE)
Arguments
- **m**: Input a matrix or a vector with numeric elements.
- **axis**: An integer indicating which axis of m to scale.
- **method**: Either "Z" or "R", indicating whether a Z scaling or a range scaling should be performed.
- **na.rm**: A boolean indicating whether NA's should be removed.
Details
Either the rows or columns of m are scaled. This is done either by subtracting the mean and dividing by the standard deviation ("Z") or by subtracting the minimum and dividing by the range.
Value
A scaled version of the input. If m is a matrix or a data frame, then the dimensions of the returned value agree with those of m; in both cases the returned value is a matrix.
Author(s)
R. Gentleman
See Also
genefinder, scale
Examples
m <- matrix(1:12, 4, 3)
genescale(m)
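The range-scaling formula shown above is easy to reproduce by hand; the following sketch (which scales each column, as in the Description, and is not the package implementation) illustrates it:
```r
m <- matrix(1:12, 4, 3)
## apply the range-scaling formula from the Description to each column
byHand <- apply(m, 2, function(x) (x - min(x)) / (max(x) - min(x)))
byHand
## compare with genescale(m, method = "R"); the axis argument controls
## whether rows or columns are scaled
```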
Description
For data assumed to be drawn from a unimodal, continuous distribution, the mode is estimated by the “half-range” method. Bootstrap resampling for variance reduction may optionally be used.
Usage
half.range.mode(data, B, B.sample, beta = 0.5, diag = FALSE)
Arguments
data A numeric vector of data from which to estimate the mode.
B Optionally, the number of bootstrap resampling rounds to use. Note that B = 1 resamples 1 time, whereas omitting B uses data as is, without resampling.
B.sample If bootstrap resampling is requested, the size of the bootstrap samples drawn from data. Default is to use a sample which is the same size as data. For large data sets, this may be slow and unnecessary.
beta The fraction of the remaining range to use at each iteration.
diag Print extensive diagnostics. For internal testing only... best left FALSE.
Details
Briefly, the mode estimator is computed by iteratively identifying densest half ranges. (Other fractions of the current range can be requested by setting beta to something other than 0.5.) A densest half range is an interval whose width equals half the current range, and which contains the maximal number of observations. The subset of observations falling in the selected densest half range is then used to compute a new range, and the procedure is iterated. See the references for details.
If bootstrapping is requested, B half-range mode estimates are computed for B bootstrap samples, and their average is returned as the final estimate.
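A bare-bones illustration of this iteration is sketched below; it is not the package implementation and ignores details such as tie handling and the exact stopping rule:
```r
naiveHalfRangeMode <- function(x, beta = 0.5) {
  x <- sort(x)
  while (length(x) > 2) {
    width <- beta * (max(x) - min(x))
    ## count observations in each candidate interval [x[i], x[i] + width]
    counts <- vapply(x, function(lo) sum(x >= lo & x <= lo + width), integer(1))
    lo <- x[which.max(counts)]         # left end of a densest interval
    x <- x[x >= lo & x <= lo + width]  # keep only observations in that interval
    if (max(x) - min(x) == 0) break
  }
  mean(x)
}
naiveHalfRangeMode(c(rnorm(1000), rnorm(200, mean = 3)))
```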
Value
The mode estimate.
Author(s)
Richard Bourgon <bourgon@stat.berkeley.edu>
References
See Also
`shorth`
Examples
```r
## A single normal-mixture data set
x <- c( rnorm(10000), rnorm(2000, mean = 3) )
M <- half.range.mode( x )
M.bs <- half.range.mode( x, B = 100 )
if(interactive()){
hist( x, breaks = 40 )
abline( v = c( M, M.bs ), col = "red", lty = 1:2 )
legend( 1.5, par("usr")[4],
c( "Half-range mode", "With bootstrapping (B = 100)" ),
lwd = 1, lty = 1:2, cex = .8, col = "red"
)
}
# Sampling distribution, with and without bootstrapping
X <- rbind(
matrix( rnorm(1000 * 100), ncol = 100 ),
matrix( rnorm(200 * 100, mean = 3), ncol = 100 )
)
M.list <- list(
Simple = apply( X, 2, half.range.mode ),
BS = apply( X, 2, half.range.mode, B = 100 )
)
if(interactive()){
boxplot( M.list, main = "Effect of bootstrapping" )
abline( h = 0, col = "red" )
}
```
kappa_p
Compute proportionality constant for fold change bound.
Description
Filtering on overall variance induces a lower bound on fold change. This bound depends on the significance of the evidence against the null hypothesis, and is a multiple of the cutoff used for an overall variance filter. It also depends on sample size in both of the groups being compared. These functions compute the multiplier for the supplied p-values or t-statistics.
Usage
kappa_p(p, n1, n2 = n1)
kappa_t(t, n1, n2 = n1)
Arguments
- **p**: The p-values at which to compute the multiplier.
- **t**: The t-statistics at which to compute the multiplier.
- **n1**: Sample size for class 1.
- **n2**: Sample size for class 2.
Value
A vector of multipliers: one per p-value or t-statistic in p or t.
Author(s)
Richard Bourgon <bourgon@ebi.ac.uk>
Examples
# See the vignette: Diagnostic plots for independent filtering
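A minimal usage sketch (the sample sizes and values below are arbitrary, for illustration only):
```r
## multipliers for a few p-values, and for a t-statistic, with 5 samples per group
kappa_p(c(0.05, 0.01), n1 = 5)
kappa_t(2, n1 = 5)
## per the Description, the induced lower bound on fold change is obtained by
## multiplying such a value by the cutoff used for the overall variance filter
```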
kOverA
A filter function for k elements larger than A.
Description
kOverA returns a filter function with bindings for k and A. This function evaluates to TRUE if at least k of the argument's elements are larger than A.
Usage
kOverA(k, A=100, na.rm=TRUE)
Arguments
A The value you want to exceed.
k The number of elements that have to exceed A.
na.rm If set to TRUE any NA’s will be removed.
Value
A function with bindings for A and k.
Author(s)
R. Gentleman
See Also
pOverA
Examples
fg <- kOverA(5, 100)
fg(90:100)
fg(98:110)
Description
maxA returns a function with the parameter A bound. The returned function evaluates to TRUE if any element of its argument is larger than A.
Usage
maxA(A=75, na.rm=TRUE)
Arguments
A The value that at least one element must exceed.
na.rm If TRUE then NA’s are removed.
Value
maxA returns a function with an environment containing a binding for A.
Author(s)
R. Gentleman
See Also
pOverA
Examples
```r
ff <- maxA(30)
ff(1:10)
ff(28:31)
```
---
Filtering of Features in an ExpressionSet
Description
The function `nsFilter` tries to provide a one-stop shop for different options of filtering (removing) features from an ExpressionSet. Filtering features exhibiting little variation, or a consistently low signal, across samples can be advantageous for the subsequent data analysis (Bourgon et al.). Furthermore, one may decide that there is little value in considering features with insufficient annotation.
Usage
```r
nsFilter(eset, require.entrez=TRUE,
require.GOBP=FALSE, require.GOCC=FALSE,
require.GOMF=FALSE, require.CytoBand=FALSE,
remove.dupEntrez=TRUE, var.func=IQR,
var.cutoff=0.5, var.filter=TRUE,
filterByQuantile=TRUE, feature.exclude="^AFFX", ...)
```
```r
varFilter(eset, var.func=IQR, var.cutoff=0.5, filterByQuantile=TRUE)
```
```r
featureFilter(eset, require.entrez=TRUE,
require.GOBP=FALSE, require.GOCC=FALSE,
require.GOMF=FALSE, require.CytoBand=FALSE,
remove.dupEntrez=TRUE, feature.exclude="^AFFX")
```
Arguments
- `eset` : an ExpressionSet object
- `var.func` : The function used as the per-feature filtering statistic. This function should return a numeric vector of length one when given a numeric vector as input.
- `var.filter` : A logical indicating whether to perform filtering based on `var.func`.
- `filterByQuantile` : A logical indicating whether `var.cutoff` is to be interpreted as a quantile of all `var.func` values (the default), or as an absolute value.
- `var.cutoff` : A numeric value. If `var.filter` is TRUE, features whose value of `var.func` is less than either: the `var.cutoff`-quantile of all `var.func` values (if `filterByQuantile` is TRUE), or `var.cutoff` (if `filterByQuantile` is FALSE), will be removed.
- `require.entrez` : If TRUE, filter out features without an Entrez Gene ID annotation. If using an annotation package where an identifier system other than Entrez Gene IDs is used as the central ID, then that ID will be required instead.
- `require.GOBP`, `require.GOCC`, `require.GOMF` : If TRUE, filter out features whose target genes are not annotated to at least one GO term in the BP, CC or MF ontology, respectively.
- `require.CytoBand` : If TRUE, filter out features whose target genes have no mapping to cytoband locations.
- `remove.dupEntrez` : If TRUE and there are features mapping to the same Entrez Gene ID (or equivalent), then the feature with the largest value of `var.func` will be retained and the other(s) removed.
- `feature.exclude` : A character vector of regular expressions. Feature identifiers (i.e. value of `featureNames(eset)`) that match one of the specified patterns will be filtered out. The default value is intended to filter out Affymetrix quality control probe sets.
- `...` : Unused, but available for specializing methods.
Details
In this section, the effect of filtering on the type I error rate estimation / control of subsequent hypothesis testing is explained. See also the paper by Bourgon et al.
Marginal type I errors: Filtering on the basis of a statistic which is independent of the test statistic used for detecting differential gene expression can increase the detection rate at the same marginal type I error. This is clearly the case for filter criteria that do not depend on the data, such as the annotation based criteria provided by the nsFilter and featureFilter functions. However, marginal type I error can also be controlled for certain types of data-dependent criteria. Call \( U_I \) the stage 1 filter statistic, which is a function that is applied feature by feature, based on whose value the feature is or is not accepted to pass to stage 2, and which depends only on the data for that feature and not on any other feature; and call \( U_{II} \) the stage 2 test statistic for differential expression. Sufficient conditions for marginal type-I error control are:
- \( U_I \) the overall (across all samples) variance or mean, \( U_{II} \) the t-statistic (or any other scale and location invariant statistic), data normal distributed and exchangeable across samples.
- \( U_I \) the overall mean, \( U_{II} \) the moderated t-statistic (as in limma's eBayes function), data normal distributed and exchangeable.
- \( U_I \) a sample-class label independent function (e.g. overall mean, median, variance, IQR), \( U_{II} \) the Wilcoxon rank sum statistic, data exchangeable.
Experiment-wide type I error: Marginal type-I error control provided by the conditions above is sufficient for control of the family-wise error rate (FWER). Note, however, that common false discovery rate (FDR) methods depend not only on the marginal behaviour of the test statistics under the null hypothesis, but also on their joint distribution. The joint distribution can be affected by filtering, even when this filtering leaves the marginal distributions of true-null test statistics unchanged. Filtering might, for example, change the correlation structure. The effect of this is negligible in many cases in practice, but this depends on the dataset and the filter used, and the assessment is the responsibility of the data analyst.
Annotation Based Filtering Arguments require.entrez, require.GOBP, require.GOCC, require.GOMF and require.CytoBand filter based on available annotation data. The annotation package is determined by calling annotation(eset).
Variance Based Filtering The var.filter, var.func, var.cutoff and filterByQuantile arguments control numerical cutoff-based filtering. Probes for which var.func returns NA are removed. The default var.func is IQR, which we here define as rowQ(eset, ceiling(0.75 * ncol(eset))) - rowQ(eset, floor(0.25 * ncol(eset))); this choice is motivated by the observation that unexpressed genes are detected most reliably through low variability of their features across samples. Additionally, IQR is robust to outliers (see note below). The default var.cutoff is 0.5 and is motivated by a rule of thumb that in many tissues only 40% of genes are expressed. Please adapt this value to your data and question.
By default the numerical-filter cutoff is interpreted as a quantile, so with the default settings, 50% of the genes are filtered.
Variance filtering is performed last, so that (if filterByQuantile=TRUE and remove.dupEntrez=TRUE) the final number of genes does indeed exclude precisely the var.cutoff fraction of unique genes remaining after all other filters were passed.
The stand-alone function varFilter performs only var.func-based filtering (and no annotation based filtering). featureFilter performs only annotation based filtering and duplicate removal; the duplicate removal always retains the highest-IQR probe for each gene.
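The variance-based part of the filter can be emulated by hand; the following rough sketch (not the package code, and ignoring the annotation-based steps and duplicate removal) shows the idea:
```r
library("Biobase")
data(sample.ExpressionSet)
eset <- sample.ExpressionSet
## per-feature IQR, as defined in the Details above
iqrs <- rowQ(eset, ceiling(0.75 * ncol(eset))) - rowQ(eset, floor(0.25 * ncol(eset)))
## keep features above the 0.5 quantile of the IQR values
keep <- iqrs > quantile(iqrs, 0.5, na.rm = TRUE)
dim(eset[keep, ])
```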
Value
For nsFilter a list consisting of:
- eset the filtered ExpressionSet
- filter.log a list giving details of how many probe sets were removed for each filtering step performed.
For both varFilter and featureFilter the filtered ExpressionSet.
Note
IQR is a reasonable variance-filter choice when the dataset is split into two roughly equal and relatively homogeneous phenotype groups. If your dataset has important groups smaller than 25% of the overall sample size, or if you are interested in unusual individual-level patterns, then IQR may not be sensitive enough for your needs. In such cases, you should consider using less robust and more sensitive measures of variance (the simplest of which would be sd).
Author(s)
Seth Falcon (somewhat revised by Assaf Oron)
References
Examples
```r
library("hgu95av2.db")
library("Biobase")
data(sample.ExpressionSet)
ans <- nsFilter(sample.ExpressionSet)
ans$eset
ans$filter.log
## skip variance-based filtering
ans <- nsFilter(sample.ExpressionSet, var.filter=FALSE)
a1 <- varFilter(sample.ExpressionSet)
a2 <- featureFilter(sample.ExpressionSet)
```
See Also
cv
Examples
```r
ff <- pOverA(p = .1, 10)
ff(1:20)
ff(1:5)
```
---
**rejection_plot**
Plot rejections vs. p-value cutoff
**Description**
Plot the number, or fraction, of null hypotheses rejected as a function of the p-value cutoff. Multiple sets of p-values are accepted, in a list or in the columns of a matrix, in order to permit comparisons.
**Usage**
```r
rejection_plot(p,
col, lty = 1, lwd = 1,
xlab = "p cutoff", ylab = "number of rejections",
xlim = c(0, 1), ylim,
legend = names(p),
at = c("all", "sample"),
n_at = 100,
probability = FALSE,
...)
```
**Arguments**
- **p**
- The p-values to be used for plotting. These may be in the columns of a matrix, or in the elements of a list. One curve will be generated for each column/element, and all NA entries will be dropped. If column or element names are supplied, they are used by default for a plot legend.
- **col**
- Colors to be used for each curve plotted. Recycled if necessary. If `col` is omitted, `rainbow` is used to generate a set of colors.
- **lty**
- Line styles to be used for each curve plotted. Recycled if necessary.
- **lwd**
- Line widths to be used for each curve plotted. Recycled if necessary.
- **xlab**
- X-axis text label.
- **ylab**
- Y-axis text label.
- **xlim**
- X-axis limits.
- **ylim**
- Y-axis limits.
- **legend**
- Text for legend. Matrix column names or list element names (see p above) are used by default. If NULL, no legend is plotted.
- **at**
- Should step functions be plotted with a step at every value in p, or should linear interpolation be used at a sample of points spanning xlim? The latter is preferable when there are many p-values.
- **n_at**
- When at = "sample" is given, how many sample points should be used for interpolation and plotting?
- **probability**
- Should the fraction of null hypotheses rejected be reported instead of the count? See the probability argument to hist.
- **...**
- Other arguments to pass to the plot call which sets up the axes. Note that the ... argument will not be passed to the lines calls which actually generate the curves.
Value
A list of the step functions used for plotting is returned invisibly.
Author(s)
Richard Bourgon <bourgon@ebi.ac.uk>
Examples
# See the vignette: Diagnostic plots for independent filtering
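The following small simulated illustration (not taken from the vignette; the p-value sets are made up) shows a typical call:
```r
set.seed(1)
p <- list(
  null   = runif(1000),                      # pure-null p-values
  signal = c(runif(800), rbeta(200, 0.5, 8)) # mostly null, plus some small p-values
)
if(interactive()) {
  rejection_plot(p, xlim = c(0, 0.2))
}
```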
rowFtests t-tests and F-tests for rows or columns of a matrix
Description
t-tests and F-tests for rows or columns of a matrix, intended to be speed efficient.
Usage
rowttests(x, fac, tstatOnly = FALSE, na.rm = FALSE)
colttests(x, fac, tstatOnly = FALSE, na.rm = FALSE)
fastT(x, ig1, ig2, var.equal = TRUE)
rowFtests(x, fac, var.equal = TRUE)
colFtests(x, fac, var.equal = TRUE)
Arguments
x Numeric matrix. The matrix must not contain NA values. For rowttests and colttests, x can also be an ExpressionSet.
fac Factor which codes the grouping to be tested. There must be 1 or 2 groups for the t-tests (corresponding to one- and two-sample t-test), and 2 or more for the F-tests. If fac is missing, this is taken as a one-group test (i.e. is only allowed for the t-tests). The length of the factor needs to correspond to the sample size: for the row* functions, the length of the factor must be the same as the number of columns of x, for the col* functions, it must be the same as the number of rows of x.
If x is an ExpressionSet, then fac may also be a character vector of length 1 with the name of a covariate in x.
tstatOnly A logical variable indicating whether to calculate p-values from the t-distribution with appropriate degrees of freedom. If TRUE, just the t-statistics are returned. This can be considerably faster.
na.rm A logical variable indicating whether to remove NA values prior to calculating the test statistics.
ig1 The indices of the columns of x that correspond to group 1.
ig2 The indices of the columns of x that correspond to group 2.
var.equal A logical variable indicating whether to treat the variances in the samples as equal. If 'TRUE', a simple F test for the equality of means in a one-way analysis of variance is performed. If 'FALSE', an approximate method of Welch (1951) is used, which generalizes the commonly known 2-sample Welch test to the case of arbitrarily many samples.
Details
If fac is specified, rowttests performs for each row of x a two-sided, two-class t-test with equal variances. fac must be a factor of length ncol(x) with two levels, corresponding to the two groups. The sign of the resulting t-statistic corresponds to "group 1 minus group 2". If fac is missing, rowttests performs for each row of x a two-sided one-class t-test against the null hypothesis 'mean=0'.
rowttests and colttests are implemented in C and should be reasonably fast and memory-efficient. fastT is an alternative implementation, in Fortran, possibly useful for certain legacy code. rowFtests and colFtests are currently implemented using matrix algebra in R. Compared to the rowttests and colttests functions, they are slower and use more memory.
Value
A data.frame with columns statistic, p.value (optional in the case of the t-test functions) and dm, the difference of the group means (only in the case of the t-test functions). The row.names of the data.frame are taken from the corresponding dimension names of x.
The degrees of freedom are provided in the attribute df. For the F-tests, if var.equal is 'FALSE', nrow(x)+1 degrees of freedom are given: the first one is the first degree of freedom (it is the same for each row) and the remaining ones are the second degree of freedom (one for each row).
Author(s)
Wolfgang Huber <whuber@embl.de>
References
See Also
mt.teststat
Examples
```r
##
## example data
##
x = matrix(runif(40), nrow=4, ncol=10)
f2 = factor(floor(runif(ncol(x))*2))
f4 = factor(floor(runif(ncol(x))*4))
##
## one- and two group row t-test; 4-group F-test
##
r1 = rowttests(x)
r2 = rowttests(x, f2)
r4 = rowFtests(x, f4)
##
## approximate equality
about.equal = function(x,y,tol=1e-10)
stopifnot(is.numeric(x), is.numeric(y), length(x)==length(y), all(abs(x-y) < tol))
##
## compare with the implementation in t.test
##
for (j in 1:nrow(x)) {
s1 = t.test(x[j,])
about.equal(s1$statistic, r1$statistic[j])
about.equal(s1$p.value, r1$p.value[j])
s2 = t.test(x[j,] ~ f2, var.equal=TRUE)
about.equal(s2$statistic, r2$statistic[j])
about.equal(s2$p.value, r2$p.value[j])
dm = -diff(tapply(x[j,], f2, mean))
about.equal(dm, r2$dm[j])
s4 = summary(lm(x[j,] ~ f4))
about.equal(s4$fstatistic["value"], r4$statistic[j])}
##
```
## rowpAUCs-methods
Rowwise ROC and pAUC computation
#### Description
Methods for fast rowwise computation of ROC curves and (partial) area under the curve (pAUC) using the simple classification rule \( x > \theta \), where \( \theta \) is a value in the range of \( x \).
#### Usage
```r
rowpAUCs(x, fac, p=0.1, flip=TRUE, caseNames=c("1", "2"))
```
#### Arguments
- \( x \): ExpressionSet or numeric matrix. The matrix must not contain NA values.
#### Details
Rowwise calculation of Receiver Operating Characteristic (ROC) curves and the corresponding partial area under the curve (pAUC) for a given data matrix or ExpressionSet. The function is implemented in C and thus reasonably fast and memory efficient. Cutpoints (theta) are calculated before the first, in between and after the last data value. By default, both classification rules \( x > \theta \) and \( x < \theta \) are tested and the (partial) area under the curve of the better one of the two is returned. This is only valid for symmetric cases, where the classification is independent of the magnitude of \( x \) (e.g., both over- and under-expression of different genes in the same class). For unsymmetric cases in which you expect \( x \) to be consistently higher/lower in one of the two classes (e.g. presence or absence of a single biomarker) set flip=FALSE or use the functionality provided in the ROC package. For better control over the classification (i.e., the choice of "Disease" and "Control" class in the sense of the Pepe et al paper), argument fac can be an integer in \([0,1]\) where 1 indicates "Disease" and 0 indicates "Control".
**Value**
An object of class \textit{rowROC} with the calculated specificities and sensitivities for each row and the corresponding pAUCs and AUCs values. See \textit{rowROC} for details.
**Methods**
Methods exist for rowpAUCs:
- `rowpAUCs` signature(x="matrix", fac="factor")
- `rowpAUCs` signature(x="matrix", fac="numeric")
- `rowpAUCs` signature(x="ExpressionSet")
- `rowpAUCs` signature(x="ExpressionSet", fac="character")
**Author(s)**
Florian Hahne <fhahne@fhcrc.org>
References
See Also
rocdemo.sca, pAUC, rowROC
Examples
```r
library(Biobase)
data(sample.ExpressionSet)
r1 = rowttests(sample.ExpressionSet, "sex")
r2 = rowpAUCs(sample.ExpressionSet, "sex", p=0.1)
plot(area(r2, total=TRUE), r1$statistic, pch=16)
sel <- which(area(r2, total=TRUE) > 0.7)
plot(r2[sel])
## this compares performance and output of rowpAUCs to function pAUC in package ROC
if(require(ROC)){
## performance
myRule = function(x)
pAUC(rocdemo.sca(truth = as.integer(sample.ExpressionSet$sex)-1 ,
data = x, rule = dxrule.sca), t0 = 0.1)
nGenes = 200
cat("computation time for ", nGenes, "genes:\n")
cat("function pAUC: ")
print(system.time(r3 <- esApply(sample.ExpressionSet[1:nGenes, ], 1, myRule)))
cat("function rowpAUCs: ")
print(system.time(r2 <- rowpAUCs(sample.ExpressionSet[1:nGenes, ],
"sex", p=1)))
## compare output
myRule2 = function(x)
pAUC(rocdemo.sca(truth = as.integer(sample.ExpressionSet$sex)-1 ,
data = x, rule = dxrule.sca), t0 = 1)
r4 <- esApply(sample.ExpressionSet[1:nGenes, ], 1, myRule2)
plot(r4, area(r2), xlab="function pAUC", ylab="function rowpAUCs",
main="pAUCs")
plot(r4, area(rowpAUCs(sample.ExpressionSet[1:nGenes, ],
"sex", p=1, flip=FALSE)), xlab="function pAUC", ylab="function rowpAUCs",
main="pAUCs")
r4[r4<0.5] <- 1-r4[r4<0.5]
plot(r4, area(r2), xlab="function pAUC", ylab="function rowpAUCs",
main="pAUCs")
}
```
Description
A class to model ROC curves and corresponding area under the curve as produced by rowpAUCs.
Objects from the Class
Objects can be created by calls of the form new("rowROC", ...).
Slots
data: Object of class "matrix" The input data.
ranks: Object of class "matrix" The ranked input data.
sens: Object of class "matrix" Matrix of sensitivity values for each gene at each cutpoint.
spec: Object of class "matrix" Matrix of specificity values for each gene at each cutpoint.
pAUC: Object of class "numeric" The partial area under the curve (integrated from 0 to p).
AUC: Object of class "numeric" The total area under the curve.
factor: Object of class "factor" The factor used for classification.
cutpoints: Object of class "matrix" The values of the cutpoints at which specificity and sensitivity was calculated. (Note: the data is ranked prior to computation of ROC curves; the cutpoints map to the ranked data.)
caseNames: Object of class "character" The names of the two classification cases.
p: Object of class "numeric" The limit to which pAUC is integrated.
Methods
show signature(object="rowROC") Print nice info about the object.
[ signature(x="rowROC", j="missing") Subset the object according to rows/genes.
plot signature(x="rowROC", y="missing") Plot the ROC curve of the first row of the object along with the pAUC. To plot the curve for a specific row/gene subsetting should be done first (i.e. plot(rowROC[1])).
pAUC signature(object="rowROC", p="numeric", flip="logical") Integrate area under the curve from 0 to p. This method returns a new rowROC object.
AUC signature(object="rowROC") Integrate total area under the curve. This method returns a new rowROC object.
sens signature(object="rowROC") Accessor method for sensitivity slot.
spec signature(object="rowROC") Accessor method for specificity slot.
area signature(object="rowROC", total="logical") Accessor method for pAUC slot.
**Author(s)**
Florian Hahne <fhahne@fhcrc.org>
**References**
**See Also**
`rowpAUCs`
**Examples**
```r
library("Biobase")
data("sample.ExpressionSet")
roc <- rowpAUCs(sample.ExpressionSet, "sex", p=0.5)
roc
area(roc[1:3])
if(interactive()) {
par(ask=TRUE)
plot(roc)
plot(1-spec(roc[1]), sens(roc[2]))
par(ask=FALSE)
}
pAUC(roc, 0.1)
roc
```
---
**rowSds**
Row variance and standard deviation of a numeric array
### Description
Row variance and standard deviation of a numeric array
### Usage
```r
rowVars(x, ...)
rowSds(x, ...)
```
### Arguments
- **x**
- An array of two or more dimensions, containing numeric, complex, integer or logical values, or a numeric data frame.
- **...**
- Further arguments that get passed on to `rowMeans` and `rowSums`.
Details
These are very simple convenience functions, the main work is done in rowMeans and rowSums. See the function definition of rowVars, it is very simple.
Value
A numeric or complex array of suitable size, or a vector if the result is one-dimensional. The ‘dimnames’ (or ‘names’ for a vector result) are taken from the original array.
Author(s)
Wolfgang Huber http://www.ebi.ac.uk/huber
See Also
rowMeans and rowSums
Examples
```r
a = matrix(rnorm(1e4), nrow=10)
rowSds(a)
```
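As the Details note, such functions are thin wrappers around rowMeans and rowSums. A sketch of how a row-variance function can be built from them (not necessarily identical to the package definition, and ignoring NA handling for the sample size) is:
```r
myRowVars <- function(x, ...) {
  n <- ncol(x)
  ## center each row by its mean, then average the squared deviations
  rowSums((x - rowMeans(x, ...))^2, ...) / (n - 1)
}
a <- matrix(rnorm(1e4), nrow = 10)
all.equal(myRowVars(a), rowVars(a))
```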
shorth
A location estimator based on the shorth
Description
A location estimator based on the shorth
Usage
shorth(x, na.rm=FALSE, tie.action="mean", tie.limit=0.05)
Arguments
x Numeric
na.rm Logical. If TRUE, then non-finite (according to is.finite) values in x are ignored. Otherwise, presence of non-finite or NA values will lead to an error message.
tie.action Character scalar. See details.
tie.limit Numeric scalar. See details.
Details
The shorth is the shortest interval that covers half of the values in x. This function calculates the mean of the x values that lie in the shorth. This was proposed by Andrews (1972) as a robust estimator of location.
Ties: if there are multiple shortest intervals, the action specified in tie.action is applied. Allowed values are mean (the default), max and min. For mean, the average value is considered; however, an error is generated if the start indices of the different shortest intervals differ by more than the fraction tie.limit of length(x). For min and max, the left-most or right-most, respectively, of the multiple shortest intervals is considered.
Rate of convergence: as an estimator of location of a unimodal distribution, under regularity conditions, the quantity computed here has an asymptotic rate of only \(n^{-1/3}\) and a complicated limiting distribution.
See half.range.mode for an iterative version that refines the estimate iteratively and has a builtin bootstrapping option.
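A naive version of this estimator is easy to write down; the sketch below (which omits the tie handling described above and is not the package implementation) finds the shortest interval containing half of the sorted values and averages the values inside it:
```r
naiveShorth <- function(x) {
  x <- sort(x)
  n <- length(x)
  k <- ceiling(n / 2)
  ## width of every interval spanning k consecutive order statistics
  widths <- x[k:n] - x[1:(n - k + 1)]
  i <- which.min(widths)      # first (left-most) shortest interval
  mean(x[i:(i + k - 1)])
}
naiveShorth(c(rnorm(500), runif(500) * 10))
```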
Value
The mean of the x values that lie in the shorth.
Author(s)
Wolfgang Huber http://www.ebi.ac.uk/huber, Ligia Pedroso Bras
References
- G Sawitzki, “The Shorth Plot.” Available at http://lshorth.r-forge.r-project.org/TheShorthPlot.pdf
See Also
half.range.mode
Examples
```r
x = c(rnorm(500), runif(500) * 10)
meth = c("mean", "median", "shorth", "half.range.mode")
est = sapply(meth, function(m) get(m)(x))
if(interactive()) {
colors = 1:4
hist(x, 40, col="orange")
abline(v=est, col=colors, lwd=3, lty=1:2)
legend(5, 100, names(est), col=colors, lwd=3, lty=1:2)
}
```
**tdata**
*A small test dataset of Affymetrix Expression data.*
**Description**
The `tdata` data frame has 500 rows and 26 columns. The columns correspond to samples while the rows correspond to genes. The row names are Affymetrix accession numbers.
**Usage**
```r
data(tdata)
```
**Format**
This data frame contains 26 columns.
**Source**
An unknown data set.
**Examples**
```r
data(tdata)
```
**ttest**
*A filter function for a t.test*
**Description**
`ttest` returns a function of one argument with bindings for `m` and `p`. The function, when evaluated, performs a t-test using `m` as the covariate. It returns `TRUE` if the p-value for a difference in means is less than the specified `p`.
**Usage**
```r
ttest(m, p=0.05, na.rm=TRUE)
```
**Arguments**
- `m`
If `m` is of length one then it is assumed that elements one through `m` of `x` will be one group. Otherwise `m` is presumed to be the same length as `x` and constitutes the groups.
- `p`
The `p`-value for the test.
- `na.rm`
If set to `TRUE` any NA's will be removed.
Details
When the data can be split into two groups (diseased and normal for example) then we often want to select genes on their ability to distinguish those two groups. The t-test is well suited to this and can be used as a filter function.
This helper function creates a t-test (function) for the specified covariate and considers a gene to have passed the filter if the p-value for the gene is less than the prespecified p.
Value
ttest returns a function with bindings for m and p that will perform a t-test.
Author(s)
R. Gentleman
See Also
kOverA, Anova, t.test
Examples
dat <- c(rep(1,5),rep(2,5))
set.seed(5)
y <- rnorm(10)
af <- ttest(dat, .01)
af(y)
af2 <- ttest(5, .01)
af2(y)
y[8] <- NA
af(y)
af2(y)
Index
* arith
shorth, 34
* array
rowSds, 33
* classes
rowROC-class, 32
* datasets
tdata, 36
* manip
Anova, 2
coxfilter, 3
cv, 4
dist2, 5
eSetFilter, 6
filterfun, 8
findLargest, 10
gapFilter, 11
genefilter, 13
genefinder, 14
genescale, 16
kOverA, 19
maxA, 20
nsFilter, 21
pOverA, 24
rowSds, 33
ttest, 36
* math
rowFtests, 26
rowpAUCs-methods, 29
* robust
half.range.mode, 17
* univar
half.range.mode, 17
[,rowROC,ANY,ANY,ANY-method
(rowROC-class), 32
Anova, 2, 4, 37
area(rowROC-class), 32
area,rowROC-method(rowROC-class), 32
AUC(rowROC-class), 32
AUC,rowROC-method(rowROC-class), 32
colFtests(rowFtests), 26
colFtests,ExpressionSet,character-method
(rowFtests), 26
colFtests,ExpressionSet,factor-method
(rowFtests), 26
colFtests,matrix,factor-method
(rowFtests), 26
colttests(rowFtests), 26
colttests,ExpressionSet,character-method
(rowFtests), 26
colttests,ExpressionSet,factor-method
(rowFtests), 26
colttests,ExpressionSet,missing-method
(rowFtests), 26
colttests,matrix,missing-method
(rowFtests), 26
coxfilter, 3
coxph, 4
cv, 4, 25
dist, 5, 15
dist2, 5
eBayes, 22
eSetFilter, 6
ExpressionSet, 27
fastT(rowFtests), 26
featureFilter(nsFilter), 21
filter_volcano, 9
filtered_p, 7
filtered_R(filtered_p), 7
filterfun, 8, 13
findLargest, 10
gapFilter, 11
genefilter, 7, 9, 12, 13, 13
- genefilter-deprecated, 14
- genefinder, 14, 17
- genefinder, ExpressionSet, vector-method (genefinder), 14
- genefinder, matrix, vector-method (genefinder), 14
- genescale, 15, 16
- getFilterNames (eSetFilter), 6
- getFuncDesc (eSetFilter), 6
- getRdAsText (eSetFilter), 6
- half.range.mode, 17, 35
- hist, 26
- is.finite, 34
- isESet (eSetFilter), 6
- kappa_p, 19
- kappa_t (kappa_p), 19
- kOverA, 3, 5, 13, 19, 37
- lines, 26
- lm, 3
- maxA, 20
- mt.teststat, 28
- nsFilter, 21
- nsFilter, ExpressionSet-method (nsFilter), 21
- p.adjust, 8
- parseArgs (eSetFilter), 6
- parseDesc (eSetFilter), 6
- pAUC, 31
- pAUC (rowROC-class), 32
- pAUC, rowROC, numeric-method (rowROC-class), 32
- plot, 26
- plot, rowROC, missing-method (rowROC-class), 32
- pOverA, 5, 20, 21, 24
- quantile, 8
- rainbow, 25
- rejection_plot, 8, 25
- rocdemo.sca, 31
- rowFtests, 26
- rowFtests, ExpressionSet, character-method (rowFtests), 26
- rowFtests, ExpressionSet, factor-method (rowFtests), 26
- rowFtests, matrix, factor-method (rowFtests), 26
- rowMeans, 33, 34
- rowpAUCs, 33
- rowpAUCs (rowpAUCs-methods), 29
- rowpAUCs, ExpressionSet, ANY-method (rowpAUCs-methods), 29
- rowpAUCs, ExpressionSet, character-method (rowpAUCs-methods), 29
- rowpAUCs, matrix, factor-method (rowpAUCs-methods), 29
- rowpAUCs, matrix, numeric-method (rowpAUCs-methods), 29
- rowpAUCs-methods, 29
- rowROC, 30, 31
- rowROC (rowROC-class), 32
- rowROC-class, 32
- rowSds, 33
- rowSums, 33, 34
- rowttests (rowFtests), 26
- rowttests, ExpressionSet, character-method (rowFtests), 26
- rowttests, ExpressionSet, factor-method (rowFtests), 26
- rowttests, ExpressionSet, missing-method (rowFtests), 26
- rowttests, matrix, factor-method (rowFtests), 26
- rowttests, matrix, missing-method (rowFtests), 26
- rowVars (rowSds), 33
- sapply, 11
- scale, 17
- sens (rowROC-class), 32
- sens, rowROC-method (rowROC-class), 32
- setESetArgs (eSetFilter), 6
- shorth, 18, 34
- show, rowROC-method (rowROC-class), 32
- showESet (eSetFilter), 6
- spec (rowROC-class), 32
- spec, rowROC-method (rowROC-class), 32
- t.test, 37
- tdata, 36
- ttest, 12, 36
- varFilter (nsFilter), 21
Architectural method to design and control dynamic composite web services
Manel Amel Djenouhat*
Cédric Laboratory,
CNAM,
Paris, France
and
LIRE Laboratory,
TLSI Department,
Abdelhamid Mehri University,
PB 67A, Nouvelle ville Ali Mendjeli, Constantine, Algeria
Email: manel.djenouhat@univ-constantine2.dz
*Corresponding author
Faiza Belala
LIRE Laboratory,
TLSI Department,
Abdelhamid Mehri University,
PB 67A, Nouvelle ville Ali Mendjeli, Constantine, Algeria
Email: faiza.belala@univ-constantine2.dz
Kamel Barkaoui
Cédric Laboratory,
CNAM,
Paris, France
Email: kamel.barkaoui@cnam.fr
Abstract: Nowadays, Web services constitute the core technology of IT infrastructure that has emerged in response to a fundamental shift in the way enterprises conduct their business. A componentised model emerges as the natural architecture for Web services-based applications. Using the Mop-ECATNets formalism (Meta-Open Extended Concurrent Algebraic Term Nets), a kind of high-level Petri net, we show, in this paper, how we can ensure the formal specification of dynamic Web services and control their interactions as well as their dynamic composition. Furthermore, in order to formally verify and execute Web services-based systems specifications, we implement the Mop-ECATNet model in the Maude system using an MDA (Model-Driven Architecture)-based approach.
Keywords: service-oriented architecture; dynamic web service composition; Meta-Open ECATNets; MDA; Maude.
Biographical notes: Manel Amel Djenouhat is a PhD student in the Computer Science Departments of the University of Constantine 2, Algeria, and of the Conservatoire National des Arts et Métiers of Paris, France. She is advised by Faiza Belala and Kamel Barkaoui. Her research interests include software engineering and formal methods, in particular, model-based software development and automated software engineering.
Faiza Belala is Professor in the Department of Software Technology and Information System at the University Abdelhamid Mehri, Constantine 2. She also currently serves as Chief of the GLSD team of the LIRE Laboratory in the same University. She received her PhD in Computer Science from Constantine University in 2002. Her research interests lie in the general field of theoretical computer science, with special focuses on formal methods (based on Petri nets, Rewriting logic, Bigraphs), applied to several domains as Web services computing and architecture description language. Her research has been published in various international journals and conferences. She frequently serves as a program committee member for various international conferences and workshops.
1 Introduction
Architectural design of large Web service-based applications has always played a significant role in determining the success of these systems. The current recognition of the importance of Service-Oriented Architecture (SOA) would appear to signal the emergence of a more disciplined basis for architectural design particularly if it combines formal models. This will significantly improve our ability to construct effective applications: on one hand, the SOA constitutes an architectural style defining an interaction model between the service providers, the service consumer and the service broker, and, on the other hand, formal models, as those related to Petri nets, use existing theoretical results and analysis tools to control and verify these interactions. Specifically, a principled use of their integration can have a positive impact on at least the following aspects of the development of services-oriented software: Specification, Understanding evolution, Reuse, Formal analysis, etc.
Our work aims to propose a high-level Petri nets-based approach, respecting the architecture style defining a SOA. It gives a set of patterns and guidelines for creating and controlling dynamic composite services that, because of the separation of concerns between description, implementation, and deployment, provide unprecedented flexibility in responsiveness to new reuses.
Effectively, several approaches exist in the literature to deal with the specification and analysis of complex dynamic Web services. We may classify them into two main classes: the formal model-based approaches and the business process-based ones. However, little work has attempted to achieve this goal while respecting the SOA style. The first category is based on mathematical formalisms, the most common being: Petri nets and their different extensions, such as recursive ECATNets (Kheldoun et al., 2015), Coloured and Temporal Petri nets (Zhang et al., 2008; Suzuki and Lu, 1989), Process algebra, such as CCS and π-calculus (Milner, 1989; Lucchi and Mazzara, 2007), and Automata (Lee, 1960).
The second category includes the business process modelling languages, such as BPMN (Business Process Modelling Notation) (Ouyang et al., 2008) and UML activity diagrams (Bock, 2003), which are considered less formal or informal approaches.
Although formal models have proven an effective means to specify and deploy Web services, providing a precise mathematical semantics for Web services composition, they show their limits when the service properties deal not only with causality of events, but also with data types, recursive and timed constructs and failure recovery mechanisms (Friedrich et al., 2010). Besides, their implemented tools are too generic: they are used regardless of the application domain and do not support all the inherent aspects of dynamic and complex Web services management.
In this work, we propose a refined dynamic Web services composition model. In this respect, we address the dynamic services composition issue using a layered Petri net-based model, called Mop-ECATNet (Meta-Open Extended Concurrent Algebraic Term Nets) (Latreche and Belala, 2014). Unlike other approaches, this model may ensure, in a natural way, the formal specification of the recursive composition of dynamic Web services. We argue that its Meta-level controls the Web services interactions and also their dynamic composition. The contribution of this paper is twofold. On one hand, we propose an extension of the Mop-ECATNet model given in Latreche and Belala (2014), refining its Meta-level by defining new structures and patterns for its control layer to better manage the dynamic reconfiguration of Web services-based systems. On the other hand, we propose to implement Mop-ECATNet models in the Maude system by defining a Meta-transformation, translating automatically a graph-based specification of Mop-ECATNet into Maude modules via the ATL and ACCELEO tools. This makes it possible to exploit the formal verification and analysis tools of Maude.
Section 2 presents some existing works that are related to our approach. Section 3 motivates our work using a much known example of Travel Agency. Section 4 recalls the basic concepts useful for understanding the paper notations. Section 5 describes how Mop-ECATNet model is used to control dynamic Web services composition, illustrating the defined patterns through our application example. The automatic execution principle of Mop-ECATNets is presented in Section 6. We conclude with a summary of contributions and an overview of future research directions.
2 Related work
While the best known formalisms alone provide a specification to compose and constrain flows of Web service invocation, there is little support for implementing their models. Process algebra and Petri net-based formal approaches for Web services aim to fulfil the requirement of a coordinated and collaborative service invocation specification, without any particular attention to, or support for, atomic and compensable transactions.
In the context of process algebras, we cite the work of Câmara et al. (2006), which proposed a formal method based on CCS for Web service choreography. Liu et al. (2007) have also used the CCS algebra to model and specify Web services, in order to reason about the behavioural properties of compositions. In Lucchi and Mazzara (2007), the semantics of the BPEL orchestration language is specified using π-calculus; however, parts of BPEL, such as data management, are not addressed in this work.
In the same way, and in order to consider the exchange of data during interactions and dynamic Web services composition, further work based on more advanced algebras has been suggested. Salaün et al. (2004) proposed a bidirectional transformation from LOTOS to BPEL and vice versa. This translation includes exception handling, and enables verification of temporal properties. In Dumez et al. (2008), the authors also used LOTOS to model the composition of Web services and used the CADP tool (Fernandez et al., 1996) for formal verification. Although process algebras are well suited to describing complex systems, their textual notation makes them less legible than Petri nets or automata.
Petri nets are very popular in the area of Business Process Management (BPM) due to the variety of control-flow processes they can model (van der Aalst, 2003). To demonstrate the ability of Petri nets to model Web service composition, a rigorous translation of BPEL into Petri nets was presented by Hinz et al. (2005). This translation covers all control structures and BPEL communication actions. The resulting models were used to verify the BPEL process through the WofBPEL tool (Ouyang et al., 2005).
Similarly, Hamadi and Benatallah (2003) have proposed a Petri net model which is more expressive for specifying the composition of Web services with no distinction between involved data types that may be complex in the Web service area (exchanged messages).
The work of Zhang et al. (2008) has addressed this problem by modelling Web services and their composition using coloured Petri nets (Jensen, 1989). The same model was reused later in Chemaa et al. (2012) in order to complete its associating semantics using Maude modules. This was achieved thanks to the graph transformation tool ATOM.
Following the same line of thought, we propose in this paper an alternative definition of the Mop-ECATNets model, which allows using their Meta-level, represented by an ECATNet, to control the Web services interactions and also their dynamic and transactional composition. In addition, to facilitate the implementation of this model in Maude, especially for users not familiar with rewriting logic concepts, we propose an automatic Mop-ECATNets/Maude translation using the ATL and ACCELEO tools.
We note that some existing works attempt to give solutions for managing transactional Web services composition (Bhiri et al., 2005; El Hadad et al., 2010; Orriëns et al., 2003), but these contributions are still limited in terms of using adequate formalisms and tools.
3 Motivating example
Nowadays, Web services composition makes it possible to answer increasingly complex users' needs by combining several Web services into a common business process. Composition in this case is classified into two approaches: orchestration and choreography. In orchestration, composition schemes are defined by specifying the coordinator's internal behaviour, whereas in the choreography approach, all peers participate in the composition scheme definition. Besides, Web services composition may be accomplished manually (or semi-automatically), by giving semantic suggestions that allow services to be selected effectively, or automatically, without human intervention. In the latter case, it permits efficient selection and integration of heterogeneous services on the Web at runtime, thus reducing costs and development time.
Other problems may arise owing to the constant change of the environment. This imposes taking flexibility requirements into account. By flexibility, we mean here the ability to take into account the need for change, not only at the conceptual stage, considering the expressiveness of the specification language, but also, and above all, during the execution process.
Through the following example (see Figure 1), we will identify some problems that may occur during the execution phase of this composite Web service: Travel Agency. We will notice that there exist independent business services: ‘Hotel booking’, ‘Flight booking’, etc. If we aim to compose them into a ‘Travel Agency’ service for example, several scenarios may be considered, but not all lead to a valid or meaningful composition. Indeed, some customer requests may require simultaneous intervention of more than one service to be performed. The best is to compose them into a single entity while allowing them to collaborate with each other.
To simplify the complexity of Web services composition, we have to distinguish, among the services used during the execution process, the composite service, also called the dynamic service, from the partner services that interact with it. In our case, the Travel Agency constitutes the dynamic service, while the partner services are the Book Airline, the Book Room and the Rent Car ones. If a partner service fails to achieve this composition, it is therefore necessary to provide new ones in order to replace it. This situation may occur in our example, for instance:
- When booking a hotel room, a connection problem can be observed (Service failure),
- When any partner service takes a long time to issue a response (Deadlock),
- When a partner service returns a response which does not correspond with client request (Negative Answer).
In this dynamic behaviour of reconfigurable Web services, we emphasise that the vitality of a service and its possibility to be replaced are transactional properties that should be expressed at the composite Web service level. These
Based on the atomicity degree of each service's transactions, we may identify atomic or business services. Recursively, a service is said to be atomic (i.e. with the semantics of 'all or nothing') when it becomes an indivisible component and its transactions (set of operations) cannot be subject to decomposition. In contrast, a business service provides a flexibility allowing its compensation. Indeed, service compensation can restore a system to its original state upon the occurrence of an error or a situation where the business transactions could not successfully proceed any further. See Table 1 for a better illustration of the relevant notations.
| Transaction | Transaction kind | Partner service |
| --- | --- | --- |
| Booking hotel service | Atomic re-executable [three times] | Access or rejection |
| Request (set place) | Business compensable | Response (list of available rooms) or (unavailable service, deadlock, service failure) |
| Selecting a room from the list or aborting | Atomic | Booking confirmation |
| Booking airline service | Atomic re-executable [three times] | Access or rejection |
| Request (set itinerary) | Business compensable | Response (list of available flights) or (unavailable service, deadlock, service failure) |
| Selecting an airline from the list or aborting | Atomic | Booking confirmation |
| Rent car service | Atomic re-executable [three times] | Access or rejection |
| Request (set itinerary) | Business compensable | Response (list of available cars) or (unavailable service, deadlock, service failure) |
| Selecting a car from the list or aborting | Atomic | Reservation confirmation |
In these situations, it is imperative to maintain the flexibility control of the Web service composition with the capacity to act dynamically in a reduced time. It is, therefore, necessary to provide new partner services in order to replace the failed ones and, also, very important to locate failure points. This is achieved through the control of service transactions.
The main objective of this work is to put into practice effective solutions based on sound existing formalisms able to manage all the previous situations in a dynamic and automatic way.
4 Background
4.1 Maude and rewriting logic
Marti-Oliet and Meseguer (2002) have introduced rewriting logic as a consequence of their work on the general logics to describe concurrent systems. Several languages based on rewriting logic were created, the most known are: CAFEOBJ (Diaconescu and Futatsugi, 2002), ELAN (Borovanský et al., 1998) and MAUDE (Clavel et al., 2002).
In rewriting logic, a dynamic system is represented by a rewriting theory $\mathcal{T} = (S, E, L, R)$ describing the complex structure of its states and the various possible transitions between them. In rewriting theory definition, $(S, E)$ represents an equational membership theory, $L$ is a set of labels and $R$ is a set of labelled conditional rewriting rules. These rewriting rules can be of the following form:
$$r : [t] \rightarrow [t'] \text{ if } C$$
where $r$ is a labelled rule, all the terms $[t], [t']$ and $C$ are $\Sigma$-terms and the conditions $C$ can be rewriting rules, membership equations in $T_{\Sigma}(\lambda)$ or any combination of both.
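For illustration (the rule and the names in it are hypothetical, not taken from the paper's Travel Agency model), a labelled conditional rewrite rule describing a booking step could be written as:
$$\mathit{confirm} : [\,\mathit{Requested}(r)\,] \rightarrow [\,\mathit{Booked}(r)\,] \quad \text{if} \quad \mathit{available}(r) \rightarrow \mathit{true}$$
Here the condition is itself a rewrite, which is one of the forms allowed for $C$.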
Given a rewriting theory, we say that $\mathcal{T}$ entails a formula $t \rightarrow t'$, and we write $\mathcal{T} \vdash [t] \rightarrow [t']$, if and only if it can be obtained by a finite application of the rewriting logic deduction rules: Reflexivity, Congruence, Replacement and Transitivity (Marti-Oliet and Meseguer, 2002).
The theoretical concepts of rewriting logic are implemented through the Maude language (Clavel et al., 2002). Its objective is to extend the use of declarative programming and formal methods to specify and verify critical and concurrent systems. It mainly comprises three types of modules: functional modules, which define the static aspects of a system and form a Maude sub-language (an extension of OBJ3) based on equational logic; system modules, which specify the dynamic aspects of the system using rewriting rules; and object-oriented modules, which specify object-oriented systems.
A Maude program represents a rewriting theory, i.e. a signature and a set of rewriting rules. The computation in this language corresponds to the deduction in rewriting logic. Furthermore, it is implemented through a running environment, allowing prototyping and formal analysis of concurrent and complex systems. MAUDE offers other tools like a theorem prover and a model-checker (LTL).
### 4.2 Mop-ECATNet definition
Mop-ECATNets (Meta-Open Extended Concurrent Algebraic Term Nets) (Latreche and Belala, 2014) are a sound combination of Meta-Petri nets and Open-ECATNets. They inherit flexibility of control from Meta-nets, and data structure, concurrency and composability from Open-ECATNets (Latreche et al., 2011). The model consists of two levels: the higher level, represented by an ECATNet (Bettaz et al., 1992), a class of high-level algebraic Petri nets, and the lower level, represented by a set of interacting Open-ECATNets.
Open-ECATNets extend ECATNets by adding a set of interface places in order to model, in a compact manner, open systems able to interact with their environment. This formalism has been shown to be well suited to specifying and executing Web service interaction models, but it cannot manage their reconfiguration at run time or in case of process failure.
The main contribution of the Mop-ECATNet formalism is the presence of Meta-level nets, able to control the transitions of the lower-level nets thanks to their Meta-places. Mop-ECATNets are also equipped with timing constraints: on one hand, all lower-level transitions are forced to fire as soon as they are enabled, and firing in this case is an atomic action; on the other hand, the tokens of some places are constrained by timestamps. This allows specifying, for some places, the processing time of requests and, for other places, the request adaptation timers. Moreover, the originality of Mop-ECATNets lies in their behavioural semantics, expressed naturally in terms of rewriting logic. Formally, the Mop-ECATNet definition given by Latreche and Belala (2014) is as follows:
Definition 1: (Latreche and Belala, 2014) A Mop-ECATNet \( ME = \langle E, O, Q, \lambda \rangle \) is a Meta-transitional net having as higher-level net the marked ECATNet \( E \) and as lower level the Open-ECATNet \( O \). \( Q \) is the incidence function mapping Meta-places of \( E \) to transitions of \( O \), and \( \lambda : ST \rightarrow N \) is a function that maps each token of a state place \( \mu \) (belonging to \( O \)) to its reconfiguration delay.
In Maude, a specification conforming to Definition 1 is structured into three modules:
- A module representing the conveyed tokens through system states (places),
- Another one representing the active transitions across which tokens are firing, and
- A module representing the different system configurations (initial, intermediate and final).
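A minimal skeleton of this three-module organisation might look as follows; every name and sort here is a placeholder chosen for illustration, and in particular the two-field token (value plus remaining lifetime) is only an assumption standing in for the paper's timestamped tokens.

```
fmod TOKENS is
  protecting NAT .
  sort Token .
  *** a token carrying a value and a remaining lifetime (assumed encoding)
  op tk : Nat Nat -> Token [ctor] .
endfm

fmod TRANSITIONS is
  sort TransId .
  *** low-level transitions t, t' and the Meta-level transition mt
  ops t t' mt : -> TransId [ctor] .
endfm

mod CONFIGURATIONS is
  protecting TOKENS .
  protecting TRANSITIONS .
  sorts PlaceId Place Configuration .
  *** dynamic places P1, P2 and Meta-places Mp, Mp'
  ops P1 P2 Mp Mp' : -> PlaceId [ctor] .
  op _|->_ : PlaceId Token -> Place [ctor] .
  subsort Place < Configuration .
  *** a configuration is a multiset of marked places
  op none : -> Configuration [ctor] .
  op __ : Configuration Configuration -> Configuration [ctor assoc comm id: none] .
endm
```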
The Mop-ECATNet Meta-level that manages the reconfiguration of the system in case of failure is specified by rewrite rules. Running the example in Maude gives the following scenario:
1. The transition \( t \) is activated by the application of a rewrite rule: the initial marking of the dynamic place \( P1 \) is decreased by one, and a timed token (3 time units) is added to place \( P2 \) (the time denotes the duration of the processing).
2. A conditional rewrite rule is also associated with the transition \( t \); it applies when the processing time is shorter than the token lifetime.
3. The reconfiguration process is ensured by the Meta-level. The transition \( t \) is controlled by the Meta-place \( Mp \); in case of failure, the Meta-level intervenes and activates a rewrite rule that fires the transition \( mt \) and lets the Meta-place \( Mp' \) consume the token from \( Mp \). The place \( Mp' \) controls the transition \( t' \), which performs a new solution to address this failure (a rough rendering of these steps is sketched below).
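Continuing the CONFIGURATIONS sketch above, the three steps could be rendered by rewrite rules of roughly the following shape; the duration of 3 time units comes from the scenario, but the rule names, the comparison used to detect failure and the way the Meta-token is encoded are all assumptions.

```
mod SCENARIO is
  protecting CONFIGURATIONS .
  vars V L : Nat .

  *** step 1: firing t moves a token from the dynamic place P1 to P2,
  *** where it will undergo a processing of 3 time units
  rl [fire-t] : P1 |-> tk(V, L) => P2 |-> tk(V, L) .

  *** step 2: the processing succeeds only if its duration (3) does not
  *** exceed the remaining token lifetime L
  crl [process-t] : P2 |-> tk(V, L) => none if 3 <= L .

  *** step 3: otherwise the Meta-level reacts: mt fires, the Meta-token
  *** moves from Mp to Mp', and Mp' enables the alternative transition t'
  crl [reconfigure] : Mp |-> tk(V, L) => Mp' |-> tk(V, L) if L < 3 .
endm
```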
Thus, Maude provides a platform for easy development and effective implementation of Mop-ECATNets. In the following, we show how we automatically integrate this model into Maude, using an MDA (Model-Driven Architecture) based approach, in order to formally verify and execute Web service-based system specifications.
## 5 Complex and dynamic web services as Mop-ECATNet
### 5.1 The principle of our approach
The field of Web services is dynamic and evolving: new services can be added, and existing services are constantly changed, temporarily suspended or eventually deleted. The volatility of Web services is reflected in the choice of participant services (or alternative Web services) at composition execution time, i.e. dynamically, which can increase the chances of validating the composition.
The volatility of Web services, together with the variability and dynamic change of their transactional properties, makes it difficult, if not impossible, to statically predict at the design phase all the scenarios that may arise during the composition process. We present in this work an alternative definition of the Mop-ECATNet model that manages, most prominently, the control and flexibility of the dynamic composition of Web services in terms of the properties of their transactions (or operations).
We define a composite Web service as a set of Open-ECATNets, where each net models a given service that may fail to achieve the composition process; in this case, it can be replaced by an equivalent Web service providing the same functionalities. A Web service, or its Open-ECATNet, is said to be compensable if it provides a set of operations to acquire a resource definitively (when possible) and also offers an offsetting transaction; a compensation process thus aims to cancel the effects of a Web service operation that could not be completed successfully. In the same context of composition failure, the Open-ECATNet of a participant service is re-executable if it is characterised by a finite number of allowed attempts and a waiting time between execution attempts. Besides, the operations or transitions of each Open-ECATNet may be vital or not, and replaceable or not.
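As a small illustration of the re-executable and compensable behaviours (the module, the operation names and the retry bound are assumptions introduced here, not part of the model above), such properties can be expressed directly as rewrite rules:

```
mod TRANSACTIONAL-SKETCH is
  protecting NAT .
  sort Call .
  *** a pending invocation carrying the number of remaining attempts
  op invoke : Nat -> Call [ctor] .
  ops booked failed compensated : -> Call [ctor] .
  var N : Nat .
  *** re-executable: an unsuccessful attempt leaves one fewer retry ...
  rl [retry]      : invoke(s N) => invoke(N) .
  rl [succeed]    : invoke(s N) => booked .
  *** ... and the service definitively fails when no attempts are left
  rl [give-up]    : invoke(0) => failed .
  *** compensable: a completed booking offers an offsetting transaction
  rl [compensate] : booked => compensated .
endm
```

For example, `search invoke(3) =>! X:Call .` enumerates the terminal outcomes (here failed or compensated) reachable under a three-attempt bound such as the one given for the car rental service in Table 1.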
In order to provide flexibility to a composite Web service, our approach aims to act at two stages:
**The design stage:**
1. Give a clear separation between the kinds of intervening services and their relationships, providing new compact structures to reduce the graph complexity of the composition.
2. Refine the Mop-ECATNet model by introducing new generic notations, which provide a more abstract view identifying all the key elements of dynamic composition upstream.
**The behavioural stage:**
1. Classify participant services according to their atomicity and vitality degree.
2. Improve the Meta-level control strategies (for instance, remove all Meta-edges unnecessarily linked to dynamic transitions, etc.).
3. Propose new reusable patterns for the control level.
4. Introduce the parallelism concept in the Meta-level to allow a flexible reconfiguration.
### 5.2 The flexibility control based on Mop-ECATNet
Although the Mop-ECATNet model is graphical, the growth of the number of participant services in a composition makes the specification more complex. We propose, in this section, to refine the control layer of this model in order to conveniently manage reconfiguration and failure in the dynamic composition of Web services. This new executable and refined composition model is equipped with control structures and patterns.
Table 2 summarises the mapping rules specifying dynamic and complex Web services with Mop-ECATNet.
Figure 3 Traditional Mop-ECATNet model for the Travel Agency example (see online version for colours)
We illustrate, through our running example of the Travel Agency composite Web service, the proposed design structures that allow a compact and readable graphical specification of the Mop-ECATNet model. A list of services is first identified:
1. **D**: the dynamic Web service that requests a flight ticket, a hotel room and a car.
2. **P1**: the partner service to book a flight ticket.
3. **P2**: a second partner service to book a hotel room.
4. **P3**: the third partner service to rent a car.
5. **S**: the composite Web service, specifying the interaction of **D** with **P1**, **P2** and **P3**.
Then, in a second step, the service controller provides lists of alternative services for the participant ones, in order to invoke them in case of dynamic system reconfiguration, for instance:
- **P1’**: an alternative service of **P1**; it books a flight ticket in case the **P1** service fails.
Finally, the dependence relationships between services are highlighted.
Using the traditional definition of the Mop-ECATNet model, we give an ad-hoc specification of this example in Figure 3. It reflects the complexity of the composition graph, which here represents only three composed services.
Indeed, it is hard to distinguish between the Mop-ECATNet levels, and we are forced to highlight each Open-ECATNet representing a participant service.
Obviously, the complexity of the graph and the dependence between its various elements greatly limit their reuse. Hence, a clear separation is naturally made between the different levels of the composite Web service structure, to provide a more compact and comprehensible model, especially in dynamic compositions involving a large number of Web services. The graphical model of the same example, given in Figure 4, illustrates this contribution.
We observe in this figure that the structures offer an abstract view and better reflect the system behaviour.
The distinction between the two levels (Meta and low levels) is obvious and the relations between services and their mutual dependencies are systematically highlighted.
Formally, an alternative definition of Mop-ECATNet model is given in the following.
**Definition 2**: A Mop-ECATNet MEWS modelling a Web service WS is a tuple \(<C, S, A>\) where:
- **C** is the marked controller service,
- **S** is the composite service, such that: \( S = (D, R, P, P', CP, Pi, Ti) \), **D** is the dynamic Web service having a set of requests \( R \); **P** and **P’** are respectively finite sets of Partner services and alternative Partner services; **CP** is a set of service control points; **Pi** and **Ti** are respectively finite sets of interface places and transitions (with \( Pi \cap Ti = \emptyset \)).
- **A** is the set of arcs joining the two Mop-ECATNet levels at the service control points **CP**.
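A direct, purely illustrative transcription of this structure into Maude sorts could look as follows (the signature below is an assumption of ours; the generated specification of Section 6 is organised differently):

```
fmod MEWS-STRUCTURE is
  sorts Controller CompositeService Arc ArcSet MopECATNet .
  subsort Arc < ArcSet .
  *** the set A of arcs joining the two levels at the service control points
  op noArc : -> ArcSet [ctor] .
  op __ : ArcSet ArcSet -> ArcSet [ctor assoc comm id: noArc] .
  *** a Mop-ECATNet MEWS = < C ; S ; A >
  op <_;_;_> : Controller CompositeService ArcSet -> MopECATNet [ctor] .
endfm
```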
Until now, we have shown how to use Mop-ECATNets as a formal specification support for defining the structural aspect of complex service composition. In the following, we focus on how to enrich the control level of the Mop-ECATNet model in order to ensure that the behavioural aspect of configuration and reconfiguration conforms to the behaviour expected by dynamic services in their composition. In this formalisation part, the control model should present all possible branches and scenarios, explored at run time, to reach a reliable system reconfiguration. Table 3 summarises some additional features proposed to refine the control layer of a Mop-ECATNet.
**Table 3** Mop-ECATNet control layer extension
<table>
<thead>
<tr>
<th>Behaviour of dynamic controlled service</th>
<th>Mop-ECATNet-based formalisation</th>
<th>Notation</th>
</tr>
</thead>
<tbody>
<tr>
<td>Atomic transaction</td>
<td>Unbound state (low-level transition)</td>
<td>start → end</td>
</tr>
<tr>
<td>Business transaction</td>
<td>Controlled state (Meta-edge linked to the low-level transition)</td>
<td>Interruption</td>
</tr>
<tr>
<td>System reconfiguration</td>
<td>Rewrite rules</td>
<td>(graphical notation; see online version)</td>
</tr>
</tbody>
</table>
According to the Mop-ECATNet control layer extension, we propose a set of patterns to formalise the dynamic behaviour of the composed services.
Definition 3: Given $ME_{WS}$, a Mop-ECATNet modelling a composite service, the set of strategies defining its behaviour is given by $S_{ME_{WS}}$, recursively formed as follows:
- A strategy $S \in S_{ME_{WS}}$ is a simple rewriting rule representing atomic transaction, or
- A finite sequence of rewriting rules defining a compensable business transaction, or
- A set of rewriting rules expressing re-executable service operations, or
- A set of coordinated conditional rewrite rules representing the direct or/and the indirect dependency synchronisation patterns, or
- A combination of the above rewriting rules.
First, we have to identify some transactional properties (atomic and business transactions) of the given Web services. Then, some Petri net structures have to be adjusted in order to respect these properties; for instance, (1) we remove Meta-edges linking dynamic transitions to Meta-places, since an atomic transaction cannot be compensated, and (2) we introduce parallel transitions in the control layer to avoid the unnecessary reconfiguration of all partner services. It is also more judicious to conceive a set of reusable patterns that can be applied directly in known situations, thus avoiding redundancy in complex service compositions.
Indeed, services are running in a parallel way and may depend on each other. The dependencies between services require the dynamic synchronisation of the concerned services at specific points during the execution of their respective processes.
There are two types of dependencies, direct dependencies and indirect ones.
The IDP and DDP patterns are reusable and dedicated to deal with two specific synchronisation cases.
Some dependencies between services do not interrupt the execution processes of the concerned services.
This dependency refers to the indirect dependency; the IDP pattern is implemented to manage dynamic synchronisation at specific points outside the execution context of each process (non-blocking synchronisation).
The pattern uses the concept of appointments (rendezvous) to synchronise processes.
For instance, to ensure the airport transfer (from the airport to the hotel) in the Travel Agency example, both the hotel and airport addresses are required to invoke the rental car service. The Meta-level must thus handle the synchronisation of the two services at the point $t13$ (Figure 3) and ensure that their respective processes are correctly achieved.
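A minimal rendering of this non-blocking rendezvous as a rewrite rule could be the following (all message constructors and the rule name are hypothetical; they only illustrate how the IDP pattern can wait for both addresses without suspending either producer):

```
mod IDP-SKETCH is
  protecting STRING .
  sorts Msg Config .
  subsort Msg < Config .
  op none : -> Config [ctor] .
  op __ : Config Config -> Config [ctor assoc comm id: none] .
  *** messages produced independently by the hotel and flight processes
  ops hotelAddr airportAddr : String -> Msg [ctor] .
  op rentCarRequest : String String -> Msg [ctor] .
  vars H A : String .
  *** the rendezvous at t13: the car rental invocation fires only once
  *** both addresses are available in the configuration
  rl [t13-rendezvous] : hotelAddr(H) airportAddr(A) => rentCarRequest(H, A) .
endm
```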
On the other hand, to avoid deadlocks that can occur in case of direct dependency between services, we use the concept of semaphore (DDP pattern).
Direct dependency between services involves the interruption of one or more processes until synchronisation; the dependent process is stopped by the controller until the other processes reach the breakpoint. The controller reports this breakpoint with a semaphore to inform all the concerned entities.
The hotel room booking service can have a direct dependency on the flight ticket service: the flight ticket can be booked without constraint, while the arrival date is necessary to book the hotel room. It is the Mop-ECATNet control layer’s task to suspend the hotel service process until the arrival date is known.
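In the same illustrative style, the DDP semaphore can be sketched with two rules: one by which the controller raises the semaphore once the flight is confirmed, and the only rule able to resume the blocked hotel process (again, every name here is an assumption made for the sketch):

```
mod DDP-SKETCH is
  sorts Msg Config .
  subsort Msg < Config .
  op none : -> Config [ctor] .
  op __ : Config Config -> Config [ctor assoc comm id: none] .
  ops flightConfirmed flightBooked hotelRequest hotelBooked : -> Msg [ctor] .
  *** the semaphore raised by the controller at the breakpoint
  op sem : -> Msg [ctor] .
  *** once the flight (and hence the arrival date) is confirmed,
  *** the controller raises the semaphore
  rl [raise-sem]    : flightConfirmed => flightBooked sem .
  *** the hotel process stays suspended: its only rule requires sem
  rl [resume-hotel] : hotelRequest sem => hotelBooked .
endm
```

Starting from the configuration `hotelRequest flightConfirmed`, the hotel booking can only complete after the semaphore has been produced, which is exactly the blocking behaviour described above.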
## 6 Implementing Mop-ECATNet in Maude
This section shows how to automatically generate a Maude specification (the target model) from a Mop-ECATNet model (the source model), using an MDA transformation (Brown, 2004). This is achieved by designing a support tool that interprets a given set of transformation rules.
Figure 5 presents a global view of our transformation approach and describes its different abstraction levels: Metamodel level, model level, transformation level and source code level. The transformation process produces Maude modules from the specifications contained in the Mop-ECATNet model. The transformation rules are defined upon the two designed Metamodels; the source and target models, which conform to their respective Metamodels, constitute respectively the input and the output of the transformation process. Our proposed transformation is exogenous and horizontal, since the source and target models have the same abstraction level according to the classification presented by Mens and Van Gorp (2006). We implemented our transformation process with the ATL (Jouault et al., 2008) and ACCELEO (Musset et al., 2006) tools. The ATLAS Transformation Language (ATL) is a hybrid model transformation language developed as part of the ATLAS Model Management Architecture; it is supported by a set of development tools built on top of the Eclipse environment: a compiler, a virtual machine, an editor and a debugger.
**Figure 5** Mop-ECATNet2Maude transformation approach (see online version for colours)
An ATL transformation program is composed of rules that define how source model elements are matched and navigated to create and initialise the elements of the target models. ACCELEO, on the other hand, is an open-source code generator from the Eclipse Foundation that supports a model-driven approach to model-to-text transformation. Indeed, the ATL transformation generates only models, and ACCELEO continues the automatic transformation by producing the textual code associated with the obtained (target) model.
### 6.1 Metamodels
#### 6.1.1 Source metamodel
Figure 6 shows the abstract metamodel defined for representing any Mop-ECATNet. We identify in our proposed metamodel the following classes:
- **Meta-level classes**: Meta-Place, Meta-Transition, Meta-Edge, Meta-Control-Edge, and Meta-Token.
- **Lower-level classes**: DPlace, DTransition, DEdge, Interface Place, and Token.
**Figure 6** The Mop-ECATNet metamodel (see online version for colours)
#### 6.1.2 Target metamodel
We were inspired by the Maude Metamodel published in Rivera et al. (2008), in which all Maude concepts are represented (Figure 7). For our transformation approach, we use the following Maude classes: FModule, SModule, Sort, Statement, Equation, Condition, Operation, Rule and Term.
Figure 7 Maude metamodel (see online version for colours)
We note that the proposed source and target Metamodels are used as a syntactic reference for the introduced and the resulting models.
### 6.2 Transformation rules
In this section, we establish the set of correspondences between the two Metamodels respecting the syntax dictated by the ATL tool.
Table 5 enumerates some rules describing the correspondence between Mop-ECATNets and Maude Metamodel elements.
Table 5 Some transformation rules
<table>
<thead>
<tr>
<th>Rules</th>
<th>Mop-ECATNet metamodel</th>
<th>Maude-metamodel</th>
</tr>
</thead>
<tbody>
<tr>
<td>Place2Module</td>
<td>Place</td>
<td>Module Place</td>
</tr>
<tr>
<td></td>
<td>Name</td>
<td>Supersort :Place</td>
</tr>
<tr>
<td></td>
<td>Sort</td>
<td>Arity:String</td>
</tr>
<tr>
<td></td>
<td>Capacity</td>
<td>Coarity:Place</td>
</tr>
<tr>
<td>Transition2Module</td>
<td>Transition</td>
<td>Module Transition</td>
</tr>
<tr>
<td></td>
<td>Name</td>
<td>Supersort :Transition</td>
</tr>
<tr>
<td></td>
<td>TC</td>
<td>Arity:Transition</td>
</tr>
<tr>
<td></td>
<td></td>
<td>Coarity:Boolean</td>
</tr>
<tr>
<td></td>
<td></td>
<td>Opn:_</td>
</tr>
<tr>
<td>Edge2Module</td>
<td>Edge</td>
<td>Module Edge</td>
</tr>
<tr>
<td></td>
<td></td>
<td>Supersort:Edge</td>
</tr>
<tr>
<td>MetaTransition2Subsort</td>
<td>MetaTransition</td>
<td>Subsort of Transition</td>
</tr>
<tr>
<td>Meta Place 2Subsort</td>
<td>MetaPlace</td>
<td>Subsort of Place</td>
</tr>
</tbody>
</table>
For example, the rule Place2Module below transforms a Mop-ECATNet place into a functional Maude module.
rule Place2Module {
  from
    Source : MopECATNetMM!Place          -- source element matched in the Mop-ECATNet model
  to
    Target : MaudeMM!ModulePlace (       -- target element created in the Maude model
      Name <- Source.Name                -- the module name is initialised from the place name
    )
}
It is composed of two mandatory parts (the from and the to sections) and is identified by a unique name (Place2Module).
This rule specifies the source model elements that must be matched (Place), the number and types of the generated target model elements (Module Place), and the way these target elements must be initialised from the matched source elements (Name). Thus, we define, for each syntactic element of the Mop-ECATNet model, its Maude-based semantics (see Table 5).
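For instance, applying Place2Module to a place named P1 could yield a functional module along the following lines; the exact shape of the generated code depends on the Acceleo template, so this fragment is only an assumed rendering consistent with the attribute mapping of Table 5 (the capacity value is invented):

```
fmod MODULE-P1 is
  protecting NAT .
  sorts Place P1 .
  *** "Supersort : Place" from Table 5: the place-specific sort sits below Place
  subsort P1 < Place .
  *** the place itself, generated from the Name attribute of the source model
  op P1 : -> P1 [ctor] .
  *** capacity operation generated from the Capacity attribute (value assumed)
  op capacity : P1 -> Nat .
  eq capacity(P1) = 10 .
endfm
```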
### 6.3 Mop-ECATNet2Maude tool application example
In order to illustrate our proposal, we reconsider the Travel Agency service (Figure 4).
To obtain the Maude specification of the service via the transformation approach, we create the Airline Web service source model from the Mop-ECATNet Metamodel (Mop.ecore). After building the Metamodel plugins Mop.ecore and Maude.ecore and providing all the corresponding transformation rules, we generate, via the ACCELEO tool, the output model as a Maude file.
In order to achieve a Maude textual specification from the graphical representation of the Travel Agency composition model, we proceed step by step, as follows:
- We give first, a sketch of the Travel Agency model based on the predefined structures in Table 1 and we get the abstract view of the model (Figure 4).
- Once the structure is valid, we use Tables 2 and 3 to define services deployment strategies (where we identify transactions, control points, patterns that may be used, etc.).
- We perform the first (model-to-model) transformation using the ATL tool.
For that, we need to provide:
- The Mop-ECATNet source Metamodel (Figure 6).
- The Maude target Metamodel (Figure 7).
- The Travel Agency source Model (Figure 3) in XMI which will be conform to the Mop-ECATNet Metamodel.
- The transformation model Mop-ECATNet2Maude.
The resulting XMI model is preserved for the next step of the transformation process.
- To obtain a Maude textual specification (in rewriting logic), we have to provide a Maude template and a new set of transformation rules according to the ACCELEO tool features.
Finally, the resulting Maude textual specification (see Figure 8) may be directly executed and checked via the existing tools of the Maude system.
Figure 8 The Maude part code of the Travel Agency model
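By way of illustration, assuming the generated specification contains a system module for the Travel Agency with a constant init denoting the initial configuration and a constant failed marking a failure state (all of these names are assumptions, not taken from Figure 8), it could be exercised with standard Maude commands:

```
*** execute the composition from its initial configuration (bounded run)
rew [100] init .

*** look for one reachable configuration that contains the failure marker
search [1] init =>* failed C:Configuration .
```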
## 7 Conclusion
In this paper, we considered a kind of high-level Petri net for designing Web service composition models. Taking advantage of the powerful theoretical foundations of the Mop-ECATNet formalism, the proposed approach contributes to the development of modular, flexible and reconfigurable service-oriented systems.
We enhanced and refined the model at both the structural and behavioural levels. At the structural level, we provided new, more compact structures to reduce the complexity of the composition graph, introducing a new grammar that offers a more abstract view identifying, from the outset, the key components of the composition.
At the behavioural level, we have extended the model control layer by defining new control strategies and implementing reusable patterns that lead to a recursive composition.
We associated a Maude-based semantics with the proposed model by using a Meta-transformation technique; this allows the model to be run and formally checked automatically in the Maude environment.
The ATL and ACCELEO tools enabled this transformation (model-to-model and model-to-text) and allowed us to obtain a Maude textual specification automatically from the initial composition graph.
This transformation makes the specification task easier for users unfamiliar with the Maude language, since they can automatically obtain the source code from a graphical representation. It also saves time for experts, by allowing them to move directly to other phases, such as checking system properties using the Maude tools (e.g. the model-checker).
In future work, we aim to address other aspects (such as time constraints) and to demonstrate the wealth of the Maude environment through its analysis and checking tools.