Q76: I have not been able to determine what I am supposed to do with the LOE spreadsheet, please advise.
A76: There are two tabs. The first tab provides you with an example for how you will fill out the second tab. The second tab is labeled Template.
Q75: Is there a straightforward way to determine whether our submission would be a procurement contract or cooperative agreement? My university’s Contracts and Grants people were unsure and advised that I contact you. The proposal we are preparing is done in collaboration among researchers from four universities.
A75: The answer depends upon the proposed research. Generally speaking, the Government awards contracts to procure the research outcomes for its own direct benefit. If, on the other hand, the Government wants to support the research in order for it to benefit the general public, the award of an assistance instrument may be more appropriate. Please refer to Section II: Award Information. Proposals selected for award negotiation may result in a procurement contract, cooperative agreement, or Other Transaction (OT) depending upon the nature of the work proposed, the required degree of interaction between parties, and other factors. Grants will NOT be awarded under this program. In all cases, the Government contracting officer shall have sole discretion to select award instrument type, regardless of instrument type proposed, and to negotiate all instrument terms and conditions with selectees.
Q74: In regards to the cost proposal template, one of our university partners does not report and propose in terms of labor hours but rather in person months. Would it be acceptable if our university partner deviates from the template or is there an alternative format that can be used to accommodate proposing in labor months?
A74: The Prime or the University should try to convert the labor cost into hours and hourly rates for inclusion in the spreadsheet. They should also provide their original proposal developed using their typical methodology, along with the DARPA CMO Spreadsheet. The template is not mandatory, but it is the preferred cost submission tool to be submitted along with the Cost Volume.
Q73: I have a question on the bids: can a company be a sub on multiple bids / teams for TA1 to 3 and then be awarded twice?
A73: DARPA does not place any restrictions on team membership other than those set forth in the BAA. However, the answer depends on the proposed effort and whether it makes sense for it to occur under multiple awards. Duplication of effort in selected TA1, TA2, or TA3 proposals could be addressed through a partial selection to remove the duplicative effort, or in contract negotiations. As stated in the BAA, no one in TA4 can participate in any other TA.
Q72: If we pursue a “cost +” proposal what additional review and/or certification is required in terms of accounting compliance? If we pursue a “fixed price” proposal what additional review and/or certification is required in terms of accounting compliance?
A72: The Government can award a cost reimbursement contract to a performer that has a DCAA-approved cost accounting system. If you are requesting a cost reimbursement contract and have an existing DCAA audit stating your cost accounting system is approved, submit that as part of the Cost Volume. If you do not, submit (as addressed in the BAA) a completed Standard Form (SF) 1408 along with your proposal. Under a fixed price contract, the Government pays for the acceptance of a tangible deliverable or deliverables. If you propose a fixed price effort, identify in your Cost Volume the tangible deliverable(s), the delivery timeframe(s) and the dollar value price(s) for the deliverable(s). The Contracting Officer shall have sole discretion to select the award instrument type, regardless of the instrument type proposed, and to negotiate all instrument terms and conditions with selectees.
As of August 13, 2020
Q71: What kind of cross-layer optimizations are referred to by the BAA? Again, if you can give more concrete examples it will be helpful.
A71: Knowledge of higher-level domain abstractions and explicit component dependencies will help to overcome the performance reduction due to added layers of abstraction, while maintaining the system’s security guarantees. For example, in a DSL-based enhancement scenario, DSL code that composes with the rest of the system by being notionally interpreted by a virtual machine layer may be “smashed” to create compiled native cross-layer binary executable artifacts that integrate with the system directly, without the virtual machine abstraction. Higher-level domain abstractions and inferred specifications should be leveraged both to optimize these resulting executable artifacts and to prove their safe and secure composability with the system.
Q70: What is the point of distribution? Is it because of security guarantees (e.g., isolation) or just for performance reasons? Do we want to replace the hardware of part or all our machines in a distributed system? If you can give us concrete examples of the distribution you are looking for it will be great.
A70: V-SPELLS aims to address both security and performance enhancements, and calls for general and unified approaches to achieve this aim. V-SPELLS BAA discusses examples of use cases where distribution serves both security and performance purposes. There is a known need for legacy software understanding and hardware platform modernization. The BAA is not prescriptive in regard to any tools or approaches to be used, but emphasizes relevance and effectiveness for large legacy code bases. The BAA further emphasizes achieving the strongest assurance guarantees possible for legacy code bases.
Q69: What is the difference between the safe composability provided by TA2 and that provided by TA3? In particular, what additional proofs are needed after the TA2 solution has finished running and produced its output (and proof)?
A69: TA3 will leverage recovered higher-level domain abstractions and inferred specifications to optimize the resulting executable artifacts, prove their safe composability with the system after optimizations, and improve their security. For example, TA3 challenges include reconstructing the code to distribute it among the target platforms’ available processors, computing enclaves, nodes, or devices; safe composability will need to be assured as the distribution of legacy code is shifted to modern hardware or as its functions are partially offloaded to specialized hardware accelerators.
Q68: Do you have specific tools in mind that you would like the awardees to work with, or if there is latitude in the tools being chosen?
A68: The BAA is not prescriptive in regard to any tools or approaches to be used, but emphasizes relevance and effectiveness for large legacy code bases. The BAA further emphasizes achieving the strongest assurance guarantees possible for legacy code bases.
Q67: The V-SPELLS program schedule provided in Figure 2 on page 18 of the BAA doesn’t seem to match the travel detailed in the text. To ensure that we’re budgeting for the correct travel requirements, can you please confirm the following is the correct required travel for TA1-3 performers? Phase 1: kickoff meeting, three PI meetings, and an end-of-phase evaluation/challenge exercise. Phase 2: three PI meetings and an end-of-phase evaluation/challenge exercise. Phase 3: two PI meetings and an end-of-phase evaluation/challenge exercise.
A67: For each year of the effort, there will be quarterly meetings with the Program Manager (PM), consisting of two site visits and two Principal Investigator (PI) meetings. Quarterly meetings will include demonstrations of the V-SPELLS technology to determine maturity of the technology. Evaluation/challenge exercises will be held at the TA4 performer’s site following a PI meeting to reduce travel cost. The kick-off meeting is the first meeting that kicks off the V-SPELLS program and is considered a quarterly meeting. The proposer would include any other travel they deem necessary (e.g., subcontractor visits, etc.) to accomplish their solution.
Q66: I was wondering whether there is any rule against the same person being in multiple teams. (Note: the teams would be fine with it, and the effort would be suitably scoped.)
A66: No one in TA4 can participate in any other TA. DARPA does not place any restrictions on team membership other than those set forth in the BAA.
Q65: Figure 2 (V-SPELLS Tentative Program Evaluation Schedule) on page 18 is unclear on the target dates for the demonstration meetings and evaluation exercises. Can you provide actual month numbers (or number of months after program start) for each of these expected meetings/exercises? Also, the text (page 18) refers to kickoff meetings for each phase, but these are not on the schedule; should we assume a separate meeting for the kickoffs beyond the ones on the graphic?
A65: Phase 1 and Phase 2 goals should be achieved in 18 months and Phase 3 goals should be achieved in 12 months. Dates will be determined from the project start date. Kick-Off meetings should be scheduled within the first 2 weeks of each phase and there are quarterly meetings in each phase. Quarterly meetings will include demonstrations of the V-SPELLS technology to determine maturity of the technology. Evaluation/challenge exercises will be held at the TA4 performer’s site following a PI meeting to reduce travel cost. You should budget for four meetings per year.
Q64: Which language(s) the legacy code is written currently? Conversion expected for large legacy codes - this will be in which language (framework for conversion can be created)? Expected solution will run on which platform? What is meant by 70-80% accuracy?
A64: 1) Strong proposals would consider languages and platforms representative of the DoD code base. Addressing C/C++ and embedded systems is envisioned. Strong TA1 proposals would aim to provide the strongest assurance possible for a variety of legacy code bases. 2) The BAA and the slides provide examples of the domains where DSL successes were achieved. No specific technology is prescribed by the BAA. The V-SPELLS slides and FAQ will be on the DARPA Opportunities Page. 3) The goal of Phase 1 is 70-80% efficiency of the flattened DSL, relative to the initial state of the system. This metric sets the Phase 1 goal for allowable performance loss of the DSL-derived replacement code compared to the respective parts of the legacy code.
As of August 3, 2020
Q63: The contracting presentation said that combined proposals must address all TA1, TA2, and TA3. Sergey said any combination of the three.
A63: All combinations of TA1, TA2, and TA3 are allowed per the BAA. Each single-TA proposal is limited to 30 pages; proposals that combine two or three TAs are limited to 50 pages. See BAA HR001120S0058 Amendment 1.
Q62: Is there a way to get access to any documentation or recordings of the event, and does not attending the Proposers Day event preclude a vendor from proposing on this acquisition? Also, has DARPA determined an acquisition strategy for this solicitation yet? BAA, OTA, or other?
A62: You are welcome to submit to this opportunity; attendance at the Proposers Day was not required. Please see the following BAA link:
https://beta.sam.gov/opp/7dc5798bf5e74d8aa3df767edd3e0815/view#general
There will be updates posted, including answers to FAQs and slides from the Proposers Day. All documents associated with the opportunity are posted to the DARPA opportunities page at the following link: http://www.darpa.mil/work-with-us/opportunities?tFilter=&oFilter=3&sort=date
Q61: Where is the BAA posted?
A61: The BAA is posted at the following link:
https://beta.sam.gov/opp/7dc5798bf5e74d8aa3df767edd3e0815/view#general
Q60: Please provide details on the item missing from the original V-SPELLS BAA (deliverables) that was included in the updated Addendum.
A60: Phased and Final Technical Reporting – Phase 1 and Phase 2 reports are due at the end of their respective phases. The reports, due at phase or contract completion, will concisely summarize the effort conducted and provide any lessons learned during the development of the V-SPELLS technology.
Q59: Are sub-contractors permitted to participate in multiple proposals (excluding for TA4)?
A59: Yes.
Q58: Is there any constraint on DSLs, such as open source, freeware or in-house DSLs? Can the iterative process go from TA1 to TA2 to TA3 and come back to TA1 to be repeated again?
A58: The BAA encourages open source technologies but is not prescriptive with respect to particular DSLs to be used or created. Close and iterative collaboration among TA1, TA2, and TA3 is envisioned; see also A44.
Q57: Might it be the case that multiple DSLs arise from a single target system in order to support composition of different aspects of the system? For example, lifting into one DSL to control the network behavior of a component while lifting a separate DSL to control physical pumps over General Purpose Input Output (GPIO)?
A57: Yes. Safe composition of multiple DSLs is envisioned among the program challenges.
Q56: Is hardware provided if the program involves hardware?
A56: The TA4 performer is expected to curate and provide hardware or access to hardware for evaluation when necessary. Please also see A32.
Q55: Can you elaborate on the “automated, iterative interactive” aspects of TA1 technology—automated and interactive are kind of at odds with each other and could you explain their interplay/level of acceptable manual interaction?
A55: It is expected that substantial and novel automation will enable human domain-expert developers engaged in replacement or enhancement of legacy systems to achieve the goals of the BAA. The BAA recognizes that practical program understanding requires and starts from domain expert knowledge, and that automation leverages this knowledge interactively and iteratively.
Q54: Is the legacy code base provided with instructions on how to compile and run? Is there a case where only a binary is provided, without source code?
A54: Yes. Please see A26 and A27, and also A23.
Q53: Is the choice of domains for DSL’s targeted by the program driven by expertise from TA1-3, the TA4 integrator, or the transition targets?
A53: Please see A20.
Q52: Is development of next-generation ABIs predominantly the province of TA3, or is it a joint effort of TA1-3?
A52: It is expected that TA3 will lead this development, in close collaboration with TA2.
Q51: How do we collaborate between TAs, for instance TA1 and TA2? How do we know what DSL TA1 uses to start the work on TA2?
A51: Please see A38 and A18.
Q50: To what extent is it desired for hardware models to be incorporated into the operational semantics of extracted DSLs?
A50: Strong proposals would consider exploration of hardware interfaces to validate their models.
Q49: Is a mix of different programming languages expected?
A49: Please see A35.
Q48: As an add-on: was DevSecOps looked at as being an insufficient process for this DoD legacy code issue, and would it be considered a starting point, or is this considered a completely different task area?
A48: The BAA makes no such implication. Please see A43.
Q47: What is meant by a virtual machine extracted from low-level operations?
A47: “Virtual machine” is a technical metaphor for domain-specific operational semantics.
Q46: Do you anticipate that every legacy program that goes through the V-SPELLS tool chain may give rise to a new DSL, capturing the “domain” of that program? Or would a smaller number of DSLs applicable to wider domains be appropriate?
A46: The appropriate number of DSLs depends on the proposed solution and how well it achieves the goals of the V-SPELLS program. DSLs suitable for categories of legacy systems would strengthen the proposal if they increase its practicality.
Q45: One of the metrics highlighted is “initial memory load reduction”. What does this refer to?
A45: Initial memory load reduction refers to the size of the executable code in the memory footprint.
Q44: Are there interactive iterative processes between TAs or internal to each TA?
A44: Both are envisioned and addressed in the BAA.
Q43: Much of this program seems to incorporate the principles of DevSecOps as outlined by the DoD. How does this differ from that?
A43: The program seeks to provide theoretical foundation and practical tools for effective maintenance and enhancement of legacy software. As such, the program shares some goals with DevSecOps and aims to contribute to these goals.
Q42: How will the correctness of TA1 produced DSL code be measured? Proof of equivalence to original code? Testing?
A42: Strong TA1 proposals would aim to provide the strongest assurance possible for legacy code bases.
Q41: What about conflicts between correctness and compatibility? (i.e., when there was a bug in the original code, the derived DSL (after improvement to ensure correctness) does not have that bug and the corrected behavior is incompatible with another module that was (implicitly) expecting the incorrect behavior?)
A41: It is expected that practical solutions would consider allowing the developer to account for bug compatibility where it matters to the BAA goals of assured replacement or enhancement of legacy components.
Q40: Does this require formal methods-based enhancement of genetic programming, e.g., constraints on what mutations and recombinations are allowed because they preserve correctness and/or compatibility?
A40: See A39.
Q39: With regards to the technical definition of “construction”: Can “correct by construction” and/or “compatible by construction” be addressed using evolutionary computation as a means of construction, as in the “genetic improvement” software engineering (testing, bug-fixing, performance optimization, etc.) application of genetic programming?
A39: Any means of construction for which resulting assurance guarantees can be verified are in scope.
Q38: Is the definition (syntax, semantics, and verification systems) of the DSLs part of TA1 or TA2? The BAA says "TA1's analysis tools are required to provide the extracted domain model and domain-tuned structures, as well as, the architectural information recovered from the legacy code base, to TA2's reasoning about the component specifications and DSL code enhancements, as well as, for TA2's compatibility and compositional safety analysis of the new DSL code" which suggests that the actual DSL definition comes from TA2.
A38: TA1 and TA2 are expected to closely collaborate on defining, deriving, and inferring the appropriate DSLs and semantic models to achieve the goals of the respective TAs, such as automated, interactive, iterative program understanding and compositional DSL programming.
Q37: Can you give examples of “domain virtual machines?”
A37: This term is used to mean well-structured implementations of operational semantics for domain-specific data types and structures.
Q36: Are you envisioning a pragmatic DSL (e.g., Java-based) or a formal DSL (e.g., Algebra-based)?
A36: The BAA is not prescriptive in regard to DSLs and approaches to be used, but emphasizes relevance and effectiveness for large legacy code bases. The BAA further emphasizes achieving the strongest assurance guarantees possible for legacy code bases.
Q35: Although the “legacy code” term is understood intuitively, is it expected/advised to focus on specific programming languages?
A35: Strong proposals would consider languages relevant to DoD legacy code bases. Addressing C/C++ is envisioned.
Q34: Could you clarify what you mean by “hook system”?
A34: This term is used to refer to designs and mechanisms for composing newly developed code with a pre-existing system. The BAA emphasizes safety of such composition.
Q33: Do you expect to see formal proofs of correctness for the TA3 generated components?
A33: Strong proposals would consider providing the strongest assurance guarantees possible for legacy systems in practice.
Q32: For evaluation platforms and test beds, will TA4 be responsible for furnishing hardware to the rest of the team, or will DARPA provide assistance?
A32: Strong TA4 proposals would consider covering the full scope of evaluation activities, including curating and providing hardware where necessary.
Q31: How close should the DSL abstraction be to the implementation of the legacy software? Is a behavioral abstraction reasonable?
A31: The BAA emphasizes effectiveness of practical component replacement or enhancement in a large code base. It is not prescriptive of particular abstractions to be leveraged.
Q30: How generic must the DSL extraction tool be with respect to domains? Is it acceptable for the tool to support creating DSLs only for a set of domains chosen a priori? E.g., would a tool be acceptable if it is very good at extracting a "networking" DSL from C code that manipulates network packet fields, but not good at extracting a DSL from arbitrary C code. Or, must the tool strictly extract a full DSL from any code?
A30: Strong proposals would consider methods that apply across multiple domains of interest, including those mentioned in the BAA, and that are practical for large code bases. It is, however, understood that a single universal methodology suitable for arbitrary C code may not be possible.
Q29: Is it reasonable to expect that the system will be runnable and/or that test cases (or inputs) would be available?
A29: Yes. However, performers should not assume that any available tests will be exhaustive or functionally complete.
Q28: Can foreign research institutions participate as subcontractors? Will TA4 evaluation (and possible inclusion of DoD systems) preclude foreign organizations from participating in the program?
A28: Foreign researchers may participate, subject to relevant laws and regulations. Please refer to the Contract Management Office briefing slides. Evaluation challenges prepared by TA4 for TA1, TA2, and TA3 performers will include no Controlled Technical Information (CTI). However, the TA4 performer will be expected to have suitable access to DoD systems to facilitate transition, and will include appropriately cleared personnel, as described in the BAA.
Q27: Is it correct to assume that for legacy code, tool settings (i.e. compiler settings, etc.) are known? Will sample legacy code and settings be provided to performers?
A27: Proposers may assume that the build process is available. Evaluation challenges will provide the code and build process.
Q26: Can TA1 expect to have compilable source code and/or runnable binaries for analysis? Is TA1 expected to be a strictly static analysis, or is the use of dynamic analysis in scope?
A26: Yes, proposers may assume that compilable source code is available, with the possible exception of small well-specified enclaves that are available for interaction. Dynamic analysis is in scope.
Q25: For TA3 packaging proofs with ABIs, does a security consideration come into play? I can imagine that it is possible to attach proofs with linkers, but these may be easily hacked and tampered with. So, would we have to devise strategies such that the proofs cannot be hacked?
A25: Strong proposals would consider practical security considerations for the novel ABI extensions they develop.
Q24: The BAA references cyber-physical systems, for such systems some constraints (such as an operating safety envelope) may not be directly reflected in software. Should solutions attempt to infer such constraints, or is this expected to be contributed directly through interaction with domain experts?
A24: Strong proposals would consider both of these cases.
Q23: Would it be appropriate to consider legacy software settings for TA1 where sources (or components thereof) are not available?
A23: Yes. However, proposers may assume that source code is predominantly available.
Q22: How much human assistance (from the developer) is acceptable for the DSL extraction tool? For example, an application that both manipulates network packets and draws shapes on a screen, may warrant extracting two different DSLs from respective parts of the codebase: Is it acceptable for the human to decide the subsets of the code from which to generate DSLs?
A22: Strong proposals would consider practical aspects of analyzing legacy source code bases. Some human expert interactive participation is expected.
Q21: Should we address aspects related to multicore platforms and related issues like dynamic allocation of memory, resources shared between CPUs, etc.?
A21: All of these considerations are in scope.
Q20: Do you envision the choice of domains for DSLs developed by TA1 (and supported by TA2-3) driven by proposals from TA1-3, by the TA4 evaluator, or by transition use cases?
A20: Strong proposals would consider techniques that will apply across multiple domains, including those mentioned in the BAA. Successful TA1, TA2, and TA3 performers will be able to effectively address challenges curated by TA4 and their performance will be evaluated on these challenges. Strong proposals would aim for effective transition.
Q19: Would you clarify the word "domain"? It is very broad. Are there specific ones that we should focus on?
A19: The BAA uses the word “domain” in the sense that it is commonly used in programming languages research when referring to “domain specific languages”. For example, it is assumed that there exist sets of non-trivial data types and structures associated with the domain and sets of well-defined operations on these types, which can be described formally and unambiguously. The BAA mentions several examples of domains of interest.
Q18: I understood that TA2 will be performing program analysis over code written in DSLs recovered by TA1, where the DSL is presumably ultimately specified by some sort of grammar and operational semantics, and in turn, that DSL grammar and operational semantics will itself presumably be recorded in an agreed-upon DSL specification language. Will all the TA1 performers be expected to recover DSLs in the same DSL specification format, so that all TA2 solutions are compatible with all TA1 solutions? And if so, who will be responsible for creating that cross-TA interchange language for defining recovered DSLs?
A18: TA1 performers are expected to closely collaborate with TA2 performers on the means and methods of iterative program understanding, including the construction of DSLs and semantic models. The responsibility for creating the means of relevant information interchange will be shared, and strong proposals would include technical plans for collaboration between TAs.
Q17: Does the DSL extracted (for TA1) need to be complete in some sense, or does it only need to be sufficient to allow the existing legacy software to be programmed?
A17: The DSL need not conform to any theoretical idea of completeness, as long as it is sufficient to address the BAA goals, such as safe composable enhancement.
Q16: For TA3, are the following approaches in scope: middleware, or approaches requiring recompilation after a deployment change (configuration files à la OIL for AUTOSAR)? Or is the scope strictly ABIs and dynamic linking/loading?
A16: All approaches relevant to the BAA goals are in scope.
Q15: Can it be safely assumed that what is called here "legacy code" is code that does not carry along the source-language (compiled) compiler as a runtime-available function for on-the-fly synthesized code compilation?
A15: Yes.
Q14: Is deploying the code on hardware in scope? If so, is there a set of hardware the proposers should aim for? Is it sufficient to have the code built for different OSs, or should we target multiple types of hardware such as GPUs, ASICs, etc.?
A14: Hardware-supported approaches are in scope; however, TA1-TA3 performers are not expected to provide any custom hardware as a part of their approach.
Q13: From my understanding, DSLs are automatically derived from legacy code. Is this expected to be a supervised, semi-supervised, or unsupervised process? Are DSLs expected to be unique to each legacy system or can/should they target categories of legacy systems?
A13: Strong proposals would consider practical aspects of analyzing legacy source code bases. Some interactive participation from human experts is expected. DSLs suitable for categories of legacy systems would strengthen the proposal if they increase its practicality.
Q12: Are there specific languages we should expect to be able to handle for the legacy code? Are there languages that are excluded for legacy code?
A12: Strong proposals would consider languages relevant to the DoD legacy code bases. Addressing C/C++ is envisioned.
Q11: Are security enhancements to legacy code also in scope? E.g., hardening software code against hardware attacks such as fault injection?
A11: Yes, it is in scope under the goals of the BAA. However, strong proposals would aim to address the broad range of enhancements outlined in the BAA, rather than focus on security enhancements narrowly targeting any particular threat.
Q10: Does TA1 need to take a language-agnostic approach? Should performers select the languages their system will work with? Are any specific source languages prioritized?
A10: Strong proposals would consider languages relevant to the DoD legacy code bases. Addressing C/C++ is envisioned.
Q9: Are there specific DSLs the program is targeting?
A9: The BAA is not prescriptive with respect to specific DSLs or DSL technologies.
Q8: In terms of legacy code targeted, (1) should we be targeting both source code and binaries? (2) Are we free to pick our internal legacy code as a use case to demonstrate the system?
A8: Proposers may presume that the source code is available, with the exception of small opaque enclaves that are well-specified and available for interaction. The BAA emphasizes the use of open source code bases to demonstrate the effectiveness of the proposed approach, but any additional discussion of internal legacy code is allowed.
Q7: Could you provide a couple of examples of the "successful industry DSLs" that Sergey mentioned in his presentation?
A7: The BAA and the slides provide examples of the domains where DSL successes were achieved. No specific technology is prescribed by the BAA. The V-SPELLS slides and FAQ will be on the DARPA Opportunities Page.
Q6: Is it envisioned that the performers will need to produce verified code? Or, is it sufficient to produce verified composition and performance/security? As alluded to, generating verifiable code for legacy software may be impossible due to the absence of specifications.
A6: Strong proposals would aim to provide the strongest assurance possible for legacy code bases.
Q5: Sergey mentioned side channel protection as part of his presentation. Under what TA should this be? Also, I'm assuming that we are mining for automated protection as opposed to manual engineering. Is this correct?
A5: The BAA makes no specific emphasis on side channel protection, but strong proposals may want to address enhancement scenarios in which parts of the software are distributed to separate computing nodes or hardware enclaves, among other scenarios described in the BAA.
Q4: Will the existing software from which TA1 will be extracting DSLs be in some specific programming language (e.g., Ada) or in several languages (e.g., COBOL, Ada, C++)?
A4: Strong proposals would consider languages representative of the DoD code base. Addressing C/C++ is envisioned. Strong TA1 proposals would aim to provide the strongest assurance possible for legacy code bases.
Q3: How much of TA3 is expected to be formal? Do you envision that TA1 and TA2 produce Coq theories and TA3 will be guided by those theories?
A3: Strong TA3 proposals would consider technologies that offer strong assurance guarantees. The BAA is not prescriptive with respect to any particular theories or tools. Strong proposals would aim to provide the strongest assurance possible for legacy code bases.
Q2: The metrics seem to be for the program. Are there specific metrics for each TA?
A2: The metrics are all outlined in the BAA’s Table 1. Strong proposals would offer additional metrics, specific to the TA(s) that they address.
Q1: Are software construction techniques such as Evolutionary Programming in scope?
A1: All software construction methods are in scope, so long as they provide strong assurance guarantees.
A Middleware for the Deployment of AmI Spaces
Diego López de Ipiña¹, Iñaki Vázquez¹, Daniel García¹, Javier Fernández¹, and Iván García¹
¹University of Deusto, Faculty of Engineering, Avda. Universidades 28, 48007 Bilbao, Spain
{dipina, ivazquez}@eside.deusto.es, {dgarcia, jafernan, ivgarcia}@ctme.deusto.es
Abstract. The latest mobile devices are offering more multimedia features, better communication capabilities (Bluetooth, Wi-Fi, GPRS/UMTS) and are more easily programmable than ever before. So far, those devices have been used mainly for communication, entertainment, and as electronic assistants. A radically different application domain for them may be represented by Ambient Intelligence (AmI), where mobile devices can be used as intermediaries between us and our surroundings. This paper proposes a middleware with a two-fold objective: (1) to simplify the creation and deployment of physical spaces hosting smart objects and (2) to transform mobile devices into universal remote controllers of those objects.
1 Introduction
Ambient Intelligence (AmI) [1] defines an interaction model between us and a context-aware environment, which adapts its behaviour intelligently to our preferences and habits, so that our life is facilitated and enhanced.
Current PDAs and mobile phones are equipped with interesting processing and storage capabilities, varied communications mechanisms (Bluetooth [2], Wi-Fi, GPRS/UMTS) and increasingly capable multimedia capture and playback facilities. Moreover, they are far more easily programmable (Compact.NET [3], J2ME [4] or Symbian [5]), i.e. extensible, than ever before.
Mobile devices equipped with Bluetooth, built-in cameras, barcode or RFID readers can be considered sentient devices [6], since they are aware of what smart objects are within an AmI space. By an AmI space or environment we understand a location, either indoors or outdoors, where the objects present within it (smart objects) are augmented with computing services. A smart object is an everyday object (door, classroom, parking booth) or a device augmented with some accessible computational service. Once a mobile device discovers a nearby smart object, it can operate over it.
We deem that mobile devices will play a key role within AmI, since they are always with us and can act as facilitators or intermediaries between us and the environment. In other words, mobile devices can act as our personal electronic butlers, facilitating and enhancing our daily activities, and even acting on our behalf based on our profiles or preferences.
In this paper, we describe the design and implementation of EMI²lets, a middleware to facilitate the development and deployment of mobile context-aware applications for AmI spaces. It provides the software infrastructure to (1) convert physical environments into AmI spaces and (2) transform mobile devices into remote controllers of the smart objects in those spaces.
2 EMI²: an AmI Architecture
Despite the continuous progress achieved in all the related research topics which contribute to the AmI vision, we are still far away from its materialisation. A good starting point to address this may be the definition of suitable software architectures and frameworks specially catered for AmI. The EMI² (Environment to Mobile Intelligent Interaction) architecture is our proposed solution.
EMI² defines a multi-agent software architecture, where agents of different types, modelling the different roles played by entities in AmI, communicate and cooperate to fulfil a common goal, i.e. to enhance and facilitate the user interactions with her AmI space. For instance, a cinema may be enhanced with a ticket booking service accessible from a Bluetooth mobile phone, thus saving the user from queuing to purchase tickets. Similarly, the door of our office may be augmented with an access control service which requires the user to enter a PIN on her mobile to be granted access.

Fig. 1 portrays the main components of the EMI² architecture. We distinguish three main types of agents:
- **EMI²Proxy**: an agent representing the user, which runs on the user’s mobile device (PDA or mobile phone). It acts on behalf of the user, adapting/controlling the environment for him, either explicitly, under the user’s control, or implicitly, on its own judgement based on profiles, preferences and previous interactions.
- **EMI²Object**: an agent representing any device or physical object (vending machine, door, ticket box) within a smart environment augmented with computational services, i.e. the capacity to adapt its behaviour based on ambient conditions or user commands. An EMI²Object cooperates with other EMI² agents.
- **EMI²BehaviourRepository**: an agent where knowledge and intelligence are combined to support sensible adaptation. EMI²Objects may require the assistance of an external EMI²BehaviourRepository to coordinate their own adaptation according to the user’s preferences, behaviour patterns or even explicit commands received from an **EMI²Proxy**. The user’s mobile device can also be powered with an internal EMI²BehaviourRepository.
2.1 Active and Passive Mechanisms
A concrete agent can influence the environment, and thus its constituent agents’ state, via **active** (explicit interaction) or **passive** (implicit interaction) methods.
Active methods are those in which the agent explicitly commands other agents to change their state or perform an action. For example, when a user enters a building, a sensor identifies him and commands the lift to be ready at the ground floor. When the user stands by his office door, his mobile phone commands the electric lock to open. Active methods can be implemented with any distributed computing technology capable of issuing commands, which will be transported in a local context by bearers such as Bluetooth or Wi-Fi, and in a global context by GPRS/UMTS.
Passive methods are those in which an agent disseminates certain information (profiles, preferences), expecting that other agents change their state or perform an action at their discretion to create a better-adapted environment. Using passive methods, an agent does not command the target agents to do anything concrete; it simply publishes/broadcasts information (e.g., its preferences), expecting the others to react by changing their state in a positive way. Passive mechanisms are less intrusive than active methods, but they are less predictable and significantly more complex.
2.2 Active Influence over EMI²Objects
In this paper we want to concentrate on the design and implementation of a middleware to provide universal active influence capabilities to our mobile devices over the surrounding smart objects in our environment.
The minimum features such a middleware has to provide are: (1) a mechanism to discover through ad-hoc or wireless networking the computing services exported by surrounding smart objects, and (2) a mechanism to interact with those discovered services, so that the objects they represent adapt to the user’s preferences and commands.
The current state of the art in discovery and interaction platforms falls into three categories [9]. Firstly, solutions in which discovery protocols are supported by mobile code, e.g. Jini [10]. After discovery, the service (either a proxy or the full service) is downloaded onto the mobile device where it then operates. Secondly, solutions where the discovery protocols are integrated with specific interaction protocols, which are used to invoke the service after the service has been discovered. A good example of this is Universal Plug and Play (UPnP) [11]. Finally, there are interaction-independent discovery protocols such as the Service Location Protocol [12].
In what follows, we explain the design and implementation of an AmI-enabling middleware which addresses the service discovery and interaction aspects required for active influence (explicit invocation) on EMI²Objects.
3 The EMI²lets Platform
EMI²lets is the result of mapping the EMI² architecture into a software development platform to enable AmI scenarios. This platform is specially suited to active interaction mechanisms. However, it has been designed so that passive mechanisms may be incorporated in the future.
EMI²lets is a development platform for AmI which addresses the intelligent discovery and interaction among EMI²Objects and EMI²Proxies. EMI²lets follows a Jini-like mechanism by which, once a service is discovered, a proxy of it (an EMI²let) is downloaded into the user's device (EMI²Proxy). An EMI²let is a mobile component transferred from a smart object to a nearby handheld device, which offers a graphical interface for the user to interact with that smart object.
The EMI²lets platform addresses three main aspects:
- **Mobility**: transparently to the user, the platform discovers all the services available as the user moves and selects the best possible mechanism to communicate with them. In other words, the EMI²lets platform ensures that an EMI²Proxy is always using the communication means with the best trade-off between performance and cost. For example, if Wi-Fi and Bluetooth are available, the former is chosen.
- **Interoperability**: the EMI²lets are agnostic of the target device type, e.g. a PC, a PDA or a mobile phone.
- **AmI**: the application domain that has driven the design of EMI²lets. The platform provides the infrastructure and software tools required to ease the development and deployment of mobile context-aware applications.
The objectives established for the design and implementation of the EMI²lets platform are:
- Transform mobile devices (mobile phones and PDAs) into universal remote controllers of the smart objects located within an AmI space.
- Enable both local (Bluetooth, Wi-Fi) and global (GPRS/UMTS) access to the smart objects in an AmI space, seamlessly adapting to the most suitable underlying communication mechanism.
- Develop middleware independent of any particular discovery or interaction mechanism: abstract the programmer from the several available discovery mechanisms (Bluetooth SDP or wireless UPnP discovery) and interaction mechanisms (RPC or publish/subscribe), and allow the middleware to easily adapt to newly emerging discovery (e.g. RFID identification) and interaction means.
- Make use of commonly available hardware and software features in mobile devices, without demanding the creation of proprietary hardware or software.
- Generate software representatives (proxies) of smart objects which can run on any platform, following a “write once, run in any device type” philosophy. For instance, the same EMI²let should be able to run on a mobile phone, a PDA or a PC.
3.1 The EMI²lets Vision
Fig. 2 shows a possible deployment of an EMI²let-aware environment. A group of devices running the EMI²let Player and hosting the EMI²let runtime can discover and interact with the software representatives (EMI²lets) of surrounding EMI²Objects. An EMI²Object may be equipped with enough hardware resources to host an EMI²let Server, or alternatively a group of EMI²lets associated to different EMI²Objects may all be hosted within an autonomous version of an EMI²let Server. The EMI²let Server acts as a repository of EMI²Objects. It publishes the services offered by the hosted EMI²Objects, transfers them on demand to the requesting EMI²let Players, and optionally acts as the running environment for the EMI²let server-side facets.

Some EMI²lets may directly communicate with their associated EMI²Objects in order to issue adaptation commands. However, often the specialised piece of software that needs to be developed is far too complex to be implemented on the embedded hardware with which a smart object is equipped. In those cases, it is more convenient to delegate the cumbersome computing tasks to the server-side (back-end) counterpart of an EMI²let. The EMI²let on the handheld device communicates with its server-side counterpart in the EMI²let Server by means of the EMI²Protocol. For example, a light-controlling EMI²let could communicate with its EMI²let server-side, which would issue X10 commands over the power line.
3.2 Internal Architecture
The EMI²lets platform consists of the following elements:
1. A programming framework defining a set of classes and rules that every EMI²let component must follow.
2. An integrated development environment, named EMI²let Designer, which simplifies the development of EMI²lets, both its client- and (optional) server-side.
3. A runtime environment installed on EMI²let-aware devices for executing downloaded code.
4. An EMI²let Player to discover, download, verify and control the execution life cycle of a downloaded EMI²let. A version of the player is available for each device type which may act as a host of EMI²lets, e.g. PDA, mobile phone or PC.
5. An EMI²let Server which acts as a repository of EMI²lets and as a running environment for EMI²let server-sides.
In order to achieve the design objectives previously listed, we have created the layered software architecture shown in Fig. 3. Programmers only deal with the first layer, the *EMI²let Abstract Programming Model API*, to develop the software counterparts of smart objects. This layer offers a set of generic interfaces (abstract classes) covering the main functional blocks of a mobile sentient application:
1. **Discovery** interface to undertake the search for available EMI²lets independently of the discovery mechanisms used underneath.
2. **Interaction** interface to issue commands over the services discovered.
3. **Presentation** interface to specify the graphical controls and events that represent the look and feel of an EMI²let.
4. **Persistency** interface to store EMI²let-related data in the target device.
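To make this abstract layer more concrete, here is a minimal C# sketch of what these four generic interfaces might look like. Apart from EMI2Panel, whose name appears later in this section, every name and signature below is an illustrative assumption rather than the published EMI²lets API:

```csharp
// Hypothetical sketch of the EMI²let Abstract Programming Model API.
using System.Collections.Generic;

// Placeholder for the paper's container control (see Section 3.2).
public class EMI2Panel
{
    private readonly List<object> controls = new List<object>();
    public void Add(object control) { controls.Add(control); }
}

// 1. Discovery: search for available EMI²lets independently of the
//    underlying mechanism (Bluetooth SDP, UPnP, ...).
public abstract class EMI2Discovery
{
    public abstract IList<string> DiscoverServices();
}

// 2. Interaction: issue commands over discovered services,
//    independently of the transport (RFCOMM, web service, ...).
public abstract class EMI2Interaction
{
    public abstract byte[] Invoke(string serviceId, string command, byte[] payload);
}

// 3. Presentation: render an EMI²let's abstract GUI with the target
//    device's native controls (button on a PDA, menu option on a phone).
public abstract class EMI2Presentation
{
    public abstract void Show(EMI2Panel root);
}

// 4. Persistency: store EMI²let-related data on the target device.
public abstract class EMI2Persistency
{
    public abstract void Store(string key, byte[] data);
    public abstract byte[] Load(string key);
}
```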
The *EMI²let Abstract-to-Concrete Mapping* layer translates the invocations over the generic interfaces to the appropriate available mechanisms both in the mobile device and the EMI²Objects in the environment. The discovery, interaction, presentation and persistency abstractions encapsulate the concrete discovery, interaction, presentation or persistency models used. They implement an API for performing service discovery and interaction, graphical interface generation and data persistence independent of the actual implementation in the target device.
On deployment, the code generated through these abstract interfaces is linked to the concrete implementations of the classes used, which are part of the EMI²let runtime in the target device.
In the process of associating a generic invocation to an actual one, the *EMI²let Abstract-to-Concrete Mapping* is responsible for selecting the actual mapping (or group of mappings) which best matches the invocation type. For example, if a downloaded EMI²let is installed on a device where both Bluetooth and GPRS communication are available, the abstract-to-concrete layer will have to choose one of those mechanisms to issue commands. Thus, if the mobile device is still within Bluetooth range of the EMI²let server-side, it will translate the invocation into an EMI²Protocol message transported over Bluetooth RFCOMM. Otherwise, it will invoke via GPRS the generic web service (with methods corresponding to the EMI²Protocol commands) implemented by every EMI²let server-side.
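A hypothetical shape for that generic web service, with methods mirroring the EMI²Protocol commands, is sketched below. The class, method and command names are assumptions, not the actual EMI²lets implementation (in the .NET of that era this would be an ASMX web service):

```csharp
// Hypothetical server-side endpoint mirroring EMI²Protocol commands.
using System.Web.Services;

public class EMI2letServerSideService : WebService
{
    // Receives a command from the client-side EMI²let over GPRS and
    // dispatches it to the hosted server-side facet (e.g. issuing
    // X10 commands over the power line).
    [WebMethod]
    public byte[] Invoke(string emiletUUID, string command, byte[] payload)
    {
        // ... dispatch to the EMI²let server-side facet ...
        return new byte[0];
    }
}
```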
Similarly, if a mobile device is Bluetooth and Wi-Fi capable, it will use both Bluetooth SDP and UPnP service discovery to concurrently search for smart objects in its surroundings.
With regard to the presentation abstraction, we have defined a minimum set of graphical controls with which the graphical interface of an EMI²let can be generated. Some examples of the classes defined are EMI2Panel, EMI2Button or EMI2TextBox. This enables us to create EMI²let graphical interfaces agnostic of the target mobile device. Thus, when a programmer creates an EMI2Button, it is translated into a button control on a PC or a PDA, but into a menu option on a mobile phone.
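As a usage illustration, the fragment below sketches how a door-control interface might be assembled from these controls. Only the class names EMI2Panel, EMI2Button and EMI2TextBox come from the paper; the constructors, the Add method, the Clicked event and the SendPin helper are assumptions:

```csharp
// Hypothetical fragment: assembling an EMI²let GUI from the abstract
// controls. Member signatures are assumed, not documented.
EMI2Panel panel = new EMI2Panel();
EMI2TextBox pinBox = new EMI2TextBox();
EMI2Button openButton = new EMI2Button("Open door");

// On a PC or PDA this renders as a push button; on a mobile phone
// the runtime maps the same control to a menu option.
openButton.Clicked += (sender, args) => SendPin(pinBox.Text);

panel.Add(pinBox);
panel.Add(openButton);
```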
The modus operandi of the plug-ins associated with any of the four functional mappings is governed by an XML configuration file, which states whether a plug-in may run concurrently with other plug-ins of the same type or in isolation. In the latter case, each plug-in is assigned a priority that determines which plug-in to select when several of them are available. We plan to establish a more sophisticated and flexible plug-in configuration model in due time.
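The paper does not reproduce the configuration file itself; a plausible shape for it, with invented element and attribute names, might be:

```xml
<configuration>
  <plugins type="interaction">
    <!-- isolated plug-ins: priority decides which one is selected -->
    <plugin name="bluetooth-rfcomm" concurrent="false" priority="1"/>
    <plugin name="gprs-webservice" concurrent="false" priority="2"/>
  </plugins>
  <plugins type="discovery">
    <!-- concurrent plug-ins: may run side by side -->
    <plugin name="bluetooth-sdp" concurrent="true"/>
    <plugin name="upnp" concurrent="true"/>
  </plugins>
</configuration>
```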
Both the Abstract-to-Concrete Mapping and Functional Mapping layers, together with their plug-ins, are linked to the arriving EMI²let inside an EMI²let Player.
3.3 Implementation
Reflection is paramount in the EMI²lets platform. It enables an EMI²let Player to verify that the code arriving as part of an EMI²let complies with the EMI²lets framework and can be trusted. Every EMI²let downloaded is signed with a private key only shared by the EMI²let designer and the player.
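Conceptually, the check behaves like the following sketch. The real platform verifies signed .NET assemblies via reflection; this HMAC-style stand-in with a shared key is only an illustration:

```python
import hmac
import hashlib

def verify_emi2let(bundle_bytes: bytes, signature: bytes, shared_key: bytes) -> bool:
    """Accept the downloaded EMI2let only if its signature matches
    the digest computed with the designer/player shared key."""
    expected = hmac.new(shared_key, bundle_bytes, hashlib.sha256).digest()
    return hmac.compare_digest(expected, signature)
```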
After verification, the player can start the EMI²let by invoking the methods defined in the EMI2let base class, from which every EMI²let must inherit. The methods defined by this class closely resemble the ones provided by a J2ME MIDlet:
- start, starts or resumes the execution of a downloaded EMI²let.
- pause, pauses its execution.
- destroy, destroys it.
In addition, the EMI2let class includes some EMI²let-specific methods (collected in the sketch after this list), such as:
- getUUID, returns the unique identifier of an EMI²let.
- setProperty/getProperty, sets or gets the properties associated with an EMI²let. For instance, the EMI2let.Durable property is set to true when an EMI²let has to be cached in the player after its execution, so that it can be executed again in the future. Otherwise, an EMI²let is wiped out from the player either when its execution is completed or when it is out of range of the EMI²Object it represents.
- notifyDisconnected, offers an EMI²let the possibility of being aware of when the EMI²Object that it controls can no longer be accessed.
- getAddresses, enables the EMI²let-hosting player to retrieve the addresses at which the EMI²let server-side is available. For instance, an EMI²let server-side may be accessible both through a Bluetooth address and a URL pointing to a web service.
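Collecting the lifecycle and EMI²let-specific methods, the base class could be sketched as follows (a Python stand-in for the actual .NET EMI2let class; everything except the method names from the text is invented):

```python
class EMI2let:
    """Base class every EMI2let must inherit from (illustrative sketch)."""

    def __init__(self, uuid: str):
        self._uuid = uuid
        self._properties = {"Durable": False}

    # MIDlet-like lifecycle
    def start(self): ...    # start or resume execution
    def pause(self): ...    # pause execution
    def destroy(self): ...  # destroy the EMI2let

    # EMI2let-specific methods
    def get_uuid(self) -> str:
        return self._uuid

    def set_property(self, name: str, value):
        self._properties[name] = value

    def get_property(self, name: str):
        return self._properties.get(name)

    def notify_disconnected(self):
        """Called when the EMI2Object it controls becomes unreachable."""

    def get_addresses(self) -> list:
        """Addresses of the server-side, e.g. a Bluetooth address or a URL."""
        return []
```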
Our first reference implementation uses Microsoft .NET, a platform that fully supports reflection through the `System.Reflection` namespace. Moreover, the .NET platform addresses software development for all the client hardware platforms considered in EMI²lets, namely PC, PDA and mobile phone. As a common denominator for the definition of the presentation controls of an EMI²let, we have chosen the .NET Compact Framework graphical controls, which represent a superset of the ones in the SmartPhone framework and a subset of the standard desktop-oriented .NET ones.
The most noticeable part of our implementation is the assembly fusion undertaken at the player side, merging the arriving EMI²let assembly with the EMI²let library installed in each target device. This library represents the player's runtime, i.e. the abstract-to-concrete layer and the interaction, discovery, presentation and persistency mapping implementations with their corresponding plug-in modules. In other words, the downloaded assembly code is linked dynamically (late bound) with the runtime installed in the target device.
4 An EMI²lets Application
The Parking EMI²let, see Fig. 4, is an example application developed with the EMI²lets middleware. It shows how a physical object in an outdoor space can be augmented with AmI features. This application is meant to be deployed at any street parking booth where we can purchase tickets to park our car for a limited period of time. Often, we have to keep returning to the parking place to renew the ticket so that the local police force does not issue a fine for parking time expiration. Thanks to the EMI²lets platform, a user could discover, download (from the ticket booth) and install a parking EMI²let which would help her solve this situation. With the downloaded EMI²let, the user could purchase parking tickets via Bluetooth while at the parking place, and remotely via GPRS when the EMI²let warns her (at her office) that her parking ticket is about to expire. This scenario shows one of the biggest virtues of EMI²lets: their capability to enact an action over an EMI²Object both locally, while in the environment, and remotely, far away from it.
Other EMI²lets developed have allowed us to perform tasks as diverse as ordering food in a busy restaurant from our mobile phone, controlling the electronic devices and lights of a room, offering voice-synthesized bus arrival notifications for blind people, or providing subtitles on the mobile phones of people attending an opera.
5 Related Work
The EMI²lets platform bears some resemblance to the Smoblets software framework proposed by [14]. Both frameworks download into a mobile device the software representatives of objects located in a smart space. However, Smoblets only operate when they are within range of the smart object they represent. In contrast, EMI²lets can remain at the user's terminal even when the user is far away from the smart object. This allows the user to control that smart object anytime and anywhere, using both local (Bluetooth) and global (GPRS) communication mechanisms. Furthermore, the main application of Smoblets is to transform mobile devices into execution platforms for code downloaded from smart items with limited processing resources, whereas EMI²lets are mainly meant to transform mobile devices into hosts of smart object proxies, which simplify their remote control.
The layered software architecture of the EMI²lets framework has been inspired by the ReMMoC framework [9]. However, EMI²lets addresses not only the service discovery and interaction issues of mobile context-aware applications, but also the graphical presentation and persistency aspects commonly used in those applications. Moreover, the EMI²let code generated is independent of the target platform where it will run (PC, PDA or mobile phone).
6 Conclusion
This work has described the design and implementation of a novel middleware which provides mobile devices with universal active influence capabilities over the smart objects in an environment. This framework presents the following features:
- Transforms mobile devices into universal remote controllers of smart objects.
- Enables both local and global access to those smart objects (anywhere/anytime).
- Independent of, and extensible with respect to, the underlying service discovery and interaction, graphical representation and persistence mechanisms.
- Enables AmI spaces using conventional readily-available hardware and software.
- Follows a “write once, run on any device type” philosophy for EMI²lets.
The EMI²lets middleware represents a sound approach to making the AmI vision a reality. With it, we have been able to prototype several active influence AmI scenarios in a very simple manner.
Acknowledgements
This work has been financed by a SAIOTEK 2004-05 grant from the Basque Government and the Cátedra de Telefónica Móviles at the University of Deusto.
References
EasySDM: An Integrated and Easy to Use Spatial Data Mining Platform
Leila Hamdad, Amine Abdaoui, Nabila Belattar, Mohamed Ala Alchikha
To cite this version:
Leila Hamdad, Amine Abdaoui, Nabila Belattar, Mohamed Ala Alchikha. EasySDM: An Integrated and Easy to Use Spatial Data Mining Platform. KDIR: Knowledge Discovery and Information Retrieval, Nov 2015, Lisbon, Portugal. lirmm-01229030
HAL Id: lirmm-01229030
https://hal-lirmm.ccsd.cnrs.fr/lirmm-01229030
Submitted on 16 Nov 2015
HAL is a multi-disciplinary open access archive for the deposit and dissemination of scientific research documents, whether they are published or not. The documents may come from teaching and research institutions in France or abroad, or from public or private research centers.
EasySDM:
An Integrated and Easy to Use Spatial Data Mining Platform
Leila Hamdad¹, Amine Abdaoui², Nabila Belattar¹, and Mohamed Ala Alchikha¹
¹LSCI, ESI, BP 68M Oued Smar, Algiers, Algeria
²LIRMM, 860 St Priest Street, Montpellier, France
{l_hamdad, n_belattar, m_al_chikha}@esi.dz, abdaoui@lirmm.fr
Keywords: Spatial Data Mining, Geo-visualization, Classification, Clustering, Association Rules.
Abstract: Spatial Data Mining allows users to extract implicit but valuable knowledge from spatially related data. Two main approaches have been used in the literature. The first applies standard Data Mining algorithms after a spatial pre-processing step, while the second consists of developing specific algorithms that consider the spatial relations inside the mining process. In this work, we first present a study of existing Spatial Data Mining tools according to the implemented tasks and specific characteristics. Then, we introduce a new open source Spatial Data Mining platform (EasySDM) that integrates both approaches (pre-processing and dynamic mining). It proposes a set of algorithms belonging to the clustering, classification and association rule mining tasks. Moreover, and more importantly, it allows geographic visualization of both the data and the results, either via an internal map display or using any external Geographic Information System.
1. INTRODUCTION
Spatially related data is present in many fields such as epidemiology, environmental science, image analysis, etc. In fact, many problems are spatially dependent. The study of any characteristic of such data cannot be done without taking into account the respective geographical positions. In its most common form, a spatial data item is divided into two parts: a descriptive part that can be of any standard type (Integer, Boolean, etc.) and a geographic part describing the geometry and geo-spatial position of the data. Voluminous geographic data are being collected with modern acquisition techniques such as Global Positioning Systems (GPS), high-resolution remote sensing, Geographic Information Systems (GIS), etc. Extracting unknown and unexpected information from these spatial data sets requires efficient methods that take into account the spatial dependencies (Guo, 2009). Spatial data are characterized by their interdependence, which comes from the following assumption: “The closer objects are to each other, the higher the correlation between them” (Miller, 2004). Therefore, Spatial Data Mining (SDM) has emerged as an active area of research for extracting implicit and relevant knowledge from large spatial databases containing large, complex and interdependent data (Anselin et al., 2006). In general, SDM tasks are extensions of Data Mining (DM) ones that take into account spatial relations. These tasks include clustering, classification, association rule mining and geo-visualization.
In the literature, several SDM software tools exist, which operate according to two main approaches. The first approach, the most intuitive one, consists of using classical DM algorithms on pre-processed spatial data. The pre-processing consists of extracting a smoothed data table from the neighbourhood matrix between objects, or of representing the spatial relations as new features (Ouattara, 2010; Rinzivillo et al., 2008). This approach is simple but time consuming (Guo, 2008). The second approach consists of developing specific SDM techniques that dynamically take spatial relationships into consideration inside the mining process. The exploratory process of this approach is therefore faster than the first one, but more complicated to implement.
In this work, we first propose a study of existing SDM tools focusing on their proposed tasks and specific characteristics. To our knowledge, no similar study has been proposed before to compare SDM tools, in contrast to the considerable work done to compare classical DM tools (Goebel and Gruenwald, 1999; John F. Elder and Dean W. Abbott, 1998; Witten and Frank, 2005).
Then, we present EasySDM, our new integrated, open source and easy to use SDM platform. It integrates algorithms from both the pre-processing and dynamic SDM approaches. On the one hand, algorithms from the Weka DM tool (Hall et al., 2009) have been used after a pre-processing step based on the GDPM API (Bogorny et al., 2006). On the other hand, a naive regionalization algorithm and a simple spatial association rule extraction algorithm that can be applied directly on spatial data have been implemented. While existing SDM tools show a lack of visualization, especially the open source ones, EasySDM offers the possibility to visualize spatial data directly on an integrated geographical map before and after applying DM algorithms. Furthermore, visualization is also possible via any external Geographic Information System (GIS). Due to its simplicity and visualization capabilities, we believe that EasySDM may be helpful, inter alia, for explaining SDM to students in academia. It has been produced under the GPL licence in order to allow researchers and programmers to access and improve the source code. The platform setup, source code and documentation are publicly available on the internet.
The rest of the paper is organized as follows: first, a comparative study of existing SDM tools is presented in Section 2. Then, EasySDM and its components are detailed in Section 3. After that, we conduct some experiments using EasySDM in order to illustrate its functionalities and present them in Section 4. Finally, Section 5 concludes and gives our main perspectives.
2. COMPARATIVE STUDY OF SDM TOOLS
Many SDM tools have been proposed in the literature. (Han et al., 1997) proposed GeoMiner, the first knowledge extraction software for spatial databases, developed in 1997. It is an extension of the classical DM tool DBMiner (Jiawei Han, 1996), developed by the same team in 1996. Similarly, (Ouattara, 2010) developed GeoKnime, an extension of the Knime software (www.knime.org) to spatial data. (Appice et al., 2007) proposed Ingens, an extended platform for SDM within a GIS environment. (Lazarevic et al., 2000) developed SDAM, a software system for spatial data analysis and modelling that includes two SDM tasks (clustering and classification). (May and Savinov, 2001) developed the SPIN system, a spatial information system that implements many clustering, classification and association rule mining algorithms. (Bogorny et al., 2006) developed a spatial pre-processing API that can be added to the Weka software in order to treat spatial data. Finally, an interesting application of clustering, named CrimeStat, has been proposed in (Levine et al., 2004) in order to detect hot spots of crime incidents.
In this section, we compare these tools according to their general characteristics. Table 1 presents, for each tool: the year of its latest release, whether the software and the source code are publicly accessible, whether documentation is available and, finally, the type of visualization proposed (if any).
Table 1: General characteristics of existing SDM tools
<table>
<thead>
<tr>
<th>Tool name</th>
<th>Year of last release</th>
<th>Tool public accessibility</th>
<th>Source code public accessibility</th>
<th>Documentation</th>
<th>Integrated map display</th>
<th>External map display</th>
</tr>
</thead>
<tbody>
<tr>
<td>GeoMiner</td>
<td>1999</td>
<td>No</td>
<td>No</td>
<td>No</td>
<td>Yes</td>
<td>No</td>
</tr>
<tr>
<td>GeoKnime</td>
<td>2010</td>
<td>No</td>
<td>No</td>
<td>No</td>
<td>No</td>
<td>No</td>
</tr>
<tr>
<td>Ingens</td>
<td>2007</td>
<td>No</td>
<td>No</td>
<td>No</td>
<td>Yes</td>
<td>No</td>
</tr>
<tr>
<td>SDAM</td>
<td>2000</td>
<td>No</td>
<td>No</td>
<td>No</td>
<td>No</td>
<td>No</td>
</tr>
<tr>
<td>SPIN</td>
<td>2003</td>
<td>Yes</td>
<td>No</td>
<td>Yes</td>
<td>Yes</td>
<td>No</td>
</tr>
<tr>
<td>GDPM</td>
<td>2007</td>
<td>Yes</td>
<td>Yes</td>
<td>Yes</td>
<td>Yes</td>
<td>No</td>
</tr>
<tr>
<td>CrimeStat</td>
<td>2010</td>
<td>Yes</td>
<td>No</td>
<td>Yes</td>
<td>Yes</td>
<td>Yes</td>
</tr>
</tbody>
</table>
Table 2 presents a comparison of these tools according to their technical characteristics. For each tool, it presents its architecture, the programming language, the supported operating systems, and the possible types of data input. Finally, Table 3 presents a functional comparison, which takes into consideration the SDM approach used, the types of spatial relations considered, and the implemented SDM tasks.
1 http://www.lirmm.fr/~abdaoui/EasySDM
Table 2: Technical characteristics of SDM tools (Un: Unknown, Win: Windows)
<table>
<thead>
<tr>
<th>Tool name</th>
<th>System architecture</th>
<th>Programming language</th>
<th>Operating system</th>
<th>Data input types</th>
</tr>
</thead>
<tbody>
<tr>
<td>GeoMiner</td>
<td>Client/Server</td>
<td>Un</td>
<td>All</td>
<td>Database</td>
</tr>
<tr>
<td>GeoKnime</td>
<td>Desktop</td>
<td>Java</td>
<td>All</td>
<td>Database</td>
</tr>
<tr>
<td>Ingens</td>
<td>Client/Server</td>
<td>Java</td>
<td>All</td>
<td>Database</td>
</tr>
<tr>
<td>SDAM</td>
<td>Desktop</td>
<td>C++</td>
<td>Win</td>
<td>Database</td>
</tr>
<tr>
<td>SPIN</td>
<td>N-tier</td>
<td>Java</td>
<td>All</td>
<td>Database</td>
</tr>
<tr>
<td>GDPM</td>
<td>Desktop</td>
<td>Java</td>
<td>All</td>
<td>Database</td>
</tr>
<tr>
<td>CrimeStat</td>
<td>Desktop</td>
<td>C++</td>
<td>All</td>
<td>Files: dbf, shp and dat</td>
</tr>
</tbody>
</table>
Table 3: SDM characteristics of SDM tools (Pre-pro: Pre-processing, D: Distance, T: Topological, R: Directional, Un: Unknown)
<table>
<thead>
<tr>
<th>Tool name</th>
<th>SDM approach</th>
<th>Spatial relations</th>
<th>SDM tasks</th>
</tr>
</thead>
<tbody>
<tr>
<td>GeoMiner</td>
<td>Dynamic</td>
<td>D</td>
<td>Yes</td>
</tr>
<tr>
<td>GeoKnime</td>
<td>Dynamic</td>
<td>D and T</td>
<td>Yes</td>
</tr>
<tr>
<td>Ingens</td>
<td>Dynamic</td>
<td>D, T and R</td>
<td>No</td>
</tr>
<tr>
<td>SDAM</td>
<td>Pre-pro</td>
<td>Un</td>
<td>Yes</td>
</tr>
<tr>
<td>SPIN</td>
<td>Dynamic</td>
<td>D and T</td>
<td>Yes</td>
</tr>
<tr>
<td>GDPM</td>
<td>Pre-pro</td>
<td>D and T</td>
<td>Yes</td>
</tr>
<tr>
<td>CrimeStat</td>
<td>Pre-pro</td>
<td>D and T</td>
<td>No</td>
</tr>
</tbody>
</table>
It is important to notice that GeoMiner and Ingens have been built on specific spatial query languages. When they were released, these two tools were not successful. Moreover, GeoKnime and SDAM are not publically accessible and do not seem to be massively used. Since we could not test these four tools, their characteristics have been extracted from the scientific papers describing them.
3. SYSTEM OVERVIEW
3.1 Architecture
As presented in the previous section, Weka-GDPM is accessible, open source and includes the three main SDM tasks using the pre-processing approach. However, it does not provide any geographical visualization of the results. Therefore, we decided to use and enrich Weka-GDPM with geographic visualization. Figure 1 below presents the platform architecture. The visualization of the spatial data and the results can be performed within the platform or using any external GIS. The internal map display has been implemented using the MapWinGIS API; for external visualization, the data and the results can be opened in any GIS. In addition to the Weka algorithms that can be applied on pre-processed data, we implemented a naive regionalization algorithm and a simple spatial association rule mining algorithm (Spatial Apriori) that can be applied directly on spatial data without any pre-processing. The development was carried out on the Microsoft Visual Studio 2012 platform using the C# programming language. The jar files of Weka and GDPM have been converted to dll files accessible from the C# code using the IKVM tool (www.ikvm.net). The data sources can be either arff files (.arff), shape files (.shp) or a PostGIS 2.0 database. The obtained results can be saved in .arff or .shp formats.
3.2 Graphical User Interface
The Graphical User Interface (GUI) is simple and intuitive, hence the name EasySDM. As presented in Figure 2, the GUI is divided into three areas:
- **Region A (red rectangle):** Allows the user to interact with the platform by specifying the shape file and the .arff file, setting up the parameters, modifying the theme or the attribute displayed on the map, launching the algorithm, and saving the results.
- **Region B (green rectangle):** Displays information about the run, such as the algorithm status, the success or failure of the algorithm, the execution time, error messages, etc.
- **Region C (blue rectangle):** This area is dedicated to the map display and the legend. It allows the user to visualize both the data and the results on the map. For example, the same colour is used for objects that belong to the same cluster.
Figure 1: EasySDM architecture.
Figure 2: EasySDM Graphical User Interface and its three areas. Here, the K-means clustering algorithm has been applied (k=4).
3.3 Geographic pre-processing
The geographic pre-processing is performed using the GDPM API. This step consists of extracting spatial relations and including them as new features of the data. Since we used the instance granularity level, each new feature represents a spatial object. Each new feature takes as its value the spatial relation existing between the object represented by the feature (column) and the object represented by the corresponding instance (row). If no spatial relation is found between the two objects, the corresponding cell takes the value ‘no’. Two types of spatial pre-processing are available, according to the types of the extracted spatial relationships:
3.3.1 Distance relationships
Three types of distance-based spatial relations have been considered: close, very close and far. The spatial relationship is chosen according to distance thresholds. Figure 3 presents the structure of the obtained arff file.
Figure 3: Structure of the arff file after the extraction of distance relationships (a matrix with one row per object and one new column per object, plus a Class column; each cell holds ‘Close’, ‘Very close’ or ‘no’).
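A minimal sketch of this distance-based pre-processing (thresholds, point representation and function names are illustrative, not EasySDM's actual API):

```python
from math import dist  # Euclidean distance, Python 3.8+

def distance_relation(a, b, very_close=1.0, close=5.0):
    """Classify the spatial relation between two point objects."""
    d = dist(a, b)
    if d <= very_close:
        return "very close"
    if d <= close:
        return "close"
    return "no"  # far: treated as no relation in the feature table

def relation_matrix(objects):
    """One new feature (column) per object, as in Figure 3."""
    return [[distance_relation(a, b) if a is not b else "no"
             for b in objects] for a in objects]

points = [(0, 0), (0.5, 0.5), (10, 10)]
for row in relation_matrix(points):
    print(row)
```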
3.3.2 Topological relationships
Eight types of topological spatial relationships have been considered: equal, disjoint, touches, within, overlaps, crosses, contains and covers. Figure 4 presents the structure of the obtained arff file.
Figure 4: Structure of the arff file after the extraction of topological relationships (the same object-by-object matrix, where each cell holds a topological relation such as ‘Touch’ or ‘Contains’, or ‘no’ when none holds).
Once the geographical pre-processing step has been applied, the data can be used with classical DM algorithms. In this first version, the following representative algorithms have been integrated via the Weka software:
- Clustering by partitioning: K-means, Farthest First and Expectation Maximization.
- Density based clustering: Cobweb and DBscan.
- Classification: J48 and Naïve Bayes.
3.4 Dynamic SDM algorithms
Two naive SDM algorithms that can be applied directly on the spatial data have been implemented. The goal here is to illustrate the dynamic processing approach with simple examples.
3.4.1 Regionalization
While classical clustering methods do not guarantee that objects in the same cluster are contiguous, regionalization groups similar objects into contiguous regions. Figure 5 presents the difference between clustering and regionalization. Our regionalization algorithm is divided into two steps. First, a classical clustering algorithm is applied (here, the FarthestFirst algorithm). Then, the obtained clusters are organized into contiguous regions: if non-contiguous objects exist in a cluster, they are separated in order to form a new cluster. This process is repeated until all regions are contiguous.
The algorithm is presented below, followed by a Python sketch:
**Input:** Dataset: D
Minimal number of regions: k.
**Output:** The assignments of elements to different regions.
1. Apply FarthestFirst(D, k).
2. For each cluster c not yet checked, create a region r containing the first object of c.
2.1. For every other object o of c: if o touches at least one object of r, then r = r ∪ {o}.
2.2. All remaining objects (if any) are assigned to a new cluster.
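A compact Python rendering of the two steps, assuming step 1 (clustering) has already produced `clusters` and that a `touches(a, b)` contiguity predicate is available (both assumptions, not EasySDM's API):

```python
def split_into_contiguous_regions(cluster, touches):
    """Split one cluster into maximal contiguous regions, i.e. the
    connected components under the `touches` relation (step 2)."""
    remaining, regions = list(cluster), []
    while remaining:
        region, frontier = set(), [remaining.pop(0)]
        while frontier:
            obj = frontier.pop()
            region.add(obj)
            still = []
            for o in remaining:
                # objects touching the region move to the frontier
                (frontier if touches(o, obj) else still).append(o)
            remaining = still
        regions.append(region)
    return regions

def regionalize(clusters, touches):
    """Refine an initial clustering into contiguous regions."""
    out = []
    for c in clusters:
        out.extend(split_into_contiguous_regions(c, touches))
    return out
```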
3.4.2 Spatial Apriori
Spatial association rule mining extracts rules of the form X \( \Rightarrow \) Y, where X and Y are spatial predicates (intersects, contains, overlaps, disjoint, crosses, covers, covered by and touches). We implemented a spatial variation of the Apriori algorithm that we call Spatial Apriori. It outputs the rule with the highest confidence, together with the geographical objects that participate in this rule. Hence, Spatial Apriori extracts the best association rule found in a shape file given minimum support and minimum confidence thresholds. The steps of the Spatial Apriori algorithm are detailed below, followed by a Python sketch:
**Input:** Dataset: D
Minimal support: MinSup
Minimal confidence: MinConf.
**Output:** Best rule found
Confidence
Support.
**Begin**
1. Predicates <- Apply all the families of spatial relations on all the elements of D.
2. Predicates <- Select only those for which the support is greater than MinSup.
3. Predicates <- Select the best predicate for each family.
4. Rules <- Combine the predicates and generate all possible association rules.
5. Rules <- Select rules with confidence greater than MinConf.
6. Return the association rule with maximal confidence.
**End**
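The six steps can be condensed into the following sketch, which assumes the best predicate per family has already been selected (step 3) and that `support` and `confidence` are caller-supplied functions over the shape file (all names illustrative):

```python
from itertools import permutations

def spatial_apriori(predicates, support, confidence, min_sup, min_conf):
    """predicates: candidate spatial predicates, one best per family.
    support(p): fraction of objects satisfying p.
    confidence(x, y): support of (x and y) divided by support of x."""
    # steps 1-3: keep only frequent predicates
    frequent = [p for p in predicates if support(p) >= min_sup]
    # steps 4-5: generate X => Y rules and filter by confidence
    rules = [(x, y, confidence(x, y))
             for x, y in permutations(frequent, 2)
             if confidence(x, y) >= min_conf]
    # step 6: return the rule with maximal confidence (None if no rule)
    return max(rules, key=lambda r: r[2], default=None)
```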
3.5 Licence
EasySDM is distributed under the GPL licence in order to guarantee free access to the platform, to the source code and to any software built on it. Weka, GDPM and IKVM are already under the GPL licence. However, MapWinGIS is distributed under the MPL 1.1 licence, which is incompatible with the GPL. Therefore, we contacted the MapWinGIS owners and they authorized us to use their API alongside the GPL licence we are using.
4. EXPERIMENTS
In order to test the functionalities of our new SDM platform, we conducted experiments using publicly available spatial data from the GeoDa website (www.geodacenter.asu.edu/sdata). The main goal here is to illustrate the visualization capabilities of EasySDM on a real benchmark.
4.1 Description of the benchmark
It contains Malaria incidence and population data for Colombia. We selected the information corresponding to the year 1998. The obtained benchmark contained 33 spatial objects (polygons) representing the 33 Colombian departments. These departments were described with six features: department name, department code, number of Malaria incidences (MALARI98), total population (TP1998), rural population (RP1998) and urban population (UP1998). Geographical pre-processing added 33 new features to the data. Due to the nature of this benchmark (polygons), topological relations have been considered.
4.1.1 Clustering
We first conducted a clustering experiment using K-means (k=3) on the geographically pre-processed data. In this experiment, the department “San Andres” was removed in order to be used later for classification. As shown in Figure 6, three clusters have been created:
- **The red cluster** contains many departments, especially from the centre of the country; they therefore share many common borders. The number of Malaria incidences may be very high.
- **The orange cluster** contains departments from the north-west of the country. The number of Malaria incidences is high, as is the population, which is more urban than rural.
- **The yellow cluster** groups departments which have fewer common borders, smaller populations and fewer Malaria cases. The population is more rural than urban.
4.1.2 Classification
We used the 32 already classified departments to build a Naive Bayes classifier, which was then used to classify the left-out “San Andres” department (composed of two islands). As shown in Figure 7, “San Andres” has been assigned to the yellow cluster since it has no Malaria incidence (0), a small population (69525) and no common border with any other department.
4.1.3 Association rules
Finally, we applied the naive variation of Apriori to this benchmark. The extracted rule is the following (minimum support and confidence were set to 0):
\[
\text{Intersects(Antioquia)} \Rightarrow \text{Disjoint(San Andres)}
\]
This rule has a support of 0.24 and a confidence of 1. The departments verifying this rule are presented in yellow in Figure 8. The obtained rule is obvious and may not be valuable for the user; since the purpose of the platform is to explain SDM to novice users, we did not remove obvious rules. This could be addressed in future versions.
5. CONCLUSION
In this work, we proposed an open source and easy to use SDM platform named EasySDM. It integrates classical DM methods implemented in the Weka platform after a geographical pre-processing step. Moreover, it contains two naive algorithms that consider the spatial relations inside the mining process without any pre-processing. The main contribution concerns the integration of an internal geographic visualization of the spatial data before and after applying the algorithms. EasySDM can also be interfaced with any GIS to offer external visualization and to take advantage of GIS functionalities. Therefore, we believe that it can be useful in academia. The main expected improvement of EasySDM consists of enriching it with further existing algorithms from the literature.
REFERENCES
Han, J. and Fu, Y., 1996. DBMiner: A System for Mining Knowledge in Large Relational Databases. KDD-96 Proceedings, 250–255.
Mini 1 graded by next lecture
Project 1 is out, sample writeups on website
Recall: HMMs
- **Input** \( x = (x_1, \ldots, x_n) \)
**Output** \( y = (y_1, \ldots, y_n) \)
\[
P(y, x) = P(y_1) \prod_{i=2}^{n} P(y_i | y_{i-1}) \prod_{i=1}^{n} P(x_i | y_i)
\]
- **Training**: maximum likelihood estimation (with smoothing)
\[
score_i(s) = \max_{y_{i-1}} P(s | y_{i-1}) P(x_i | s) score_{i-1}(y_{i-1})
\]
- **Inference problem**: \( \arg\max_y P(y | x) = \arg\max_y \frac{P(y, x)}{P(x)} \)
- **Viterbi**: a dynamic program over the score recurrence above (a sketch follows)
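A minimal log-space Viterbi sketch for the HMM case (parameter containers are assumed to be dictionaries; this is illustrative, not the course's reference code):

```python
def viterbi(obs, states, log_init, log_trans, log_emit):
    """Most likely tag sequence under an HMM, in log space.

    log_init[s], log_trans[s_prev][s], log_emit[s][x] are log-probabilities.
    """
    # base case: score_1(s) = log_init[s] + log_emit[s][x_1]
    V = [{s: log_init[s] + log_emit[s][obs[0]] for s in states}]
    back = []
    for x in obs[1:]:
        scores, ptr = {}, {}
        for s in states:
            # recurrence: max over previous tag
            best_prev = max(states, key=lambda p: V[-1][p] + log_trans[p][s])
            scores[s] = V[-1][best_prev] + log_trans[best_prev][s] + log_emit[s][x]
            ptr[s] = best_prev
        V.append(scores)
        back.append(ptr)
    # follow back-pointers from the best final state
    last = max(states, key=lambda s: V[-1][s])
    path = [last]
    for ptr in reversed(back):
        path.append(ptr[path[-1]])
    return list(reversed(path))
```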
This Lecture
- CRFs: model (+features for NER), inference, learning
- Named entity recognition (NER)
- (if time) Beam search
Named Entity Recognition
- BIO tagset: begin, inside, outside
- Sequence of tags — should we use an HMM?
- Why might an HMM not do so well here?
- Lots of O’s, so tags aren’t as informative about context
- Insufficient features/capacity with multinomials (especially for unks)
Barack Obama will travel to Hangzhou today for the G20 meeting.
CRFs
Conditional Random Fields
- HMMs are expressible as Bayes nets (factor graphs)
[Figure: chain-structured factor graph, \( y_1 \rightarrow y_2 \rightarrow \ldots \rightarrow y_n \), with an emission \( x_i \) attached to each \( y_i \)]
- This reflects the following decomposition:
\[ P(y, x) = P(y_1)P(x_1|y_1)P(y_2|y_1)P(x_2|y_2) \ldots \]
- Locally normalized model: each factor is a probability distribution that normalizes
Conditional Random Fields
- HMMs: \( P(y, x) = P(y_1)P(x_1|y_1)P(y_2|y_1)P(x_2|y_2) \ldots \)
- CRFs: discriminative models with the following globally-normalized form:
\[
P(y|x) = \frac{1}{Z} \prod_k \exp(\phi_k(x, y))
\]
where \(Z\) is the normalizer and each \(\phi_k\) is any real-valued scoring function of its arguments
- Naive Bayes : logistic regression :: HMMs : CRFs
local vs. global normalization \(\leftrightarrow\) generative vs. discriminative
- Locally normalized discriminative models do exist (MEMMs)
- How do we max over \(y\)? Intractable in general — can we fix this?
**Sequential CRFs**
- **HMMs:** \( P(y, x) = P(y_1)P(x_1|y_1)P(y_2|y_1)P(x_2|y_2) \ldots \)
- **CRFs:**
\[
P(y|x) \propto \prod_k \exp(\phi_k(x, y))
\]
\[
P(y|x) \propto \exp(\phi_o(y_1)) \prod_{i=2}^n \exp(\phi_t(y_{i-1}, y_i)) \prod_{i=1}^n \exp(\phi_e(x_i, y_i))
\]
Sequential CRFs
\[
P(y|x) \propto \exp(\phi_o(y_1)) \prod_{i=2}^{n} \exp(\phi_t(y_{i-1}, y_i)) \prod_{i=1}^{n} \exp(\phi_e(x_i, y_i))
\]
- We condition on \(x\), so every factor can depend on all of \(x\) (including transitions, but we won’t do this)
- \(y\) can’t depend arbitrarily on \(x\) in a generative model
- The token index \(i\) lets the emission factor look at the current word
- Notation: we omit \(x\) from the factor graph entirely (it is implicit)
- We don't include an initial distribution; it can be baked into the other factors
Sequential CRFs:
$$P(y|x) = \frac{1}{Z} \prod_{i=2}^{n} \exp(\phi_t(y_{i-1}, y_i)) \prod_{i=1}^{n} \exp(\phi_e(y_i, i, x))$$
Feature Functions
\[
P(y|x) = \frac{1}{Z} \prod_{i=2}^{n} \exp(\phi_t(y_{i-1}, y_i)) \prod_{i=1}^{n} \exp(\phi_e(y_i, i, x))
\]
- This can be almost anything! Here we use linear functions of sparse features
\[
\begin{align*}
\phi_e(y_i, i, x) &= w^\top f_e(y_i, i, x) \\
\phi_t(y_{i-1}, y_i) &= w^\top f_t(y_{i-1}, y_i)
\end{align*}
\]
\[
P(y|x) \propto \exp w^\top \left[ \sum_{i=2}^{n} f_t(y_{i-1}, y_i) + \sum_{i=1}^{n} f_e(y_i, i, x) \right]
\]
- Looks like our single weight vector multiclass logistic regression model
Basic Features for NER
\[ P(y|x) \propto \exp w^\top \left[ \sum_{i=2}^{n} f_t(y_{i-1}, y_i) + \sum_{i=1}^{n} f_e(y_i, i, x) \right] \]
Barack Obama will travel to Hangzhou today for the G20 meeting.
Transitions: \( f_t(y_{i-1}, y_i) = \text{Ind}[y_{i-1} \& y_i] = \text{Ind}[O \rightarrow \text{B-LOC}] \)
Emissions: \( f_e(y_6, 6, x) = \text{Ind}[\text{B-LOC} \& \text{Current word = Hangzhou}] \)
\( \text{Ind}[\text{B-LOC} \& \text{Prev word = to}] \)
Features for NER
Leicestershire is a nice place to visit...
I took a vacation to Boston
Apple released a new version...
According to the New York Times...
Leonardo DiCaprio won an award...
Texas governor Greg Abbott said
Features for NER
- Word features (can use in HMM)
- Capitalization
- Word shape
- Prefixes/suffixes
- Lexical indicators
- Context features (can’t use in HMM!)
- Words before/after
- Tags before/after
- Word clusters
- Gazetteers
According to the *New York Times*...
Apple released a new version...
CRFs Outline
- Model:
\[ P(y|x) = \frac{1}{Z} \prod_{i=2}^{n} \exp(\phi_t(y_{i-1}, y_i)) \prod_{i=1}^{n} \exp(\phi_e(y_i, i, x)) \]
\[ P(y|x) \propto \exp w^\top \left[ \sum_{i=2}^{n} f_t(y_{i-1}, y_i) + \sum_{i=1}^{n} f_e(y_i, i, x) \right] \]
- Inference
- Learning
Computing (arg)maxes
\[ P(y|x) = \frac{1}{Z} \prod_{i=2}^{n} \exp(\phi_t(y_{i-1}, y_i)) \prod_{i=1}^{n} \exp(\phi_e(y_i, i, x)) \]
- **argmax\_y P(y|x):** can use Viterbi exactly as in HMM case
\[
\max_{y_1, \ldots, y_n} e^{\phi_t(y_{n-1}, y_n)} e^{\phi_e(y_n, n, x)} \cdots e^{\phi_e(y_2, 2, x)} e^{\phi_t(y_1, y_2)} e^{\phi_e(y_1, 1, x)}
\]
\[
= \max_{y_2, \ldots, y_n} e^{\phi_t(y_{n-1}, y_n)} e^{\phi_e(y_n, n, x)} \cdots e^{\phi_e(y_2, 2, x)} \max_{y_1} e^{\phi_t(y_1, y_2)} e^{\phi_e(y_1, 1, x)}
\]
\[
= \max_{y_3, \ldots, y_n} e^{\phi_t(y_{n-1}, y_n)} e^{\phi_e(y_n, n, x)} \cdots \max_{y_2} e^{\phi_t(y_2, y_3)} e^{\phi_e(y_2, 2, x)} \max_{y_1} e^{\phi_t(y_1, y_2)} \text{score}_1(y_1)
\]
- \( \exp(\phi_t(y_{i-1}, y_i)) \) and \( \exp(\phi_e(y_i, i, x)) \) play the role of the Ps now, same dynamic program
Can do inference in any tree-structured CRF
Max-product algorithm: generalization of Viterbi to arbitrary tree-structured graphs (sum-product is generalization of forward-backward)
CRFs Outline
Model: $$P(y|x) = \frac{1}{Z} \prod_{i=2}^{n} \exp(\phi_t(y_{i-1}, y_i)) \prod_{i=1}^{n} \exp(\phi_e(y_i, i, x))$$
$$P(y|x) \propto \exp w^\top \left[ \sum_{i=2}^{n} f_t(y_{i-1}, y_i) + \sum_{i=1}^{n} f_e(y_i, i, x) \right]$$
Inference: argmax $$P(y|x)$$ from Viterbi
Learning
Training CRFs
\[ P(y|x) \propto \exp w^\top \left[ \sum_{i=2}^{n} f_t(y_{i-1}, y_i) + \sum_{i=1}^{n} f_e(y_i, i, x) \right] \]
- Logistic regression: \( P(y|x) \propto \exp w^\top f(x, y) \)
- Maximize \( \mathcal{L}(y^*, x) = \log P(y^*|x) \)
- Gradient is completely analogous to logistic regression:
\[
\frac{\partial}{\partial w} \mathcal{L}(y^*, x) = \sum_{i=2}^{n} f_t(y^*_{i-1}, y^*_i) + \sum_{i=1}^{n} f_e(y^*_i, i, x)
\]
\[
-\mathbb{E}_y \left[ \sum_{i=2}^{n} f_t(y_{i-1}, y_i) + \sum_{i=1}^{n} f_e(y_i, i, x) \right]
\]
intractable!
Training CRFs
\[
\frac{\partial}{\partial w} \mathcal{L}(y^*, x) = \sum_{i=2}^{n} f_t(y^*_{i-1}, y^*_i) + \sum_{i=1}^{n} f_e(y^*_i, i, x)
- \mathbb{E}_y \left[ \sum_{i=2}^{n} f_t(y_{i-1}, y_i) + \sum_{i=1}^{n} f_e(y_i, i, x) \right]
\]
- Let's focus on the emission feature expectation
\[
\mathbb{E}_y \left[ \sum_{i=1}^{n} f_e(y_i, i, x) \right] = \sum_{y \in \mathcal{Y}} P(y|x) \left[ \sum_{i=1}^{n} f_e(y_i, i, x) \right] = \sum_{i=1}^{n} \sum_{y \in \mathcal{Y}} P(y|x) f_e(y_i, i, x)
\]
\[
= \sum_{i=1}^{n} \sum_{s} P(y_i = s|x) f_e(s, i, x)
\]
Computing Marginals
\[
P(y|\mathbf{x}) = \frac{1}{Z} \prod_{i=2}^{n} \exp(\phi_t(y_{i-1}, y_i)) \prod_{i=1}^{n} \exp(\phi_e(y_i, i, \mathbf{x}))
\]
- Normalizing constant \( Z = \sum_{\mathbf{y}} \prod_{i=2}^{n} \exp(\phi_t(y_{i-1}, y_i)) \prod_{i=1}^{n} \exp(\phi_e(y_i, i, \mathbf{x})) \)
- Analogous to \( P(\mathbf{x}) \) for HMMs
- For both HMMs and CRFs:
\[
P(y_i = s|\mathbf{x}) = \frac{\text{forward}_i(s) \text{backward}_i(s)}{\sum_{s'} \text{forward}_i(s') \text{backward}_i(s')}
\]
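A small NumPy sketch of these computations for a sequential CRF, working directly with exponentiated potentials (in practice one would stay in log space; the array layout is an assumption):

```python
import numpy as np

def crf_marginals(emit, trans):
    """Position-wise posteriors P(y_i = s | x) for a sequential CRF.

    emit:  (n, S) array, emit[i, s] = exp(phi_e(s, i, x))
    trans: (S, S) array, trans[a, b] = exp(phi_t(a, b))
    """
    n, S = emit.shape
    fwd = np.zeros((n, S))
    bwd = np.zeros((n, S))
    fwd[0] = emit[0]  # no separate initial distribution
    for i in range(1, n):
        fwd[i] = emit[i] * (fwd[i - 1] @ trans)
    bwd[-1] = 1.0
    for i in range(n - 2, -1, -1):
        bwd[i] = trans @ (emit[i + 1] * bwd[i + 1])
    post = fwd * bwd  # each row sums to the same Z (a useful sanity check)
    return post / post.sum(axis=1, keepdims=True)
```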
Posteriors vs. Probabilities
\[ P(y_i = s | x) = \frac{\text{forward}_i(s) \cdot \text{backward}_i(s)}{\sum_{s'} \text{forward}_i(s') \cdot \text{backward}_i(s')} \]
- Posterior is *derived* from the parameters and the data (conditioned on \( x \)!)
- HMM: \( P(x_i|y_i) \) and \( P(y_i|y_{i-1}) \) are model parameters (usually multinomial distributions); posteriors such as \( P(y_i|\mathbf{x}) \) are inferred quantities from forward-backward
- CRF: \( P(y_i|\mathbf{x}) \) and \( P(y_{i-1}, y_i|\mathbf{x}) \) are inferred quantities from forward-backward; quantities like \( P(x_i|y_i) \) are undefined (the model is by definition conditioned on \( \mathbf{x} \))
Training CRFs
- For emission features:
\[
\frac{\partial}{\partial w} \mathcal{L}(y^*, x) = \sum_{i=1}^{n} f_e(y^*_i, i, x) - \sum_{i=1}^{n} \sum_{s} P(y_i = s | x) f_e(s, i, x)
\]
gold features minus expected features under the model
- Transition features: need to compute \( P(y_i = s_1, y_{i+1} = s_2 | x) \)
using forward-backward as well
- ...but you can build a pretty good system without transition features
CRFs Outline
- **Model:**
\[
P(y \mid x) = \frac{1}{Z} \prod_{i=2}^{n} \exp(\phi_t(y_{i-1}, y_i)) \prod_{i=1}^{n} \exp(\phi_e(y_i, i, x))
\]
\[
P(y \mid x) \propto \exp w^\top \left[ \sum_{i=2}^{n} f_t(y_{i-1}, y_i) + \sum_{i=1}^{n} f_e(y_i, i, x) \right]
\]
- **Inference:** \( \text{argmax } P(y \mid x) \) from Viterbi
- **Learning:** run forward-backward to compute posterior probabilities; then
\[
\frac{\partial}{\partial w} \mathcal{L}(y^*, x) = \sum_{i=1}^{n} f_e(y^*_i, i, x) - \sum_{i=1}^{n} \sum_{s} P(y_i = s \mid x) f_e(s, i, x)
\]
Pseudocode
for each epoch:
    for each example:
        extract features on each emission and transition (look up in cache)
        compute potentials phi based on features + weights
        compute marginal probabilities with forward-backward
        accumulate gradient over all emissions and transitions
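Fleshing out the emission part of that gradient (a sketch: `feats(s, i, x)` returning a sparse feature dict, and per-position `posteriors` from forward-backward, are assumed inputs):

```python
def emission_gradient(x, gold, feats, posteriors, states):
    """Emission part of d/dw log P(y*|x):
    gold features minus expected features under the model."""
    grad = {}
    for i in range(len(x)):
        for f, v in feats(gold[i], i, x).items():   # gold features
            grad[f] = grad.get(f, 0.0) + v
        for s in states:                            # expected features
            p = posteriors[i][s]
            for f, v in feats(s, i, x).items():
                grad[f] = grad.get(f, 0.0) - p * v
    return grad
```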
Implementation Tips for CRFs
- Caching is your friend! Cache feature vectors especially
- Try to reduce redundant computation, e.g. if you compute both the gradient and the objective value, don’t rerun the dynamic program
- Exploit sparsity where possible, especially in feature vectors and gradients
- Do all dynamic program computation in log space to avoid underflow
- If things are too slow, run a profiler and see where time is being spent. Forward-backward should take most of the time
Debugging Tips for CRFs
- Hard to know whether inference, learning, or the model is broken!
- Compute the objective — is optimization working?
- **Inference**: check gradient computation (most likely place for bugs)
- Is $\sum_s \text{forward}_i(s) \cdot \text{backward}_i(s)$ the same for all $i$?
- Do probabilities normalize correctly + look “reasonable”? (Nearly uniform when untrained, then slowly converging to the right thing)
- **Learning**: is the objective going down? Can you fit a small training set? Are you applying the gradient correctly?
- If objective is going down but model performance is bad:
- **Inference**: check performance if you decode the training set
CRF with lexical features can get around 85 F1 on this problem
Other pieces of information that many systems capture
World knowledge:
The delegation met the president at the airport, *Tanjug* said.
**Tanjug**
*From Wikipedia, the free encyclopedia*
*Tanjug* (/tǎnˈd͡ʒʊɡ/; Serbian Cyrillic: Танјуг) is a Serbian state news agency based in Belgrade.
Nonlocal Features
The news agency Tanjug reported on the outcome of the meeting. The delegation met the president at the airport, Tanjug said.
- More complex factor graph structures can let you capture this, or just decode sentences in order and use features on previous sentences.
Finkel and Manning (2008), Ratinov and Roth (2009)
Semi-Markov Models
Barack Obama will travel to Hangzhou today for the G20 meeting.
- Chunk-level prediction rather than token-level BIO
- $y$ is a set of touching spans of the sentence
- Pros: features can look at whole span at once
- Cons: there’s an extra factor of $n$ in the dynamic programs
Evaluating NER
Prediction of all Os still gets 66% accuracy on this example!
- What we really want to know: how many named entity chunk predictions did we get right?
- Precision: of the ones we predicted, how many are right?
- Recall: of the gold named entities, how many did we find?
- F-measure: harmonic mean of these two (sketched below)
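Concretely, chunk-level evaluation can be sketched as follows, comparing entities as exact (start, end, type) triples:

```python
def chunk_f1(gold_chunks, pred_chunks):
    """gold_chunks, pred_chunks: sets of (start, end, type) triples."""
    tp = len(gold_chunks & pred_chunks)  # exact-match true positives
    precision = tp / len(pred_chunks) if pred_chunks else 0.0
    recall = tp / len(gold_chunks) if gold_chunks else 0.0
    f1 = (2 * precision * recall / (precision + recall)
          if precision + recall else 0.0)
    return precision, recall, f1
```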
How well do NER systems do?
<table>
<thead>
<tr>
<th>System</th>
<th>Resources Used</th>
<th>$F_1$</th>
</tr>
</thead>
<tbody>
<tr>
<td>+ LBJ-NER</td>
<td>Wikipedia, Nonlocal Features, Word-class Model</td>
<td>90.80</td>
</tr>
<tr>
<td>- (Suzuki and Isozaki, 2008)</td>
<td>Semi-supervised on 1G-word unlabeled data</td>
<td>89.92</td>
</tr>
<tr>
<td>- (Ando and Zhang, 2005)</td>
<td>Semi-supervised on 27M-word unlabeled data</td>
<td>89.31</td>
</tr>
<tr>
<td>- (Kazama and Torisawa, 2007a)</td>
<td>Wikipedia</td>
<td>88.02</td>
</tr>
<tr>
<td>- (Krishnan and Manning, 2006)</td>
<td>Non-local Features</td>
<td>87.24</td>
</tr>
<tr>
<td>- (Kazama and Torisawa, 2007b)</td>
<td>Non-local Features</td>
<td>87.17</td>
</tr>
<tr>
<td>+ (Finkel et al., 2005)</td>
<td>Non-local Features</td>
<td>86.86</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Lample et al. (2016)</th>
<th></th>
</tr>
</thead>
<tbody>
<tr>
<td>LSTM-CRF (no char)</td>
<td>90.20</td>
</tr>
<tr>
<td>LSTM-CRF</td>
<td>90.94</td>
</tr>
<tr>
<td>S-LSTM (no char)</td>
<td>87.96</td>
</tr>
<tr>
<td>S-LSTM</td>
<td>90.33</td>
</tr>
<tr>
<td>BiLSTM-CRF + ELMo</td>
<td>92.2</td>
</tr>
</tbody>
</table>
Ratinov and Roth (2009)
Beam Search
Fed raises interest rates 0.5 percent
- For an n-word sentence with s tags to consider, what is the time complexity?
- $O(ns^2)$ (s is ~40 for POS, n is ~20)
Many tags are totally implausible
Can any of these be:
- Determiners?
- Prepositions?
- Adjectives?
Features quickly eliminate many outcomes from consideration — don’t need to consider these going forward
Fed raises interest rates 0.5 percent
**Beam Search**
- Maintain a beam of $k$ plausible states at the current timestep
- Expand all states, only keep $k$ top hypotheses at new timestep
- Beam size of $k$, time complexity $O(nks \log(ks))$ (see the sketch below)
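A minimal sketch of the loop, with a caller-supplied local `score` function standing in for the model (illustrative, not the course's reference implementation):

```python
import heapq

def beam_search(words, tags, score, k=2):
    """Left-to-right beam search over tag sequences.

    score(prev_tag, tag, i, words) returns a local (transition + emission)
    score; its exact form is up to the model. Keeps the k best partial
    hypotheses at each timestep.
    """
    beam = [(0.0, [])]  # (total score, tag sequence so far)
    for i in range(len(words)):
        candidates = []
        for total, seq in beam:
            prev = seq[-1] if seq else None
            for t in tags:
                candidates.append((total + score(prev, t, i, words), seq + [t]))
        # prune: keep only the top-k hypotheses
        beam = heapq.nlargest(k, candidates, key=lambda c: c[0])
    return max(beam, key=lambda c: c[0])
```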
[Figure: beam search lattice; the table below shows candidate tag scores at one timestep]
<table>
<thead>
<tr>
<th>Tag</th>
<th>Value</th>
</tr>
</thead>
<tbody>
<tr>
<td>VBD</td>
<td>+1.2</td>
</tr>
<tr>
<td>NNP</td>
<td>+0.9</td>
</tr>
<tr>
<td>VBN</td>
<td>+0.7</td>
</tr>
<tr>
<td>NN</td>
<td>+0.3</td>
</tr>
<tr>
<td>VBZ</td>
<td>+1.2</td>
</tr>
<tr>
<td>NNS</td>
<td>-1.0</td>
</tr>
<tr>
<td>DT</td>
<td>-5.3</td>
</tr>
<tr>
<td>PRP</td>
<td>-5.8</td>
</tr>
</tbody>
</table>
- Hypotheses below the top-$k$ cutoff (illustrated on “Fed raises”) are not expanded
- Maintain a priority queue to add new hypotheses efficiently
How good is beam search?
- $k=1$: greedy search
- Choosing beam size:
- 2 is usually better than 1
- Usually don’t use larger than 50
- Depends on problem structure
- If beam search is much faster than computing full sums, can use structured SVM instead of CRFs, but we won’t discuss that here
Next Time
- Neural networks
Inclusive Educational Review of Software Architectural Styles and Patterns for the Students of the College of Information and Computing Sciences of Cagayan State University
Freddie P. Masuli\textsuperscript{1}
Lourdes M. Padirayon\textsuperscript{2}
Manny S. Alipio\textsuperscript{3}
Daniel T. Ursulum\textsuperscript{4}
Grecilia A. Callitong\textsuperscript{5}
Segundo D. Pacris Jr.\textsuperscript{6}
Journal for Educators, Teachers and Trainers, Vol. 14 (4)
https://jett.labosfor.com/
Date of reception: 10 Jan 2023
Date of revision: 19 Mar 2023
Date of acceptance: 22 Mar 2023
\textsuperscript{1,2,3,4,5,6} Cagayan State University Sanchez Mira, College of Information and Computing Sciences (CICS) Centro 02, Sanchez Mira, Cagayan, 3518 Philippines
*Corresponding Author
Email: desmpadirayon@csu.edu.ph
ABSTRACT
A good architectural design contributes greatly to the success of a system. Such a design is also useful to Information Technology (IT) students as the basis for the software development component of their capstone projects, and choosing an inappropriate architecture can lead to disastrous consequences for IT student researchers. A detailed understanding of software architecture styles is therefore valuable for analyzing distributed and complex systems, which are the current trend in capstone projects. This paper explores the quality attributes of three architecture styles, namely shared-nothing, broker, and representational state transfer, which are perceived as beneficial to distributed system architecture and can serve as a guide to student researchers. The aim is to provide a picture of these three key software architecture styles that is helpful not only to student researchers but also to software developers, adding references that reduce the uncertainty in selecting the appropriate architectural style for a specific need. An architectural style must be chosen correctly to obtain all of its benefits in a system. In this paper, the three architectural styles are compared on the basis of quality attributes derived from the ISO 9126-1 standard: functionality, reliability, usability, efficiency, maintainability, and portability. The results of the study can guide student researchers in their capstone projects and reduce the number of unsuccessful attempts at the software development component of those projects.
Keywords: Architecture Styles, Broker architecture, Capstone Project, Shared-nothing, Software developer, Representational State Transfer (ReST)
INTRODUCTION
Capstone Project Mandates of ABET
The Accreditation Board for Engineering and Technology (ABET, 2016) mandates an outcome-based evaluation of graduating engineers' competence in using technical and other professional skills to solve real-world engineering challenges. The widespread implementation of capstone courses has assisted in developing and enhancing these required skill sets (Omar, 2014).
Engineering programs incorporate capstone projects to combine multi-disciplinary subjects and teach professional abilities that are difficult to convey in a standard lecture course. Because these projects help transition students into professional engineers, they have a direct effect on a university's industry reputation and ranking (Ward, 2013).
Todd et al. (1995), who conducted a survey of capstone engineering courses in North America, discovered that many engineering programs use senior design/capstone-type courses to prepare students for engineering practice and that a significant number of institutions partner with industrial clients to sponsor capstone projects. They concluded that the intensive faculty investment produced competent engineering graduates, which was beneficial.
In addition, the nation's present emphasis on preparing undergraduate students for engineering practice has attracted significant attention to the quality of capstone projects, which are used to evaluate the efficacy of an engineering degree. The emphasis on quality capstone projects is part of an effort not just to guarantee that graduates are adequately prepared for engineering practice, but also to raise and broaden the professional competence of the engineering workforce.
Mosher (2015) suggested that in order to develop a high-quality capstone course for technology undergraduates, it is necessary to have appropriate scoping and planning of the project with the client beforehand, ownership and buy-in from students through controlled project and team selection, a high tolerance for ambiguity and uncertainty as students work through the details of the project, and balanced methods of individual and group accountability. The literature demonstrates that thorough planning and execution are necessary for a successful capstone project.
ABET (2016) stresses the importance of engineering programs that equip students with the skills to recognize, articulate, and propose engineering solutions to industrial challenges as well as to contemporary social or global concerns. In other words, ABET promotes the importance of cooperation, communication, and project-based engineering courses. These culminating courses are meant to help students develop and improve these specific skill sets (Franchetti, 2012). The engineering design process taught to students incorporates the development of analytical, critical thinking, synthesis, and communication skills, which are vital to the industry.
Depending on the course or the general design of the program, capstone design projects may be undertaken independently or in groups. Since cooperation is regarded as an essential ability for success in the working world, the majority of capstone design courses require students to complete a design project in a group context (Zhou & Pazos, 2014).
### Software Architecture
Software architecture is the core of software systems defining its sections and their connections (Yang et al., 2021). One of the major design tasks in building enterprise applications is to design good software architecture (Kotusev et al., 2022). Software architecture is the structural solution that accomplishes the general technical and operational requirements for software developments (Bamhdi, 2021). Software architecture is part of the early design stage which comes just after specifying all the requirements in a certain project and just before the design phase (Tian et al., 2022). Software architects incorporate architectural styles in the planning and organizing of the components of a complete system to meet and achieve the requirements of the customer. Additionally, it was developed to solve common problems which arise in developing software systems. Foremost, software architecture decomposes a system into a group of different components and then develops components and related connectors, and finally selects an architecture style and pattern (Bamhdi, 2021). When the architecture style is specified, designers need to determine the extent to which features of the software architecture influence quality attributes.
A quality attribute is a property of a system that can be tested and is used to indicate how well the system satisfies the needs of its stakeholders. Generally, software quality is considered during the early stages of the software development process, to lessen risks and to achieve the success of the overall software system (Sharma et al., 2015), such as a distributed system. This also motivates designing achievable architectural representations for detailed software system development, because such representations allow assessment of the different functional and non-functional properties of a designed system.
Interconnecting the computing services of different layers across multiple cloud providers is indispensable for meeting the exponentially increasing demands of enterprises. Because of a highly dynamic and unpredictable environment, cloud providers face numerous challenges related to provisioning diversified applications while handling critical management operations on the resource layers. To set up interconnected clouds, proper configuration and uniform development of architectural components and protocols are required, as these play an important role in management. Some studies have proposed techniques to increase the capabilities of a standalone source, capabilities that influence interconnected cloud computing management from multiple perspectives, drawing the resource management features of architectural components from several sources while maintaining service quality metrics in service provisioning. Numerous architectural projects have been offered to illustrate the challenges and future perspectives (Latif et al., 2020).
Architectural approaches for achieving a common goal are very popular today because of the many advantages users can derive from them, as mentioned by Garcés et al. (2021), although their use has decreased over the previous decade (Slavin, 2023). Several architectural styles and patterns are available today for distributed systems, such as shared-nothing architecture, broker, and representational state transfer (Tian et al., 2022). Designers need to recognize which particular architecture style is suitable for a given distributed project in order to implement a software project successfully. An architectural style must be chosen correctly to take advantage of all its benefits as applied in a distributed system. The fast release and delivery of next-generation software, which is the software industry's core goal these days, invites mistakes in the software development process (Yang et al., 2021).
### The Cagayan State University at a glance
The Cagayan State University (CSU) is the sole state university in the province of Cagayan and one of the top higher education institutions in Region 02 in the Philippines. The university is required by its charter to fulfill its thrusts in instruction, research, extension, and production. CSU is made up of eight campuses strategically located throughout the province in order to maximize its human and natural resources. Through the execution of its missions, it acts as a catalyst for regional growth by offering top-notch educational services, conducting scientific research, and packaging, shipping, and commercializing technologies through outreach programs to the community (Cagayan State University, 2021).
Figure 1: The Map of Cagayan State University (source: Cagayan State University | Official Website, n.d.)
The fundamental goal of the university is to transform CSU into an agent of positive change that enhances the quality of life for both individuals and communities. The one-liner vision effectively conveys the deep significance and ultimate goal of the university's joint efforts and educational goals. Through excellent instruction and creative research, development, production, and extension, Cagayan State University aims to alter individuals' lives as well as those of their communities. The Cagayan State University, a reputable and eminent center of higher learning in Northern Luzon, is dedicated to enhancing the quality of life for individuals and communities by offering advanced education in the humanities, agriculture, and natural sciences as well as in technological and professional fields. This is done by delivering high-quality instruction and implementing cutting-edge research, resource mobilization, and extension methods. By offering high-quality education and training through instruction, research, extension, and manufacturing, CSU principally contributes to President Aquino's Social Contract to the Filipino people. (Cagayan State University | Official Website, n.d.)
The university is guided by core values such as Competence; Critical Thinking; Creative Problem Solving; Competitive Performance nationally, regionally, and globally; Social Responsibility; Sensitivity to Ethical Demands; Stewardship of the Environment for Future Generations; and Advocacy for Social Justice and Economic Equity (Cagayan State University | Official Website, n.d.).
One of the campuses that offer a Bachelor's degree in Information Technology is the Sanchez Mira Campus. The college has had more than 100 students from 2019 to the present, and it has produced graduates who have landed good jobs related to their fields. The college requires a capstone course of students before graduation. A total of 14 groups were unsuccessful in presenting their capstone projects due to failure to meet the development requirements, very poor system features, improper implementation of the software methodology, and poor database design.
This motivated the researchers to undertake the present inclusive educational study to help prevent unsuccessful capstone projects. It provides a picture of the three key software architecture styles for distributed systems, which could be helpful to software developers by adding references that minimize the uncertainty in selecting the appropriate architectural style for their specific needs. The architectural styles are compared based on the ISO 9126-1 quality attributes: functionality, reliability, usability, efficiency, maintainability, and portability. Many businesses require dependable software architectures to meet the quality demands of newly developing technologies, and such businesses are the likely target beneficiaries of the student researchers.
**METHODOLOGY**
This section describes the process used to perform a systematic literature review identifying software architectural styles and patterns. A literature review assessing the collective evidence, following Snyder (2019), was conducted mainly in Google Scholar, Scopus, and Web of Knowledge. It involved a thorough review of existing studies in the area of software architectural styles and patterns. Following the guidelines of Banijamali et al. (2020), the review was based on articles concerning software architectural styles and patterns. From the research questions, this study derived the keywords: software architecture, architecture for distributed systems, architecture patterns, and styles. The literature search on software architecture was not restricted to a particular period, although most of the retrieved works date from the last four to five years. Case reports, review articles, and studies found by the keywords, together with references taken from their bibliographies, were short-listed, as were architecture definitions. A total of 22 candidate software architectures were initially reviewed; however, only fifteen of them met the study requirements. These requirements refer to the description of a software architecture for distributed systems used as the basis for something being constructed.
Each of the remaining papers was read in its entirety. Although there is a large body of research related to software architecture in the broader Information Technology context, the papers presented in this literature review are those specifically related to software architecture for distributed systems. After reading each of the selected papers, only three remained; Table 1 lists the papers included in this literature review.
### Table 1. List of Included Publications
<table>
<thead>
<tr>
<th>ID</th>
<th>Description</th>
<th>Study type</th>
<th>Focus</th>
<th>Paper</th>
</tr>
</thead>
<tbody>
<tr>
<td>P1</td>
<td>The Broker Architectural Framework</td>
<td>Case Study</td>
<td>complex software system</td>
<td>(Varadharajan et al., 2022)</td>
</tr>
<tr>
<td>P2</td>
<td>Shared-Nothing Architecture</td>
<td>Case Study</td>
<td>Web applications have an application server, such as Tomcat, Apache + Mod_PHP</td>
<td>(Enia & Martella, 2019)</td>
</tr>
<tr>
<td>P3</td>
<td>Applying representational state transfer (REST) architecture to archetype-based electronic health record systems</td>
<td>Case Study</td>
<td>Electronic Health Records (EHRs)</td>
<td>(Sundvall et al., 2013)</td>
</tr>
</tbody>
</table>
### RESULTS AND DISCUSSIONS
#### Architecture Styles and Patterns
Managing shared memory windows can achieve performance levels comparable to state-of-the-art Message Passing Interface (MPI) implementations (Quaranta & Maddegedara, 2021), thus lowering volatility in execution times and boosting process synchronization, especially in conditions with many nodes (Quaranta & Maddegedara, 2021b). Service-oriented architecture is the most frequently applied and most investigated style across applicable quality attributes such as scalability, timeliness, and security, and analyses of the relationship between architectural patterns, styles, views, and evaluation methodologies with respect to different quality attributes and application areas (Banijamali et al., 2020b) remain challenges to address. To address these challenges, software architecture style has become a necessary discipline: architectures are used to develop a thorough understanding of the system and to ensure acceptable quality.
#### Architectural Styles in Application Type
Different architectural styles, such as Shared Memory, Messaging, Distributed Systems, Adaptable Systems, and Modern Systems, are used in distributed software development. Designing software requires a careful selection that supports the understanding of the defined architectural approaches across sites.
Shared Memory introduced the capacity to run large-scale simulations with realistic particle shapes on platforms readily accessible to many (Park et al., 2021). One big data tool was built upon a computing framework to boost performance and to implement a scalable error correction algorithm intended to run on commodity hardware (Expósito et al., 2020).
Distributed systems are used as a solution for models that require long run times and large memory, which strongly limits their application [20]. Distributed real-time architecture platform services for application development establish end-to-end channels over hierarchical, heterogeneous, and mixed-criticality networks, respecting mixed-criticality safety and security requirements (Obermaisser, 2018). Messaging bridges the gap between the abstract representation of communication styles and technologies, stakeholders, and a multitude of application domains (Rouland et al., 2020), where data from new documents is used to create new models, and big data analytics services handle the growing data volumes in all datasets and infer new knowledge from the connected data sources (Sadek et al., 2022). Communication mechanisms are critical for architecture implementation; for various frameworks, many deployment mechanisms and communication patterns appear to be useful.
Adaptable Systems as an architectural style realize flexibility: each unit adapts its behavior and interactions to operating conditions and cooperates with other units (Weyns & Oquendo, 2019), so a system can adapt to a change in its environment (Farshidi et al., 2020), and several interacting units cooperatively perform the system tasks (Affonso et al., 2019). This style is also an important way to support the development, standardization, and evolution of software systems.
Modern Systems use distributed monitoring for reconfigurable computer systems, offering nonstop status diagnostics of the computational unit, mechanisms for saving the low-productivity periods of equipment, and minimization of problems when worst-case conditions are sensed (Danilov et al., 2016).
**Broker architecture**
A broker architecture pattern is used to structure distributed systems by decoupling components that interact through remote service invocations (Ekblom, 2011). It also underpins data integration frameworks designed to dynamically retrieve and transform heterogeneous data from different sources into a common format to provide an integrated view. Communication is mediated by the broker component, which is responsible for forwarding the client's request to the server and transmitting results and exceptions back (Gruner et al., 2021).
The pattern addresses issues that arise when scaling a large system to many components: components must be decoupled and distributed; services are required for adding, removing, activating, and locating components at run time; and designers of individual components should not need to know about the others. Brokers are used to mediate between clients and servers: clients send requests to a broker; the broker locates an appropriate server, forwards the request, and relays results back to the client; client-side or server-side proxies are also allowed. Likewise, a digital assistance platform based on an adaptive workflow architecture is designed to individualize worker assistance; this demands a set of operators to adapt the underlying sequence flow and task instructions, expressed as workflow models, to individualize the resulting assistive action (Oestreicher et al., 2021). Lastly, implementations of this architecture include the Common Object Request Broker Architecture (CORBA) and OLE/DCOM/ActiveX. Multi-agent systems are often coordinated through brokers such as JADE that provide a standard mechanism for relaying messages based on a high-level communication protocol; individual agents may be implemented in any language as long as they can perform input/output according to the protocol. See Figure 2 for the broker architecture.

**A shared-nothing architecture**
The shared-nothing architecture is a distributed computing architecture in which nodes are networked to form a scalable system (C. C. Yang et al., 2008). In this environment, each system has its own private memory and its own disks.
The study of Sievi-Korte et al. (2019) described that the clustered processors communicate by passing messages through a network that interconnects the computers, and requests from clients are automatically routed to the system that owns the resource. Only one of the clustered systems has access to and responsibility for a particular resource at a time. In Figure 3, each node has its own private memory (RAM), processor (CPU), and storage devices (disk), independent of any other node in the configuration, which means that every node stores its own lock table and buffer pool (Karabey Aksakalli et al., 2021).
Figure 3: Shared Nothing Architecture (“6 Distributed Systems,” 1987)
The study of Ramirez et al. (2018) identified three of the most popular databases that support a shared-nothing model using different strategies. The first is Oracle, which operates range and hash partitioning of tables on shared-nothing clusters (Bednar & Robertson, 2006); Oracle uses table- and tablespace-level segregation of data based either on a hashing algorithm or on a range of key values. Another implementation of a shared-nothing model is DB2 UDB, where a partition key is used to partition the data: rows are assigned to partitions, and each partition has entire control of its rows. If another partition wishes to read or update a row, it must send the request to the owning partition, which then executes the command on behalf of the requestor. Finally, the third implementation is SQL Server, which utilizes distributed partitioned views to implement the shared-nothing architecture.
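As a rough illustration of hash partitioning in a shared-nothing cluster (the node layout and routing function below are assumptions for the sketch, not the actual Oracle, DB2, or SQL Server internals), each row is routed by its partition key to exactly one owning node:

```python
import hashlib

NODES = ["node0", "node1", "node2"]        # hypothetical cluster
storage = {node: {} for node in NODES}     # each node's private data

def owner(partition_key: str) -> str:
    """Route a row to the single node that owns its partition."""
    digest = hashlib.sha256(partition_key.encode()).digest()
    return NODES[int.from_bytes(digest[:4], "big") % len(NODES)]

def put(key: str, value: str) -> None:
    # Only the owning node ever touches the row.
    storage[owner(key)][key] = value

def get(key: str) -> str:
    # A foreign reader must ask the owner to execute on its behalf.
    return storage[owner(key)][key]

put("customer:42", "Alice")
assert get("customer:42") == "Alice"
```

Since no lock table or buffer pool is shared, adding a node only changes the routing table, which is where the style's scalability claim comes from.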
Representational State Transfer (ReST) Architecture
Representational State Transfer (REST) is an architecture style originally designed by Fielding in the 1990s to support the high performance and scalability requirements of the hypermedia environment, and as a proposal to redesign the use of the Hypertext Transfer Protocol and Uniform Resource Identifiers (Whang et al., 2020). The World Wide Web represents the largest deployment of a system compliant with the REST architectural style.
The study of Lee (2011) discussed that REST behaves like a virtual state machine, where a state transition happens when the user selects a link, resulting in the next state of the application being transferred to the user. The study emphasized that the key characteristic of the REST architecture is that it takes a "resource view" of the world, and described the RESTful principles as follows:

- P1: Resources are identified by a URI;
- P2: Separation of the abstract resource and its concrete representations;
- P3: Stateless interaction, where each interaction contains all the necessary context information and metadata;
- P4: A small number of operations with distinct semantics based on HTTP methods: safe operations; non-safe, idempotent operations; and non-safe, non-idempotent operations (POST);
- P5: Idempotent operations and representation metadata support caching;
- P6: Promotion of intermediaries such as proxies, gateways, or filters that alter or restrict requests and responses based on metadata.
REST's functionality leads to four main subjects (Rajan, 2010): resources, representations, uniform interface, and state transfer. Resources are given an exclusive identity with a URI; data sent to or from a resource is conveyed as a representation; and a set of uniform methods operates on the resource. The communication is stateless, but the client keeps the state of its workflow by navigating different resources, while the server tracks the state of the values of the resources.
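A minimal in-memory Python sketch of this resource view (the URIs and fields are invented for the sketch, and the dictionary stands in for a real HTTP server): each resource is named by a URI, manipulated only through a small set of uniform methods, and every call carries all the context it needs:

```python
import json

# In-memory stand-in for a REST server: URI -> representation (JSON text).
resources = {"/books/1": json.dumps({"title": "REST in Practice"})}

def GET(uri: str) -> dict:
    # Safe and idempotent: reading never changes resource state.
    return json.loads(resources[uri])

def PUT(uri: str, representation: dict) -> None:
    # Non-safe but idempotent: repeating the call has the same effect.
    resources[uri] = json.dumps(representation)

def DELETE(uri: str) -> None:
    resources.pop(uri, None)

# Stateless interaction: each call names the resource URI explicitly;
# the client, not the server, tracks where it is in its workflow.
book = GET("/books/1")
book["title"] = "RESTful Web Services"
PUT("/books/1", book)
assert GET("/books/1")["title"] == "RESTful Web Services"
```

The uniform interface is why intermediaries (principle P6) can cache or filter traffic without understanding the application: the method alone says whether a call is safe or idempotent.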
Quality Attributes
Software quality in use (QinU) is the perception of software in its context of use (Williamson et al., 2022). In the context of software measurement, ISO/IEC/IEEE 15939 (Souza-Pereira et al., 2022) identifies a process including the definition of a suitable set of measures that address specific information needs, but it does not provide the set of measures to be used as part of the measurement plan (Souza-Pereira et al., 2022b). Despite the importance that quality has in the development of successful software products (Anon, 2021a), the management of quality requirements is still an open challenge (López et al., 2022), and software quality improvement through enhanced software engineering and advanced software testing still has shortcomings (Kokol, 2021). Thus, measures of quality assurance are determined by standards of the software quality lifecycle and by a quality model with defined parameters for quality evaluation. Several models, such as Boehm's, McCall's, FURPS, ISO 9126, and Dromey's, have been developed for quality evaluation using hierarchically related characteristics of quality indicators (Yadav, 2020). Assessing software products involves quality models ranging from simple hierarchic decomposition techniques to complex meta-models that cope with the abstract notion of software quality (Gall et al., 2021). The definitions of the quality attributes (Tian et al., 2022) of the ISO 9126-1 standard for software quality measurement, which were used to compare the three software architecture styles, are shown in Table 2.
Table 2: Characteristics and Sub-Characteristics of ISO 9126-1 Quality Model (Castillo-Salinas et al., 2020)
<table>
<thead>
<tr>
<th>Attributes</th>
<th>Definitions</th>
<th>Sub-Characteristics</th>
</tr>
</thead>
<tbody>
<tr>
<td>Functionality</td>
<td>The capability of the software product to provide functions that meet stated and implied needs when the software is used under specified conditions (Ikram et al., 2021).</td>
<td>Suitability, Accuracy, Interoperability, Security, Compliance</td>
</tr>
<tr>
<td>Reliability</td>
<td>The capability of the software product to maintain its level of performance under stated conditions for a stated period (Ikram et al., 2021).</td>
<td>Maturity, Fault tolerance, Recoverability, Compliance</td>
</tr>
<tr>
<td>Usability</td>
<td>The capability of the software product to be understood, learned, used and attractive to the user, when used under specified conditions (Zhu & Pham, 2018).</td>
<td>Understandability, Learnability, Operability, Compliance</td>
</tr>
<tr>
<td>Efficiency</td>
<td>The capability of the software product to provide appropriate performance, relative to the number of resources used, under stated conditions (Babo et al., 2021)</td>
<td>Time behavior, Resource behavior, Compliance</td>
</tr>
<tr>
<td>Maintainability</td>
<td>The capability of the software product to be modified. Modifications may include corrections, improvements, or adaptations of the software to changes in the environment and the requirements and functional specifications (Fitrisia & Hendradjaya, 2014)</td>
<td>Analysability, Changeability, Stability, Testability, Compliance</td>
</tr>
<tr>
<td>Portability</td>
<td>The capability of the software product to be transferred from one environment to another. The environment may include organizational, hardware, or software environment (Fitrisia & Hendradjaya, 2014)</td>
<td>Adaptability, Installability, Co-existence, Replaceability, Compliance</td>
</tr>
</tbody>
</table>
Comparison of the Architecture Styles
Table 3 summarizes the comparison of the three architecture styles used for distributed systems. They are compared in terms of functionality, reliability, usability, efficiency, maintainability, and portability.
Table 3: Summary of the comparison of the architecture styles for distributed systems
<table>
<thead>
<tr>
<th></th>
<th>Broker</th>
<th>Shared-nothing</th>
<th>REST</th>
</tr>
</thead>
<tbody>
<tr>
<td>Functionality</td>
<td>+</td>
<td>+</td>
<td>+</td>
</tr>
<tr>
<td>Reliability</td>
<td>- -</td>
<td>+</td>
<td>++</td>
</tr>
<tr>
<td>Usability</td>
<td>+</td>
<td>+</td>
<td>+</td>
</tr>
<tr>
<td>Efficiency</td>
<td>- -</td>
<td>+</td>
<td>+</td>
</tr>
<tr>
<td>Maintainability</td>
<td>+</td>
<td>++</td>
<td>- -</td>
</tr>
</tbody>
</table>
### Legend:
<table>
<thead>
<tr>
<th>Measure</th>
<th>Remarks</th>
</tr>
</thead>
<tbody>
<tr>
<td>++</td>
<td>The attribute (or sub-characteristic) is an asserted advantage of the architectural style</td>
</tr>
<tr>
<td>+</td>
<td>At least one of the sub-characteristics is evident in the application of the architecture style to distributed systems</td>
</tr>
<tr>
<td>-</td>
<td>No sub-characteristic is evident in the application of the architecture style to distributed systems</td>
</tr>
<tr>
<td>- -</td>
<td>The attribute (or sub-characteristic) is an asserted disadvantage of the architectural style</td>
</tr>
</tbody>
</table>
Based on Table 3, the broker architecture style is easy to maintain because of its flexibility: it allows dynamic change, addition, deletion, and relocation of objects, and automatic activation of applications to scale with the message volume. The style is also highly portable, because components can be written in different programming languages, and message locking allows more than one instance of an application to process messages from the same queue without explicit synchronization. Its functionality and usability stem from database integration, which enhances application performance and simplifies administration. On the other hand, reliability and efficiency are restricted for this style because of indirection, high communication cost, and low fault tolerance, which may require object replication to achieve higher fault tolerance.
Shared-nothing architecture has high reliability because, in contrast to a central entity controlling the network, it eliminates any single point of failure and allows self-healing capabilities. Fault tolerance is high because failures are local: if one node fails, the others stay up. Shared-nothing is popular for web development because of its scalability; the system can grow almost infinitely simply by adding nodes in the form of inexpensive computers, since there is no single bottleneck to slow the system down. This also makes the shared-nothing architecture easy to maintain. Moreover, its portability provides an advantage: it offers non-disruptive upgrades with no need for reboots, because the whole system does not have to be restarted when the update process completes. For these reasons, Enia and Martella (2019) claimed that shared-nothing systems have no apparent disadvantages compared to the alternatives.
On the other hand, the REST style is very simple, which makes it easy to use. It advocates that information should be logically divided into linked resources, where each resource is identified by a URI and operations on resources are performed through the methods of HTTP. Content negotiation is used to deliver different types of representations of a resource to clients, which makes a REST-based system efficient (Banijamali et al., 2020). Reliability and portability are advantages of this style, achieved by reusing components that can be managed and updated without affecting the system as a whole. If deployed over HTTP, it is supported by every major programming language and strengthened by well-tested technology, resulting in a system that is ready for work without requiring heavy machinery. The disadvantage of REST becomes apparent when sensitive information is transferred in a more complex system, where the developer must strongly consider implementing encryption at the application layer. Because representations are chunks of data, the developer cannot request only parts of a document, which makes the REST architecture style hard to maintain.
### CONCLUSIONS
This study provides a picture of the shared-nothing, broker, and representational state transfer (REST) architecture styles, all of which are perceived to be important in distributed system development. The results serve as a guide not only for system developers but also for student researchers. The architectural styles discussed above are compared based on quality attributes: functionality, reliability, usability, efficiency, maintainability, and portability. Each style has its merits and demerits, and one style may not fit all types of applications; quality attributes should be weighed according to the priorities and needs of the application. This paper could be further extended with more comprehensive coverage of these techniques along with their recent developments. The study's findings can help student researchers complete their capstone projects more successfully and with fewer failed attempts at the software development portion of those projects.
|
{"Source-Url": "https://jett.labosfor.com/index.php/jett/article/download/1448/1019/6761", "len_cl100k_base": 7061, "olmocr-version": "0.1.50", "pdf-total-pages": 13, "total-fallback-pages": 0, "total-input-tokens": 40165, "total-output-tokens": 12803, "length": "2e12", "weborganizer": {"__label__adult": 0.0009984970092773438, "__label__art_design": 0.01280975341796875, "__label__crime_law": 0.0008301734924316406, "__label__education_jobs": 0.12335205078125, "__label__entertainment": 0.00043582916259765625, "__label__fashion_beauty": 0.0007257461547851562, "__label__finance_business": 0.00124359130859375, "__label__food_dining": 0.0010080337524414062, "__label__games": 0.0023555755615234375, "__label__hardware": 0.00208282470703125, "__label__health": 0.0015010833740234375, "__label__history": 0.001854896545410156, "__label__home_hobbies": 0.0004725456237792969, "__label__industrial": 0.0011224746704101562, "__label__literature": 0.0020885467529296875, "__label__politics": 0.0005145072937011719, "__label__religion": 0.0015039443969726562, "__label__science_tech": 0.08831787109375, "__label__social_life": 0.0004363059997558594, "__label__software": 0.0158233642578125, "__label__software_dev": 0.73779296875, "__label__sports_fitness": 0.0006728172302246094, "__label__transportation": 0.0012273788452148438, "__label__travel": 0.0006723403930664062}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 50426, 0.0493]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 50426, 0.29876]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 50426, 0.86885]], "google_gemma-3-12b-it_contains_pii": [[0, 1129, false], [1129, 5432, null], [5432, 11144, null], [11144, 15162, null], [15162, 20250, null], [20250, 23435, null], [23435, 27232, null], [27232, 31218, null], [31218, 35929, null], [35929, 40275, null], [40275, 44717, null], [44717, 49029, null], [49029, 50426, null]], "google_gemma-3-12b-it_is_public_document": [[0, 1129, true], [1129, 5432, null], [5432, 11144, null], [11144, 15162, null], [15162, 20250, null], [20250, 23435, null], [23435, 27232, null], [27232, 31218, null], [31218, 35929, null], [35929, 40275, null], [40275, 44717, null], [44717, 49029, null], [49029, 50426, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 50426, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 50426, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 50426, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 50426, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 50426, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 50426, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 50426, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 50426, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 50426, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 50426, null]], "pdf_page_numbers": [[0, 1129, 1], [1129, 5432, 2], [5432, 11144, 3], [11144, 15162, 4], [15162, 20250, 5], [20250, 23435, 6], [23435, 27232, 7], [27232, 31218, 8], [31218, 35929, 9], [35929, 40275, 10], [40275, 44717, 11], [44717, 49029, 12], [49029, 50426, 13]], 
"pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 50426, 0.14525]]}
|
olmocr_science_pdfs
|
2024-11-30
|
2024-11-30
|
a2cd6d8da8ef60a73da39d66c8f2687ff9d6e03a
|
Problem 1: MACs and encryption
Consider the following symmetric encryption scheme
\( (KG, E, D) \). KG chooses an AES key.
\[ E(k, m) := E_{AES}(k, m) || 0^{32}. \]
(\(0^{32}\) stands for a string consisting of 32 zeros.) And the decryption \( D(k, c) \) does the following: Let \( c' || p := c \) where \( p \) has length 32 bits and \( c' \) is all but the last 32 bits of \( c \). \( m := D_{AES}(k, c') \). If \( p = 0^{32} \), then \( D(k, c) \) returns \( m \). If \( p \neq 0^{32} \) and \( k_p = 0 \) (here \( k_p \) is the \( p \)-th bit of the key \( k \)), then \( D(k, c) \) returns \( m \). If \( p \neq 0^{32} \) and \( k_p = 1 \), then \( D(k, c) \) aborts.
(a) Show that \( (KG, E, D) \) can be totally broken using a chosen ciphertext attack.\(^1\) That is, show that it is possible to recover the key \( k \) using a chosen ciphertext attack.
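As a concrete illustration of the kind of query loop such an attack uses (not part of the original problem statement), here is a Python sketch against an assumed decryption oracle; the toy oracle below merely simulates the abort behavior of \( D \), and all names are invented:

```python
# AES-128: key bit positions p = 1 .. 128.
KEY_BITS = 128

def recover_key(dec_oracle) -> str:
    c_prime = bytes(16)                     # an arbitrary AES ciphertext block
    bits = []
    for p in range(1, KEY_BITS + 1):
        padding = p.to_bytes(4, "big")      # a 32-bit value p != 0^32
        try:
            dec_oracle(c_prime + padding)
            bits.append("0")                # no abort  =>  k_p = 0
        except ValueError:
            bits.append("1")                # abort     =>  k_p = 1
    return "".join(bits)

def make_toy_oracle(key_bits: str):
    def dec_oracle(c: bytes) -> bytes:
        p = int.from_bytes(c[-4:], "big")
        if p != 0 and key_bits[p - 1] == "1":
            raise ValueError("abort")
        return b"whatever D_AES returns"    # irrelevant for the attack
    return dec_oracle

k = "0110" * 32
assert recover_key(make_toy_oracle(k)) == k
```

One chosen-ciphertext query per key bit suffices, since the abort/no-abort behavior of \( D \) leaks exactly the bit \( k_p \).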
(b) To avoid the issue, we try to use authentication: Let MAC be an EF-CMA secure MAC. We construct a new encryption scheme \( E' \). The key of this scheme consists of an AES key \( k_1 \) and a MAC-key \( k_2 \). Encryption is as follows: \( E'(k_1 k_2, m) := E(k_1, (MAC(k_2, m), m)) \). Decryption \( D' \) checks the tag \( MAC(k_2, m) \) and aborts if it is incorrect.\(^2\) (This is called MAC-then-encrypt.)
Does \( E' \) withstand chosen ciphertext attacks that reveal the whole key \( k_1 \)? If yes, explain why (without proof). If no, how to attack?
(c) We try to use authentication in another way: Let MAC be an EF-CMA secure MAC. We construct a new encryption scheme \( E'' \). The key of this scheme consists of an AES key \( k_1 \) and a MAC-key \( k_2 \). Encryption is as follows: \( E''(k_1 k_2, m) := MAC(k_2, c) || c \) with \( c := E(k_1, m) \). Decryption \( D'' \) checks the tag \( MAC(k_2, c) \) and aborts if it is incorrect.\(^3\) (This is called encrypt-then-MAC.)
Does \( E'' \) withstand chosen ciphertext attacks that reveal the whole key \( k_1 \)? If yes, explain why (without proof). If no, how to attack?
**Hint:** One of \((b), (c)\) is secure, the other is insecure.
\(^1\)In a chosen ciphertext attack, the adversary is also allowed to submit plaintexts for encryption, not only ciphertexts for decryption.
\(^2\)We assume that you cannot distinguish between an abort due to a wrong tag or an abort of the underlying algorithm \( D \).
\(^3\)We assume that you cannot distinguish between an abort due to a wrong tag or an abort of the underlying algorithm \( D \).
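For orientation, here is a minimal Python sketch of the two compositions, using the standard library's HMAC as a stand-in for the EF-CMA MAC and abstract `enc`/`dec` callables standing in for \( (E, D) \); it illustrates only the order of operations, not which variant is secure:

```python
import hashlib
import hmac

def mac(k2: bytes, data: bytes) -> bytes:
    # Stdlib HMAC stands in for the EF-CMA secure MAC.
    return hmac.new(k2, data, hashlib.sha256).digest()

# MAC-then-encrypt (E'): tag the plaintext, then encrypt tag || message.
def enc_mte(enc, k1, k2, m):
    return enc(k1, mac(k2, m) + m)

def dec_mte(dec, k1, k2, c):
    inner = dec(k1, c)                      # D runs first and may abort
    tag, m = inner[:32], inner[32:]
    if not hmac.compare_digest(tag, mac(k2, m)):
        raise ValueError("abort")           # indistinguishable from D's abort
    return m

# Encrypt-then-MAC (E''): encrypt first, then tag the ciphertext.
def enc_etm(enc, k1, k2, m):
    c = enc(k1, m)
    return mac(k2, c) + c

def dec_etm(dec, k1, k2, tagged):
    tag, c = tagged[:32], tagged[32:]
    if not hmac.compare_digest(tag, mac(k2, c)):
        raise ValueError("abort")           # the tag is checked before D runs
    return dec(k1, c)
```

Note where the tag check happens relative to \( D \) in each variant; that ordering is the thing to think about when answering (b) and (c).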
Problem 2: Authentication in WEP
In the WEP-protocol (used for securing Wifi, now mostly replaced by WPA), messages are “encrypted” using the following procedure: First, a key $k$ is established between the parties $A$ and $B$. (We do not care how, for the purpose of this exercise we assume that this is done securely.) Then, to transmit a message $m$, $A$ chooses an initialization vector $IV$ (we do not care how) and sends $IV$ and $c := keystream \oplus (m \parallel CRC(m))$. Here $keystream$ is the RC4 keystream computed from $IV$ and $k$ (we do not care how).
The function $CRC$ is a so-called cyclic redundancy check, a checksum added to the WEP protocol to ensure integrity. We only give the important facts about $CRC$ and omit a full description. Each bit of $CRC(m)$ is the XOR of some of the message bits. Which message bits are XORed into which bit of $CRC(m)$ is publicly known. (In other words, the $i$-th bit of $CRC(m)$ is $\bigoplus_{j \in I_i} m_j$ for a publicly known $I_i$.)
An adversary intercepts the ciphertext $c$. He wishes to flip certain bits of the message (i.e., he wants to replace $m$ by $m \oplus p$ for some fixed $p$). This can be done by flipping the corresponding bits of the ciphertext $c$. But then, the CRC will be incorrect, and $B$ will reject the message after decryption! Thus the CRC seems to ensure integrity of the message and to avoid malleability. (This is probably why the designers of WEP added it here.)
Show that the CRC does not increase the security! That is, show how the adversary can modify the ciphertext $c$ such that $c$ becomes an encryption of $m \oplus p$ and such that the CRC within $c$ is still valid (i.e., it becomes the CRC for $m \oplus p$).
**Hint:** Think of how the $i$-th bit of $CRC(m \oplus p)$ relates to the $i$-th bit of $CRC(m)$. (Linearity!)
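As a numeric sanity check of the linearity hint, the snippet below uses the standard library's CRC-32. Real CRC-32 is affine rather than purely linear over GF(2), so a constant `crc(0...0)` term appears; for the purely linear CRC described in the problem, that term vanishes:

```python
import binascii
import os

def crc(m: bytes) -> int:
    return binascii.crc32(m)

def xor(a: bytes, b: bytes) -> bytes:
    return bytes(x ^ y for x, y in zip(a, b))

m = os.urandom(64)        # the original plaintext
p = os.urandom(64)        # the bit flips the adversary wants to apply
zero = bytes(len(m))

# Affine identity for real CRC-32:
#   crc(m ^ p) == crc(m) ^ crc(p) ^ crc(0...0)
assert crc(xor(m, p)) == crc(m) ^ crc(p) ^ crc(zero)
# Hence the change in the checksum is predictable from p alone, so the
# adversary can flip exactly the matching checksum bits in c as well.
```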
Problem 3: One-way functions
Which of the following are one-way functions? For each function that is a one-way function, explain why (no formal proof required). For each function that is not a one-way function, write an attack in Python. (Code for all the functions, including test code is provided in `owf.py`. You only need to fill in the functions `adv` for attacking function $f_i$.)
**Hint:** Out of the four functions, one is a OWF, the other three are not.
**Note:** You may assume that the RSA assumption holds. And that $E_{AES}$ is a PRF.
**Note:** Remember that to break a one-way function, it is sufficient to find some preimage, not necessarily the “true” one that was fed into the one-way function.
(a) $f_1(x) := 0$ for all $x \in \{0,1\}^n$.
(b) $f_2(N,e,x) := (N,e,x^e \mod N)$ where the domain of $f_2$ is the set of all $(N,e,x)$ where $N$ is an RSA modulus, $e$ is relatively prime to $N$, and $x \in \{0,\ldots,N-1\}$.
(c) $f_3(N,e,x) := x^e \mod N$ where the domain of $f_3$ is the set of all $(N,e,x)$ where $N$ is an RSA modulus, $e$ is relatively prime to $N$, and $x \in \{0,\ldots,N-1\}$.
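For the trivial case (a), the inverter only has to output *some* preimage. A sketch of the corresponding `adv`, whose interface is assumed to mirror the one described for `owf.py` (the real signatures there may differ):

```python
def f1(x: bytes) -> int:
    return 0                 # constant function: forgets its input entirely

def adv_f1(y: int, n: int = 16) -> bytes:
    # Any n-byte string is a preimage of 0, and finding some preimage
    # is all that is required to break one-wayness.
    assert y == 0
    return bytes(n)

assert f1(adv_f1(0)) == 0    # the inverter always succeeds
```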
Problem 4: Tree-based signatures
This problem refers to the tree-based construction of signature schemes from one-time signatures from Construction 7 in the lecture notes. You may assume that Lamport’s signature scheme (Construction 4 in the lecture notes) is used as the underlying one-time signature scheme. (Where all messages are first hashed with a hash function \( H \) before signing with Lamport’s scheme in order to fit in the message space.)
(a) Assume someone has implemented the signature scheme incorrectly as follows: Instead of using randomness from the pseudorandom function \( F \) for the signing and key-generation algorithm, it runs signing and key-generation normally (i.e., as probabilistic algorithms, with fresh randomness each time it is invoked).
Explain how to break the signature scheme. More precisely, show how to sign an arbitrary message \( m \) by performing only signature queries for messages \( m' \neq m \).
**Note:** Be explicit: describe all the actions and computations the adversary has to perform. (E.g., give the adversary in pseudocode.) It is not sufficient to say something like: “since two signatures are produced using the same key with a one-time signature scheme, the adversary can break the scheme”. Remember that the underlying scheme is Lamport’s one-time signature scheme.
(b) **Bonus problem:** Lamport’s signature scheme has public keys consisting of \( 2\eta \) \( \eta \)-bit blocks (assuming that the one-way function \( f \) has domain and range \( \{0, 1\}^\eta \)). But it signs only messages consisting of a single \( \eta \)-bit block. In the tree-based construction, we need to sign two Lamport public keys, i.e., \( 4\eta \) \( \eta \)-bit blocks. Normally we solve this by converting Lamport’s scheme into a one-time signature scheme for long messages by hashing the messages to be signed.
Here we explore a different possibility. Instead of hashing the \( 4\eta \times \eta \) bits, we XOR the blocks together. That is, from Lamport’s scheme \((KG_{\text{Lamport}}, \text{Sign}_{\text{Lamport}}, \text{Verify}_{\text{Lamport}})\) we construct a one-time signature scheme \((KG_1, \text{Sign}_1, \text{Verify}_1)\) for \( 4\eta \times \eta \)-bit messages as follows:
\[
KG_1 := KG_{\text{Lamport}}. \quad \text{Sign}_1(sk, m_1 || \ldots || m_{4\eta}) := \text{Sign}_{\text{Lamport}}(sk, \bigoplus_{i=1}^{4\eta} m_i) \quad \text{for} \quad m_1, \ldots, m_{4\eta} \in \{0, 1\}^\eta.
\]
\[
\text{Verify}_1(pk, m_1 || \ldots || m_{4\eta}, \sigma) := \text{Verify}_{\text{Lamport}}(pk, \bigoplus_{i=1}^{4\eta} m_i, \sigma).
\]
Now we can construct the tree-based signature scheme \((KG_\text{tree}, \text{Sign}_\text{tree}, \text{Verify}_\text{tree})\) from \((KG_1, \text{Sign}_1, \text{Verify}_1)\) without needing a hash function (as in Construction 7 in the lecture notes).
Your task: Break the resulting \((KG_\text{tree}, \text{Sign}_\text{tree}, \text{Verify}_\text{tree})\).
**Note:** It is not sufficient to just show that \((KG_1, \text{Sign}_1, \text{Verify}_1)\) is insecure. You have to break \((KG_\text{tree}, \text{Sign}_\text{tree}, \text{Verify}_\text{tree})\). All the other comments from the note of \((a)\) also apply.
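As a hint at where to look (a sketch under assumed parameters, with `eta` reinterpreted in bytes for brevity): the XOR compression inside \( \text{Sign}_1 \) is far from collision-resistant, since many distinct block sequences share the same XOR:

```python
import os
from functools import reduce

eta = 16                     # block size (bytes here, eta bits in the text)

def xor(a: bytes, b: bytes) -> bytes:
    return bytes(x ^ y for x, y in zip(a, b))

blocks = [os.urandom(eta) for _ in range(4 * eta)]   # m_1 .. m_{4 eta}

# XOR the same delta into two different blocks: the overall XOR is unchanged.
delta = os.urandom(eta)
forged = list(blocks)
forged[0] = xor(forged[0], delta)
forged[1] = xor(forged[1], delta)

assert forged != blocks
assert reduce(xor, forged) == reduce(xor, blocks)
# Sign_1 would therefore emit the same Lamport signature for both messages.
```

Turning this observation into a forgery against the full tree-based scheme is the part the problem asks you to spell out.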
Problem 5: Encoding messages for ElGamal (bonus problem)
The message space of ElGamal (when using the instantiation that operates modulo a prime \( p > 2 \) with \( p \equiv 3 \mod 4 \)) is the set \( \text{QR}_p = \{ x^2 \mod p : x = 0, \ldots, p - 1 \} \).
The problem is now: if we wish to encrypt a message \( m \in \{0, 1\}^\ell \) (with \( \ell \leq |p| - 2 \)), how do we interpret \( m \) as an element of \( \text{QR}_p \)?
One possibility is to use the following function \( f : \{1, \ldots, \frac{p-1}{2}\} \to \text{QR}_p : \)
\[
f(x) := \begin{cases}
x & \text{if } x \in \text{QR}_p \\
-x \mod p & \text{if } x \notin \text{QR}_p
\end{cases}
\]
Once we see that \( f \) is a bijection and can be efficiently inverted, the problem is solved, because a bitstring \( m \in \{0, 1\}^\ell \) can be interpreted as a number in the range \( 1, \ldots, \frac{p-1}{2} \) by simply interpreting \( m \) as a binary integer and adding 1 to it. (I.e., we encrypt \( f(m + 1) \).)
We claim that the following function is the inverse of \( f \):
\[
g(x) := \begin{cases}
x & \text{if } x = 1, \ldots, \frac{p-1}{2} \\
-x \mod p & \text{if } x \neq 1, \ldots, \frac{p-1}{2}
\end{cases}
\]
We thus need to show the following: the range of \( f \) is indeed \( \text{QR}_p \), and that \( g(f(x)) = x \) for all \( x \in \{1, \ldots, \frac{p-1}{2}\} \).
(a) Show that \( f(x) \in \text{QR}_p \) for all \( x \in \{1, \ldots, \frac{p-1}{2}\} \).
**Hint:** You can use (without proof) that \(-1 \notin \text{QR}_p \) (this only holds in \( \text{QR}_p \) for \( p \) prime with \( p \equiv 3 \mod 4 \)). And that the product of two quadratic non-residues is a quadratic residue (this only holds in \( \text{QR}_p \), but not in \( \text{QR}_n \) for \( n \) non-prime).
(b) Show that \( g(f(x)) = x \) for all \( x \in \{1, \ldots, \frac{p-1}{2}\} \).
(This then shows that \( f \) is injective and efficiently invertible. Bijectivity follows from injectivity because the domain and range of \( f \) both have the same size.)
**Hint:** Make a case distinction between \( x \in \text{QR}_p \) and \( x \notin \text{QR}_p \). Show that for \( x \in \{1, \ldots, \frac{p-1}{2}\} \) it holds that \(-x \mod p \notin \{1, \ldots, \frac{p-1}{2}\} \).
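A small numeric sketch of \( f \) and \( g \), testing membership in \( \text{QR}_p \) with Euler's criterion \( x^{(p-1)/2} \equiv 1 \pmod{p} \); the prime below is a toy choice with \( p \equiv 3 \bmod 4 \):

```python
p = 23                            # toy prime with p % 4 == 3

def is_qr(x: int) -> bool:
    # Euler's criterion for 1 <= x <= p - 1.
    return pow(x, (p - 1) // 2, p) == 1

def f(x: int) -> int:             # domain: 1 .. (p-1)//2
    return x if is_qr(x) else (-x) % p

def g(y: int) -> int:             # the claimed inverse of f
    return y if 1 <= y <= (p - 1) // 2 else (-y) % p

for x in range(1, (p - 1) // 2 + 1):
    assert is_qr(f(x))            # the range of f lies in QR_p
    assert g(f(x)) == x           # g inverts f
```

The assertions are exactly the two statements (a) and (b) that the problem asks you to prove in general.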
Problem 6: Security proofs (bonus problem)
Recall the definition of IND-OT-CPA (Definition 4 in the lecture notes). There, we defined security by saying that if the adversary tries to guess whether \( m_0 \) or \( m_1 \) was encrypted, his guess will be correct with probability approximately \( 1/2 \).
Consider the following variant of the definition:
\footnotetext[4]{You do not actually need to use this fact, but the hint that \(-1 \notin \text{QR}_p \) below is only true in this case.}
Definition 1 (IND-OT-CPA – variant) An encryption scheme \((KG, E, D)\) is IND-OT-CPA' secure if for any polynomial-time algorithm \(A\) there is a negligible function \(\mu\), such that for all \(\eta \in \mathbb{N}\) we have that
\[
\left| \Pr[b^* = 1 : k \leftarrow KG(1^{\eta}), (m_0, m_1) \leftarrow A(1^{\eta}), c \leftarrow E(k, m_0), b^* \leftarrow A(1^{\eta}, c)] - \Pr[b^* = 1 : k \leftarrow KG(1^{\eta}), (m_0, m_1) \leftarrow A(1^{\eta}), c \leftarrow E(k, m_1), b^* \leftarrow A(1^{\eta}, c)] \right| \leq \mu(\eta).
\]
(Here we quantify only over algorithms \(A\) that output \((m_0, m_1)\) with \(|m_0| = |m_1|\).)
We wish to prove that if \((KG, E, D)\) is IND-OT-CPA secure, then \((KG, E, D)\) is IND-OT-CPA' secure.
Note: The converse also holds, but we will not prove that.
(a) Assume an adversary \(A\) that breaks IND-OT-CPA' security. Let
\[
\alpha_0(\eta) := \Pr[b^* = 1 : k \leftarrow KG(1^{\eta}), (m_0, m_1) \leftarrow A(1^{\eta}), c \leftarrow E(k, m_0), b^* \leftarrow A(1^{\eta}, c)]
\]
and
\[
\alpha_1(\eta) := \Pr[b^* = 1 : k \leftarrow KG(1^{\eta}), (m_0, m_1) \leftarrow A(1^{\eta}), c \leftarrow E(k, m_1), b^* \leftarrow A(1^{\eta}, c)].
\]
What do we know about \(\alpha_0\) and \(\alpha_1\) (by definition of IND-OT-CPA' and the fact that \(A\) breaks IND-OT-CPA')?
(b) Compute
\[
\beta(\eta) := \left| \Pr[b' = b : k \leftarrow KG(1^{\eta}), b \leftarrow \{0, 1\}, (m_0, m_1) \leftarrow A(1^{\eta}), c \leftarrow E(k, m_b), b' \leftarrow A(1^{\eta}, c)] - \frac{1}{2} \right|.
\]
(As a formula using \(\alpha_0\) and \(\alpha_1\).)
(c) Using \(\alpha_0\) and \(\alpha_1\), show that if \(A\) breaks IND-OT-CPA', then \(A\) breaks IND-OT-CPA.
(Hence: IND-OT-CPA implies IND-OT-CPA'.)
---
\footnotetext[5]{This is not an established name!}
|
{"Source-Url": "https://courses.cs.ut.ee/all/MTAT.07.002/2017_spring/uploads/homework/sheet-05.pdf", "len_cl100k_base": 4239, "olmocr-version": "0.1.53", "pdf-total-pages": 5, "total-fallback-pages": 0, "total-input-tokens": 20986, "total-output-tokens": 4668, "length": "2e12", "weborganizer": {"__label__adult": 0.0006017684936523438, "__label__art_design": 0.00042939186096191406, "__label__crime_law": 0.0017642974853515625, "__label__education_jobs": 0.0025920867919921875, "__label__entertainment": 0.00013744831085205078, "__label__fashion_beauty": 0.00020754337310791016, "__label__finance_business": 0.0003876686096191406, "__label__food_dining": 0.0008177757263183594, "__label__games": 0.001239776611328125, "__label__hardware": 0.0034732818603515625, "__label__health": 0.001308441162109375, "__label__history": 0.00043487548828125, "__label__home_hobbies": 0.0002830028533935547, "__label__industrial": 0.0014162063598632812, "__label__literature": 0.0005941390991210938, "__label__politics": 0.0004642009735107422, "__label__religion": 0.0009546279907226562, "__label__science_tech": 0.314697265625, "__label__social_life": 0.00018334388732910156, "__label__software": 0.0102996826171875, "__label__software_dev": 0.65576171875, "__label__sports_fitness": 0.0005288124084472656, "__label__transportation": 0.0009746551513671876, "__label__travel": 0.000225067138671875}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 13157, 0.02363]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 13157, 0.4142]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 13157, 0.8176]], "google_gemma-3-12b-it_contains_pii": [[0, 2474, false], [2474, 5427, null], [5427, 8626, null], [8626, 11372, null], [11372, 13157, null]], "google_gemma-3-12b-it_is_public_document": [[0, 2474, true], [2474, 5427, null], [5427, 8626, null], [8626, 11372, null], [11372, 13157, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 13157, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 13157, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 13157, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 13157, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, true], [5000, 13157, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 13157, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 13157, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 13157, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 13157, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 13157, null]], "pdf_page_numbers": [[0, 2474, 1], [2474, 5427, 2], [5427, 8626, 3], [8626, 11372, 4], [11372, 13157, 5]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 13157, 0.0]]}
|
olmocr_science_pdfs
|
2024-12-08
|
2024-12-08
|
a9da512eb06ed1dba8bb27a4938b964afdcb5120
|
GPFinder: Tracking the Invisible in Android Malware
Mourad Leslous, Valérie Viet Triem Tong, Jean-François Lalande, Thomas Genet
To cite this version:
HAL Id: hal-01584989
https://hal-centralesupelec.archives-ouvertes.fr/hal-01584989
Submitted on 11 Sep 2017
HAL is a multi-disciplinary open access archive for the deposit and dissemination of scientific research documents, whether they are published or not. The documents may come from teaching and research institutions in France or abroad, or from public or private research centers.
L’archive ouverte pluridisciplinaire HAL, est destinée au dépôt et à la diffusion de documents scientifiques de niveau recherche, publiés ou non, émanant des établissements d’enseignement et de recherche français ou étrangers, des laboratoires publics ou privés.
GPFinder: Tracking the Invisible in Android Malware
Mourad Leslous
EPI CIDRE
Inria, CentraleSupelec, Univ. Rennes 1, CNRS
IRISA UMR 6074,
F-35065 Rennes, France
mourad.leslous@inria.fr
Valérie Viet Triem Tong
EPI CIDRE
CentraleSupelec, Inria, Univ. Rennes 1, CNRS
IRISA UMR 6074,
F-35065 Rennes, France
valerie.viettriemtong@centralesupelec.fr
Jean-François Lalande
INSA Centre Val de Loire
Univ. Orléans
LIFO EA 4022,
F-18020 Bourges, France
jean-francois.lalande@insa-cvl.fr
Thomas Genet
EPI Celtique
Univ. Rennes 1, Inria
IRISA UMR 6074,
F-35065 Rennes, France
thomas.genet@irisa.fr
Abstract
Malicious Android applications use clever techniques to hide their real intents from the user and avoid detection by security tools. They resort to code obfuscation and dynamic loading, or wait for special events on the system like reboot or WiFi activation. Therefore, promising approaches aim to locate, study and execute specific parts of Android applications in order to monitor for suspicious behavior. They rely on Control Flow Graphs (CFGs) to obtain execution paths towards sensitive codes. We claim here that these CFGs are incomplete because they do not take into consideration implicit control flow calls, i.e., those that occur when the Android framework calls a method implemented in the application space. This article proposes a practical tool, GPFinder, exposing execution paths towards any piece of code considered as suspicious. GPFinder takes the Android framework into account and considers explicit and implicit control flow calls to build CFGs. Using GPFinder, we give global characteristics of application CFGs by studying a dataset of 14,224 malware and 2,311 goodware samples. We evaluate that 72.69% of the analyzed malicious samples have at least one suspicious method reachable only through implicit calls.
1 Introduction
The smartphone market has grown rapidly in recent years, and these devices have become omnipresent in daily life. Smartphones mostly run the Android operating system (87% of the smartphone market in the second quarter of 2016 [12]). Naturally, Android has become a major target for malware of numerous types [10]. For its authors, malware is a simple way to make money by sending messages to premium numbers, ransoming the user or remotely controlling the device’s resources. To distribute their malware, authors use repackaging/piggybacking techniques: they inject malicious code into popular applications and redistribute them in alternative markets [8, 15].
To counter the spread of Android malware, many detection approaches have been proposed. Some perform static analysis [6, 18], while others try to observe malicious behavior at runtime [21]. Unfortunately, Android malware uses clever tricks to avoid detection and, in the end, still manages to perform its malicious activities. For instance, to evade static analysis, malware uses code obfuscation [5, 19] and dynamic code loading, where the malicious code is downloaded from a remote server or loaded from a local file. Malware also tries to evade dynamic analysis by executing its malicious code only under certain circumstances, such as checking the country where the smartphone is located, or waiting for a system event, a command from a remote server, or a specific duration [9].
Recent approaches try to automatically characterize malicious behavior [2, 20, 24, 26]. They rely on a combination of static and dynamic analysis: a first static analysis identifies the most suspicious locations in the code, and a subsequent run of the application then targets the execution of the code identified as suspicious.
ConDroid [20] aims to launch suspicious code in the app to reveal malicious activities. It leaves the definition of suspicious code types, such as dynamic loading of code, up to the user. First, ConDroid finds a path from an entry point, such as lifecycle methods and input events, to the suspicious code location. Second, it performs an adaptive concolic execution by instrumenting the app and setting the necessary variable values in order to observe its behavior.
Similarly, GroddDroid [2] automatically triggers and monitors suspicious code. First, it locates code considered suspicious: code that is protected or hidden (ciphered, encoded, obfuscated or dynamically loaded), or that calls a sensitive API method identified in [1], such as sending an SMS. Then, GroddDroid exhibits execution paths from the entry points, such as Activity.onCreate(Bundle), to the suspicious code. Next, the app is instrumented by forcing the necessary branches in the execution path so that the malicious code is reached when the malware is launched.
These approaches strongly rely on the computation of application global control flow graphs (CFGs) that represent all execution paths in the program [3]. Such CFGs are useful only when they are complete or, at least in this context, when they contain the necessary execution paths towards suspicious code. Unfortunately, these approaches do not take all types of execution paths into consideration because they only analyze the application code, which leads to missing paths that pass through the Android framework.
The goal of this article is to automatically exhibit execution paths towards all possible suspicious locations in the code by computing global CFGs with implicit edges. This is implemented in GPFinder (for GroddDroid Path Finder) as the main practical outcome of this work. GPFinder helps security analysts retrieve execution paths that may trigger the malicious code, even when they pass through the Android framework’s callbacks. By studying these execution paths, the security analyst can understand how the suspicious code is protected by triggering conditions. We use GPFinder to study a collection of 14,224 malware samples and show that including implicit calls when building CFGs improves the analysis. We evaluate that 72.69% of the samples have at least one suspicious code location which is only reachable through implicit calls. Furthermore, we analyze the common structures of Android malware, highlighting their favorite entry points and how they use implicit calls.
The rest of the article is structured as follows. Section 2 details the importance of implicit calls in CFGs and Section 3 discusses how to take the Android framework into account. Next, Section 4 details how the implemented tool, GPFinder, takes advantage of CFGs to study the inner structure of an Android malware set. Section 5 discusses the completeness of malware CFGs in the literature. Lastly, Sections 6 and 7 discuss the results and conclude the paper.
2 Execution Paths with Implicit Transitions
Android applications are distributed as archives that contain resource files, native libraries, an application manifest and the Dalvik bytecode. Essential building blocks of an Android app cooperate and may have independent lifecycles: Activities manage screens of the user interface; Services perform long-running background tasks such as playing music; Content Providers manage shared data, such as SQLite databases; and Broadcast Receivers receive system-wide broadcasts announcing events such as SMS reception. Android applications are written mostly in Java and compiled to Dalvik bytecode. The bytecode is stored in a .dex file which is distributed with the resources needed to execute the application. A program in Dalvik bytecode format can easily be translated into the Jimple intermediate representation [23] by Soot [22], which makes it easy to compute a CFG for each method independently, at the granularity of a Jimple statement. In these graphs, a directed edge between a node A and a node B indicates that statement B can be executed immediately after statement A.
Method CFGs constitute an important step towards accurate static analysis. Nevertheless, we are mostly interested in the global, inter-procedural CFG that represents all execution scenarios for the whole application. Obtaining an execution path towards a malicious code location shows how the malware is executed, and how it is protected by triggering techniques. The global graph is constructed by connecting all the method graphs, i.e., by adding edges representing inter-procedural calls. There exist two types of inter-procedural calls: explicit and implicit.
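Mechanically, the stitching step is straightforward; the two call types below only differ in how the inter-procedural edge list is obtained. A minimal sketch, assuming networkx as the graph library and a hypothetical node naming scheme (neither comes from the paper):

```python
import networkx as nx

def build_global_cfg(method_cfgs, call_edges):
    """method_cfgs: dict mapping a method signature to the nx.DiGraph of its
    statement-level CFG; call_edges: iterable of (call_site_node, callee_signature)
    pairs covering both explicit and implicit inter-procedural calls."""
    g = nx.DiGraph()
    for cfg in method_cfgs.values():
        g.add_edges_from(cfg.edges())        # intra-procedural edges
    for site, callee in call_edges:
        g.add_edge(site, (callee, "entry"))  # edge to the callee's first statement
    return g
```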
Explicit Call: A method a() explicitly calls a method b() when the code of a() contains a call (an invoke statement) of the method b(). For example, in Listing 1, the statement run(content) is an explicit call to the method MailTask.run(String). For such a case, we build an edge from the node representing the invoke statement run(content) of the method doInBackground() towards the node containing the first statement in the CFG of the method run(String). This is represented on the right part of Figure 1.
Implicit Call: A method a() implicitly calls a method b() when the following conditions hold:
1. a() contains a call (an invoke statement) of a method c() which is defined in the Android framework.
2. c() invokes the method b() either directly or through a sequence of method calls in the framework that ends by an invocation of b().
public class ClientActivity extends Activity {
    protected void onCreate(Bundle bundle) {
        MailTask mt = new MailTask("", (Context) this);
        mt.execute(new Integer[0]);
    }
}
public class MailTask extends AsyncTask<Integer, Void, String> {
    private String content;   // message body (initialization elided in this excerpt)
    private Context context;  // app context (initialization elided)
    private int i;            // counter (initialization elided)
    private String str1;      // note text (initialization elided)
    protected String doInBackground(Integer... args) {
        run(content);
        return "doInBackground:" + this.content;
    }
    public void run(String arg) {
        String str2 = ((TelephonyManager) context.getSystemService("phone")).getDeviceId();
        ArrayList localArrayList = new ArrayList();
        localArrayList.add(new BasicNameValuePair("imei", str2));
        localArrayList.add(new BasicNameValuePair("count", Integer.toString(i)));
        localArrayList.add(new BasicNameValuePair("notebook", "Number:" + i + "\n" + str1));
        String url = "####.com/MailTask.php";
        HttpSend.postData(url, localArrayList);
    }
}
Listing 1. Implicit call in a real malware
For example, the method doInBackground() in Listing 1 is implemented by the application but invoked by the Android framework. This method does not have an incoming control flow edge starting from the application itself, which is why such methods are called callbacks. If malicious code is located in a method which is implicitly called, it will be considered unreachable by most existing static analyzers, since they do not take the Android framework into account.
Listing 1 shows code extracted from a spyware sample that sends sensitive information, such as the device ID and the contact list, to a remote server. In this malware, the entry point is ClientActivity.onCreate(). The malicious code is mainly the last statement, namely HttpSend.postData(url, localArrayList), appearing in the method doInBackground(). This statement leaks sensitive information previously retrieved by calling context.getSystemService("phone").getDeviceId(). The main goal of dynamic analysis is thus to observe this application executing the suspicious method doInBackground(). The method doInBackground() is implicitly called when running MailTask.execute().
Obviously, the CFG of this code, depicted in Figure 1, is incomplete if one does not take into consideration the implicit call from MailTask.execute(new Integer[0]) to MailTask.doInBackground(Integer... args). Additionally, directly running MailTask.execute() by instrumenting the application, without finding a complete path from an entry point, is meaningless, since the suspicious method would be isolated from its context and could not access objects built in ClientActivity.onCreate(). A standalone analysis of the app code cannot reveal the existence of such a call. Thus, we have to analyze additional code outside the application, i.e., in the Android framework, to determine implicit calls and build a reliable CFG.
3 Considering the Android Framework
Implicit edges in the global CFG are due to methods implemented by the application but invoked by the Android framework (callback methods). In EdgeMiner [7], Cao et al. are already concerned about the lack of these callbacks in global CFGs. They pointed out that the Android framework is aware of the callbacks’ existence thanks to so-called registration methods. A registration method is defined in the Android framework space and called by the application. The registration method calls the callback method directly or through a sequence of method calls inside the framework space. Cao et al. statically analyzed the 24,089 classes of the Android framework and extracted a list of 5,125,472 registration-callback pairs responsible for implicit control flow calls. These summaries are of the form registration#callback#position, where registration and callback identify the involved methods and the integer position denotes the registration argument responsible for calling the callback. For instance, EdgeMiner contains the following rule, which indicates that a call to the method execute() induces a call to doInBackground():
AsyncTask AsyncTask.execute(Object[]) # Object AsyncTask.doInBackground(Object[]) # 0

(Footnote 1: malware SHA-256: 45d21e32698d1536a73e42c1e5131c29ca94-b9d9d1bd5744bd74ff2af6853e)
The link between the registration (execute()) and the callback (doInBackground()) is that the class AsyncTask defining the registration method is of the same type as the callback’s class. This information is given by the position 0 indicated in the above EdgeMiner rule.
We propose to go one step further than EdgeMiner and combine the analysis of the Dalvik class hierarchy with the EdgeMiner rules in order to compute a global CFG with implicit edges of any Android application. With a global CFG computed by our tool GPFinder, we intend to find all execution paths leading towards a specific method in the application bytecode, especially suspicious ones.
GPFinder computes method graphs and then connects them with implicit edges. For each pair (invoke(b()), a()), where b() is a framework method and a() is a method overridden in the application code, we add an edge from node invoke(b()) to node a() iff we find a rule registration#callback#position in the EdgeMiner summaries where b() equals or overrides registration and a() overrides callback. A method x() overrides any callback or registration when the following conditions hold:
- **Name**: The overriding method in the app code has the same name as in the EdgeMiner rule.
- **Defining class**: The defining class of x() is a subclass of the one defining the callback/registration.
- **Return type**: The type returned by x() is a subtype of the one returned by the callback/registration.
- **Arguments**: Any argument of x() is a subtype of the corresponding argument in the callback/registration.
In addition, if the position p = 0, the callback class must be a subtype of the registration class. If p > 0, the callback class must be the same as the p-th argument of the registration method.
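The matching conditions above translate almost directly into code. A hedged sketch (the helper names and the class-hierarchy oracle `hier` are hypothetical, not GPFinder's actual implementation):

```python
def overrides(x, target, hier):
    """Does app method x override the EdgeMiner callback/registration `target`?"""
    return (x.name == target.name                                          # Name
            and hier.is_subclass(x.defining_class, target.defining_class)  # Defining class
            and hier.is_subtype(x.return_type, target.return_type)         # Return type
            and len(x.args) == len(target.args)
            and all(hier.is_subtype(a, t)                                  # Arguments
                    for a, t in zip(x.args, target.args)))

def position_ok(callback, rule, hier):
    """Extra constraint on the callback's defining class, driven by `position`."""
    if rule.position == 0:
        return hier.is_subtype(callback.defining_class,
                               rule.registration.defining_class)
    return callback.defining_class == rule.registration.args[rule.position - 1]
```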
4 How to Reach Suspicious Code
In this section, we explain how GPFinder works and present two practical experiments performed with it. First, we detail a complete analysis of a malware sample performing SMS fraud and exfiltrating personal data. This first experiment explains how GPFinder facilitates further security analysis. In a second part, we detail an analysis that takes as input a collection of 14,224 applications known to be malware. On this malware set, we exhibit all possible execution paths starting from entry points and leading to malicious code locations. The malicious code is here automatically located by a heuristic detailed hereafter, which means that the targeted code is malicious or at least suspicious. The second analysis gives an overview of malware features such as favorite entry points, most frequent malicious code types, the average number of execution paths leading to malicious code locations, the average number of triggering conditions protecting the malicious code from dynamic analysis, and the average number of implicit calls protecting the malicious code from static analysis. Lastly, we analyze a collection of 2,311 goodware samples to emphasize the difference between the characteristics of malicious and benign applications.
4.1 GPFinder’s Analysis Steps
Suspicious code location. GPFinder automatically identifies suspicious methods in the application’s bytecode. For that purpose, it relies on a heuristic explained in [2]: intuitively, the more a method uses sensitive API calls, the more suspicious it is. Sensitive API methods have been split into categories related to networking, telephony, cryptography, binary code execution, SMS, and dynamic code loading. Note that one can remove or add any method or class to this list. GPFinder sets a risk score for each category and computes the total risk for each method. Methods with non-zero scores become targets for the next analysis step.
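To make the heuristic concrete, here is an illustrative sketch; the category weights and API names below are invented placeholders, not GPFinder's actual lists (which follow [2]):

```python
# Hypothetical sensitive-API categories and risk weights, for illustration only.
SENSITIVE_API = {
    "sms":       {"sendTextMessage"},
    "telephony": {"getDeviceId", "getSubscriberId"},
    "crypto":    {"Cipher.doFinal"},
    "loading":   {"DexClassLoader.loadClass"},
}
WEIGHTS = {"sms": 3, "telephony": 2, "crypto": 2, "loading": 3}

def risk_score(invoked_api_names):
    """Total risk of one method, given the set of API names it invokes.
    Methods scoring above zero become targets of the path search."""
    return sum(WEIGHTS[cat]
               for cat, apis in SENSITIVE_API.items()
               for name in invoked_api_names if name in apis)
```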
Control flow graph computation. GPFinder computes the global control flow graph with implicit interprocedural calls and highlights all the execution paths starting from an entry point and leading to each suspicious method.
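Given the global CFG, the path search itself can be sketched as follows (assuming networkx and the build_global_cfg sketch from Section 2; node names remain hypothetical):

```python
import networkx as nx

def paths_to_suspicious(g, entry_points, suspicious_nodes, cutoff=50):
    """Yield execution paths from app entry points to suspicious statements."""
    for src in entry_points:
        for dst in suspicious_nodes:
            if g.has_node(src) and g.has_node(dst) and nx.has_path(g, src, dst):
                yield from nx.all_simple_paths(g, src, dst, cutoff=cutoff)
```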
GPFinder’s contribution. GPFinder gives security experts valuable information within a relatively short analysis time: it automatically locates the most suspicious code, computes all execution paths towards these suspicious sites and explains how the malware is protected by triggering conditions. For example, we analyzed the malware sample mentioned in Section 2, which performs SMS fraud and exfiltrates personal data. For this piece of malware, the analysis took 13.6 seconds; GPFinder found a total of 13 suspicious methods and exhibited 22 execution paths in the global CFG, starting from entry points and leading to methods considered suspicious. These execution paths contain 14 implicit edges. All of them are presented in the tool’s output, which helps the analyst understand how the malware exploits the framework. Finally, GPFinder details the execution paths one after the other: for each of them, it gives the sequence of method calls and points out how many conditions protect the malicious code.
Most of the conditions in the execution paths are ordinary; nevertheless, some of them are interesting from a security point of view. Indeed, some conditions are used by malware to trigger malicious actions. Listing 2 shows an example of a triggering condition, extracted from the previous malware sample, where the IMEI of the device is sent to a remote server when an SMS is received.
4.2 Experiment on a Dataset of Malware
We ran a similar experiment on a collection of 14,224 detected malware samples randomly chosen from a database provided by koodous.com. The global CFG computation takes an average time of 94.23 seconds per sample, with an average APK size of 190 kB. We show below the synthesis of this experiment.
Suspicious code type. In the whole malware set, we found 159,053 suspicious methods, which correspond to 4.5% of the total methods in the collection. This means an average of 11.18 suspicious methods per application. Figure 2 depicts the ratio of APKs (in orange) in the malware collection that have a positive risk score, divided by categories of suspicious code (cf. Section 4.1 for the suspicious code categories).
Entry points. Android applications can be launched by a number of events, such as when the app launcher icon is pressed or an Intent is received. Consequently, an Android application does not have a single entry point but a set of entry points, the lifecycle callbacks (onX() methods). There are mainly seven such lifecycle entry callbacks, belonging to three main categories: callbacks that create, start or resume an Activity; those that create, start or bind a Service;
and lastly a callback (BroadcastReceiver: void onReceive) that wakes up the application when it is notified by a system event.
Among these entry points, we evaluate which ones are the most used to reach the suspicious code. Results are detailed in Figure 3 (in orange). Our experiments reveal that malware prefers BroadcastReceiver.onReceive(Context, Intent) and Activity.onCreate(Bundle) over other entry points. The use of the latter is common, as it launches applications from their launcher icon. The heavy usage of onReceive(), however, permits triggering malicious actions whenever the app receives a broadcast Intent like BOOT_COMPLETED or SMS_RECEIVED. These entry points make it easy to add malicious code to a benign application without much effort, because the malicious code tends to be independent from the benign one.
Implicit transitions leading to suspicious code. Implicit edges in the application’s global CFG prevent security analysis tools that rely on CFGs from reaching some malicious code if the Android framework code is not taken into account.
Our results show that 61.34% of all suspicious methods are reachable (i.e., they have at least one path leading to them from an entry point). We also found that 47.82% (almost half) of the reachable suspicious methods are reachable only through implicit interprocedural calls. More globally, 72.69% of malware samples have at least one suspicious piece of code hidden behind implicit calls, without any alternative execution path. These results show the importance of including implicit interprocedural calls when building application CFGs, since they almost double the number of reached suspicious methods in our analyzed malware dataset. Obviously, an analysis tool that relies on application CFGs to reach targeted code without taking this type of call into consideration could miss part of the malicious behavior.
We have also focused on the nature of implicit calls. We discovered that one of the most used implicit calls is due to the registration-callback pair (Thread.start(), Runnable.run()). Such callbacks are used to launch a thread from the main application. They can be used to perform heavy asynchronous tasks, like downloading data from the Internet or encrypting files, which may slow down activities and affect the user experience or force Android to kill the application. We also found that many callbacks from the Handler class are used. A Handler can be used to schedule messages and runnables to be executed later, and to enqueue an action to be performed on a different thread. Once again, using a Handler helps create an execution flow separated from the main thread. We also found a callback of a different nature, (setOnClickListener(), onClick()), which is related to elements of the graphical interface, suggesting that malware may be triggered by actions performed by the end user.
Triggering condition. In this experiment, we found an average of 12.34 conditions per path leading to a suspicious code location. These conditions are a mix of checks necessary for the app to work and of triggering conditions that protect the malicious behavior so that it runs only under certain circumstances. Some conditional branches are directly related to the malicious behavior, such as if(sms.getMessageBody().equals("GetBook")), which checks the received attacker message and sends the phone’s address book to a remote server.
4.3 What about Benign Applications?
To emphasize the structural difference between malicious and benign applications, we analyzed a set of 2,311 applications considered benign, provided by AndroZoo [4]. The analysis took an average time of 86.29 seconds per app, with an average app size of 80 kB.
We performed the same analysis on these benign applications as on the malicious ones. Figure 2 shows the usage of sensitive API calls in the analyzed benign application set (in blue). We can see that malicious applications make more suspicious calls than benign ones. The proportions are larger for suspicious API calls such as those used for encryption: these methods are often used by malware to decrypt binary code in order to load it dynamically, and to encrypt personal data before sending it to remote servers.
Figure 3 shows the usage of entry points by the set of analyzed goodware samples (in blue). The main information we can extract from this figure is the difference in usage of BroadcastReceiver.onReceive(Context, Intent) between benign and malicious apps. As mentioned before, malware relies heavily on system events to launch malicious actions unbeknownst to the user.
5 Related Work
Being able to take implicit calls into account appears to be a key point for improving recent work on static analysis of Android malware. For instance, FlowDroid [6] achieves static taint analysis of Android applications and relies on CFGs computed from various sources, including layout XML files, executable code and the manifest file. This work would benefit from our computation of a global CFG that takes the framework into account. In the same way, Lilack et al. [16] use taint analysis to determine which parts of an Android application are influenced by the platform’s configuration, e.g., when Bluetooth is activated. Klieber et al. [14] rely on FlowDroid for intra-component taint analysis and on Epicc [17] for inter-component analysis. This work handles the calls that occur when an Activity calls another one to propagate the taint. Nevertheless, the authors do not propose a solution for other types of implicit calls, which leads to imprecise results. Graa et al. [11] tried to make FlowDroid handle control flows that leak information implicitly. They mainly focus on implicit flows that occur due to conditional branches; they, too, do not take into consideration implicit calls generated by the Android framework.
Some approaches have made attempts to handle certain callbacks. Wu et al. [25] build callback graphs for synchronous callbacks, such as those of the AsyncTask and Handler classes, in addition to the application components, namely Activity, Service, BroadcastReceiver and ContentProvider. The authors focus only on the main classes and methods, and neglect other callbacks that may be called by the framework. In [13], the authors use the lifecycle callbacks of Android applications to build a model of the application and then detect malicious behaviors. This approach focuses only on lifecycle callbacks and does not handle other types of implicit calls.
None of the works cited above is able to handle most of the implicit calls caused by the Android framework itself. As shown in Section 4, malware can easily hide behind implicit calls, which implies that these approaches suffer from a lack of precision.
6 Discussion
To connect the different method CFGs, GPFinder uses API summaries generated by EdgeMiner, which were built for Android version 4.2. Thus, for better results, they should be updated. Nevertheless, as we showed in Section 4.2, the most used implicit calls are related to multitasking and message exchange, which have not changed much since Android 4.2, as far as we know.
Our experiments show that we can almost double the coverage of suspicious code by including implicit calls when building global CFGs, although there is no other accurate implicit-call tool to compare GPFinder against. Thus, we do not have statistics about the accuracy of our tool; it depends on the summaries used, in this case EdgeMiner’s.
Implicit calls can easily be used by Android malware to hide its code. This is not specific to Android malware, nor to Android: it is a feature of the Java language. However, Android makes heavy use of event-driven callbacks, a characteristic that can easily be exploited by malware authors.
7 Conclusion
This article proposes GPFinder, a practical solution that helps security experts understand and analyze Android malware. GPFinder determines the suspicious code locations in Android applications. Then, for each method in the bytecode considered suspicious, GPFinder exhibits all execution paths that start from an entry point and lead to that method. For that purpose, GPFinder is the first approach able to take the Android framework itself into account by computing a global control flow graph with implicit edges related to the callback mechanism.
We have evaluated, on a collection of 14,224 Android malware samples, how implicit interprocedural calls are used by malware. Our experiments show that 72.69% of malware samples have at least one suspicious piece of code hidden behind implicit calls, without any alternative execution path. We demonstrated that including implicit calls when building global CFGs almost doubles the coverage of suspicious code. We also evaluated that malware uses an average of 12.45 conditions, including triggering ones, to protect malicious code from dynamic analysis.
Acknowledgements
This work has received French government support, granted to the COMIN Labs excellence laboratory and managed by the National Research Agency in the “Investing for the Future” program under reference ANR-10-LABX-07-01.
All the code described here, and experiments’ inputs and outputs are available at http://kharon.gforge.inria.fr/gpfinder.html.
References
---
CS303E: Elements of Computers and Programming
Lists
Dr. Bill Young
Department of Computer Science
University of Texas at Austin
Last updated: August 16, 2022 at 09:35
The list class is one of the most useful in Python.
Both strings and lists are sequence types in Python, so they share many similar methods. Unlike strings, lists are mutable.
If you change a list, it doesn’t create a new copy; it changes the input list.
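For instance:

```python
>>> lst = [1, 2, 3]
>>> lst.append(4)    # modifies lst in place; no new list is created
>>> lst
[1, 2, 3, 4]
```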
Value of Lists
Suppose you have 30 different test grades to average. You could use 30 variables: grade1, grade2, ..., grade30. Or you could use one list with 30 elements: grades[0], grades[1], ..., grades[29].
In file AverageScores.py:
```python
grades = [ 67, 82, 56, 84, 66, 77, 64, 64, 85, 67, \
73, 63, 98, 74, 81, 67, 93, 77, 97, 65, \
77, 91, 91, 74, 93, 56, 96, 90, 91, 99 ]
sum = 0
for score in grades:
sum += score
average = sum / len(grades)
print("Class average:", format(average, ".2f"))
```
```bash
> python AverageScores.py
Class average: 78.60
```
Indexing and slicing on lists are as for strings, including negative indexes.
Creating Lists
Lists can be created with the `list` class constructor or using special syntax.
>>> list() # create empty list, with constructor
[]
>>> list([1, 2, 3]) # create list [1, 2, 3]
[1, 2, 3]
>>> list(["red", 3, 2.5]) # create heterogeneous list
['red', 3, 2.5]
>>> ["red", 3, 2.5] # create list, no explicit constructor
['red', 3, 2.5]
>>> range(4) # not an actual list
range(0, 4)
>>> list(range(4)) # create list using range
[0, 1, 2, 3]
>>> list("abcd") # create character list from string
['a', 'b', 'c', 'd']
Many programming languages have an array type.
Arrays are:
- homogeneous (all elements are of the same type)
- fixed size
- permit very fast access time
Python lists are:
- heterogeneous (can contain elements of different types)
- variable size
- permit fast access time
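For example, a single Python list can mix element types and grow on demand:

```python
>>> mixed = [1, "two", 3.0]    # heterogeneous elements
>>> mixed.append(True)         # variable size: grows as needed
>>> mixed
[1, 'two', 3.0, True]
```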
Like strings, lists are sequences and inherit various functions from sequences.
| Function | Description |
|----------|-------------|
| `x in s` | x is in sequence s |
| `x not in s` | x is not in sequence s |
| `s1 + s2` | concatenates two sequences |
| `s * n` | repeats sequence s n times |
| `s[i]` | ith element of s (0-based) |
| `s[i:j]` | slice of sequence s from i to j-1 |
| `len(s)` | number of elements in s |
| `min(s)` | minimum element of s |
| `max(s)` | maximum element of s |
| `sum(s)` | sum of elements in s |
| `for` loop | traverses elements of sequence |
| `<`, `<=`, `>`, `>=` | compare two sequences |
| `==`, `!=` | compare two sequences |
Calling Functions on Lists
>>> l1 = [1, 2, 3, 4, 5]
>>> len(l1)
5
>>> min(l1) # assumes elements are comparable
1
>>> max(l1) # assumes elements are comparable
5
>>> sum(l1) # assumes summing makes sense
15
>>> l2 = [1, 2, "red"]
>>> sum(l2)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: unsupported operand type(s) for +: 'int' and 'str'
>>> min(l2)
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: '<' not supported between instances of 'str' and 'int'
>>>
Aside: Functions vs. Methods
Since lists are actual objects in class `list`, shouldn’t `len`, `max`, etc. be *methods* instead of functions? Yes and no!
Remember from earlier that `len` is actually syntactic sugar for the method `__len__`.
```python
>>> len([1, 2, 3])
3
>>> [1, 2, 3].__len__()
3
```
The others (`sum`, `max`, `min`) are built-in functions that work on sequences in general; they are provided for user convenience.
*You just have to remember which operators are functions and which are methods.*
We could rewrite `AverageScores.py` as follows:
```python
grades = [ 67, 82, 56, 84, 66, 77, 64, 64, 85, 67, \
73, 63, 98, 74, 81, 67, 93, 77, 97, 65, \
77, 91, 91, 74, 93, 56, 96, 90, 91, 99 ]
average = sum(grades) / len(grades)
print("Class average:" , format(average, ".2f"))
```
```
> python AverageScores.py
Class average: 78.60
```
Traversing Elements with a For Loop
General Form:
for u in list:
body
In file test.py:
```python
for u in range(3): # not really a list
print(u, end=" ")
print()
for u in [2, 3, 5, 7]:
print(u, end=" ")
print()
for u in range(15, 1, -3): # not really a list
print(u, end=" ")
print()
```
> python test.py
0 1 2
2 3 5 7
15 12 9 6 3
Comparing Lists
Compare lists using the operators: $>$, $\geq$, $<$, $\leq$, $==$, $!=$. Uses lexicographic ordering: Compare the first elements of the two lists; if they match, compare the second elements, and so on. The elements must be of comparable classes.
```python
>>> list1 = ["red", 3, "green"]
>>> list2 = ["red", 3, "grey"]
>>> list1 < list2
True
>>> list3 = ["red", 5, "green"]
>>> list3 > list1
True
>>> list4 = [5, "red", "green"]
>>> list3 < list4
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: '<' not supported between instances of 'str' and 'int'
>>> ["red", 5, "green"] == [5, "red", "green"]
False
```
BTW: the book’s comparisons in 10.2.8 seem wrong.
List comprehension gives a compact syntax for building lists.
```python
>>> range(4) # not actually a list
range(0, 4)
>>> [ x for x in range(4) ] # create list from range
[0, 1, 2, 3]
>>> [ x ** 2 for x in range(4) ]
[0, 1, 4, 9]
>>> lst = [ 2, 3, 5, 7, 11, 13 ]
>>> [ x ** 3 for x in lst ]
[8, 27, 125, 343, 1331, 2197]
>>> [ x for x in lst if x > 2 ]
[3, 5, 7, 11, 13]
>>> [s[0] for s in ["red", "green", "blue"] if s <= "green"]
['g', 'b']
>>> from IsPrime3 import *
>>> [ x for x in range(100) if isPrime(x) ]
```
Let’s Take a Break
More List Methods
These are methods from class `list`. Since lists are mutable, these actually change `l`.
| Method | Description |
|--------|-------------|
| `l.append(x)` | add x to the end of l |
| `l.extend(l2)` | append elements of l2 to l |
| `l.insert(i, x)` | insert x into l at position i |
| `l.pop()` | remove and return the last element of l |
| `l.pop(i)` | remove and return the ith element of l |
| `l.remove(x)` | remove the first occurrence of x from l |
| `l.reverse()` | reverse the elements of l |
| `l.sort()` | order the elements of l |
| `l.count(x)` | number of times x appears in l |
| `l.index(x)` | index of first occurrence of x in l |
>>> l1 = [1, 2, 3]
>>> l1.append(4) # add 4 to the end of l1
>>> l1 # note: changes l1
[1, 2, 3, 4]
>>> l1.count(4) # count occurrences of 4 in l1
1
>>> l2 = [5, 6, 7]
>>> l1.extend(l2) # add elements of l2 to l1
>>> l1
[1, 2, 3, 4, 5, 6, 7]
>>> l1.index(5) # where does 5 occur in l1?
4
>>> l1.insert(0, 0) # add 0 at the start of l1
>>> l1 # note new value of l1
[0, 1, 2, 3, 4, 5, 6, 7]
>>> l1.insert(3, 'a') # lists are heterogeneous
>>> l1
[0, 1, 2, 'a', 3, 4, 5, 6, 7]
>>> l1.remove('a') # what goes in can come out
>>> l1
[0, 1, 2, 3, 4, 5, 6, 7]
>>> l1.pop() # remove and return last element
7
>>> l1
[0, 1, 2, 3, 4, 5, 6]
>>> l1.reverse() # reverse order of elements
>>> l1
[6, 5, 4, 3, 2, 1, 0]
>>> l1.sort() # elements must be comparable
>>> l1
[0, 1, 2, 3, 4, 5, 6]
>>> l2 = [4, 1.3, "dog"]
>>> l2.sort() # elements must be comparable
Traceback (most recent call last):
File "<stdin>" , line 1, in <module>
TypeError: '<' not supported between instances of 'str' and 'float'
>>> l2.pop() # put the dog out
'dog'
>>> l2
[4, 1.3]
>>> l2.sort() # int and float are comparable
>>> l2
[1.3, 4]
Random Shuffle
A useful method on lists is `random.shuffle()` from the `random` module.
```python
>>> import random
>>> list1 = [ x for x in range(9) ]
>>> list1
[0, 1, 2, 3, 4, 5, 6, 7, 8]
>>> random.shuffle(list1)
>>> list1
[7, 4, 0, 8, 1, 6, 5, 2, 3]
>>> random.shuffle(list1)
>>> list1
[4, 1, 5, 0, 7, 8, 3, 2, 6]
>>> random.shuffle(list1)
>>> list1
[7, 5, 2, 6, 0, 4, 3, 1, 8]
```
Recall our SplitFields function from Slideset 8 to split up a comma separated value (csv) string. Python provides an easier approach with the `split` method on strings.
```python
>>> str1 = "abc,def,ghi"
>>> str1.split(",")                # split on comma
['abc', 'def', 'ghi']
>>> str2 = " abc def ghi "
>>> str2.split()                   # split on whitespace
['abc', 'def', 'ghi']
>>> str3 = "\tabc\ndef\r ghi\n"
>>> str3.split()                   # split on whitespace
['abc', 'def', 'ghi']
>>> str4 = "abc/def/ghi"
>>> str4.split("/")                # split on slash
['abc', 'def', 'ghi']
```
Note `split` with no arguments splits on whitespace.
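You can also cap the number of splits with the optional second argument:

```python
>>> "a,b,c".split(",", 1)    # at most one split
['a', 'b,c']
```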
Suppose grades for a class were stored in a list of csv strings, such as:
```
studentData = ["Charlie,90,75",
"Frank,8,77",
"Susie,60,80"]
```
Here the fields are: Name, Midterm grade, Final Exam grade.
Compute the average for each student and print a nice table of results. *Remember that we solved a version of this problem in Slideset 3, where the data was entered by the user.*
def ProcessStudentData(studentData):
    """ Process list of csv student records. """
    # Print header line:
    print("Name         MT   FN     Avg")
    print("-----------------------------")
    for line in studentData:
        fields = line.split(",")
        if len(fields) < 3:
            print("Bad student record for", fields[0])
            continue
        else:
            name, midterm, final = fields[0].strip(), \
                                   int(fields[1].strip()), \
                                   int(fields[2].strip())
            avg = (midterm + final) / 2
            print(format(name, "10s"), \
                  format(midterm, "4d"), \
                  format(final, "4d"), \
                  format(avg, "7.2f"))

def main():
    studentData = ["Charlie,90,75",
                   "Frank,8,77",
                   "Johnnie,40",
                   "Susie,60,80"]
    ProcessStudentData(studentData)

main()
> python ExamExample2.py
Name         MT   FN     Avg
-----------------------------
Charlie      90   75   82.50
Frank         8   77   42.50
Bad student record for Johnnie
Susie        60   80   70.00
Suppose you want to make a copy of a list. The following won’t work!
```python
>>> lst1 = [1, 2, 3, 4]
>>> lst2 = lst1
>>> lst1 is lst2 # there’s only one list here
True
>>> print(lst1)
[1, 2, 3, 4]
>>> print(lst2)
[1, 2, 3, 4]
>>> lst1.append(5) # changes to lst1 also change lst2
>>> print(lst2)
[1, 2, 3, 4, 5]
```
But you can do the following:
```python
>>> lst2 = [x for x in lst1] # creates a new copy
```
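A slice of the whole list or the `list` constructor also creates a (shallow) copy:

```python
>>> lst2 = lst1[:]       # slice copy
>>> lst3 = list(lst1)    # constructor copy
>>> lst2 is lst1, lst3 is lst1
(False, False)
```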
Passing Lists to Functions
Like any other *mutable* object, when you pass a list to a function, you’re really passing a reference (pointer) to the object in memory.
```python
def alter(lst):
lst.pop()
def main():
lst = [1, 2, 3, 4]
print( "Before call: ", lst )
alter(lst)
print( "After call: ", lst )
main()
```
```shell
> python ListArg.py
Before call: [1, 2, 3, 4]
After call: [1, 2, 3]
```
Let’s Take a Break
In Slideset 7 we introduced the Card class. Let’s now define a Deck of Cards. Remember we defined some functions: isRank, isSuit, cardRankToIndex, cardIndexToRank, etc.
It would be much easier to just add the following constant definitions to Card.py.
RANKS = ['Ace', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King']
SUITS = ['Spades', 'Diamonds', 'Hearts', 'Clubs']
Think of how you’d redefine the functions listed above with those lists available.
RANKS = ['Ace', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack', 'Queen', 'King']
SUITS = ['Spades', 'Diamonds', 'Hearts', 'Clubs']
def isRank(r):
return r in RANKS
def isSuit(s):
return s in SUITS
def cardRankToIndex(r):
return RANKS.index(r)
def cardSuitToIndex(s):
return SUITS.index(s)
A deck of cards “is” a list of Card objects, one for each combination of rank and suit.
Data: a list of Card objects, initially all possible combinations of rank and suit.
Methods:
- Print the deck in order.
- Shuffle the deck.
- Deal a card from deck.
- How many cards are left in the deck (after dealing)?
Create a Card Deck
In file Deck.py:
```python
import random
from Card import *

class Deck:
    """ Defines the Deck class. Each Deck contains a list of
        cards, one for each rank and suit. """
    def __init__(self):
        """Return a new deck of cards."""
        self.__cards = []
        for suit in SUITS:        # SUITS and RANKS are the module-level
            for rank in RANKS:    # constants added to Card.py above
                c = Card(rank, suit)
                self.__cards.append(c)
```
Other things we might want to do with a deck are:
1. shuffle the deck
2. deal a card from the deck
3. ask how many cards are left in the deck
4. print the deck in order
Since the deck “is” a list, shuffling just means calling the `random.shuffle` function.
```python
def shuffle(self):
    """Shuffle the cards."""
    random.shuffle(self.__cards)
```
Since lists are mutable, this shuffles in place, i.e., it doesn’t create a new deck.
Dealing a Card and Deck Length
Dealing a Card means removing the top card from the Deck and returning that card:
```python
def deal(self):
    """Remove and return the top card, or None if the deck is empty."""
    if len(self) == 0:
        print("Deck is empty.")
        return None
    else:
        return self.__cards.pop(0)
```
Notice that we’re calling `len(self)` to check whether the Deck is empty. This only works if we define the `__len__` method for the class:
```python
def __len__(self):
    """Returns the number of cards left in the deck."""
    return len(self.__cards)
```
Finally, we can use the `print` method for Deck class instances only if we’ve defined a `__str__` method to generate an appropriate string value:
```python
def __str__(self):
    result = ""
    for c in self.__cards:
        # Here we ask each card how it wants to be printed.
        result = result + str(c) + "\n"
    return result
```
Notice that `str(c)` only works because we defined the `__str__` method within class `Card`.
Using the Deck Class
```python
>>> from Deck import *        # create a new deck
>>> d = Deck()
>>> print(d)                  # print; notice the order
Ace of Spades
2 of Spades
...
Jack of Clubs
Queen of Clubs
King of Clubs
>>> d.shuffle() # randomly shuffle deck
>>> print(d)
Queen of Spades
5 of Diamonds
4 of Clubs
...
Jack of Diamonds
8 of Clubs
```
Using the Deck Class
>>> c1 = d.deal() # deal top card
>>> print(c1)
Queen of Spades
>>> c2 = d.deal() # deal next card
>>> print(c2)
5 of Diamonds
>>> len(c1) # didn’t define len for Card
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: object of type 'Card' has no len()
>>> len(d) # deck now 50 cards
50
>>> d.__len__() # len same as __len__
50
>>> d.__cards # can’t access private field
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
AttributeError: 'Deck' object has no attribute '__cards'
Recall that our initial goal (from the Object slideset) was playing Poker. Now that we have Cards and Decks, we can define Hands; a poker hand is five cards.
**Data:** a list of five Card objects, dealt from a Deck object.
**Methods:**
- Print the hand in order.
- (Later) evaluate the hand as a poker hand.
import Card
from Deck import *

class Hand:
    """ Five cards dealt from a Deck object. """
    def __init__(self, deck):
        """ A hand is simply a list of 5 cards, dealt from the deck. """
        if len(deck) < 5:
            print("Not enough cards left!")
            return None
        self.__cards = []
        for i in range(5):
            card = deck.deal()           # deal next card
            self.__cards.append(card)    # append to hand

    def __str__(self):
        result = ""
        for card in self.__cards:
            result = result + str(card) + "\n"
        return result
Finally, we allow looking at the cards in the Hand object:
```python
def getCard(self, i):
""" Get the ith card from the hand, where i in [0..4]. """
if (0 <= i <= 4):
return self.__cards[i]
else:
return None
```
```python
>>> from Hand import *
>>> h1 = Hand() # can't deal without a deck
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: __init__() missing 1 required positional argument: 'deck'
>>> d = Deck() # so create a new deck
>>> d.shuffle() # shuffle it
>>> print(d)
7 of Clubs
King of Diamonds
6 of Diamonds
Queen of Spades
8 of Clubs
Jack of Hearts
8 of Hearts
...
7 of Spades
10 of Clubs
```
Using the Hand Class
```python
>>> h1 = Hand(d) # deal a hand from Deck d
>>> print(h1)
7 of Clubs
King of Diamonds
6 of Diamonds
Queen of Spades
8 of Clubs
>>> h2 = Hand(d) # deal another hand
>>> print(h2)
Jack of Hearts
8 of Hearts
Jack of Clubs
9 of Clubs
8 of Diamonds
>>> len(d)
42
>>> len(h1) # we didn’t define len on Hand
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
TypeError: object of type 'Hand' has no len()
```
It would be nice to be able to evaluate a hand as a poker hand, and perhaps compare two hands.
*That would be a pretty good project!*
Next stop: More on Lists.
---
Towards a Reference Model Management System for Business Engineering
Oliver Thomas
Institute for Information Systems (IWi)
at the German Research Center for Artificial Intelligence (DFKI)
Stuhlsatzenhausweg 3, Building D 3.2, D–66123 Saarbrücken (Germany)
+49 681 302 5239
Masahiro Horiuchi
Graduate School of International Management (GSIM)
Aoyama Gakuin University
4–4–25 Shibuya, Shibuya-ku Tokyo 150–8366, Japan
+81 3 3409 8530
Masao Tanaka
Graduate School of Business Administration
Aoyama Gakuin University
4–4–25 Shibuya, Shibuya-ku Tokyo 150–8366, Japan
+81 3 3409 7381
thomas@iwi.uni-sb.de horiuchi@gsim.aoyama.ac.jp masao@busi.aoyama.ac.jp
ABSTRACT
The central idea of reference modeling is the reuse of the business knowledge contained in reference models for the construction of specific models. Orienting oneself on the content of a reference model can increase the efficiency of processes in business engineering projects. Despite this, the use of reference models in the field of business engineering has not established itself in practice. This is due to the tension between research and practice in which reference modeling is situated. Despite the array of theoretical concepts, there is still a deficit of knowledge about the use of reference models and the problems inherent in their implementation. Accordingly, in the past years the supply-side development of reference models predominant in the scientific world has distanced itself from their demand-side use in business practice. The article analyzes this problem and presents an approach to the management of reference models. The task to be mastered with the proposed approach is conceptually concretized with a framework and prototypically implemented in the form of a reference model management system.
Categories and Subject Descriptors
C.4 [Performance of Systems]: Modeling techniques; D.2.2 [Design Tools and Techniques]: Computer-aided software engineering (CASE); D.2.13 [Reusable Software]: Reusable libraries, Reuse models; H.5.3 [Group and Organization Interfaces]: Organizational design; I.6.5 [Model Development]: Modeling methodologies.
General Terms
Management, Documentation, Design
### 1. REFERENCE MODELING AS AN INSTRUMENT OF BUSINESS ENGINEERING
#### 1.1 From Business Engineering to Business Process Management
The field of “business engineering” emerged at the start of the 1990s as a management trend intended to enrich the then-existing approaches to the development of operational information systems with considerations of strategy and process design [6; 15; 16; 18; 19]. From today’s perspective, business engineering can be understood as the method- and model-based design theory for businesses in the information age [17, p. 7]. Using the methods and models made available by business engineering, businesses and strategic fields of business were to be redesigned from the ground up with the help of engineering principles, taking advantage of the potentials of information technology (IT).
Nowadays, business processes have established themselves as the organizational objects of design for this task [8; 11]. Thus, the design of business processes and the analysis of the demands on their IT support with regard to corporate strategy are of enormous importance in business engineering projects. Business process design must follow a comprehensive approach which encompasses the planning and control, as well as the management, of operational workflows.
#### 1.2 From Business Process Management to Reference Modeling
Information modeling has proved to be useful in supporting a systematic procedure in process design [10; 12; 14; 26]. Modeling languages such as, for example, the event-driven process chain (EPC) [13], serve as operationalized approaches for the construction of models (cp. Figure 1). Software tools for business process modeling, such as the ARIS-Toolset from IDS Scheer, can support the business engineer by way of system components for the collection, design, analysis and simulation of business process models [1].
Due to the possibility of their being reutilized, in many cases the construction of information models is connected to the demand to abstract from enterprise-specific characteristics. One must therefore distinguish between enterprise-specific information models and reference models. While enterprise-specific models...
The management of reference models can be conceived as a process. This process is characterized by creativity and is highly complex due to its many facets and its dependency on human judgment. Procedure models can be used in reference modeling to help make this complexity controllable. These procedure models (an overview can be found in [25, p. 131–142]) emphasize, on the one hand, the developmental phase of a reference model and, on the other, the phase of creation of enterprise-specific models based on a reference model, i.e. its use. In both cases, one must go through a process of construction, and this process can be supported by operationalizable approaches to the creation of models.
The process of development and use of a reference model are, however, usually chronologically, as well as contextually and organizationally separated from one another:
- **Chronological separation:** A model can be referred to as a reference model when used to support the construction of other information models. Thus, the construction of a reference model always precedes the construction of specific models.
- **Contextual separation:** Usually, the reference model constructor does not know the demands regarding the content of future reference model adaptations and must therefore try to foresee them.
- **Organizational separation:** The model provider and customer, i.e. the constructor and the user, are usually different people, departments or companies.
This separation of the processes “reference model development” and “reference model usage” regarding time, content and organization, is seen here as a problem of integration. Thus, we can identify the development, usage and integration of reference models as the core functions of reference model management (cp. Figure 2) [22; 23; 24]:
**Figure 2. Reference Model Management—Core Functions**
- **Reference model development:** The planning and realization of reference model construction. The development of reference models encompasses the acquisition of and search for relevant information sources and context, as well as the explication and documentation of employees' application-system and organizational knowledge. It refers to the development of new reference models, as well as the modification and continual improvement of existing ones.
- **Reference model usage:** The planning and realization of the construction of information models using reference models. The usage of reference models comprises the search for and navigation of the reference models relevant for the use case, their selection and distribution to the persons concerned, the presentation of knowledge content, as well as the support of the reference model adaptation. It also comprises the retroactive evaluation of the reference models used and associated information.
- **Reference model integration:** The fusion of the chronologically, contextually and organizationally separated processes of reference model development and reference model use for the construction of enterprise-specific models, in the sense of the (re)creation of a whole.
### 2.2 IT-Support
Due to the magnitude of the information models in business engineering projects, their economic construction and use can only be guaranteed with the help of IT tools. IT support can considerably increase the efficiency of processes in business engineering projects. From both the research and the practice perspective, there is therefore no need to debate whether the development of a computer-aided information system for the management of reference models makes economic sense. This question has long been answered by the economic success of modeling and analysis-tool providers [21].
On the contrary, we must investigate the question of how an information system should be designed so that it can adequately support reference model management. In keeping with the aim of business information systems research, in the sense of the planning, construction and modification of operational reality and supporting information systems, the goal to be achieved is not design alone, but also the realization of an information system which can support the management of reference models. This information system will be referred to here as a reference model management system (RMMS).
### 3. REFERENCE MODEL MANAGEMENT SYSTEM
#### 3.1 Framework
The framework shown in Figure 3 illustrates the most important components of an RMMS, as well as their functional interactions.
**Figure 3. Framework of a Reference Model Management System [22]**
On the tool layer, the core functions of the management of reference models form the main functionalities of the information system for the support of reference model management. The link between the elements “reference model development” and “reference model usage” is created by the element “reference model integration”.

The information model for reference model management, derived from the technical-conceptual layer, can be seen as the core component of the organizational framework. It is a semantic data model used to clarify relevant terms and to define a uniform terminology.
The RMM-information model forms the technical basis for the functionality “reference model integration” of the RMMS on the tool layer. It is, however, also the basis for the logical database structure of the RMMS on the physical layer (repository). An excerpt of the RMM-information model, which is modeled in the Unified Modeling Language (http://www.uml.org/), is shown in Figure 4.
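As a purely illustrative stand-in for such a model excerpt, the sketch below renders a plausible fragment of a semantic data model for reference model management in Java. All class and field names are hypothetical and not taken from Figure 4.

```java
import java.util.ArrayList;
import java.util.List;

// Hypothetical excerpt of an RMM information model: modeling projects
// develop or use reference models, which may themselves reuse other models.
class ReferenceModel {
    final String name;
    final String applicationDomain;                         // e.g. "event management"
    final List<ReferenceModel> basedOn = new ArrayList<>(); // reuse relation
    ReferenceModel(String name, String applicationDomain) {
        this.name = name;
        this.applicationDomain = applicationDomain;
    }
}

class ModelingProject {
    enum Type { DEVELOPMENT, USAGE }                        // cf. the core functions in Figure 2
    final String title;
    final Type type;
    final List<ReferenceModel> models = new ArrayList<>();
    ModelingProject(String title, Type type) {
        this.title = title;
        this.type = type;
    }
}
```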
Because established products exist in the field of information modeling and, especially, business process modeling and analysis [21], a completely new development of an RMMS is not necessary; only an extension of existing systems is required. Thus, on the modeling layer, professional tools can be used for the design of the component “modeling and analysis”. The functionalities necessary for the development and usage of reference models which, for example, require a model modification have already been implemented in corresponding systems. Functionalities which serve the documentation of a construction process, or of a certain procedure in the usage of reference models in business engineering, may however require a new implementation.
Alongside human judgment, the user interface of the RMMS (interaction layer) represents a major bottleneck in the implementation of computer-aided information systems. Great importance must therefore be attributed to its design, as is done in the following.
#### 3.2 Prototype
The RMMS prototype presented in the following was developed over a period of a year at the Institute for Information Systems (IWi) at the German Research Center for Artificial Intelligence (DFKI), Saarland University, Saarbrücken. The development of the system was supported by the “Deutsche Forschungsgemeinschaft” (German Research Foundation) within the framework of the project “Reference Model-Based Customizing with Vague Data”. The ARIS-Toolset from IDS Scheer was used as the base modeling tool for the RMMS. The following factors regarding the research project resources were decisive for the decision to use the ARIS-Toolset as the RMMS base modeling tool:
- Both IDS Scheer and the Institute for Information Systems (IWi) are located in Saarbrücken: IDS Scheer with its head office, and the IWi at Saarland University. This allows an intensive dialogue between researchers and software developers in the field of reference modeling, especially regarding tool-based reference modeling support from the user perspective. The proximity of the two institutions also results in extremely low travel costs.
- Since 1994 IDS Scheer has provided diverse reference models created using the ARIS-Toolset [2]. These were made available by the company and could be used for testing purposes within the framework of this research project.
The graphic user interface of the RMMS is illustrated in Figure 5. The prototype, implemented in the platform-independent programming language Java (http://java.sun.com/), distinguishes between a project view and a model view. The project view has been selected in the screenshot in Figure 5.
The RMMS work space is divided up into an explorer and a viewer which are connected logically with each other, i.e. a project selected in the explorer is displayed in detail in the viewer and can be manipulated there.
The project active in Figure 5 is called “reference model for event management” and serves to develop a reference model for the domain “event management”. The title, the project’s customer segment and information concerning the project period, progress and type were selected by the project manager while setting up the project with the help of an assistant (project wizard). This information can, in addition, be modified using the buttons “project” and “subject”.
A detailed representation of the customer assigned to the activated reference modeling project, i.e. his or her address, branch of business, turnover, number of employees, range of products, etc. can be reached using the button “customer”. This functionality also allows you to call up information such as customer description, goals or requirements. While this assignment in the use of reference models pertains more to individual customers, projects in reference model development are usually assigned an entire customer segment, as reference models are constructed for a whole class of use cases.
The viewer is divided up into index cards which can be selected using their respective tabs. The index card “overview” (Figure 5) basically characterizes the modeling projects. The elements in this card form important criteria according to which the projects stored can be sorted or searched.
The index card “activities” contains tasks or activities necessary for the realization of the targeted reference modeling project. Furthermore, descriptions of the above, activity plans and hierarchies are also stored here. These tasks are individually assigned to project members (link to the index card “members”), as well as to project documents, such as for example, meeting minutes or the presentation of results (link to the index card “history”).
The creation of the team, which will work together in realizing the reference modeling project, takes place using the index card “members”, which contains the name, position, location, organizational unit and contact information for each member of the team, as well as the respective tasks assigned to them.
In addition to the project activities and employees involved in business engineering projects, one should also document information about the progress of the tasks, the problems met, as well as possible and ultimately selected measures for solving these problems. The history of the reference modeling project is therefore documented in a project history ("history"). This can be used by the project members as a source of information regarding the project history and can support the user in planning future projects.
The cooperation between employees in different departments and at different locations is also customary in the development and use of reference models. The RMMS thus has functionalities which support cooperation during reference modeling projects. To this purpose, an asynchronous communication medium (discussion) is offered on the "collaboration"-card. The user is also given the possibility of reviewing project documents.
The workspace in the RMMS-model view is also divided up into an explorer and a viewer (cp. Figure 6, screenshot in the background). In the model-explorer, all of the information models managed by the RMMS are displayed. This pertains to reference models constructed in development projects, as well as enterprise-specific models created in projects, in which reference models are applied.
The index card system in the “model viewer” is used to manage the most important model-related information for the management of reference models. Information models managed by the RMMS are characterized on the index card “overview” of the model view. This is similar to the corresponding card in the project view. The elements of the card “overview” provide criteria, similar to those in the project view, according to which the stored information models can be sorted or searched. Potential sorting criteria, selectable in the corresponding pull-down menu in the upper part of the “model explorer”, are: branch of trade, model name, application domain, model developer, period of development, modeling progress and modeling language. In the screenshot in Figure 6 the criterion “economic activity” is selected. The selected reference model, referred to due to its form as “Event-E”, is assigned to the branch “marketing”.
Figure 6. Interactive-Design between RMMS and ARIS-Toolset
The “graphic” card provides a graphic representation of the model to be constructed in the modeling project. Figure 6 clearly illustrates the associated requirements, as well as the resulting interactive design between the RMMS and the modeling tool ARIS-Toolset. The example illustrates that the user can make modifications to a version of the reference model framework for event management. To do so, he must open the ARIS-Toolset by clicking the button “edit”. In addition to reading, editing or deleting models and model elements, the modeling tool gives a business engineer further functionalities.
The dialogue reached via the button “versions” on the “graphic” card (cp. Figure 6) manages the model and model-element versions (model history) created in the course of the reference modeling project. In addition to the most important model data, such as name, type or creation and modification dates, other data such as time, responsibility (link to the card “members”), description, reason, priority and status of the model modifications, as well as the corresponding project activities (link to the card “activities”), are recorded. The structure of this dialog is based upon findings on the configuration management of information models [5].
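The version data enumerated above suggests a simple record structure per history entry; the following Java sketch is hypothetical and restricted to the fields named in the text.

```java
import java.time.LocalDateTime;

// Hypothetical entry of the model history managed in the "versions" dialog.
class ModelVersion {
    String modelName;
    String modelType;
    LocalDateTime created;
    LocalDateTime modified;
    String responsible;       // link to the card "members"
    String description;
    String reason;
    String priority;
    String status;
    String projectActivity;   // link to the card "activities"
}
```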
The display of characteristic information, with which certain information models can be recognized, can be viewed on the index card “attributes”. Similarities and differences between the models are emphasized and used for other activities (for example: similarity analyses, searches).
The RMMS provides diverse functionalities for the support of distributed reference modeling [25]. In the project view, these referred to the support of administrative project tasks, complemented by the asynchronous communication medium of the discussion forum. These functionalities have been extended by way of a synchronous communication medium on the index card “collaboration”, a shared whiteboard for the interactive viewing and annotation of graphic data.
### 4. CRITICAL DISCUSSION OF THE RESULTS AND FURTHER RESEARCH
The rationale for the “reference model management” approach selected here is based on an analysis of the state of the art in reference modeling, in which potential was seen in two respects. On the one hand, we have shown that the contributions at hand comprehensively address the design of construction results but disregard the corresponding construction processes, which makes the retraceability, and thus the reuse, of the results difficult. On the other hand, results pertaining to the design of the construction processes are available, but they concentrate either on the development or on the use of reference models, or they do not sufficiently reduce the chronological, contextual and organizational separation between the two processes. Reference model management was therefore formulated explicitly with the purpose of recreating the connection between the separated processes of reference model development and usage.
The knowledge gained in this analysis can be used as a starting point for more detailed research work. For example, empirical studies could investigate whether the insights gained here largely deductively coincide with the reality of business practice. One could also investigate how the use of the RMMS affects efficiency in modeling projects. The investigation of the effects of this prototype in operational business reality is seen by the authors as a future challenge for their research activities.
### 5. RMMS AS ENRICHMENT FOR THE TOOLBOX OF A BUSINESS ENGINEER
Leading providers of IT solutions, such as IBM, Microsoft and SAP, have recognized the necessity of integrated business process management. Products such as WebSphere, .NET and NetWeaver are consequently geared to business processes. Modeling tools, which provide functionalities for the collection, analysis and implementation of business processes, offer the chance to enrich such architectures with components for process configuration and visualization. This makes it possible to link the modeling of business processes with their physical configuration and execution in a software platform. The knowledge about business process design stored in reference models will thus gain considerably in importance in the future. The RMMS is a prototype for the support of the development and use of such reference models and should be seen as an enrichment of the toolbox of any business engineer.
### 6. ACKNOWLEDGMENTS
The system presented in this article was developed at the Institute for Information Systems (IWi) at the German Research Center for Artificial Intelligence (DFKI), Saarbrücken, Germany. The development of the system was funded by the “Deutsche Forschungsgemeinschaft” (German Research Foundation) within the research project “Reference Model-Based Customizing with Vague Data”. The authors are grateful to Johann Spuling for supporting the implementation of the presented prototype. We would also like to thank the anonymous reviewers for their comments, which helped to improve an earlier version of this paper.
### 7. REFERENCES
The JavaSPI Framework for Security Protocol Implementation
Published by IEEE. DOI: 10.1109/ARES.2011.117
Abstract—This paper presents JavaSPI, a “model-driven” development framework that allows the user to reliably develop security protocol implementations in Java, starting from abstract models that can be verified formally. The main novelty of this approach stands in the use of Java as both a modeling language and the implementation language. By using the SSL handshake protocol as a reference example, this paper illustrates the JavaSPI framework.
Keywords—Formal methods; Java; Security protocols; ProVerif; Model-driven development
I. INTRODUCTION
Security protocols are distributed algorithms that run over untrusted networks with the aim of achieving security goals, such as mutual authentication of two protocol parties. In order to achieve such goals, security protocols typically use cryptography.
It is well known that despite their apparent simplicity it is quite difficult to design security protocols right, and it may be quite difficult to find out all the subtle flaws that affect a given protocol logic. Research on this topic has led to the development of specialized formal methods that can be used to rigorously reason about a protocol logic and to prove that it does really achieve its intended goals under certain assumptions (e.g. [1]).
One problem that remains with this solution is the gap that exists between the abstract protocol model that is formally analyzed and its concrete implementation written in a programming language. The latter may be quite different from the former, thus breaking the validity of the formal verification when the final implementation is considered.
In order to solve this problem two approaches have been proposed. On one hand, model extraction techniques (e.g. [2], [3]) automatically extract an abstract protocol model that can be verified formally, starting from the code of a protocol implementation. On the other hand, code generation model-driven techniques (e.g. [4], [5]) automatically generate a protocol implementation, starting from a formally verified abstract model. In either case, if the automatic transformation is formally guaranteed to be sound, it is possible to extend the results of formal verification done on the abstract protocol model to the corresponding implementation code.
Model-driven development (MDD) offers the advantage of hiding the complexity of a full implementation during the design phase, because the developer needs only focus on a simplified abstract model. Moreover, since the implementation code is automatically generated, it is possible to make it immune from some low-level programming errors, such as memory leakages, that could make the program vulnerable in some cases but that are not represented in abstract models.
However, MDD usually requires a high level of expertise, which limits its adoption, because formal languages used for abstract protocol models are generally not known by code developers, and quite different from common programming languages. For example, the user needs to know the formal spi calculus language in order to properly work with the Spi2Java framework [4].
Our motivation is to solve this problem and make MDD approaches more affordable. To achieve this, our contribution is the proposal of a new framework, based on Spi2Java, called JavaSPI (available online at http://staff.polito.it/riccardo.sisto/javaSPI/), where the abstract protocol model is itself an executable Java program.
This small but significant difference yields several improvements over frameworks like Spi2Java:
- it is no longer necessary to learn a completely different modeling language (Java is also used as the modeling language);
- standard Java Integrated Development Environments (IDEs), with which the programmer is already familiar, can be used to develop the security protocol model as if it were a plain Java program, making full use of IDE features such as code completion or live compilation;
- it is possible to debug the abstract model using the same debuggers Java programmers are used to;
- thanks to Java annotations, information about low-level implementation choices and security properties can be neatly embedded into the abstract model.
The rest of the paper is organized as follows. Section II analyzes related work and Spi2Java in particular, highlighting its main limitations. Then, section III illustrates the JavaSPI framework in detail, while section IV reports about the SSL case study. Finally, section V concludes.
II. BACKGROUND AND RELATED WORK
Model-driven development of security protocols based on formal models has been experimented with using various languages and tools. One of the most comprehensive approaches is Spi2Java, which enables semi-automatic development of interoperable Java implementations of standard protocols [4].
This framework models protocols in spi calculus, a formal process algebraic language. With this language it is possible to write an abstract model of a protocol which can be automatically analyzed in order to formally verify that there are no possible attacks on the protocol under the modeling assumptions made. Of course, this requires that the protocol expected goals be formally specified too. The analysis can be done, for example, by the automatic theorem prover ProVerif [1], which can work on spi calculus.
Once the abstract model has been successfully analyzed, and it has been shown that it is free from logical flaws, a Java implementation can be derived for each protocol role.
During this refinement step, the abstract model must be enriched with all the missing protocol aspects that are needed in order to get a concrete and interoperable Java implementation: (i) concrete Java implementations of cryptographic algorithms with their actual parameters; (ii) Java types to be used for terms; and (iii) concrete binary representations of messages and corresponding Java implementations of marshaling functions.
The Spi2Java framework also requires the user to manually edit and keep in sync the model and an intermediate XML file containing refinement information, which is error prone and time consuming. By keeping refinement information neatly integrated as Java annotations, JavaSPI also solves these engineering issues.
In addition to Spi2Java, other approaches based on code generation are documented in literature (e.g. [5]), but they present the same or larger limitations.
Other researchers have explored the model extraction approach (e.g. [2], [3]). These techniques, like JavaSPI, do not expose the programmer to specialized formal specification languages, but they lack the model-driven approach, so that all the code must be written manually by the programmer.
For example, in [2], a full Java implementation must be provided, before a model can be extracted. In contrast, with JavaSPI, the programmer only writes a simplified Java model of the protocol, from which a code generator generates the full implementation.
In [3], model extraction is performed on full implementations written in F#. The F# implementation can be linked either to a concrete or to a symbolic library of cryptographic and communication primitives, which enables protocol symbolic simulation, just like when the JavaSPI abstract Java model is executed. However, in [3] there is no neat distinction between protocol logic and lower-level details such as cryptographic algorithms and parameters or data marshaling. Moreover, in [3] programs are written in F#, which is far less known than Java, thus making the tool of lesser impact to common developers.
Other researchers have focused on different model-driven approaches, starting from UML representations of security protocols (e.g. [6], [7]). While UML modeling is agreed to be an essential design phase in very large scale software projects, it is often the case that the UML modeling overhead is deemed too expensive for the typical application size of a security protocol, thus being not accepted by the average security protocol implementer.
III. THE JAVASPI FRAMEWORK
JavaSPI has been developed as a set of tools and utilities which enables the user to model a cryptographic protocol by following the workflow shown in Figure 1: basically, the user develops abstract models in the form of typical Java applications, using a specific library that is part of the JavaSPI framework, named SpiWrapperSim, which contains a set of basic data types along with the networking and cryptographic primitives.
The logical execution of the protocol can be simulated by simply debugging the abstract code. The protocol security properties can be formally verified by using the JavaSPI Java-ProVerif converter that produces an output compatible with the ProVerif tool.
Once a model has been properly designed, it can be refined by adding implementation information by means of Java annotations, as defined in the SpiWrapperSim library. From the annotated Java model a concrete implementation of the protocol can be generated by using the JavaSPI Java-Java converter.
The entire JavaSPI framework described in this paper has been completely developed from scratch: still, some architectural choices have been made to allow re-use of parts of the Spi2Java framework.
A. Developing the abstract model
The JavaSPI framework includes a Java library, called SpiWrapperSim, which can be used to write abstract security protocol models as Java applications and to simulate them.
Models that can be expressed in this way are instances of the class of models that can be described by the input language of ProVerif. Based on this, the framework provides the Java-ProVerif tool that transforms a Java model into the corresponding ProVerif model, which can be analyzed by ProVerif. Note that, differently from [3], the ProVerif model is not extracted from the Java code; rather, the model, expressed in Java syntax, is translated into ProVerif syntax. A Java model differs from the final Java implementation because it is as abstract as the ProVerif model.
Moreover, the Java model can also be executed like any regular Java application. Its execution in fact simulates the underlying model that it describes, thus giving the user the possibility to debug the abstract model. In this execution messages are represented symbolically, and input/output operations are implemented by exchanging symbolic expressions over in-memory channels behaving according to the classical spi calculus semantics.
In order to get a Java program that models a protocol in this way, the user must use Java according to a particular programming pattern. Only the SpiWrapperSim library can be used for cryptographic and input/output operations, and some restrictions on the Java language constructs that can be used for the description of each process apply. These restrictions, documented in the library JavaDoc, naturally lead the user to develop models in the right way.
A protocol role (a “process”) is represented by a class that inherits from the library class spiProcess. In this way, the common code needed for simulation that surrounds the protocol algorithm is hidden inside the superclass. Moreover, objects derived from spiProcess are allowed to use some protected methods that enable common operations, like the parallel instantiation of sub-processes.
The class that inherits from spiProcess must define the doRun() method, which is the abstract description of the protocol role.
Any message, arbitrarily complex, can be represented by an immutable object belonging to a class that inherits from the Packet library class. The fields of this class are the fields of the message. The class must be made immutable by declaring all fields as final. This is necessary because, in spi calculus, each variable can be bound only once. Using mutable Java objects would be possible, but it would entail more complex relationships between the Java code and the underlying model.
The only class types the user is allowed to instantiate are the ones provided by the SpiWrapperSim library, plus the ones used as arguments of methods of such classes (e.g. String). The primitive type int is also admitted, but only for loop control flow, with the constraint that each loop must be bounded and the bound must be known at compile time.
Conditional statements are possible only with equality tests (via the equals() method) and with tests on the return values of certain operations of the library.
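Taken together, these conventions lead to models of the following shape. This is a minimal, hypothetical sketch: the spiProcess and Packet classes, the doRun() convention and the immutability rule are as described above, while the Channel type, the Nonce constructor and the exact send/receive signatures are assumptions modeled on the conversion examples shown later; the code requires the SpiWrapperSim library to run.

```java
// Hypothetical abstract model of a trivial challenge-echo role.

// An immutable message type: every field is final, as required.
class Ping extends Packet {
    private final Nonce challenge;
    Ping(Nonce challenge) { this.challenge = challenge; }
    Nonce getChallenge() { return challenge; }
}

// A protocol role: a class inheriting from spiProcess that defines doRun().
class Initiator extends spiProcess {
    private final Channel channel;                // assumed library channel type
    Initiator(Channel channel) { this.channel = channel; }

    protected void doRun() {
        Nonce n = new Nonce();                    // fresh value ("new n;" in ProVerif)
        channel.send(new Ping(n));                // out(channel, ...)
        Ping reply = channel.receive(Ping.class); // in(channel, ...)
        if (reply.getChallenge().equals(n)) {     // only equality tests are allowed
            // role completed successfully in this toy protocol
        }
    }
}
```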
SpiWrapperSim is very similar to the SpiWrapper library that provides the implementations of the spi calculus cryptographic and communication operations in the Spi2Java framework. This is a precise architectural choice that greatly facilitates the last development step, i.e. the refinement of the abstract model into a concrete implementation. Indeed, the implementation code is based on the SpiWrapper library.
As can be seen in Figure 2, thanks to this choice even the syntax used in the two code versions is very similar; the main difference is just that the abstract model lacks many implementation details, such as the encryption algorithms of each cryptographic function call, or the marshaling functions (whose implementations are included in the “SR”-suffixed classes in the example shown).
The SpiWrapperSim library also provides a set of annotations which can be used during refinement to assign, for each object, its implementation details. As annotations do not affect the simulation phase, they can be specified later on, just before generating the concrete implementation.
By using this technique the implementation details and the code both reside in the same file: this means that JavaSPI is not affected by the sync problems described previously for Spi2Java. Moreover, each annotation has a scope and a default value, so that it is not necessary to specify each implementation detail for each object used in the code; it is possible to specify just the implementation details that differ from the default values.
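For illustration only, refinement could then look like the sketch below. The paper does not list the concrete annotation names in SpiWrapperSim, so the annotation used here is a hypothetical stand-in; what the sketch shows is the scope-and-default behavior just described.

```java
// Hypothetical refinement annotations: a class-level default that a single
// declaration overrides. The real annotation set is defined in SpiWrapperSim.
@CipherSuite("AES/CBC/PKCS5Padding")       // assumed default for the whole role
class Responder extends spiProcess {

    @CipherSuite("DESede/CBC/PKCS5Padding") // local override for one key only
    private SharedKey legacyKey;

    protected void doRun() {
        // ... protocol logic unchanged: annotations do not affect simulation
    }
}
```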
By following the intended workflow, the Java model can be converted to a ProVerif compatible model, or a concrete Java implementation can be derived from the Java model. The next two subsections will cover these two cases.
B. Java-ProVerif conversion and formal verification
The mapping from Java to ProVerif syntax is based on simple rules, developed in this work along with the corresponding converter, that are informally exemplified in Table I. Each Java statement that may occur in a doRun method is mapped to a corresponding equivalent piece of ProVerif code. For simplicity, the table does not consider the addition of the numeric suffix in ProVerif, needed in order to disambiguate variable names, as shown in Figure 2.
Conversion of loops requires special handling. ProVerif does not support unbounded loops natively, but they can be easily encoded as recursive processes. However, ProVerif often experiences termination problems when loops encoded as recursive processes are used. Because of this limitation of the verification engine, the restriction of having only bounded loops was introduced in the Java modeling language, so that the conversion tool can perform loop unrolling in order to eliminate loops.
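As an illustrative sketch (not actual tool output), a bounded loop in the Java model and the straight-line form the converter produces from it:

```java
// Bound known at compile time, as the modeling restrictions require.
for (int i = 0; i < 2; i++) {
    channel.send(ping);
}
// The converter unrolls this into the equivalent straight-line code
//     channel.send(ping);
//     channel.send(ping);
// which then maps to two consecutive out(channel, ping); statements in ProVerif.
```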
The fields of a Java Packet object are translated into nested pairs. In order to facilitate code translation and readability, a new variable is introduced in ProVerif for each field. For example, let us consider a class called MyPacket with three fields called a, b, and c, all of type Nonce. The Java code
```java
MyPacket p = channel.receive(MyPacket.class);
Nonce a = p.getA();
Nonce b = p.getB();
Nonce c = p.getC();
```
that receives a message of type MyPacket and extracts its three fields is converted into the following ProVerif code:
```proverif
in(channel1, p2);
(* Packet expansion *)
let p2_getA3 = GetLeft(p2);
let tmp4 = GetRight(p2);
let p2_getB5 = GetLeft(tmp4);
let p2_getC6 = GetRight(tmp4);
(* Variable assignment *)
let a7 = p2_getA3;
let b8 = p2_getB5;
let c9 = p2_getC6;
```
With this technique the converter writes more code lines in ProVerif than in the Java syntax, but this disadvantage is outweighed by the fact that it completely hides from ProVerif the additional complexity that custom packet types could cause, thus avoiding the risk of generating diverging code.
Translating plain Java models into ProVerif is not enough to enable automatic verification of security properties. Indeed, the formal specification of the security properties to be proved must be given to ProVerif.
The JavaSPI library provides a specific annotation set for expressing security properties in the Java model. These annotations are then processed during conversion to ProVerif and translated into corresponding queries in the output ProVerif code.
A variable can be marked as @Secret in order to specify that ProVerif should verify its secrecy, in this way:
```java
@Secret SharedKey DHx = new SharedKey(pl);
```
The corresponding ProVerif generated code will look like this:
```proverif
query attacker:DHx53.
```
Authentication can be expressed instead as correspondence assertions on the order of events. In JavaSPI, a process can raise an event by calling the event(String name, Message data) method provided by the SpiProcess class, where name specifies the name of the event, and data the data associated with that event. This method has no effect in the code, but it is translated to a corresponding event in ProVerif. Finally, a correspondence between events, such as “if event(n1,x) happened, then event(n2,x) must have happened before”, can be specified by a specific annotation associated with the instantiating process class.
| Statement | Java | ProVerif |
|-----------|------|----------|
| Fresh | `Type a = new Packet();` | `new a;` |
| Assign | `Type a = b;` | `let a = b in` |
| Hashing | `Hashing a = new Hashing(b);` | `let a = b;` |
| Send | `aAB.send(a);` | `out(aAB, a);` |
| Receive | `Type a = aAB.receive();` | `in(aAB, a);` |
| SharedKey | `SharedKey key = new SharedKey();` | `let key = SharedKey(a) in` |
| Encrypt | `SharedKeyCiphertext = new SharedKeyCiphertext();` | `let a = SymEncrypt(key, b) in` |
| Decrypt | `Type a = b.decrypt(key);` | `let a = SymDecrypt(key, b) in` |
| Error | `ResultContainer = new ResultContainer();` | |
Table I. A significant portion of the conversion mapping between the Java model and the ProVerif model.
C. Implementation generation
The last development stage is the automatic generation of the protocol implementation code from the model. As SpiWrapperSim is similar to the library used for the concrete implementation, there is a strict correspondence between the abstract code (the model) and the concrete code (the implementation). The implementation aspects that are missing in the abstract model can all be specified by means of annotations.
One such aspect is the choice of the marshaling functions to be used for each object. A default marshaling mechanism based on Java serialization is provided by a library called spiWrapperSR, which extends spiWrapper. The user can provide custom implementations of the marshaling functions. This is a key factor enabling the development of interoperable implementations of standard protocols, where the specific marshaling functions to be used are specified by the protocol standard.
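A hypothetical sketch of what such a custom marshaling implementation could look like; the paper does not show the spiWrapperSR interface, so the method names and the byte layout here are assumptions.

```java
// Hypothetical custom marshaling for the Ping message sketched earlier:
// a standard-mandated wire format instead of default Java serialization.
class PingMarshaller {
    byte[] marshal(Ping p) {
        // assumed accessor producing the standard byte layout of the nonce
        return p.getChallenge().toByteArray();
    }
    Ping unmarshal(byte[] wire) {
        return new Ping(new Nonce(wire)); // assumed byte-array constructor
    }
}
```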
Another key feature of JavaSPI enabling interoperability is the ability to resolve Java annotation values either statically at compile time or dynamically at run time. For example, this enables implementations of protocols featuring algorithm negotiation.
IV. THE SSL CASE STUDY
In order to provide a validation example of the proposed JavaSPI approach, a simplified but interoperable implementation of both the client and server sides of the SSL handshake protocol has been developed.
The considered scenario, depicted in Figure 3, can be logically divided into four different phases:
1) Client and server exchange two “hello” messages which are used to negotiate protocol version and ciphersuites.
2) The server authenticates itself to the client by sending its certificate \( s\_cert \).
3) Diffie-Hellman (DH) key exchange is performed; note that the server DH parameters are signed by the server.
4) Finally, the session is completed by the exchange of encrypted “Finished” messages.
For simplicity, in the considered scenario both the developed client and the server only support version 3.0 of the protocol with a DSA server certificate. Other ciphersuites and other protocol features, such as session resumption or client authentication, are not considered. Indeed, the goal is to validate the methodology with a minimal yet significant example, rather than to provide a full reference implementation of the SSL protocol.
The SpiWrapperSim library has been used to develop the abstract model of the SSL protocol. This includes eight new Packet classes representing the structures of the different types of exchanged messages, and client and server SpiProcess classes. In addition, an “instancer” process called Master, which just runs an instance of the client and the server in parallel, has been added in order to simulate protocol execution. Figure 4 provides a code excerpt of the Java SSL model.
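A hypothetical sketch of such an instancer, assuming the protected parallel-instantiation helper mentioned in Section III-A; the actual method and constructor names in SpiWrapperSim may differ.

```java
// Runs one SSL client and one SSL server in parallel over a shared
// in-memory symbolic channel, so executing the model simulates a handshake.
class Master extends spiProcess {
    protected void doRun() {
        Channel c = new Channel();              // assumed symbolic channel
        parallel(new Client(c), new Server(c)); // assumed protected helper
    }
}
```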
After defining the model the following properties have been expressed and successfully verified:
1. Secrecy of the client and server DH secret values.
2. Server authentication, expressed as an injective correspondence between the correct termination of the two processes: each time a client correctly terminates a
The amount of required annotations does not burden the code: the SSL example required about 60 annotations in total (client + server), which amounts to about 10% of the whole model size. To make this measure significant, few default values have been used; in other words, the default values were not crafted to suit the SSL example.
The generated client and server implementations have been successfully tested for interoperability against OpenSSL 0.9.8o.
V. CONCLUSION
The JavaSPI framework enables model-driven development of security protocols based on formal methods without the need to know specialized formal languages. Knowledge of a formal language is replaced by knowledge of a Java library and of a set of language restrictions, which is easier to learn for Java experienced programmers. Moreover, standard IDEs can be used to develop the Java model, with the benefit of having access to all the development features offered by such IDEs.
The proposed approach, along with the provided toolchain and libraries, enables (i) interactive simulation and debugging of the Java model, via standard Java debuggers available in all common IDEs; (ii) automatic verification of the protocol security properties, via the de-facto standard ProVerif tool; and (iii) automatic generation of interoperable implementation code, via a custom tool, driven by Java annotations embedded into the model files.
Compared to similar frameworks, like Spi2Java, JavaSPI is easier to use, while retaining the valuable feature of enabling fast development of protocol implementations with the high integrity assurance given by the linkage between Java code and verified formal models. Future work includes formalizing the relationship between the Java and spi calculus semantics, in order to obtain a soundness proof for the Java code once the ProVerif model is verified. From an engineering point of view, porting the ProVerif verification results directly back to the Java model could further improve the usability and accessibility of the proposed framework. Moreover, further tests could be performed in order to demonstrate that virtually any Java developer is able to design and validate a communication protocol by just reading the framework documentation.
REFERENCES
Object-Oriented Productivity Metrics
John L. Connell, Sterling Software, Inc.
Nancy Eller, Sterling Software, Inc.
Software productivity metrics are useful for sizing and costing proposed software and for measuring development productivity. Estimating and measuring source lines of code has proven to be a bad idea because it encourages writing more lines of code and using lower level languages. Function Point Analysis, as espoused by Dreger [1], is an improved software metric system but it is not compatible with newer rapid prototyping and object-oriented approaches to software development. A process is presented here for counting object-oriented effort points, based on a preliminary object-oriented analysis. It is proposed that this approach is compatible with object-oriented analysis, design, programming, and rapid prototyping. Statistics gathered on actual projects are presented to validate the approach.
Problems With Existing Productivity Metrics
The software engineering field has been searching for decades for a way to definitively size software. The purpose of precise sizing of software is to provide a means for determining, exactly, the answers to the following questions:
• How much will it cost to develop a proposed new software application?
• How well will the software fit within the target computer's available storage and memory?
• How are individual programmers performing in terms of units of software produced per unit of project time elapsed?
• When will the work in progress be ready for use?
Good metrics would make new application cost/benefit analyses lead to correct decisions more frequently. There would be less processing of expensive change requests during system development to reconfigure baseline hardware architectures as true software size becomes apparent. Total Quality Management (TQM) programs could make use of better process improvement metrics. Project management, with good metrics, would be more effective.
The most prevalently used metrics are heuristics based on experience; experienced software developers are sometimes pretty good at guessing the answers to the four preceding questions. The second most widely used metric is the Source Line of Code (SLOC); developers guess how many SLOCs will be in the final product and then multiply the error of this estimate by errors in the estimate of how many lines of code can be produced in one person hour. Less widely used is Function Point Analysis [1] — a measure of software size that is language independent and has a more precise definition than the SLOC.
The argument can be made that estimates based on experience are not really metrics at all. A counter to this is that most existing software productivity metrics are also fairly unscientific, but are disguised to appear scientific. In fact, there is little conclusive evidence that use of any of the current metrics will produce any more accurate results than experience based estimates. Following are the specific disadvantages of using SLOCs or FPA metrics for your next software development project.
Source Lines of Code (SLOC) Metrics Discourage Productivity
Software developers are paid to develop software applications, not to write lines of code. Suppose that two developers are working on the same application in different organizations. One is using a third generation language (3GL), such as FORTRAN, and another using a fourth generation language (4GL), such as an SQL dialect. The 4GL developer will have fewer lines of code than the 3GL developer (by one order of magnitude) for the same amount of functionality. This is documented by many studies and is well presented in Dreger's Function Point Analysis [1].
While it is true that 4GL programmers will finish coding a given amount of functionality sooner than their 3GL programmer counterparts, this does not mean that they will finish a similar development project significantly faster. The reason this is true is that they will not write requirements and design specifications or test plans any faster. Suppose that coding is 20% of total development effort. Then, suppose that a 4GL programmer can do the coding in 10% the time required for a 3GL programmer. This will result in a total 18% productivity improvement — not very dramatic. On the other hand, the 4GL programmer will probably write 10% of the number of lines of code needed by the 3GL programmer. Therefore, a SLOC metrics system will show that a 4GL programmer is very unproductive, compared to a 3GL programmer — unless the organization has been collecting 4GL metrics previously and never attempts to compare productivity between environments. The bottom line is that, even if you collect 4GL metrics, there are always vendors out there developing new higher productivity languages and development systems. Consider how badly SLOC systems fail in visual programming environments where icons, menu choices, and drawing tools take the place of text-based syntax.
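Spelled out under the stated assumptions (coding is 20% of total effort; the 4GL cuts coding time to one tenth; the 4GL program has one tenth of the lines), the arithmetic is:

```latex
T_{\mathrm{4GL}} = \underbrace{0.80}_{\text{non-coding}} + \underbrace{0.20 \times 0.10}_{\text{coding}} = 0.82
\qquad \text{improvement} = 1 - 0.82 = 18\%
```

Measured in SLOC per hour, however, the 4GL programmer scores only \(0.10 / 0.82 \approx 0.12\) of the 3GL rate; that is, a SLOC system reports the 4GL programmer as roughly eight times less productive despite the genuine 18% gain.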
Function Point Analysis (FPA): Improved, but Short of Ideal
FPA requires estimating how many Inputs, Outputs, Queries, Files, and Interfaces a proposed system will contain. An Input is essentially a program function that captures data; an Output is one that presents data. Queries are combinations, since they require a fair amount of input and always produce output. Files are static data storage locations, including database tables. Interfaces are external entities: other applications, users, and devices.
In his book, Dreger presents a scheme for identifying each of these elements, classifying each as to its complexity, and assigning it a number of Function Points accordingly. He recommends, of course, collecting your own data for converting Function Points to hours, but gives enough examples to show that a good starting place, if you have not been collecting data, might be about 20 hours per Function Point in a typical 3GL environment. The beauty of this approach is that it is language independent: you will develop the same number of Function Points regardless of which language you choose.
FPA rewards productivity realized from using more advanced development tools by producing statistics that show more Function Points being created in fewer hours. It is true you must still estimate how many Inputs, Outputs, Queries, Files, and Interfaces a proposed system will contain, and then compound the error of that estimate with the error in the conversion to hours, but conventional structured analysis and design (SA/SD) methodologies produce specifications from which these counts can be rather precisely extracted. For this reason, FPA estimates based on fairly complete structured specifications have proven to be much more accurate than the average SLOC-based estimate.
Unfortunately, the very reason for the success of Function Point Analysis is also its major weakness. Since structured specifications must be fairly complete before a meaningful estimate can be generated, FPA does not work well when a rapid prototyping approach such as that recommended in Structured Rapid Prototyping [2] is used. The rapid prototyper needs a reliable estimate in order to plan for when prototype iteration must be complete, but does not want to completely pre-specify requirements before they have been discovered through prototyping.
Also, it is not clear that FPA is compatible with modern Object-Oriented development techniques. In the Object-Oriented paradigm, the concept of Program (Inputs, Outputs, and Queries) is obsolete, as is the concept of File. These archaic concepts are replaced with the new term Object. An Object encapsulates both data (as Object attributes) and methods, or services (what the Object does). The best of the new Object-Oriented Analysis methodologies [3] do not provide a means of developing specifications from which Function Points could be easily derived.
Introducing Object-Oriented Productivity Metrics (OOPM)
The following material presents something new: Object-Oriented Productivity Metrics (OOPM). This approach, like FPA, is language independent, but it is also very compatible with Object-Oriented Analysis and rapid prototyping. The developer using OOPM counts Object-Oriented Effort Points (OOEPs) instead of Function Points. An OOEP is intuitively straightforward: it is a unit of measure used to determine how long it takes to develop an Object Class.
In order to determine how long it will take to develop a new Object Class, you will need to specify how many attributes the Object will have, how many services or methods of various types it will contain, what external entities it will get data from, and to what external entities it will deliver data. Objects will be simple, average, or complex depending on how many attributes they have. A simple Object might be defined as one with fewer than seven attributes. An average Object would perhaps have seven to 14 attributes, and complex Objects would have greater than 14 attributes. This classification is similar to and based on Dreger's system [1] for classifying the complexity of files, except that files usually have significantly more fields than Objects have attributes [3].
Experience on actual projects shows that most of the time spent developing the data structure of an Object Class goes to requirements analysis and design; actual development of data structure instantiation scripts takes almost no time once the design details have been specified. This is why it is important to classify Objects as to their data complexity. Then give simple Objects 3 OOEPs, average Objects 5 OOEPs, and complex Objects 8 OOEPs, an assignment similar to the FPA File classification.
Services, or methods, should be counted separately, as each one will contribute significantly to effort required to implement an Object Class. Here you could classify Services into four categories, consistent with both Dreger [1] and Coad/Yourdon [3]: Add/Modify/Delete Services, System Screen (Menus, Helps) Services, Output Services, and Computationally Intensive Services. Some of these categories would be further classified as simple, average, or complex, depending primarily on how much data is processed. Add/Modify/Delete Services could get 3, 4, or 6 points, and Output Services 4, 5, or 7 points. System Screen Services, such as menus and help screens, do not normally process data, so they would always get the same number of points, say 4. One would classify services as computationally intensive so that they will always get a high number of points, regardless of amounts of data processed, say 8.
Finally, count the external entities the proposed application will have to interface with, similar to the Interface count in FPA. Classify external entities as simple, average, or complex depending on how many Object Classes each external will interface with. Give externals interfacing with fewer than two Classes 7 points, with two to five Classes 10 points, and with more than five Classes 15 points. Figure 1 summarizes the system for classifying Object-Oriented Effort Points.
<table>
<thead>
<tr>
<th>Object Class</th>
<th>Simple</th>
<th>Average</th>
<th>Complex</th>
</tr>
</thead>
<tbody>
<tr>
<td>Attributes</td>
<td>< 7</td>
<td>7 - 14</td>
<td>> 14</td>
</tr>
<tr>
<td>Points</td>
<td>3 OOEPs</td>
<td>5 OOEPs</td>
<td>8 OOEPs</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Service</th>
<th>Simple</th>
<th>Average</th>
<th>Complex</th>
</tr>
</thead>
<tbody>
<tr>
<td>Add/Modify/Delete</td>
<td>3 OOEPs</td>
<td>4 OOEPs</td>
<td>6 OOEPs</td>
</tr>
<tr>
<td>Output</td>
<td>4 OOEPs</td>
<td>5 OOEPs</td>
<td>7 OOEPs</td>
</tr>
<tr>
<td>Sys. Screen</td>
<td>4 OOEPs</td>
<td>N/A</td>
<td>N/A</td>
</tr>
<tr>
<td>Comp. Intense</td>
<td>N/A</td>
<td>N/A</td>
<td>8 OOEPs</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>External Entity</th>
<th>< 2 Classes</th>
<th>2 - 5 Classes</th>
<th>> 5 Classes</th>
</tr>
</thead>
<tbody>
<tr>
<td>Points</td>
<td>7 OOEPs</td>
<td>10 OOEPs</td>
<td>15 OOEPs</td>
</tr>
</tbody>
</table>
Figure 1, Object-Oriented Effort Point Classification System
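To make the classification concrete, the following is a minimal sketch, in Python, of how the Figure 1 scheme might be encoded. The point values and thresholds are transcribed directly from Figure 1; the function and dictionary names are our own invention, not part of any published OOPM tooling.

```python
# OOEP values transcribed from Figure 1.
OBJECT_CLASS_POINTS = {"simple": 3, "average": 5, "complex": 8}
SERVICE_POINTS = {
    "add_modify_delete": {"simple": 3, "average": 4, "complex": 6},
    "output":            {"simple": 4, "average": 5, "complex": 7},
    "system_screen":     {"simple": 4},   # menus/helps: always 4 points
    "comp_intensive":    {"complex": 8},  # always scored as complex
}
EXTERNAL_ENTITY_POINTS = {"simple": 7, "average": 10, "complex": 15}

def classify_object(num_attributes):
    """Classify an Object Class by attribute count (Figure 1 thresholds)."""
    if num_attributes < 7:
        return "simple"
    return "average" if num_attributes <= 14 else "complex"

def classify_external(num_classes):
    """Classify an external entity by how many Object Classes it touches."""
    if num_classes < 2:
        return "simple"
    return "average" if num_classes <= 5 else "complex"
```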
Using Object-Oriented Analysis to Count OOEPs
What the OOPM estimator will need to do at the start of a new software development project is to prepare a preliminary requirements specification, using graphic models depicting the Object Classes, Services, and External Entity Interfaces the new application will need. For a rapid prototyping project, this specification can be very incomplete; an expansion factor is then applied to the initial estimate to obtain a total development effort estimate.
For rapid prototyping, OOPM will work better than an FPA approach based on SA/SD because there is a direct correlation between expansion of the OOA specification and expansion of the prototype: more Object Classes and Services are added with each prototype iteration. With SA/SD, expansion of a prototype results in more primitive processes to specify in the Structured Analysis, requiring re-partitioning and balancing of all higher levels in the Dataflow Diagram hierarchy. This is the approach recommended in Structured Rapid Prototyping [2]; it is possible, but somewhat awkward.
Figure 2 shows an example of an Object-Oriented information model diagram for a Harbor Information System. This application will contain 10 simple Object Classes for an OOEP count of 30. It will contain 6 simple Add/Modify/Delete Services for 18 OOEPs, 4 simple Output Services for 16 OOEPs, and, let's say, one menu and one help screen for 8 more points. This gives a total of 72 OOEPs.
But what about External Entity Interfaces? In Structured Rapid Prototyping [2], two dataflow diagrams, the Context Diagram and the Essential Functions Diagram, are recommended as part of the preliminary rapid requirements analysis to help work out the external data interfaces. How to model these interfaces seems to be missing from the Coad/Yourdon OOA approach. On the other hand, it no longer seems appropriate to advocate that an Object-Oriented developer begin by drawing Dataflow Diagrams. Instead, consider an Object Class Source/Sink Diagram as shown in Figure 3.
In Figure 3 there are 5 external entities. Harbor Manager and Employee are of average complexity and get 10 points each. Acme Placement Agency Database, Ship Owner, and Payless Shoes are simple and get 7 points each. The total External Interface OOEPs are 41. This brings the grand total for the Harbor Information System to 113 OOEPs.
Hours per OOEP
In Function Point Analysis [1], Dreger states that the norm for hours required to code one Function Point in a third generation language such as COBOL is about 20. Examples given of languages in this range are Pascal, JOVIAL, FORTRAN, COBOL, ALGOL, and C (C is identified as the least productive language in this list). Object-Oriented software development projects will not use one of these languages; they will hopefully use an Object-Oriented Programming Language (OOPL) such as Smalltalk or C++. Dreger says that such languages will require about one-fifth as many lines of code per Function Point as a third generation language, presumably due to language extensibility and component reuse through inheritance. This puts an OOPL in almost the same category as a 4GL: about 4 hours per Function Point.
From this analysis, it can be determined that, using OOPM, a simple Add/Modify/Delete Service (a data entry screen) should take an average of about 12 hours of effort (3 OOEPs times 4 hours per OOEP). The conversion of 4 hours per OOEP can be applied to all 113 OOEPs for the Harbor Information System, which should then take about 452 person-hours to develop. This estimate includes OOA, OOD, programming with an OOPL, testing, and all documentation. Keep in mind that documentation effort varies widely according to organizations' documentation standards, and that all such estimates will need to be adjusted to specific project conditions, such as developer skills and experience.
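As a check on the arithmetic, the Harbor Information System estimate can be reproduced mechanically with the sketch given after Figure 1. The counts below are taken from Figures 2 and 3, and the 4 hours per OOEP conversion is the OOPL figure derived from Dreger [1]; this is an illustration, not part of the published method.

```python
ooeps  = 10 * OBJECT_CLASS_POINTS["simple"]                 # 10 simple Object Classes -> 30
ooeps += 6 * SERVICE_POINTS["add_modify_delete"]["simple"]  # 6 Add/Modify/Delete Services -> 18
ooeps += 4 * SERVICE_POINTS["output"]["simple"]             # 4 Output Services -> 16
ooeps += 2 * SERVICE_POINTS["system_screen"]["simple"]      # one menu, one help screen -> 8
ooeps += 2 * EXTERNAL_ENTITY_POINTS["average"]              # Harbor Manager, Employee -> 20
ooeps += 3 * EXTERNAL_ENTITY_POINTS["simple"]               # three simple externals -> 21
assert ooeps == 113

HOURS_PER_OOEP = 4  # OOPL environment, per the discussion above
print(ooeps * HOURS_PER_OOEP)  # -> 452 person hours
```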
Figure 3, Harbor Information System Source/Sink Diagram
Benefits of Using Object-Oriented Productivity Metrics
The metrics presented here will be compatible with, and encourage the use of, very advanced development environments such as those used in Object-Oriented Rapid Prototyping [4]. Developers will be rewarded according to their proficiency in rapid development of new Objects (something of more perceived value to application users than lines of code or Function Points). Software reuse, an important modern productivity-enhancing technique, is fostered by Object-Oriented techniques; it will be rewarded by Object-Oriented Productivity Metrics. OOPM will generate metrics that are tightly coupled to Objects. When those Objects are reused, their OOEPs are encapsulated and move with the Objects to the new environment, where they become available for processor sizing and other purposes.
Whether or not rapid prototyping is used, OOPM provides a way to get reliable sizing and effort estimates very early in the project, before requirements have been finalized, based on preliminary Object-Oriented graphic models. When using a rapid prototyping approach, OOPM will provide an estimate of how long it will take to develop the initial prototype, and an expansion ratio can be used to estimate how long it will take to develop the entire application. The same expansion ratio can also be applied to estimate how many Objects will exist in the final application.
Research Evidence Supporting Feasibility of OOPM
Dreger has been cited as a source of valuable software metric research [1], and Coad/Yourdon OOA [3] has been cited as a good approach to Object-Oriented requirements analysis. The basic philosophy of FPA is that software metrics should be based on things the user wants to pay for, not lines of code. The basic philosophy of OOA is that users are mostly interested in Objects within the domain of their field of interest. It seems logical, therefore, to merge these two philosophies to create Object-Oriented Productivity Metrics.
Attempts at defining what a software Object is, to the universal satisfaction of all software engineers, have mostly met with failure. Undaunted, we will propose one more definition: an Object is a thing of interest to a software application user, defined, for the purposes of that application, by the attributes of the Object that are of interest to the user.
The reason this working definition of an Object is useful is that it allows for comparisons that may tie OOPM back to FPA for purposes of supporting feasibility. An Object has attributes; in terms of Information Modeling methodology, so does an Entity. In implementation, an entity often becomes a database table. Thus, Object attributes are very similar to database attributes which are, in turn, very similar to fields in files (except that database tables are usually normalized and have fewer attributes than files have fields). Therefore, FPA File counts can be converted to OOPM Object counts.
For FPA, SA/SD methodologies are used to provide precise counts of how many files will be created for a new application. For OOPM, you can use OOA models to accurately determine how many Objects of what level of complexity will be created. Determining Object complexity from preliminary OOA models may, in fact, be easier than determining File complexity from preliminary Structured Analysis. It is difficult, during the early stages of requirements analysis, to determine how many fields will be in a proposed file. It is not so difficult, in the early stages of OOA, to determine how many attributes a proposed Object will have.
One of the authors is the manager of several projects at NASA Ames Research Center that have been collecting metrics on application development in an environment using Sybase development tools. Sybase is a relational database management system with a bundled collection of development tools including a 4GL, report writers, and a forms generator. On these projects, entities are defined and modeled in a manner consistent with defining and modeling Objects in OOA (even though Sybase is not Object-Oriented).
The metrics kept record how long it takes to develop a Form and how long it takes to develop a Report. Only development with Sybase tools is tracked; very little development work on these projects uses third generation language programming. Four years of data have been averaged, with the result that the average Form takes 24 hours to develop and the average Report 16 hours. There is a variance depending on complexity, yielding a range of 4 to 40 (sometimes more) hours per Form and 4 to 24 hours per Report. These metrics do not include final system documentation or full integration testing.
Some of the Forms are very complex. They include what has been described above as Add/Modify/Delete Services, System Screen Services, and Computationally Intensive Services. The Reports are equivalent to the Output Services described above. Data has not been kept on the amount of requirements and design effort required to develop an Entity (Object).
The purpose of reporting these effort metrics here is that they fall well within an acceptable range of the suggested values for OOPM given above. Suppose that the average input Service is worth 4 OOEPs and the average output Service 5 OOEPs. Then suppose, using Sybase tools, an OOEP takes 4 hours to develop (this would be consistent with statistics provided by Dreger [1]). Then, developing an input Service would require an average of 16 hours and an output Service 20 hours, according to OOPM metrics. These estimates are acceptably close to the actuals of 24 hours and 16 hours, respectively, considering that Dreger says the average of 20 hours per COBOL Function Point has a range of 3 to 87 hours. To get really accurate metrics, you will have to collect your own statistics.
Summary of Findings
With the advent of rapid prototyping; Object-Oriented Analysis (OOA), Design (OOD), and Programming (OOP); and, recently, Object-Oriented Rapid Prototyping (OORP), it is time to abandon software productivity metrics based on either Source Lines of Code (SLOCs) or Function Point Analysis (FPA). Productivity metrics need to support and encourage the use of the best modern software engineering practice if the desired result is continuous process improvement.
A new approach, Object-Oriented Productivity Metrics (OOPM), has been defined here that is compatible with modern software development techniques such as Object-Oriented Rapid Prototyping. Arguments made in and supported by Coad/Yourdon's Object-Oriented Analysis [3] and Dreger's Function Point Analysis [1] support the soundness of the OOPM approach. Actual project experience, using metrics similar to those that would be used with OOPM, provides evidence indicating that OOPM-based estimates would be highly accurate. Based on this research, the following recommendations can be made without hesitation:
- Use OOA, OOD, and OORP techniques for software development.
- Estimate effort using OOPM.
- Evaluate productivity using OOPM.
- Collect your own OOPM statistics and continually refine them over time.
References
[1] Dreger, Function Point Analysis.
[2] Structured Rapid Prototyping.
[3] Coad and Yourdon, Object-Oriented Analysis.
[4] Object-Oriented Rapid Prototyping.
ION API for Electronic Signage Final Report
Scott Hazlett
University of British Columbia
EECE 490L
April 08, 2014
Disclaimer: “UBC SEEDS provides students with the opportunity to share the findings of their studies, as well as their opinions, conclusions and recommendations with the UBC community. The reader should bear in mind that this is a student project/report and is not an official document of UBC. Furthermore readers should bear in mind that these reports may not reflect the current status of activities at UBC. We urge you to contact the research persons mentioned in a report or the SEEDS Coordinator about the current status of the subject matter of a project/report”.
Dear Professor Dr. Vincent Wong,
Enclosed is the project report titled ION API for Electronic Signage that was commissioned in January 2014 to integrate the UBC ION metering data with UBC electronic signage.
This report discusses the implementation of ION API as a web-based interface using HTTP queries. This report is restricted to considering meter readings of only electricity consumption. As well, this report assumes usage patterns that are not exposed to the public.
The main findings of the report are:
1. The ION API serves as a protective layer for the ION database, both by preventing possible malicious access and by lowering the load on the database through serving cached results
2. The ION API lays the groundwork for developing upstream integration with other producers of data as well as downstream integration with other platforms that consume data
I would like to thank Wilson Lo, Senior Programmer Analyst for Communication and Collaboration Technologies, for being instrumental in forming key design decisions and providing valuable technical assistance. I would also like to thank Jamil Rhajiak, UBC Communications Services Coordinator, who has met regularly with me to assist with the integration of this project’s data with UBC signage.
Finally, I would like to sincerely thank you for having the opportunity to work on this project.
Scott Hazlett
Computer Engineering student
Encl.
University of British Columbia
Electrical and Computer Engineering 490L
ION API for Electronic Signage
Final Report
Scott Hazlett
With special mention
Wilson Lo
Jamil Rhajiak
8th April 2014
ABSTRACT
This report investigates a practical approach to integrating a database with UBC’s Enterprise Cool Sign infrastructure that powers all campus electronic signs. UBC’s ION database collects real-time electricity usage data for all buildings on campus, but the data is relatively inaccessible. This project designed and deployed an ION database API that serves to channel this real-time data to electronic signs across both university campuses. The ION API is underpinned by an Apache webserver running Perl CGI scripts that service HTTP GET requests with URL parameters. The ION API was designed to reduce load on the ION database by caching previous search results. Furthermore, the ION API attempts to present the raw data in a graphical way that is appealing to passersby so as to effect societal change. Due to ongoing restructuring in the ION database, this project worked with a static file of data exported from the ION database. Care was taken to read the database row by row to closely mimic the live database. The ION API was successfully deployed, allowing for two forms of operation: one comparing a building's electricity consumption against the preceding 24-hour period, and another providing simple inter-building comparisons.
TABLE OF CONTENTS
ABSTRACT .........................................................................................................................................ii
LIST OF ILLUSTRATIONS................................................................................................................... iv
GLOSSARY ......................................................................................................................................... v
1. INTRODUCTION ............................................................................................................................. 1
2. EQUIPMENT AND METHODOLOGY ............................................................................................ 2
Personnel .................................................................................................................................... 2
Cool Sign Version 4 to 5 .............................................................................................................. 2
Cool Sign and Data Watcher ....................................................................................................... 3
URL Parameters and XML Results ............................................................................................... 4
Handling the Present ION Database ........................................................................................... 5
3. ION API DESIGN DECISIONS ..................................................................................................... 6
Model-Controller Design Pattern ............................................................................................... 6
Reactive API with Apache and CGI.............................................................................................. 6
Perl XML Library .......................................................................................................................... 6
Local Caching and Calculations ................................................................................................... 7
4. ION API IMPLEMENTATION ..................................................................................................... 8
Generating the Cached Result .................................................................................................... 8
Parsing the ION Database Row by Row .................................................................................... 10
Dealing with ION Database Corner Cases ................................................................................. 10
Templates .................................................................................................................................. 11
5. PROJECT CONCERNS .............................................................................................................. 13
Concurrent Access .................................................................................................................... 13
Effective Communication and Visualization ............................................................................. 13
6. RESULTS ..................................................................................................................................... 15
7. CONCLUSION ............................................................................................................................. 16
LIST OF ILLUSTRATIONS
Figure 1: System Diagram of Integration of ION API with UBC Signage Infrastructure ................. 3
GLOSSARY
**API**: *Application Programming Interface* is the published abilities of a used program to its users.
**CGI**: *Common Gateway Interface* is a way for users to connect to a URL and do more than HTTP is designed to do by launching an environment, say PHP or Perl, which can then run a script as if the script was launched locally on the machine.
**CSV**: *Comma-Separated Values* file stores data structured along rows by separating them with a carriage-return character and along columns by separating them with the comma character. CSV files are popular because they are easy for programs to process.
**kWh**: *kilowatt-hour* is a measurement of electrical work; one kWh is the amount of electricity needed to power a 100-watt bulb for 10 hours.
**SIS name**: SIS names can be looked up under the details tab of a building in Wayfinding UBC and uniquely identify a given UBC building. Most UBC buildings have an SIS name, although the University Services Building is a notable exception.
**XML**: *eXtensible Markup Language* is a popular text-based markup language that allows for simple transmission of structured data. One popular example of XML usage is an RSS feed.
1. INTRODUCTION
This project investigates an optimal approach to delivering meter data to UBC Signage. The ION API for Electronic Signage project proposes to adapt data collected about campus electricity usage and present it on campus digital signs in a form that is consumable within the 6-second attention span of passing people.
Visitors to UBC as well as the local community invariably pass by many of these signs in any given day. I feel that this is a good opportunity to contribute towards lasting campus behavioural change through awareness of resource consumption.
This project works with existing UBC signage infrastructure to allow streaming of consumption data to any sign. As well, this work lays the groundwork for downstream integration with personal mobile devices as well as upstream integration with additional sources of data.
This report first provides background on the existing UBC infrastructure for electronic signage and the Cool Sign Enterprise software that powers the signs. Then the report transitions into the deployment of the ION API and the primary design decisions. Further along, it reviews some implementation highlights, including providing for multiple behaviours based on templates. Finally, it concludes with the possibilities of upstreaming and downstreaming as well as some achievements and concerns.
2. EQUIPMENT AND METHODOLOGY
The following subsections will cover existing infrastructure as well as instrumental personnel.
Personnel
Working with Wilson Lo, Senior Programmer Analyst for Communication and Collaboration Technologies, and Jamil Rhajiak, UBC Communications Services Coordinator, we have deployed dynamic content to display nodes using the Cool Sign infrastructure. Because a primary goal of this project was to integrate well with existing infrastructure, we did not consider alternate signage software.
Cool Sign Version 4 to 5
However, UBC is currently undergoing an upgrade from Cool Sign version 4 to version 5. Most notably, Cool Sign version 5 will now offer operators the option to display a website natively on signs as an alternative to manually creating content as was necessary with version 4. This is a big incentive for operators to shift towards dynamic content on a website. A unified front can be easily displayed on multiple platforms, for example, mobile devices and web browsers.
UBC Signage enjoys a wide audience and so its security is accordingly well-guarded. Thus, it was a natural choice to deploy the ION API on a Virtual Server Service provided by UBC IT Services. This keeps the entire system protected behind the same virtual network. The system specifications are 1 CPU, 2 GB RAM, 32 GB Tier 1 System Disk, and RHEL 6 OS. This accrues an operating cost of about C$125 per year, charged to the ECE department.
Cool Sign and Data Watcher
Cool Sign is enterprise software that UBC licenses to display content on electronic signs across both campuses. Figure 1 shows how the Cool Sign Data Watcher periodically queries sources of information, in this case the ION API, used for broadcasting dynamic content to UBC electronic signs. The Data Watcher is driven by a configuration file which is very flexible and suits this project:
1. We can configure the Data Watcher to connect to any URL
2. We can configure the Data Watcher to handle an XML-formatted result
3. We can configure the periodicity of updates
Figure 1: System Diagram of Integration of ION API with UBC Signage Infrastructure
We have created a dedicated Data Watcher that queries the ION API via HTTP and receives XML-formatted data on the electricity consumption of a building. All results can then be automatically streamed to any UBC display node through the Cool Sign infrastructure.
URL Parameters and XML Results
The Cool Sign Data Watcher can query in different ways. A common way is to query the data source using an HTTP GET request to a URL with parameters. Similarly, Cool Sign can receive data in various formats. We chose XML-formatted results because there are many other nodes already configured in this fashion, and because XML-formatted data is easily processed by most applications.
The following shows the XML structure that we expect after querying the ION API. The URL for the ION database is not currently registered with the DNS server and so it is directly accessed by IP address.
```
Content-type: text/html
<item>
<Block>
<BldName>USB</BldName>
<Consump24>2026</Consump24>
<Date>07/04/2013</Date>
</Block>
</item>
```
Here, the XML entries have been indented for clarity but should actually be on one long line so as not to break compatibility with the Data Watcher. This XML reply states that the University Services Building “USB” consumed 2026 kWh in the past calendar day.
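As an illustration only, a consumer such as the Data Watcher could exercise this interface along the lines of the following Python sketch; the host address and the URL parameter names are hypothetical, since the exact URL scheme is not spelled out in this report.

```python
import urllib.request
import xml.etree.ElementTree as ET

# Hypothetical IP address and parameter names, for illustration only.
url = "http://10.0.0.1/cgi-bin/IonController.pl?building=USB&template=1"
with urllib.request.urlopen(url) as reply:
    root = ET.fromstring(reply.read())  # parse the one-line XML result

block = root.find("Block")
print(block.findtext("BldName"),
      block.findtext("Consump24"), "kWh on",
      block.findtext("Date"))
```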
Handling the Present ION Database
The ION database is currently undergoing a migration in naming and reordering to better reflect the current deployment of meters within physical buildings. Each building may have smaller buildings for which the same department is responsible; thus, many buildings have multiple meters. For example, MacLeod is responsible for Rusty Hut.
This project has focused only on kWh readings of energy consumption as it is more widely deployed. As well, UBC is in the process of migrating completely from steam to hot water and so there is flux with those meters.
The ION database can export data for a given period, for example one year and multiple buildings, into CSV format. This project has been using such a file for development. It contains the readings for the calendar year of 2013 across 14 meters which aggregate to 9 buildings. Each reading is taken at an interval of 15 minutes. We extracted the kWh columns for each meter to shorten the file size.
3. ION API DESIGN DECISIONS
The following sub headings will discuss the design and implementation of the ION API.
Model-Controller Design Pattern
This project used a Model-Controller design pattern to separate decision making from the database access. IonController.pl is the URL-accepting interface and only performs basic sanity checks and sanitization on the URL parameters. It then dispatches the work to IonModel.pm.
IonController is a Perl script that can be called directly and has a .pl extension, meanwhile IonModel is a Perl module that contains a constructor and member methods and has a .pm extension. The former is analogous to a main method in C or Java, while the latter is analogous to a software library.
Reactive API with Apache and CGI
The ION API is a reactive host in that it does not store any state other than previously cached results. This is easily implemented with the Common Gateway Interface (CGI), which allows URL queries to run local scripts in various languages, for example PHP, Perl, and Python. This project chose Perl as it performs well with per-line file processing. In conjunction with this, we chose Apache as the webserver because it is widely deployed and easy to set up.
Perl XML Library
This project uses the Perl Module XML::Generator to generate XML-formatted replies. We hope to reduce the number of programmer errors by re-using proven software libraries.
Local Caching and Calculations
An important design decision was to have all the calculations performed on the ION API host so that no unnecessary load is placed on the Cool Sign Data Watcher. Similarly, the ION API caches past results to serve requests faster and to alleviate the burden on the ION database.
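A minimal sketch of this deterministic caching idea, written in Python for brevity (the production code is Perl, and the cache directory and filename pattern here are assumptions):

```python
import os

CACHE_DIR = "/var/cache/ion_api"  # assumed location

def cache_path(building, template, start, end):
    # The query terms themselves form the filename, so identical
    # queries always map to the same cache file and never go stale.
    return os.path.join(CACHE_DIR, f"{building}_{template}_{start}_{end}.xml")

def get_result(building, template, start, end, compute):
    path = cache_path(building, template, start, end)
    if not os.path.exists(path):          # only touch the ION database on a miss
        with open(path, "w") as f:
            f.write(compute(building, start, end))
    with open(path) as f:
        return f.read()
```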
4. ION API IMPLEMENTATION
The following subsections cover interesting implementation details.
Generating the Cached Result
The Perl code illustrating the core functionality for a query is shown below. This subroutine will generate the cache file for a query. If it does not already exist, it will query the ION Database to generate the cache file. Otherwise, it does not contact the ION Database.
```perl
sub execute {
my $class = shift;
# This will generate the regex used for the starting
# row (smallest reading) to the end row (largest reading)
# from which we will take a delta
$class->generate_regex_for_time_bounds();
# This will perform the mapping from SIS name
$class->lookup_ion_name_with_sis_name();
# This will assign $class->{'_cache_filename'}
$class->generate_cache_filename();
# This will create the cached result if it does not
# already exist. If it exists, no need to create it.
# Otherwise, it reads this from the
# ION database line-by-line and saves the result
# to the designated cache file
$class->generate_cache_file();
}
```
First a regular expression is formed for the starting and ending timestamps that we are interested in. Next, we look up the relevant meters based upon the SIS name for the UBC building.
With this information, we can now generate the deterministic name of the cache file that has this information. If the file is not found, it will query the database for the relevant result and save it in XML format as a cache file.
After a return from this subroutine, a call to the following code will return the XML-formatted result. It merely reads in the entire file and pipes it to the client.
```perl
sub get_reply_string {
    my $class = shift;
    # Stream the cached XML result straight to the client.
    open my $cache, '<', $class->{'_cache_filename'}
        or die "Could not open cache file $class->{'_cache_filename'} for reading";
    print while <$cache>;
    close $cache;
}
```
While this algorithm performs well with a single access, in its present state it can have concurrency issues; these are discussed below under Project Concerns.
Parsing the ION Database Row by Row
Each row corresponds to one 15-minute window and one sample.
To calculate the consumption for a one-hour period, we took the difference between the reading at the starting time, say 1:00:00 AM, and the reading one hour later, say 2:00:00 AM. Each sample interval is 15 minutes, so a total of 5 rows are evaluated.
Dealing with ION Database Corner Cases
It became apparent that some entries were not usable; for example, they were blank or 0. A design decision in this project was to fail gracefully. If there was a valid reading at both the desired start and end times, we simply took the difference. For the case of summing an entire day, it was possible that the end point might be unavailable but a suitable nearby row would be workable. Thus, the algorithm evolved into first locating the starting row by time, then iterating through rows until a suitable (non-zero) kWh reading was obtained; this is our lower-bound kWh reading. Then we continue iterating through rows until the ending time is encountered and retrieve that row's kWh reading as our upper bound. A subtle point is that at each row we need to keep track of the latest valid kWh reading, because it is entirely possible that the reading on the final line is not usable. This way, we only have to traverse forwards.
There is also the case of overflow, as these meters all wrap around at $10^{10}$. First, we have to make sure that Perl variables can handle numbers this large. Running the following code at a Linux prompt will show what the current system uses for maximum and minimum float values in Perl:
```
$ perl -MPOSIX -le 'print POSIX::FLT_MAX; print POSIX::FLT_MIN'
3.40282346638529e+38
1.17549435082229e-38
```
Next, we need to detect overflow. While iterating toward the end kWh reading, for every valid reading we ensure that it is larger than the previous valid reading. If it turns out to be smaller, and non-zero, we accept it as a wrap-around only if the wrapped difference (latest $-$ previous $+ 10^{10}$) is less than one tenth of the rollover value, $10^9$; equivalently, the raw reading must have dropped by more than $9 \times 10^9$. More specifically, in code:
```perl
if ($latest != 0 and ($latest - $previous + $ROLL) < $ROLL_ONE_TENTH) {
    # $ROLL is the rollover value (10**10); $ROLL_ONE_TENTH is 10**9.
    # A smaller, non-zero reading is accepted only when the wrapped
    # delta is under one tenth of the rollover, i.e. a genuine
    # wrap-around rather than a glitch. Zero values are rejected.
    $previous = $latest;
}
```
This case is treated as overflow and the rollover amount is added to the final result. Overflow happens about once a year for these meters.
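For clarity, the scan-and-rollover logic described above can be summarized in a self-contained Python sketch; the row format is an assumption based on the CSV description, and the production implementation is the Perl shown earlier.

```python
ROLL = 10**10        # meters wrap around at 10^10
ROLL_TENTH = 10**9   # wrapped delta must be under one tenth of the rollover

def consumption(rows, start_time, end_time, kwh_col):
    """rows: (timestamp, fields) tuples at 15-minute intervals, in time order."""
    start = prev = None
    rollovers = 0
    for ts, fields in rows:
        kwh = float(fields[kwh_col] or 0)
        if kwh == 0:
            continue                        # skip blank/zero corner cases
        if start is None:
            if ts >= start_time:
                start = prev = kwh          # first valid reading at/after the start
            continue
        if kwh < prev:
            if (kwh - prev + ROLL) < ROLL_TENTH:
                rollovers += 1              # genuine wrap-around
                prev = kwh
            # otherwise: implausible drop, reject the reading
        else:
            prev = kwh                      # remember the latest valid reading
        if ts >= end_time:
            break
    return prev - start + rollovers * ROLL
```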
Templates
We have created two templates. Accessing template one returns the energy consumption for a specified building compared with itself at the same time of day one day ago. The HTTP GET request accepts the building name and template ordinal, and in return replies with the kWh energy consumption during a one-hour period as well as the date asked for. It also returns a simple score, from a low of 1 to a high of 5. Currently, we expose the actual kWh usage along with the score, but there is concern that when this database is eventually exposed to the public, we may want to keep that information internal.
Accessing template two returns the energy consumption as a comparison between two buildings at a specified time of day one day ago as well as one week ago. Buildings vary widely in their energy consumption, and thus only similar buildings provide a meaningful comparison: for example, comparing student residence buildings.
To reduce load on the ION API, previous results that have been cached will be saved to disk with the query terms as part of the filename. As the cached values are of ION readings in the past, the cached values will never become stale. This is a simple and deterministic caching mechanism. A scheduled task on the ION API host must periodically trim cached files by sorting them according to their time stamp to prevent exhausting disk space.
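The trim job itself could be as simple as the following sketch (the directory and retention limit are assumptions):

```python
import os

CACHE_DIR = "/var/cache/ion_api"  # assumed location
KEEP = 10_000                     # assumed retention limit

def trim_cache():
    # Sort cache files oldest-first by modification time stamp,
    # then delete everything except the newest KEEP files.
    entries = sorted((e.stat().st_mtime, e.path) for e in os.scandir(CACHE_DIR))
    for _, path in entries[:-KEEP]:
        os.remove(path)
```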
5. PROJECT CONCERNS
This subsection will consider some concerns.
Concurrent Access
There is a concern that concurrent access to the database could cause an inconsistent state if two nodes write to the same file. This could only happen if both connections were running the exact same query at the exact same time. Both threads could see the cached result not existing, and then both start to generate the cached result and write it to file. Because the code writes to the cache file one line at a time, there is a chance of corruption if both write at the same time. A possible solution would be to save state on the local machine in the form of a semaphore for each file.
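One way to realize the per-file semaphore idea, sketched here in Python under POSIX assumptions, is to write the result to a temporary file and rename it into place; rename is atomic, so concurrent readers see either no cache file or a complete one, never a torn one.

```python
import os
import tempfile

def write_cache_atomically(path, xml_text):
    # Create the temp file in the same directory so the rename
    # stays on one filesystem and remains atomic.
    fd, tmp = tempfile.mkstemp(dir=os.path.dirname(path))
    with os.fdopen(fd, "w") as f:
        f.write(xml_text)
    os.replace(tmp, path)  # atomic on POSIX
```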
It is important to filter all URL parameters from the public, as there can be deliberately as well as unintentionally malicious parameters. Currently, because the API sits behind the UBC firewall, it is not exposed to this.
Effective Communication and Visualization
Effective visualization has been a hurdle for us as we have found it to be easier to recognize an ineffective visualization than to come up with a novel representation. The result of a comparison is a number between 1 and 5. This can be configured in Cool Sign to map to a table of pre-generated graphics. One visualization is to display an energy-conserving idea when consumption is high, for example, reminding people to switch off lights when not in use. Another visualization would use humor, for example a drenched squirrel chastising the audience’s excessive use of laundry driers while he is hand-wringing his own fur.
There really are a lot of possibilities for communicating the idea of conserving energy. Perhaps more important than the specific visualization itself is that the messages should be random, or at least appear to passersby to offer new content each time they look. Otherwise, people are likely to gradually tune out the perpetually drenched squirrel.
6. RESULTS
We achieved the project objectives of automating the process of transferring ION Database information to display nodes. As well, we found that the existing infrastructure in place works well with the ION API implementation. Meanwhile, we did not have the opportunity to interact with the public to solicit feedback on visualization methods due to time constraints. As well, an outstanding work item is full sanitization of received URL parameters. I would recommend that persons involved in future work ensure that they fully understand the subtleties of Perl data structures, as I had a significant ramp-up period due to this. This project was a success and a good interim step toward integration with personal mobile devices and web browsers.
7. CONCLUSION
This report investigated how to integrate ION Database meter readings with existing UBC Electronic Signage infrastructure.
We have successfully implemented two templates to accommodate different consumption patterns. In addition, we have laid the groundwork for upstreaming to other data producers such as RSS feeds as well as downstreaming to other consumers such as personal mobile devices.
Although the current implementation reads from a static file, it does so strictly row by row so that it can be easily ported to a live database without much modification. In a similar vein, the ION API was designed with a Model-Controller design pattern, completely abstracting one half of the code from the internal representation of the database. The ION API project was a success.
Today’s Plan
- SQL (Chapter 3, 4)
- Views (4.2)
- Transactions (4.3)
- Integrity Constraints (4.4)
- Triggers (5.3)
- Functions and Procedures (5.2), Recursive Queries (5.4), Authorization (4.6), Ranking (5.5)
- Some Complex SQL Examples
Views
- Provide a mechanism to hide certain data from the view of certain users. To create a view we use the command:
```
create view v as <query expression>
```
where:
- `<query expression>` is any legal query expression
- `v` is the name of the view
- Can be used in any place a normal table can be used
- For users, there is no distinction in terms of using it
Example Queries
- A view consisting of branches and their customers
```
create view all-customers as
(select branch-name, customer-name
from depositor, account
where depositor.account-number = account.account-number)
union
(select branch-name, customer-name
from borrower, loan
where borrower.loan-number = loan.loan-number)
```
Find all customers of the Perryridge branch
```
select customer-name
from all-customers
where branch-name = 'Perryridge'
```
Views
- Is it different from the DBMS’s side?
- Yes; a view may or may not be *materialized*
- Pros/Cons?
- Updates into views have to be treated differently
- In most cases, disallowed.
Views vs Tables
<table>
<thead>
<tr>
<th></th>
<th>View</th>
<th>Table</th>
</tr>
</thead>
<tbody>
<tr>
<td>Creating</td>
<td>Create view V as (select * from A, B where …)</td>
<td>Create table T as (select * from A, B where …)</td>
</tr>
<tr>
<td>Can be used</td>
<td>In any select query. Only some update queries.</td>
<td>It’s a new table. You can do what you want.</td>
</tr>
<tr>
<td>Maintained as</td>
<td>1. Evaluate the query and store it on disk as if a table. 2. Don’t store. Substitute in queries when referenced.</td>
<td>It’s a new table. Stored on disk.</td>
</tr>
<tr>
<td>What if a tuple is inserted in A?</td>
<td>1. If stored on disk, the stored table is automatically updated to be accurate. 2. If we are just substituting, there is no need to do anything.</td>
<td>T is a separate table; there is no reason why the DBMS should keep it updated. If you want that, you must define a trigger.</td>
</tr>
</tbody>
</table>
Views vs Tables
- Views strictly supersede “create a table and define a trigger to keep it updated”
- Two main reasons for using them:
- Security/authorization
- Ease of writing queries
- E.g. IndividualMedals table
- The way we are doing it, the IndividualMedals table is an instance of “creating table”, and not “creating view”
- Creating a view might have been better.
- Perhaps the only reason to create a table is to force the DBMS to choose the option of “materializing”
- That has efficiency advantages in some cases
- Especially if the underlying tables don’t change
Update of a View
- Create a view of all loan data in the loan relation, hiding the amount attribute
```
create view branch-loan as
select branch-name, loan-number
from loan
```
- Add a new tuple to branch-loan
```
insert into branch-loan
values ('Perryridge', 'L-307')
```
- This insertion must be represented by the insertion of the tuple
```
('L-307', 'Perryridge', null)
```
into the loan relation
- Updates on more complex views are difficult or impossible to translate, and hence are disallowed.
- Many SQL implementations allow updates only on simple views (without aggregates) defined on a single relation
Today’s Plan
- SQL (Chapter 3, 4)
- Views (4.2)
- Transactions (4.3)
- Integrity Constraints (4.4)
- Triggers (5.3)
- Functions and Procedures (5.2), Recursive Queries (5.4), Authorization (4.6), Ranking (5.5)
- Some Complex SQL Examples
A transaction is a sequence of queries and update statements executed as a single unit
- Transactions are started implicitly and terminated by one of
- commit work: makes all updates of the transaction permanent in the database
- rollback work: undoes all updates performed by the transaction.
Motivating example
- Transfer of money from one account to another involves two steps:
- deduct from one account and credit to another
- If one step succeeds and the other fails, the database is in an inconsistent state
- Therefore, either both steps should succeed or neither should
If any step of a transaction fails, all work done by the transaction can be undone by rollback work.
Rollback of incomplete transactions is done automatically, in case of system failures
In most database systems, each SQL statement that executes successfully is automatically committed.
- Each transaction would then consist of only a single statement
- Automatic commit can usually be turned off, allowing multi-statement transactions, but how to do so depends on the database system
- Another option in SQL:1999: enclose statements within
**begin atomic**
...
**end**
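As a concrete sketch of these semantics, the following Python snippet uses the standard sqlite3 module to run the two-step money transfer as one multi-statement transaction; the table and column names are made up for illustration.

```python
import sqlite3

conn = sqlite3.connect("bank.db", isolation_level=None)  # manage transactions manually
cur = conn.cursor()

def transfer(frm, to, amount):
    try:
        cur.execute("BEGIN")  # start a multi-statement transaction
        cur.execute("UPDATE account SET balance = balance - ? WHERE acct_no = ?",
                    (amount, frm))
        cur.execute("UPDATE account SET balance = balance + ? WHERE acct_no = ?",
                    (amount, to))
        cur.execute("COMMIT")    # make both updates permanent
    except sqlite3.Error:
        cur.execute("ROLLBACK")  # undo all work done by the transaction
        raise
```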
Today’s Plan
- SQL (Chapter 3, 4)
- Views (4.2)
- Transactions (4.3)
- Integrity Constraints (4.4)
- Triggers (5.3)
- Functions and Procedures (5.2), Recursive Queries (5.4), Authorization (4.6), Ranking (5.5)
- Some Complex SQL Examples
A **trigger** is a statement that is executed automatically by the system as a side effect of a modification to the database.
Suppose that instead of allowing negative account balances, the bank deals with overdrafts by
- 1. setting the account balance to zero
- 2. creating a loan in the amount of the overdraft
- 3. giving this loan a loan number identical to the account number of the overdrawn account
```
create trigger overdraft-trigger after update on account
referencing new row as nrow
for each row
when nrow.balance < 0
begin atomic
    actions to be taken
end
```
```
create trigger overdraft-trigger after update on account
referencing new row as nrow
for each row
when nrow.balance < 0
begin atomic
    insert into borrower
        (select customer-name, account-number
         from depositor
         where nrow.account-number = depositor.account-number);
    insert into loan values
        (nrow.account-number, nrow.branch-name, nrow.balance);
    update account set balance = 0
        where account.account-number = nrow.account-number
end
```
Triggers...
- **External World Actions**
- How does the DB order something if the inventory is low?
- **Syntax**
- Every system has its own syntax
- **Careful with triggers**
- Cascading triggers, Infinite Sequences...
- **More Info/Examples:**
- Google: “create trigger” oracle download-uk
Today’s Plan
SQL (Chapter 3, 4)
- Views (4.2)
- Transactions (4.3)
- Integrity Constraints (4.4)
- Triggers (5.3)
- Functions and Procedures (5.2), Recursive Queries (5.4), Authorization (4.6), Ranking (5.5)
Some Complex SQL Examples
Next:
- Integrity constraints
- ??
- Prevent semantic inconsistencies
IC’s
- Predicates on the database
- Must always be true (checked whenever db gets updated)
- There are the following 4 types of IC’s:
- **Key constraints** (1 table)
- e.g., *2 accts can’t share the same acct_no*
- **Attribute constraints** (1 table)
- e.g., *accts must have nonnegative balance*
- **Referential Integrity constraints** (2 tables)
- E.g. *bnames associated w/ loans must be names of real branches*
- **Global Constraints** (*n* tables)
- E.g., *all loans must be carried by at least 1 customer with a savings acct*
Key Constraints
Idea: specifies that a relation is a set, not a bag
SQL examples:
1. **Primary Key:**
CREATE TABLE branch(
bname CHAR(15) PRIMARY KEY,
bcity CHAR(20),
assets INT);
or
CREATE TABLE depositor(
cname CHAR(15),
acct_no CHAR(5),
PRIMARY KEY(cname, acct_no));
2. **Candidate Keys:**
CREATE TABLE customer (
ssn CHAR(9) PRIMARY KEY,
cname CHAR(15),
address CHAR(30),
city CHAR(10),
UNIQUE (cname, address, city));
Effect of SQL Key declarations
PRIMARY KEY (A1, A2, ..., An) or
UNIQUE (A1, A2, ..., An)
Insertions: check whether any existing tuple has the same values for A1, A2, ..., An as the inserted tuple. If found, reject the insertion
Updates to any of A1, A2, ..., An: treat as insertion of entire tuple
Primary vs Unique (candidate)
1. 1 primary key per table, several unique keys allowed.
2. Only primary key can be referenced by “foreign key” (ref integrity)
3. DBMS may treat primary key differently
(e.g.: create an index on PK)
How would you implement something like this?
Idea:
- Attach constraints to values of attributes
- Enhances types system (e.g.: >= 0 rather than integer)
In SQL:
1. **NOT NULL**
e.g.: CREATE TABLE branch(
bname CHAR(15) NOT NULL,
....
)
Note: declaring bname as primary key also prevents null values
2. **CHECK**
e.g.: CREATE TABLE depositor(
....
balance int NOT NULL,
CHECK( balance >= 0),
....
)
affect insertions, update in affected columns
Attribute Constraints
Domains: can associate constraints with DOMAINS rather than attributes
e.g.: instead of:
```
CREATE TABLE depositor(
....
balance INT NOT NULL,
CHECK (balance >= 0)
)
```
One can write:
```
CREATE DOMAIN bank-balance INT (
    CONSTRAINT not-overdrawn CHECK (value >= 0),
    CONSTRAINT not-null-value CHECK (value NOT NULL));
```
```
CREATE TABLE depositor (
    ....
    balance bank-balance,
)
```
Advantages?
Attribute Constraints
Advantage of associating constraints with domains:
1. can avoid repeating specification of same constraint for multiple columns
2. can name constraints
e.g.: CREATE DOMAIN bank-balance INT (
CONSTRAINT not-overdrawn
CHECK (value >= 0),
CONSTRAINT not-null-value
CHECK( value NOT NULL));
allows one to:
1. add or remove:
ALTER DOMAIN bank-balance
ADD CONSTRAINT capped
CHECK( value <= 10000)
2. report better errors (know which constraint violated)
Idea: prevent “dangling tuples” (e.g.: a loan with a bname, Kenmore, when no Kenmore tuple in branch)
Referential Integrity:
ensure that:
foreign key value → primary key value
(note: don’t need to ensure ←, i.e., not all branches have to have loans)
Referential Integrity Constraints
In SQL:
```
CREATE TABLE branch(
bname CHAR(15) PRIMARY KEY
....)
CREATE TABLE loan (
........
FOREIGN KEY (bname) REFERENCES branch);
```
Affects:
1) Insertions, updates of referencing relation
2) Deletions, updates of referenced relation
Referential Integrity Constraints
What happens when we try to delete this tuple?
Ans: 3 possibilities
1) reject deletion/update
2) set \( t_i[c], t_j[c] = \text{NULL} \)
3) propagate deletion/update
DELETE: delete \( t_i, t_j \)
UPDATE: set \( t_i[c], t_j[c] \) to updated values
Create Table A (.....
FOREIGN KEY c REFERENCES B action
..........)
Action: 1) left blank (deletion/update rejected)
2) ON DELETE SET NULL/ ON UPDATE SET NULL
sets ti[c] = NULL, tj[c] = NULL
3) ON DELETE CASCADE
deletes ti, tj
ON UPDATE CASCADE
sets ti[c], tj[c] to new key values
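As a sketch of how these policies are attached to the foreign key (schema simplified; standard SQL syntax):
```
CREATE TABLE loan (
    lno   CHAR(5) PRIMARY KEY,
    bname CHAR(15),
    FOREIGN KEY (bname) REFERENCES branch
        ON DELETE CASCADE
        ON UPDATE CASCADE);
```
Deleting a branch now deletes its loans, and renaming a branch propagates the new name into loan.bname.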
Global Constraints
Idea: two kinds
1) single relation (constraints spans multiple columns)
- E.g.: CHECK (total = svngs + check) declared in the CREATE TABLE
2) multiple relations: CREATE ASSERTION
SQL examples:
1) single relation: All Bkln branches must have assets > 5M
CREATE TABLE branch (
...........
bcity CHAR(15),
assets INT,
CHECK (NOT(bcity = 'Bkln') OR assets > 5M))
Affects:
insertions into branch
updates of bcity or assets in branch
Global Constraints
SQL example:
2) Multiple relations: every loan has a borrower with a savings account
CHECK (NOT EXISTS (
SELECT *
FROM loan AS L
WHERE NOT EXISTS(
SELECT *
FROM borrower B, depositor D, account A
WHERE B.cname = D.cname AND
D.acct_no = A.acct_no AND
L.lno = B.lno)))
Problem: Where to put this constraint? At depositor? Loan? ....
Ans: None of the above:
CREATE ASSERTION loan-constraint
CHECK( ..... )
Checked with EVERY DB update!
very expensive.....
Summary: Integrity Constraints
<table>
<thead>
<tr>
<th>Constraint Type</th>
<th>Where declared</th>
<th>Affects...</th>
<th>Expense</th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>Key Constraints</strong></td>
<td>CREATE TABLE (PRIMARY KEY, UNIQUE)</td>
<td>Insertions, Updates</td>
<td>Moderate</td>
</tr>
<tr>
<td><strong>Attribute Constraints</strong></td>
<td>CREATE TABLE CREATE DOMAIN (Not NULL, CHECK)</td>
<td>Insertions, Updates</td>
<td>Cheap</td>
</tr>
<tr>
<td><strong>Referential Integrity</strong></td>
<td>Table Tag (FOREIGN KEY .... REFERENCES ....)</td>
<td>1. Insertions into referencing rel'n</td>
<td>1,2: like key constraints. Another reason to index/sort on the primary keys</td>
</tr>
<tr>
<td></td>
<td></td>
<td>2. Updates of referencing rel'n of relevant attrs</td>
<td>3,4: depends on</td>
</tr>
<tr>
<td></td>
<td></td>
<td>3. Deletions from referenced rel'n</td>
<td>a. update/delete policy chosen</td>
</tr>
<tr>
<td></td>
<td></td>
<td>4. Update of referenced rel'n</td>
<td>b. existence of indexes on foreign key</td>
</tr>
<tr>
<td><strong>Global Constraints</strong></td>
<td>Table Tag (CHECK) or outside table (CREATE ASSERTION)</td>
<td>1. For single rel'n constraint, with insertion, deletion of relevant attrs</td>
<td>1. cheap</td>
</tr>
<tr>
<td></td>
<td></td>
<td>2. For assertions w/ every db modification</td>
<td>2. very expensive</td>
</tr>
</tbody>
</table>
Today’s Plan
- SQL (Chapter 3, 4)
- Views (4.2)
- Transactions (4.3)
- Integrity Constraints (4.4)
- Triggers (5.3)
- Functions and Procedures (5.2), Recursive Queries (5.4), Authorization (4.6), Ranking (5.5)
- Some Complex SQL Examples
SQL Functions
- Function to count number of instructors in a department
```sql
create function dept_count (dept_name varchar(20))
returns integer
begin
declare d_count integer;
select count(*) into d_count
from instructor
where instructor.dept_name = dept_name;
return d_count;
end
```
- Can use in queries
```sql
select dept_name, budget
from department
where dept_count (dept_name) > 12
```
SQL Procedures
- Same function as a procedure
```sql
create procedure dept_count_proc (in dept_name varchar(20),
out d_count integer)
begin
select count(*) into d_count
from instructor
where instructor.dept_name = dept_count_proc.dept_name;
end
```
- But use differently:
```sql
declare d_count integer;
call dept_count_proc('Physics', d_count);
```
- HOWEVER: Syntax can be wildly different across different systems
- Was put in place by DBMS systems before standardization
- Hard to change once customers are already using it
Recursion in SQL
- Example: find which courses are a prerequisite, whether directly or indirectly, for a specific course
```sql
WITH recursive rec_prereq(course_id, prereq_id) AS (
SELECT course_id, prereq_id
FROM prereq
UNION
SELECT rec_prereq.course_id, prereq.prereq_id
FROM rec_prereq, prereq
WHERE rec_prereq.prereq_id = prereq.course_id
)
SELECT *
FROM rec_prereq;
```
Makes SQL Turing Complete (i.e., you can write any program in SQL)
But: Just because you can, doesn’t mean you should
Ranking
- Ranking is done in conjunction with an order by specification.
- Consider: \(\text{student\_grades}(ID, \text{GPA})\)
- Find the rank of each student.
```sql
SELECT ID, rank() OVER (ORDER BY GPA DESC) AS s_rank
FROM student_grades
ORDER BY s_rank
```
- Equivalent to:
```sql
SELECT ID, (1 + (SELECT COUNT(*)
FROM student_grades B
WHERE B.GPA > A.GPA)) AS s_rank
FROM student_grades A
ORDER BY s_rank;
```
Authorization/Security
- GRANT and REVOKE keywords
- `grant select on instructor to U_1, U_2, U_3`
- `revoke select on branch from U_1, U_2, U_3`
- Can provide select, insert, update, delete privileges
- Can also create “Roles” and do security at the level of roles
- Some databases support doing this at the level of individual “tuples”
- PostgreSQL: [https://www.postgresql.org/docs/10/ddl-rowsecurity.html](https://www.postgresql.org/docs/10/ddl-rowsecurity.html)
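A small sketch combining roles with GRANT (role and user names are invented; standard SQL syntax):
```sql
create role teller;
grant select, update on account to teller;
grant teller to U_1, U_2;
revoke teller from U_2;
```
Granting privileges to a role and then granting the role to users keeps the policy in one place as staff changes.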
Today’s Plan
- SQL (Chapter 3, 4)
- Views (4.2)
- Transactions (4.3)
- Integrity Constraints (4.4)
- Triggers (5.3)
- Functions and Procedures (5.2), Recursive Queries (5.4), Authorization (4.6), Ranking (5.5)
- Some Complex SQL Examples
Fun with SQL
- [https://blog.jooq.org/2016/04/25/10-sql-tricks-that-you-didnt-think-were-possible/](https://blog.jooq.org/2016/04/25/10-sql-tricks-that-you-didnt-think-were-possible/)
- Long slide-deck linked off of this page
- Complex SQL queries showing how to do things like: do Mandelbrot, solve subset sum problem etc.
- The MADlib Analytics Library or MAD Skills, the SQL; [https://arxiv.org/abs/1208.4165](https://arxiv.org/abs/1208.4165)
1. Everything is a Table
```sql
SELECT *
FROM (SELECT *
      FROM person
     ) t
```
Everything is a table. In PostgreSQL, even functions are tables:
```sql
SELECT *
FROM substring('abcde', 2, 3)
```
2. Recursion can be very powerful
```sql
WITH RECURSIVE t(v) AS (
    SELECT 1       -- Seed Row
    UNION ALL
    SELECT v + 1   -- Recursion
    FROM t
)
SELECT v
FROM t
LIMIT 5
```
It yields
```
 v
---
 1
 2
 3
 4
 5
```
3. Window Functions
```sql
SELECT depname, empno, salary, avg(salary) OVER (PARTITION BY depname) FROM empsalary;
```
<table>
<thead>
<tr>
<th>depname</th>
<th>empno</th>
<th>salary</th>
<th>avg</th>
</tr>
</thead>
<tbody>
<tr>
<td>develop</td>
<td>11</td>
<td>5200</td>
<td>5020.0000000000000000000</td>
</tr>
<tr>
<td>develop</td>
<td>7</td>
<td>4200</td>
<td>5020.0000000000000000000</td>
</tr>
<tr>
<td>develop</td>
<td>9</td>
<td>4500</td>
<td>5020.0000000000000000000</td>
</tr>
<tr>
<td>develop</td>
<td>8</td>
<td>6000</td>
<td>5020.0000000000000000000</td>
</tr>
<tr>
<td>develop</td>
<td>10</td>
<td>5200</td>
<td>5020.0000000000000000000</td>
</tr>
<tr>
<td>personnel</td>
<td>5</td>
<td>3500</td>
<td>3700.0000000000000000000</td>
</tr>
<tr>
<td>personnel</td>
<td>2</td>
<td>3900</td>
<td>3700.0000000000000000000</td>
</tr>
<tr>
<td>sales</td>
<td>3</td>
<td>4800</td>
<td>4866.666666666666666667</td>
</tr>
<tr>
<td>sales</td>
<td>1</td>
<td>5000</td>
<td>4866.666666666666666667</td>
</tr>
<tr>
<td>sales</td>
<td>4</td>
<td>4800</td>
<td>4866.666666666666666667</td>
</tr>
</tbody>
</table>
(10 rows)
4. Correlation Coefficient
```sql
SET ARITHABORT ON;
DECLARE @OurData TABLE
(
x NUMERIC(18,6) NOT NULL,
y NUMERIC(18,6) NOT NULL
);
INSERT INTO @OurData
(x, y)
SELECT
x, y
FROM (VALUES
(1,32), (1,23), (3,50), (11,37), (-2,39), (10,44), (27,32), (25,16), (20,23),
(4,5), (30,41), (28,2), (31,52), (29,12), (50,40), (43,18), (10,65), (44,26),
(35,15), (24,37), (52,66), (59,46), (64,95), (79,36), (24,66), (69,58), (88,56),
(61,21), (100,60), (62,54), (10,14), (22,40), (52,97), (81,26), (37,58), (93,71),
(64,82), (24,33), (112,49), (64,90), (53,90), (132,61), (104,35), (60,52),
(29,50), (85,116), (95,104), (131,37), (139,38), (8,124)
) f(x,y);
SELECT
((Sy * Sxx) - (Sx * Sxy))
/ ((N * (Sxx)) - (Sx * Sx)) AS a,
((N * Sxy) - (Sx * Sy))
/ ((N * Sxx) - (Sx * Sx)) AS b,
((N * Sxy) - (Sx * Sy))
/ SQRT((
((N * Sxx) - (Sx * Sx))
* ((N * Syy) - (Sy * Sy)))) AS r
FROM
(SELECT SUM([@OurData].x) AS Sx, SUM([@OurData].y) AS Sy,
SUM([@OurData].x * [@OurData].x) AS Sxx,
SUM([@OurData].x * [@OurData].y) AS Sxy,
SUM([@OurData].y * [@OurData].y) AS Syy,
COUNT(*) AS N
FROM @OurData
) sums;
```
5. Page Rank
- Recursive algorithm to assign weights to the nodes of a graph (Web Link Graph)
- Weight for a node depends on the weights of the nodes that point to it
- Typically done in iterations till “convergence”
- Not obvious that you can do it in SQL, but:
- Each iteration is just a LEFT OUTER JOIN
- Stopping condition is trickier
- Other ways to do it as well
https://devnambi.com/2013/pagerank.html
```sql
declare @DampingFactor decimal(3,2) = 0.85   -- set the damping factor
      , @MarginOfError decimal(10,5) = 0.001 -- set the stable weight
      , @TotalNodeCount int
      , @IterationCount int = 1

-- we need to know the total number of nodes in the system
set @TotalNodeCount = (select count(*) from Nodes)

-- iterate!
WHILE EXISTS ( select *
               FROM dbo.Nodes
               WHERE HasConverged = 0
             )
BEGIN
    UPDATE n SET
        NodeWeight = 1.0 - @DampingFactor + isnull(x.TransferWeight, 0.0)
        -- a node has converged when its existing weight is the same as the weight it would be given
        -- (plus or minus the stable weight margin of error)
      , HasConverged = case when abs(n.NodeWeight - (1.0 - @DampingFactor + isnull(x.TransferWeight, 0.0))) < @MarginOfError then 1
                            else 0 end
    FROM Nodes n
    LEFT OUTER JOIN
    (
        -- Here's the weight calculation in place
        SELECT
            e.TargetNodeId
          , TransferWeight = sum(n.NodeWeight / n.NodeCount) * @DampingFactor
        FROM Nodes n
        INNER JOIN Edges e
            ON n.NodeId = e.SourceNodeId
        GROUP BY e.TargetNodeId
    ) as x
        ON x.TargetNodeId = n.NodeId

    -- for demonstration purposes, return the value of the nodes after each iteration
    SELECT @IterationCount as IterationCount
         , *
    FROM Nodes

    set @IterationCount += 1
END
```
Princeton University
Computer Science 217: Introduction to Programming Systems
Goals of this Lecture
Help you learn about:
- Locality and caching
- Typical storage hierarchy
- Virtual memory
- How the hardware and OS give application programs the illusion of a large, contiguous, private address space
Virtual memory is one of the most important concepts in system programming.
Agenda
Locality and caching
- Typical storage hierarchy
- Virtual memory
Storage Device speed vs. size
Facts:
- CPU needs subnanosecond access to memory (else it can’t run instructions fast enough)
- Fast memories (subnanosecond) are small (1000 bytes),
- Big memories (gigabytes) are slow (60 nanoseconds)
- Huge memories (terabytes) are very slow (milliseconds)
Goal:
- Need many gigabytes of memory,
- but with fast (subnanosecond) average access time
Solution: locality allows caching
- Most programs exhibit good locality
- A program that exhibits good locality will benefit from proper caching
Locality
Two kinds of locality
- Temporal locality
- If a program references item X now, it probably will reference X again soon
- Spatial locality
- If a program references item X now, it probably will reference item at address X±1 soon
Most programs exhibit good temporal and spatial locality
Locality Example
Locality example
```
sum = 0;
for (i = 0; i < n; i++)
sum += a[i];
```
Typical code (good locality)
- Temporal locality
- Data: Whenever the CPU accesses `sum`, it accesses `sum` again shortly thereafter
- Instructions: Whenever the CPU executes `sum += a[i]`, it executes `sum += a[i]` again shortly thereafter
- Spatial locality
- Data: Whenever the CPU accesses `a[i]`, it accesses `a[i+1]` shortly thereafter
- Instructions: Whenever the CPU executes `sum += a[i]`, it executes `i++` shortly thereafter
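For contrast, a hypothetical loop with poor spatial locality, assuming `a` is an n-by-n array stored in row-major order as in C; consecutive accesses are n elements apart, so neighboring bytes are rarely reused:
```
sum = 0;
for (j = 0; j < n; j++)
    for (i = 0; i < n; i++)
        sum += a[i][j];
```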
Caching
**Cache**
- Fast access, small capacity storage device
- Acts as a staging area for a subset of the items in a slow access, large capacity storage device
**Good locality + proper caching**
- Most storage accesses can be satisfied by cache
- Overall storage performance improved
**Caching in a Storage Hierarchy**
- Smaller, faster device at level k caches a subset of the blocks from level k+1
- Larger, slower device at level k+1 is partitioned into blocks
- Blocks copied between levels
**Cache Hits and Misses**
- **Cache hit**
- E.g., request for block 10
- Access block 10 at level k
- Fast!
- **Cache miss**
- E.g., request for block 8
- Evict some block from level k to level k+1
- Load block 8 from level k+1 to level k
- Access block 8 at level k
- Slow!
**Caching goal:**
- Maximize cache hits
- Minimize cache misses
**Cache Eviction Policies**
- **Best eviction policy:** "clairvoyant" policy
- Always evict a block that is never accessed again, or...
- Always evict the block accessed the furthest in the future
- Impossible in the general case
- **Worst eviction policy**
- Always evict the block that will be accessed next!
- Causes thrashing
- Impossible in the general case!
**Reasonable eviction policy: LRU policy**
- Evict the "least recently used" (LRU) block
- With the assumption that it will not be used again (soon)
- Good for straight-line code
- (can be) bad for loops
- Expensive to implement
- Often simpler approximations are used
- See Wikipedia "Page replacement algorithm" topic
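To make the bookkeeping concrete, here is a minimal C sketch of LRU for a tiny fully associative cache. The slot count, names, and logical clock are invented for illustration; real hardware uses cheaper approximations:
```
enum { NSLOTS = 4 };                          /* cache capacity at level k */

static int  blockId[NSLOTS] = { -1, -1, -1, -1 };  /* -1 marks an empty slot */
static long lastUsed[NSLOTS];                 /* logical clock of last access */
static long clockTick;

/* Return the slot holding block, evicting the LRU slot on a miss. */
static int cacheAccess(int block)
{
    int i, victim = 0;
    for (i = 0; i < NSLOTS; i++)
        if (blockId[i] == block) {            /* cache hit: fast */
            lastUsed[i] = ++clockTick;
            return i;
        }
    for (i = 1; i < NSLOTS; i++)              /* cache miss: find LRU victim */
        if (lastUsed[i] < lastUsed[victim])
            victim = i;
    blockId[victim] = block;                  /* load from level k+1 (slow, not shown) */
    lastUsed[victim] = ++clockTick;
    return victim;
}
```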
**Locality/Caching Example: Matrix Mult**
- Matrix multiplication
- Matrix = two-dimensional array
- Multiply n-by-n matrices A and B
- Store product in matrix C
**Performance depends upon**
- Effective use of caching (as implemented by system)
- Good locality (as implemented by you)
Two-dimensional arrays are stored in either row-major or column-major order.
C uses row-major order:
- Access in row-major order ⇒ good spatial locality
- Access in column-major order ⇒ poor spatial locality
```
for (i=0; i<n; i++)
for (j=0; j<n; j++)
for (k=0; k<n; k++)
c[i][j] += a[i][k] * b[k][j];
```
Reasonable cache effects:
- Good locality for A
- Bad locality for B
- Good locality for C
```
for (j=0; j<n; j++)
    for (k=0; k<n; k++)
        for (i=0; i<n; i++)
            c[i][j] += a[i][k] * b[k][j];
```
Poor cache effects:
- Bad locality for A
- Bad locality for B
- Bad locality for C
```
for (i=0; i<n; i++)
    for (k=0; k<n; k++)
        for (j=0; j<n; j++)
            c[i][j] += a[i][k] * b[k][j];
```
Good cache effects:
- Good locality for A
- Good locality for B
- Good locality for C
**Agenda**
- Locality and caching
- Typical storage hierarchy
- Virtual memory
**Typical Storage Hierarchy**
- CPU registers hold words retrieved from L1/L2/L3 cache
- L1/L2/L3 cache holds cache lines retrieved from main memory
- Main memory holds disk blocks retrieved from local disks
- Local disks hold files retrieved from disks on remote network servers
- Larger slower storage devices
- Local secondary storage (local disks, SSDs)
- Remote secondary storage (distributed file systems, Web servers)
Typical Storage Hierarchy
Registers
- **Latency:** 0 cycles
- **Capacity:** 8-256 registers
- 8 general purpose registers in IA-32;
- 32 in typical RISC machine (ARM, MIPS, RISC-V)
L1/L2/L3 Cache
- **Latency:** 1 to 30 cycles
- **Capacity:** 32KB to 32MB
Main memory (RAM)
- **Latency:** ~100 cycles
- 100 times slower than registers
- **Capacity:** 256MB to 64GB
Local secondary storage: disk drives
- **Latency:** ~100,000 cycles
- 1000 times slower than main mem
- Limited by nature of disk
- Must move heads and wait for data to rotate under heads
- Faster when accessing many bytes in a row
- **Capacity:** 1GB to 256TB
Remote secondary storage
- **Latency:** ~10,000,000 cycles
- 100 times slower than disk
- Limited by network bandwidth
- **Capacity:** essentially unlimited
Aside: Persistence
- **Do data persist in the absence of power?**
- Lower levels of storage hierarchy store data persistently
- Remote secondary storage
- Local secondary storage
- Higher levels of storage hierarchy **do not** store data persistently
- Main memory (RAM)
- L1/L2/L3 cache
- Registers
Aside: Persistence
Admirable goal: Move persistence upward in hierarchy
Solid state (flash) drives
- Use solid state technology (as does main memory)
- Persistent, as is disk
- Viable replacement for disk as local secondary storage
Storage Hierarchy & Caching Issues
Issue: Block size?
- Slow data transfer between levels k and k+1
- ⇒ use large block sizes at level k (do data transfer less often)
- Fast data transfer between levels k and k+1
- ⇒ use small block sizes at level k (reduce risk of cache miss)
- Lower in pyramid ⇒ slower data transfer ⇒ larger block sizes
<table>
<thead>
<tr>
<th>Device</th>
<th>Block Size</th>
</tr>
</thead>
<tbody>
<tr>
<td>Register</td>
<td>8 bytes</td>
</tr>
<tr>
<td>L1/L2/L3 cache line</td>
<td>64 bytes</td>
</tr>
<tr>
<td>Main memory page</td>
<td>4KB (4096 bytes)</td>
</tr>
<tr>
<td>Disk block</td>
<td>4KB (4096 bytes)</td>
</tr>
<tr>
<td>Disk transfer block</td>
<td>4KB (4096 bytes) to 64MB (67108864 bytes)</td>
</tr>
</tbody>
</table>
Device Managed by:
- Registers (cache of L1/L2/L3 cache and main memory)
- Compiler, using complex code-analysis techniques
- Assembly lang programmer
- L1/L2/L3 cache (cache of main memory)
- Hardware, using simple algorithms
- Main memory (cache of local sec storage)
- Hardware and OS, using virtual memory with complex algorithms (since accessing disk is expensive)
- Local secondary storage (cache of remote sec storage)
- End user, by deciding which files to download
Agenda
Locality and caching
- Typical storage hierarchy
- Virtual memory
Main Memory: Illusion
Each process sees main memory as
- Huge: $2^{64} = 16$ EB (16 exabytes) of memory
- Uniform: contiguous memory locations from 0 to $2^{64}-1$
Main Memory: Reality
Memory is divided into pages
- At any time some pages are in physical memory, some on disk
- OS and hardware swap pages between physical memory and disk
- Multiple processes share physical memory
Virtual & Physical Addresses
Question
• How do OS and hardware implement virtual memory?
Answer (part 1)
• Distinguish between virtual addresses and physical addresses
Virtual address
Identifies a location in a particular process’s virtual memory
• Independent of size of physical memory
• Independent of other concurrent processes
• Consists of virtual page number & offset
• Used by application programs
Physical address
Identifies a location in physical memory
• Consists of physical page number & offset
• Known only to OS and hardware
Note:
• Offset is same in virtual addr and corresponding physical addr
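With 4 KB pages the split is just a shift and a mask; a small C sketch using the 12-bit offset that matches the CourseLab numbers below (the example address is from Example 1 later in the lecture):
```
#include <stdio.h>

int main(void)
{
    unsigned long vaddr  = 16386;             /* example virtual address */
    unsigned long offset = vaddr & 0xfffUL;   /* low 12 bits */
    unsigned long vpn    = vaddr >> 12;       /* remaining high bits */
    printf("vpn = %lu, offset = %lu\n", vpn, offset);  /* prints 4 and 2 */
    return 0;
}
```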
CourseLab Virtual & Physical Addresses
<table>
<thead>
<tr>
<th></th>
<th>Page Num</th>
<th>Offset</th>
</tr>
</thead>
<tbody>
<tr>
<td>Virtual Addr</td>
<td>Virtual page num: 52 bits</td>
<td>12 bits</td>
</tr>
<tr>
<td>Physical Addr</td>
<td>Physical page num</td>
<td>12 bits</td>
</tr>
</tbody>
</table>
On CourseLab:
• Each offset is 12 bits
• Each page consists of 2^12 bytes
• Each virtual page number consists of 52 bits
• There are 2^52 virtual pages
• Each virtual address consists of 64 bits
• There are 2^64 bytes of virtual memory (per process)
Page Tables
Question
• How do OS and hardware implement virtual memory?
Answer (part 2)
• Maintain a page table for each process
Page Table for Process 1234
<table>
<thead>
<tr>
<th>Virtual Page Num</th>
<th>Physical Page Num or Disk Addr</th>
</tr>
</thead>
<tbody>
<tr>
<td>0</td>
<td>Physical page 5</td>
</tr>
<tr>
<td>1</td>
<td>(unmapped)</td>
</tr>
<tr>
<td>2</td>
<td>Spot X on disk</td>
</tr>
<tr>
<td>3</td>
<td>Physical page 8</td>
</tr>
<tr>
<td>...</td>
<td>...</td>
</tr>
</tbody>
</table>
Virtual Memory Example 1
Process 1234 accesses mem at virtual addr 16386
16386 = 100000000000010B = Virtual page num = 4; offset = 2
(Figure: physical memory holds VP 3, VP 4, VP 0, VP 6 in physical pages 0-3; the page table maps each virtual page accordingly.)
Hardware consults page table
Hardware notes that virtual page 4 maps to phys page 1
Page hit!
Hardware forms physical addr
Physical page num = 1; offset = 2
= 1000000000010B = 4098
Hardware fetches/stores data from/to phys addr 4098
Virtual Memory Example 2
Process 1234 accesses mem at virtual addr 8200
8200 = 10000000001000B = Virtual page num = 2; offset = 8
(Figure: physical memory holds VP 3, VP 4, VP 0, VP 6 in physical pages 0-3; virtual page 2 is at Spot X on disk.)
Hardware consults page table
Hardware notes that virtual page 2 resides on disk
Page miss! Hardware generates a page fault
Virtual Memory Example 2 (cont.)
Process 1234 accesses mem at virtual addr 8200
8200 = 10000000001000B = Virtual page num = 2; offset = 8
OS gains control of CPU
OS swaps virtual pages 6 and 2
This takes a long while (disk latency), run another process for the time being; then eventually...
OS updates page table accordingly
Control returns to process 1234
Process 1234 re-executes same instruction
Virtual Memory Example 2 (cont.)
Process 1234 accesses mem at virtual addr 8200
8200 = 10000000001000B = Virtual page num = 2; offset = 8
Hardware consults page table
Hardware notes that virtual page 2 maps to phys page 3
Page hit!
Virtual Memory Example 3
Process 1234 accesses mem at virtual addr 4105
4105 = 1000000001001B = Virtual page num = 1; offset = 9
Hardware consults page table
Hardware notes that virtual page 1 is unmapped
Page miss!
Hardware generates segmentation fault
(See Signals lecture for remainder!)
Storing Page Tables
Question
- Where are the page tables themselves stored?
Answer
- In main memory
Question
- What happens if a page table is swapped out to disk??!!!
Answer
- OS is responsible for swapping
- Special logic in OS "pins" page tables to physical memory
- So they never are swapped out to disk
Storing Page Tables (cont.)
Question
- Doesn’t that mean that each logical memory access requires two physical memory accesses – one to access the page table, and one to access the desired datum?
Answer
- Yes!
Question
- Isn’t that inefficient?
Answer
- Not really...
Translation Lookaside Buffer
Translation lookaside buffer (TLB)
- Small cache on CPU
- Each TLB entry consists of a page table entry
- Hardware first consults TLB
- Hit ⇒ no need to consult page table in L1/L2/L3 cache or memory
- Miss ⇒ swap relevant entry from page table in L1/L2/L3 cache or memory into TLB; try again
- See Bryant & O’Hallaron book for details
Caching again!!!
Additional Benefits of Virtual Memory
Memory protection among processes
- Process’s page table references only physical memory pages that the process currently owns
- Impossible for one process to accidentally/maliciously affect physical memory used by another process
Memory protection within processes
- Permission bits in page-table entries indicate whether page is read-only, etc.
- Allows CPU to prohibit
- Writing to RODATA & TEXT sections
- Access to protected (OS owned) virtual memory
Virtual memory concept facilitates/enables many other OS features; examples…
Context switching (as described last lecture)
- Illusion: To context switch from process X to process Y, OS must save contents of registers and memory for process X, restore contents of registers and memory for process Y
- Reality: To context switch from process X to process Y, OS must save contents of registers and virtual memory for process X, restore contents of registers and virtual memory for process Y
- Implementation: To context switch from process X to process Y, OS must save contents of registers and page table for process X, restore contents of registers and page table for process Y
Linking
- Same memory layout for each process
- E.g., TEXT section always starts at virtual addr 0x08048000
- E.g., STACK always grows from virtual addr 0xbfffffff to lower addresses
- Linker is independent of physical location of code
Code and data sharing
- User processes can share some code and data
- E.g., single physical copy of stdio library code (e.g. printf)
- Mapped into the virtual address space of each process
Additional Benefits of Virtual Memory
Dynamic memory allocation
- User processes can request additional memory from the heap
- E.g., using `malloc()` to allocate, and `free()` to deallocate
- OS allocates contiguous virtual memory pages...
- ... and scatters them anywhere in physical memory
Creating new processes
- Easy for “parent” process to “fork” a new “child” process
- Initially: make new PCB containing copy of parent page table
- Incrementally: change child page table entries as required
- See Process Management lecture for details
- `fork()` system-level function
Overwriting one program with another
- Easy for a process to replace its program with another program
- Initially: set page table entries to point to program pages that already exist on disk!
- Incrementally: swap pages into memory as required
- See Process Management lecture for details
- `execvp()` system-level function
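A small C sketch tying the two together; the program name `date` is just an example:
```
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/wait.h>

int main(void)
{
    pid_t pid = fork();                /* child starts with a copy of the
                                          parent's page table */
    if (pid == -1) { perror("fork"); exit(EXIT_FAILURE); }
    if (pid == 0) {                    /* child: replace its program image */
        char *argv[] = { "date", NULL };
        execvp("date", argv);          /* page table entries now point at
                                          the pages of the date program */
        perror("execvp");              /* reached only if execvp fails */
        exit(EXIT_FAILURE);
    }
    waitpid(pid, NULL, 0);             /* parent waits for child */
    return 0;
}
```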
Measuring Memory Usage
On CourseLab computers:
```bash
$ ps l
F UID PID PPID PRI NI VSZ RSS WCHAN STAT TTY TIME COMMAND
0 42579 9655 9696 30 10 167568 13840 signal TN pts/1 0:00 emacs
0 42579 9696 9695 30 10 24028 2072 wait SNs pts/1 0:00 -bash
0 42579 9725 9696 30 10 11268 956 - RN+ pts/1 0:00 ps l
```
VSZ (virtual memory size): virtual memory usage
RSS (resident set size): physical memory usage
(both measured in kilobytes)
Summary
Locality and caching
- Spatial & temporal locality
- Good locality ⇒ caching is effective
Typical storage hierarchy
- Registers, L1/L2/L3 cache, main memory, local secondary storage (esp. disk), remote secondary storage
Virtual memory
- Illusion vs. reality
- Implementation
- Virtual addresses, page tables, translation lookaside buffer (TLB)
- Additional benefits (many!)
Virtual memory concept permeates the design of operating systems and computer hardware
The Struts Application Framework
- Introduction
- What is Struts?
  - An Extension
  - A Framework
  - A Collection of Tags
- Where is Struts going?
- Getting started
Introduction
Java Server Pages and Java Servlets are two exciting technologies that enable developers to bring the power and flexibility of the Java platform to the web. By using JSP and Servlets, developers are able to offer exciting and complex applications to their users without relying on them to provide anything more than a web browser. As well, Servlets are becoming an important component of the developing Java SOAP web services model.
Developing web applications in any language can be difficult, and Java is no exception. However, the Struts Framework can help. By providing basic support for many common elements of a web application, it empowers developers to create more reliable applications in a more timely fashion than ever before. This includes support for everything from server- and client-side input validation to database connection pooling, internationalization and the creation of dynamic page templates. As well, it helps separate page content from page layout, so that graphic artists and designers can work to develop attractive pages, while developers work to create the business logic that drives the application, without interfering with each other.
Struts itself is best explained as three different things: An extension to the standard Java Server Context; A framework within which to develop web applications; and, a set of tag libraries for use with Java Server Pages. The rest of this report will serve to describe Struts from each of these different perspectives.
What is Struts?
An Extension
The Struts Framework is an extension of the standard Java Server Context (JSC). Struts is not meant to be used outside the context of a Java Server such as Tomcat, WebSphere, or iPlanet. The Struts package itself consists of only one small .jar archive and an xml configuration file, as well as a number of optional tag library descriptors which are meant to be deployed in your application’s /WEB-INF/lib and /WEB-INF directories, respectively. Other features such as support for database pooling may require that the JDBC drivers for your chosen database, or other software, be installed.
As well, JSP’s and Servlets may exist within this context without using Struts. This means that you may choose to use Struts when deploying part of your application, but provide an in-house or third-party solution for the rest. It’s up to you, and it’s always easy to transition your code to use Struts at a later date.
A Framework
Now that it’s clear what Struts is in relation to the JSC, you may be curious as to what exactly it does. The simple answer is that it provides a framework which developers and designers may use to aid them when crafting their web applications.
So how does it do that? Well, first off, it starts by providing developers with the components of a Model / Controller / View architecture. These components encourage the creation of a three tier application where the data and the actions available upon that data (the Model) are separated from the input and output presented to the user (the View) by an object or set of objects that broker the interaction between the two (the Controller).
The Model is committed to maintaining the integrity of its data and therefore will not allow the Controller to access its internal data structures. In and of itself however, the Model cannot successfully complete the actions requested by the user, and relies upon the Controller to direct it in the sequence and nature of its actions. Finally, the View collects the input from the user and turns this into data and requests for actions to be sent to the Controller. As well, the View is responsible for displaying the current state of the model as well as passing any messages received from the Controller to the client. These messages are usually errors, though in the case of larger transactions they may also be confirmations that an action has been completed successfully.
In the Struts framework the Model, Controller, and View tiers all map directly to various parts of the JSC and the objects provided by the Struts framework. The Model is usually represented by the DBMS, J2EE server, a legacy system, and various web services or any combination of these. The Controller is represented by something known as the ActionServlet, as well as its corresponding Action objects. The View is represented by JSP’s and a new type of bean called a FormBean.
To elaborate, perhaps it would be simplest to illustrate the flow of control through the web application as an action is requested by the client and is processed by the rest of the Struts Framework.
A client first initiates an action by sending a request to a Java Server Context. This request’s path must be of a form that matches the mapping assigned to the ActionServlet. This usually involves mapping all requests for resources ending with “*.do” to the ActionServlet. This Servlet is a class provided by Struts and is an instance of the `org.apache.struts.action.ActionServlet` class. It is loaded when a web application is initialized and reads in the “struts-config.xml” configuration file. This file specifies the
actions that this ActionServlet understands, as well as various other Struts parameters. The ActionServlet receives the request, and looks at the path to determine the action that it is associated with. The ActionServlet then forwards the request and response to an instance of the class associated with this action and prepares to receive future requests. All classes associated with an action must extend the `org.apache.struts.action.Action` class. This provides a number of helpful methods as well as a number of methods required by the ActionServlet. When programming your own actions, you need to keep in mind that they are multi-threaded and must therefore be thread-safe.
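A rough sketch of such an action follows; the class and forward names are invented, and note that Struts 1.0.x used `perform()`, which later releases renamed `execute()`:
```java
import java.io.IOException;
import javax.servlet.ServletException;
import javax.servlet.http.HttpServletRequest;
import javax.servlet.http.HttpServletResponse;
import org.apache.struts.action.*;

public final class LogonAction extends Action {
    /* Actions are shared across requests, so keep no mutable state here. */
    public ActionForward perform(ActionMapping mapping, ActionForm form,
                                 HttpServletRequest request,
                                 HttpServletResponse response)
            throws IOException, ServletException {
        LogonForm logon = (LogonForm) form;   /* populated by the framework */
        /* ... consult the Model (DBMS, J2EE, web service) here ... */
        return mapping.findForward("success"); /* declared in struts-config.xml */
    }
}
```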
The actions themselves are where much of the real work gets done. Each action may be associated with a FormBean. FormBeans are created by the developer and must extend the `org.apache.struts.action.ActionForm` class. The FormBean works with a corresponding JSP to collect input from the client on the action’s behalf. For each input on the JSP form, the FormBean should have a corresponding bean property. When the user submits the JSP form, the values from the form are mapped to their corresponding bean properties and used to populate the FormBean.
FormBeans are able to validate the input from the user based on its data type and value. If a value collected does not correspond with the correct data type, the web client is redirected back to the JSP form to enter a new value. The action associated with this bean is responsible for testing the collected values based on their relationship with the Model and the business logic of the application. This means testing for required values, as well as verifying that users’ inputs fall within the constraints of the business logic. By maintaining this abstraction, the same FormBean class may be used with multiple actions.
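A minimal sketch of a matching FormBean, the `LogonForm` assumed by the action above; the property names and error-message key are invented:
```java
import javax.servlet.http.HttpServletRequest;
import org.apache.struts.action.*;

public final class LogonForm extends ActionForm {
    private String username;  /* one bean property per form input */
    private String password;

    public String getUsername() { return username; }
    public void setUsername(String u) { username = u; }
    public String getPassword() { return password; }
    public void setPassword(String p) { password = p; }

    /* Returning a non-empty ActionErrors sends the client back to the form. */
    public ActionErrors validate(ActionMapping mapping,
                                 HttpServletRequest request) {
        ActionErrors errors = new ActionErrors();
        if (username == null || username.length() == 0)
            errors.add("username", new ActionError("error.username.required"));
        return errors;
    }
}
```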
There is also a standard method for adding cancel buttons to forms, which allows the web client to cancel an action if they cannot provide satisfactory input while still allowing the application the opportunity to respond appropriately.
When a user’s input has been validated, the action may begin attempting to satisfy the action request. This can involve interacting with J2EE, DBMS, XML or other web services. In fact, this may involve anything that can be performed using J2SE. On
smaller applications it is normally acceptable to place business logic directly into the action, although for larger distributed applications the use of J2EE, CORBA, or web services is recommended. Doing so will improve reliability, scalability, and flexibility. Under no circumstance should business logic be placed in the Java Server Pages. This will allow web page developers to freely design attractive websites, without accidentally disrupting the performance of the web application.
When the action is completed, the web client is forwarded to a JSP where the results of the action are available. This JSP may be different depending upon the results of the action.
If at any point in this process an error occurs, such as a bad user input, an inaccessible resource, or an exception, there is a standard and application wide error handling mechanism in place. An error may be stored as an `org.apache.struts.action.ActionError`, which is then stored in an `org.apache.struts.action.ActionErrors` object specific to that user session. JSP’s, Servlets, and actions may then retrieve these errors based on name and type. A similar mechanism also exists for the passing of messages.
Besides offering this workflow, Struts also provides standard support for database connection pooling and internationalization of applications.
**A Collection of Tags**
Struts also includes a set of tag libraries intended to help the developer with a number of common tasks as well as to assist in integrating JSP with the Struts Framework. They are
designed especially to facilitate the separation of presentation and business logic and to implement the flow of control to and from the **ActionServlet**.
**HTML Tag Library**
The first of these collections is the HTML tag library. This library includes tags designed to help create dynamic HTML user interfaces. This includes tags for rendering browser specific JavaScript and tags to aid with URL rewriting and session tracking.
As well this library provides an important set of tags that support mapping form inputs to FormBeans. Though it is possible to accomplish this without the use of the Struts HTML form element tags, they do greatly reduce the time and effort involved.
Another important use for the HTML tag library is linking. Links between pages can be created in both hyperlink and anchor formats, which is especially useful for generic forwards since it does not require the creation of heavier objects.
Finally, internationalization and regionalization are supported by HTML tags that respond and encode based upon the client’s encoding, and by allowing error messages and image tags to be retrieved from locale specific property files, as per the Java internationalization specification.
**Bean Tag Library**
The bean tag library encapsulates most of the features required to display both static and dynamic web pages. It provides tags for working with, creating, and removing beans. The bean tags help to define new beans from any objects associated with the current request and make them accessible to the remainder of the page via scripting variables or scope attributes.
As well these tags can help render bean, or bean properties, to the output response. This second set of tags is particularly useful as they aid in internationalization by allowing messages to be retrieved from a locale specific user file.
Finally, the bean tag library provides significant enhancements to the basic JSP `<jsp:useBean>` tag by extending three methods of referencing bean properties: simple, nested, and indexed.
**Logic Tag Library**
The logic tag library performs all the first and second order logic functions and thus provides conditional code generation. As well it includes tags for collection iteration and application flow management.
It should be noted that, while most binary logic tags are available, Struts does not provide some of the more complex String operations. Nevertheless, developers can implement their own custom tags, which are merely Java classes that implement special interfaces.
**Template Tag Library**
Finally, the template tag library is used for creating dynamic JSP templates. This library provides dynamic capabilities similar to the static capabilities of stylesheets or the standard JSP `include` directive. Though the least used of the tag libraries, it is helpful when a developer wishes to be able to change layout and content across multiple pages. It is often used in the development of new web applications when such things may change frequently.
**Where is Struts going?**
With all of this already in Struts, there are still places it needs to go in the future. Already new releases are under development to include tag support for XML and XSLT. However, these new tags are likely not to be included into later stable releases as Sun has now incorporated many of Struts tag libraries into their new Java Server Pages Standard Tag Libraries (http://jcp.org/aboutJava/communityprocess/review/jsr052/index.html). These will include tag libraries to handle iteration, flow control, XML, DBMS, and internationalization amongst other things.
One area of future Struts growth is in new input form elements. It is hoped that there will soon be grid inputs, for spreadsheet style entry. This is currently possible but more difficult than it needs to be. As well, the select input elements will likely be reworked to provide a standard way for a FormBean or action to populate the selections list.
One other feature that is always in demand is support for multi-part, “Wizard” style input forms. These are multi-part forms similar to the “Wizards” available in many popular programming APIs. It is currently possible to implement simple multi-part forms, though more complex “Wizards” require the ability to change the forms that are displayed based upon different user input.
It should be noted however that many of these functions are already available as add-ons to the Struts Framework. For a list of many of the most popular, please visit http://jakarta.apache.org/struts/doc-1.0.2/userGuide/resources.html.
**Getting started**
Now that you have seen what the Struts Framework is all about, you might be wondering how to get started using it.
The first thing you want to do is to visit the official Struts homepage at http://jakarta.apache.org/struts. From there you can download a copy of Struts. I’d also recommend taking a minute to read their User Guide (http://jakarta.apache.org/struts/doc-1.0.2/userGuide/index.html) if you have a minute as it provides specifics on configuring Struts to perform the way you want it.
At the time of this writing, the current production release of Struts is version 1.0.2. To install it, first visit the Struts homepage. Select the ‘Binaries’ link in the ‘Download’ section. Now click on ‘Struts 1.0.2’ from the ‘Release Builds’ section and then select the distribution appropriate for your operating system and development requirements. For these instructions, it will be assumed that you will be developing on a Microsoft Windows operating system, however the directions should differ only slightly on other platforms. In this case, we want to download “jakarta-struts-1.0.2.zip”. This file includes example applications as well as documentation, if you are simply interested in the core Struts libraries then download the “jakarta-struts-1.0.2-lib.zip” file from the “/lib” directory.
Once this file has been downloaded, extract its contents to a convenient location on your computer’s hard disk, such as the desktop. WinZip (www.winzip.com) is a popular utility for this purpose, though Windows ME and XP include support for many archive formats without requiring the installation of third-party software.
To enable your web application to use Struts, start by copying a number of files from the extracted contents of the Struts archive. First, copy the “struts.jar” file from the “/lib” directory to the “/WEB-INF/lib” directory of your web application. Then copy the “struts-bean.tld”, “struts-html.tld”, “struts-logic.tld”, and “struts-template.tld” tag library descriptor files from the “/lib” directory of the archive to the “/WEB-INF” directory of your web application.
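With the descriptors in place, a JSP page can then reference them with taglib directives, for example (the prefixes are conventional rather than mandated):

```jsp
<%@ taglib uri="/WEB-INF/struts-bean.tld" prefix="bean" %>
<%@ taglib uri="/WEB-INF/struts-html.tld" prefix="html" %>
<%@ taglib uri="/WEB-INF/struts-logic.tld" prefix="logic" %>
<%@ taglib uri="/WEB-INF/struts-template.tld" prefix="template" %>
```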
Next we want to create the “struts-config.xml” file. This is usually located in the “/WEB-INF” folder of your application. This file must be an XML document that conforms to the “http://jakarta.apache.org/struts/dtds/struts-config_1_0.dtd” DTD. An example is as follows:

```xml
<?xml version="1.0" encoding="UTF-8"?>
<!-- Configuration file for the Struts Framework -->
<!DOCTYPE struts-config PUBLIC
    "-//Apache Software Foundation//DTD Struts Configuration 1.0//EN"
    "http://jakarta.apache.org/struts/dtds/struts-config_1_0.dtd">

<struts-config>

    <!-- Data sources allow your application to utilize connection
         pooling and help abstract the DBMS from the application. -->
    <data-sources id="name">
        <data-source autoCommit="false"
                     description="Example Data Source Description"
                     driverClass="org.postgresql.Driver"
                     maxCount="4"
                     minCount="2"
                     password="mypassword"
                     url="jdbc:postgresql://localhost/mydatabase"
                     user="myusername"/>
    </data-sources>

    <!-- Form beans are used by actions when collecting input. -->
    <form-beans>
        <form-bean name="logonForm"
                   type="org.apache.struts.example.LogonForm">
            <icon>
                <small-icon></small-icon>
                <large-icon></large-icon>
            </icon>
            <display-name></display-name>
            <description></description>
            <set-property property="" value=""/>
        </form-bean>
    </form-beans>

    <!-- Global forwards specify the target destinations available
         to the application. -->
    <global-forwards type="org.apache.struts.action.ActionForward">
        <forward name="logon" path="/logon.jsp" redirect="false">
            <icon>
                <small-icon></small-icon>
                <large-icon></large-icon>
            </icon>
            <display-name></display-name>
            <description></description>
            <set-property property="" value=""/>
        </forward>
    </global-forwards>

    <!-- Action mappings associate an action with a given URL.
         Local forwards are nested inside the action they belong to. -->
    <action-mappings>
        <action path="/logon"
                type="org.apache.struts.example.LogonAction"
                name="logonForm"
                scope="request"
                input="/logon.jsp"
                unknown="false"
                validate="true">
            <icon>
                <small-icon></small-icon>
                <large-icon></large-icon>
            </icon>
            <display-name></display-name>
            <description></description>
            <set-property property="" value=""/>
            <forward name="logon" path="/logon.jsp" redirect="false">
                <icon>
                    <small-icon></small-icon>
                    <large-icon></large-icon>
                </icon>
                <display-name></display-name>
                <description></description>
                <set-property property="" value=""/>
            </forward>
        </action>
    </action-mappings>

</struts-config>
```
This tutorial does not provide a detailed description of how to configure this file as it is only an overview of the Struts Framework. In summary, however, this configuration file describes the Actions, ActionForms, ActionForwards, and data sources that are defined by this web application. Before you can use any instance of these objects in your application they must first be defined in this file.
Finally, to complete the setup of the Struts Framework in your application, you must add the ActionServlet to your deployment descriptor. To do this, simply add the appropriate servlet declaration and mapping to your application’s “web.xml” file.
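A typical declaration for Struts 1.0.x is sketched below (the `*.do` extension mapping is the usual convention rather than a requirement; adjust the config path to match your application):

```xml
<servlet>
    <servlet-name>action</servlet-name>
    <servlet-class>org.apache.struts.action.ActionServlet</servlet-class>
    <init-param>
        <param-name>config</param-name>
        <param-value>/WEB-INF/struts-config.xml</param-value>
    </init-param>
    <load-on-startup>1</load-on-startup>
</servlet>

<servlet-mapping>
    <servlet-name>action</servlet-name>
    <url-pattern>*.do</url-pattern>
</servlet-mapping>
```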
That’s it; you’re done! You can now get started using the Struts Framework in your application. If you are looking for a good Integrated Development Environment (IDE) in which to create your web applications, you might consider giving “Forte for Java” a try. It’s free, and more information about it is available from http://www.sun.com/forte/ffj.
A full list of many great Struts tutorials, books, and resources is available at http://jakarta.apache.org/struts/doc-1.0.2/userGuide/resources.html.
In summary, Struts is a framework based on a set of Java technologies (Servlets, JSP, JSTL, XML, etc.) that provides an implementation of the MVC architecture. The framework also provides a ready-to-use validation framework. The power of Struts lies in its model layer, through which Struts can be integrated with other Java technologies such as JDBC, EJB, Spring and Hibernate. The web.xml file is the deployment descriptor, which keeps all the application-related settings, while the struts-config.xml file maps requests to Action classes and ActionForms (simple POJOs containing properties related to the UI). The model is handled by various Java technologies such as EJB, JDBC, Hibernate and Spring; it mainly concentrates on the business logic and semantics of the application.
# Table of Contents
1 Introduction
2 Syntax and declarations
3 Input and output
4 Assert and retract
5 Failure driven loops
6 Cuts and indexing
7 Accumulators and difference lists
8 Determinism
9 All-solutions predicates
1 Introduction
This document is intended to help the reader translate existing Prolog programs to Mercury. We assume that the reader is familiar with Prolog. This guide should be used in conjunction with the Mercury User’s Guide and Reference Manuals.
If the Prolog code is quite declarative and does not make use of Prolog’s non-logical constructions, the job of converting it to Mercury will usually be quite straightforward. However, if the Prolog program makes extensive use of non-logical constructions, conversion may be very difficult, and a direct transliteration may be impossible. Mercury code typically has a very different style to most Prolog code.
2 Syntax and declarations
Prolog and Mercury have very similar syntax. Although there are a few differences, by and large if the syntax of a program is accepted by a Prolog system, it will be accepted by Mercury. There are however a few extra operators defined by the Mercury term parser (see the “Builtin operators” section of the “Syntax” chapter of the Mercury Language Reference Manual).
In addition, Mercury implements both existential and universal quantification, using the syntax

```
some Vars Goal
```

and

```
all Vars Goal
```
The constructor for lists in Mercury is ‘[|]/2’, not ‘./2’.
Terms with functor ‘{}/N’ are treated slightly differently in Mercury than in ISO Prolog. ISO Prolog specifies that “{1, 2, 3}” is parsed as ‘'{}'(','(1, ','(2, 3)))’. In Mercury, it is parsed as ‘'{}'(1, 2, 3)’.
Mercury does not allow users to define their own operators.
3 Input and output
Mercury is a purely declarative language. Therefore it cannot use Prolog’s mechanism for doing input and output with side-effects. The mechanism that Mercury uses is the threading of an object that represents the state of the world through the computation. The type of this object is `io.state`, or just `io` for short. Each operation that affects the state of the world must have two arguments of this type, representing respectively the state of the world before the operation, and the state of the world after the operation. The modes of the two arguments that are added to calls are `di` for “destructive input” and `uo` for “unique output”. The first means that the input variable must be the last reference to the original state of the world, and the latter means that the output variable is guaranteed to be the only reference to the state of the world produced by this predicate.
For example, the direct translation of the Prolog predicate
```prolog
write_total(Total) :-
    write('The total is '),
    write(Total),
    write('.'),
    nl.
```
into Mercury yields this Mercury predicate:
```mercury
:- pred write_total(int::in, io::di, io::uo) is det.

write_total(Total, IO0, IO) :-
    print("The total is ", IO0, IO1),
    print(Total, IO1, IO2),
    print('.', IO2, IO3),
    nl(IO3, IO).
```
The variables IO0, IO1 etc each represent one version of the state of the world. IO0 represents the state before the total is printed, IO1 represents the state after just “The total is ” has been printed, and so on. However, programmers usually don’t want to give specific names to all these different versions; they want to name only the entities that all these variables represent different versions of. That is why Mercury supports state variable notation. This is syntactic sugar designed to make it easier to thread a sequence of variables holding the successive states of an entity through a clause. You as the programmer name only the entity, and let the compiler name the various versions. With state variables, the above clause would be written as
```mercury
write_total(Total, !IO) :-
    print("The total is ", !IO),
    print(Total, !IO),
    print('.', !IO),
    nl(!IO).
```
and the compiler will internally convert this clause into code that looks like the previous clause. (The usual convention in Mercury programs is to name the state variable representing the state of the world !IO.)
In the head of a clause, what looks like an argument that consists of a variable name prefixed by an exclamation mark actually stands for two arguments which are both variables, holding the initial and final state of whatever entity the state variable stands for. In this case, they stand for the state of the world, respectively before and after the line about the total has been printed. In calls in the body of a clause, what looks like an argument that consists of a variable name prefixed by an exclamation mark also stands for two arguments which are both variables, but these hold, respectively, the current and the next state.
In Prolog, it is quite normal to give to print an argument that is an atom that is not used anywhere else in the program, or at least not in code related to the code that does the printing. This is because the term being printed does not have to belong to a defined type. Since Mercury is strongly typed, the atom being printed would have to be a data constructor of a defined type. A Mercury programmer could define a meaningless type just to give one of its data constructors to a call to print, but it is far better to simply call a predicate specifically designed to print the string, or integer, or character, you want to print:
```mercury
write_total(Total, !IO) :-
    io.write_string("The total is ", !IO),
    io.write_int(Total, !IO),
    io.write_char('.', !IO),
    io.nl(!IO).
```
The `io.` prefix on the predicates called in the body indicates that the callees are in the `io` module of the Mercury standard library. This module contains all of Mercury’s primitive I/O operations. These module qualifications are not strictly necessary (unless two or more modules define predicates with the same names and argument types, the Mercury compiler can figure out which modules called predicates are in), but Mercury convention is to make the module qualifier explicit in order to make the intent of the code crystal clear to readers.
The above could also be written more compactly like this:
```mercury
write_total(Total, !IO) :-
    io.format("The total is %d.\n", [i(Total)], !IO).
```
The first argument of `io.format` is a format string modelled directly on the format strings supported by `printf` in C, while the second is a list of the values to be printed, which should have one value for each conversion specifier. In this case, there is one conversion specifier, ‘%d’, which calls for the printing of an integer as a decimal number, and the corresponding value is the integer `Total`. Since Mercury is strongly typed, and different arguments may have different types, in the argument list integers must be wrapped inside `i()`, floats must be wrapped inside `f()`, strings must be wrapped inside `s()`, and chars must be wrapped inside `c()`. Despite appearances, in the usual case of the format string being constant, the wrappers and the list of arguments have neither time nor space overhead, because the compiler optimizes them away, replacing the call to `io.format` with the calls to `io.write_string`, `io.write_int` etc above.
One of the important consequences of our model for input and output is that predicates that can fail may not do input or output. This is because the state of the world must be a unique object, and each I/O operation destructively replaces it with a new state. Since each I/O operation destroys the current state object and produces a new one, it is not possible for I/O to be performed in a context that may fail, since when failure occurs the old state of the world will have been destroyed, and since bindings cannot be exported from a failing computation, the new state of the world is not accessible.
In some circumstances, Prolog programs that suffer from this problem can be fixed by moving the I/O out of the failing context. For example
```
    ...,
    ( solve(Goal) ->
        ...
    ;
        ...
    ),
    ...
```
where `solve(Goal)` does some I/O can be transformed into valid Mercury in at least two ways. The first is to make `solve` deterministic and return a status:
```
    ...,
    solve(Goal, Result, !IO),
    (
        Result = success(...),
        ...
    ;
        Result = failure,
        ...
    ),
    ...
```
The other way is to transform ‘solve’ so that all the input and output takes place outside it:
```
    ...,
    io.write_string("calling: ", !IO),
    solve.write_goal(Goal, !IO),
    ( solve(Goal) ->
        io.write_string("succeeded\n", !IO),
        ...
    ;
        ...
    ),
    ...
```
4 Assert and retract
In Prolog, calls to the built-in predicates assert and retract can change the set of clauses of the program currently being executed. This makes compilation very tricky, and different Prolog systems react differently when the program alters the definition of a predicate that has active calls. It also makes program analysis almost impossible, since the program that the compiler should analyze is not actually available at compilation time. Since Mercury is a compiled language, it does not allow the compiled program to be altered in any way.
Most uses of assert and retract in Prolog programs are not actually intended to alter the program. Their purpose is just to maintain a set of facts, with semantically separate sets of facts being stored in separate predicates. (Most Prolog systems require these predicates to be marked as dynamic predicates.) A Mercury programmer who wants to store a set of facts would simply store those facts as data (not as code) in a data structure.
The standard library contains several abstract data types (ADTs) for storing collections of items, each of which is useful for different classes of problems.
If the order of the items in the collection is important, consider the list and cord ADTs. list has lower constant factors, but the cord ADT supports concatenation in constant time. The stack and queue ADTs implement lists with specific semantics and operations appropriate to those semantics.
If the order of items in the collection is not important, and the items are key-value pairs, you can store them in ADTs implementing several different kinds of trees, including rbtree and tree234. In the absence of a compelling reason to choose a different implementation, we recommend the map ADT for generic use. Maps are implemented using 234 trees, which are guaranteed to be balanced and thus have good worst-case behavior, but also have good performance in the average case. bimap, injection, multi_map and rtree are specialized versions of maps.

If the items in the collection are not key-value pairs, then consider the set and bag ADTs. The set ADT itself has several versions, some based on trees and some based on bit vectors, each with its own tradeoffs.
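As an illustrative sketch (not taken from this guide; the module name and data are invented), a set of facts that a Prolog program would maintain with assert/retract becomes an ordinary map value threaded through the code:

```mercury
:- module phone_book_demo.
:- interface.
:- import_module io.
:- pred main(io::di, io::uo) is det.
:- implementation.
:- import_module map, string.

main(!IO) :-
    % Instead of assert(phone("alice", 1234)), build up a map value.
    map.init(Book0),
    map.det_insert("alice", 1234, Book0, Book1),
    map.det_insert("bob", 5678, Book1, Book),
    % Instead of calling phone("alice", N), look the key up in the map.
    ( map.search(Book, "alice", Number) ->
        io.format("alice: %d\n", [i(Number)], !IO)
    ;
        io.write_string("alice: unknown\n", !IO)
    ).
```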
The Mercury standard library has some modules for more specialized collections as well, such as graphs. And of course, if needed, you can always create your own ADT.
If for some reason you cannot thread variables holding some data through the parts of your program that need access to that data, then you can store that data in a ‘mutable’, which is as close as Mercury comes to Prolog’s dynamic predicates. Each Mercury mutable stores one value, though of course this value can be a collection, and that collection may be (but doesn’t have to be) implemented by one of the Mercury standard library modules listed above.
Each mutable has a getter and setter predicate. You can set things up so that the getter and setter predicates both function as I/O operations, destroying the current state of the world and returning a new state of the world. This effectively considers the mutable to be part of the state of the world outside the Mercury program. The io module also provides another way to do this, by allowing the storage of information in the io.state using the predicates io.get_globals and io.set_globals. These predicates take an argument of type univ, the universal type, so that by using type_to_univ and univ_to_type it is possible to store data of any type in the io.state.
Alternatively, you can set things up so that the getter and setter predicates of a mutable are not I/O operations, but in that case calls to those predicates are not considered pure Mercury, and must instead use Mercury’s mechanisms for controlled impurity. These mechanisms require all code that is not pure Mercury to be explicitly marked as such. They are intended to allow programmers to implement pure interfaces using small pieces of impure code, for use in circumstances where there is no feasible way to implement that same interface using pure code. Most Mercury programs do not use impure code at all. The ones that do make use of it use it very sparingly, with 99.9+% of their code being pure Mercury.
5 Failure driven loops
In pure Mercury code, the goal ‘Goal, fail’ is interchangeable with the goal ‘fail, Goal’, and Goal cannot have any side effects. As a consequence of these two facts, it is not possible to write failure driven loops in pure Mercury code. While one could try to use Mercury’s mechanisms for controlled impurity to implement failure driven loops using impure Mercury code, this is not part of the culture of Mercury programming, because failure driven loops are significantly less clear and harder to maintain than other means of iterating through a sequence. Since they are inherently imperative and not declarative, they are also very hard for compilers to optimize.
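For contrast, this is the kind of Prolog idiom under discussion: a typical failure-driven loop, which has no pure Mercury equivalent:

```prolog
% Print every element of List by backtracking through member/2,
% relying on the side effect of write/1 surviving each failure.
print_all(List) :-
    member(X, List),
    write(X), nl,
    fail.
print_all(_).
```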
If the sequence must be generated through backtracking, then a Mercury programmer could just collect all the solutions together using the standard Mercury library predicate solutions, and iterate through the resulting list of solutions using an ordinary tail recursive predicate.
However, most Mercury programmers would prefer to generate a list of solutions directly. This can be easily done by replacing code that generates alternative solutions through backtracking, using predicates like this:
```mercury
generate_solutions(In1, In2, Soln) :-
    (
        % Generate one value of Soln from In1 and In2.
        generate_one_soln(In1, In2, Soln)
    ;
        % Compute a new value for the second input.
        In2' = ....
        % Generate more values of Soln from In1 and In2'.
        generate_solutions(In1, In2', Soln)
    ).
```
in which the different solutions are produced by different disjuncts, with predicates in which the different solutions are produced by different conjuncts, like this:
```mercury
generate_solutions(In1, In2, [Soln | Solns]) :-
    generate_one_soln(In1, In2, Soln),
    In2' = ....
    generate_solutions(In1, In2', Solns).
```
Unlike predicates following the previous pattern, predicates following this pattern can exploit Mercury’s determinism system to ensure that they have considered all the possible combinations of the values of the input arguments. They are also more efficient, since choice point creation is expensive.
They can be made even more efficient if the consumption of the solutions can be interleaved with their generation. For example, if the solutions are intended to be inputs to a fold (i.e. each solution is intended to update an accumulator), then this interleaving can be done like this:
```mercury
generate_and_use_solutions(In1, In2, !Acc) :-
    generate_one_soln(In1, In2, Soln),
    use_solution(Soln, !Acc),
    In2' = ....
    generate_and_use_solutions(In1, In2', !Acc).
```
6 Cuts and indexing
The Prolog cut operator is not part of the Mercury language. Most Prolog code that uses cuts should probably be translated into Mercury using if-then-elses.
In both Prolog and Mercury, the behavior of an if-then-else ‘C -> T ; E’ depends on whether the condition ‘C’ has any solutions. If it does, then they both execute the then-part ‘T’; if it does not, then they both execute the else-part ‘E’. However, Prolog and Mercury differ in what they do if the condition has more than one solution.
In most versions of Prolog, if the condition of an if-then-else has more than one solution, the if-then-else throws away all its solutions after the first. The way this is usually implemented is that after the condition generates its first solution, and before execution continues to the then-part goal with the bindings in that first solution, the Prolog implementation cuts away all the choice points in the condition. This prevents backtracking into the condition, which thus cannot generate any of its other solutions.
Mercury does not prune away later solutions in conditions. If the condition has more than one solution, Mercury will execute the then-part goal on every one of them in turn, using the usual rules of backtracking.
Mercury allows if-then-elses to be written not just as ‘C -> T ; E’, but also as ‘if C then T else E’. These two syntaxes have identical semantics.
Prolog programs that use cuts and a ‘catch-all’ clause should be transformed to use if-then-else in Mercury.
For example

```
p(this, ...) :- !,
    ...
p(that, ...) :- !,
    ...
p(Thing, ...) :-
    ...
```

should be rewritten as

```
p(Thing, ...) :-
    ( Thing = this ->
        ...
    ; Thing = that ->
        ...
    ;
        ...
    ).
```
The Mercury compiler does much better indexing than most Prolog compilers. Actually, the compiler indexes on all input variables to a disjunction (separate clauses of a predicate are merged into a single clause with a disjunction inside the compiler). As a consequence, the Mercury compiler indexes on all arguments. It also does deep indexing. That is, a predicate such as the following will be indexed.
```
p([f(g(h)) | Rest]) :- ...
p([f(g(i)) | Rest]) :- ...
```
Since indexing is done on disjunctions rather than clauses, it is often unnecessary to introduce auxiliary predicates in Mercury, whereas in Prolog it is often important to do so for efficiency.
If you have a predicate that needs to test all the functors of a type, it is better to use a disjunction instead of a chain of conditionals, for two reasons. First, if you add a new functor to a type, the compiler will still accept the now incomplete conditionals, whereas if you use a disjunction you will get a determinism error that pinpoints which part of the code needs changing. Second, in some situations the code generator can implement an indexed disjunction (which we call a switch) using binary search, a jump table or a hash table, which can be faster than a chain of if-then-elses.
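As a small invented example (this type and predicate are not from the guide), a complete switch over a type's functors looks like this:

```mercury
:- type colour
    --->    red
    ;       green
    ;       blue.

:- pred describe(colour::in, string::out) is det.

describe(Colour, Desc) :-
    % One arm per functor of colour: the compiler turns this into a
    % switch, and adding a new functor to the type makes this predicate
    % a determinism error until a matching arm is added.
    (
        Colour = red,
        Desc = "warm"
    ;
        Colour = green,
        Desc = "natural"
    ;
        Colour = blue,
        Desc = "cool"
    ).
```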
7 Accumulators and difference lists
Mercury does not in general allow the kind of aliasing that is used in difference lists. Prolog programs using difference lists fall into two categories — programs whose data flow is “left-to-right”, or can be made left-to-right by reordering conjunctions (the Mercury compiler automatically reorders conjunctions so that all consumers of a variable come after the producer), and those that contain circular dataflow.
Programs which do not contain circular dataflow do not cause any trouble in Mercury, although the implicit reordering can sometimes mean that programs which are tail recursive in Prolog are not tail recursive in Mercury. For example, here is a difference-list implementation of quick-sort in Prolog:
```prolog
qsort(L0, L) :- qsort_2(L0, L - []).

qsort_2([], R - R).
qsort_2([X|L], R0 - R) :-
    partition(L, X, L1, L2),
    qsort_2(L1, R0 - R1),
    R1 = [X|R2],
    qsort_2(L2, R2 - R).
```
Due to an unfortunate limitation of the current Mercury implementation (partially instantiated modes don’t yet work correctly), you need to replace all the ‘-’ symbols with commas. However, once this is done, and once you have added the appropriate declarations, Mercury has no trouble with this code. Although the Prolog code is written in a way that traverses the input list left-to-right, appending elements to the tail of a difference list to produce the output, Mercury will in fact reorder the code so that it traverses the input list right-to-left and constructs the output list bottom-up rather than top-down. In this particular case, the reordered code is still tail recursive — but it is tail-recursive on the first recursive call, not the second one!
If the occasional loss of tail recursion causes efficiency problems, or if the program contains circular data flow, then a different solution must be adopted. One way to translate such programs is to transform the difference list into an accumulator. Instead of appending elements to the end of a difference list by binding the tail pointer, you simply insert elements onto the front of a list accumulator. At the end of the loop, you can call ‘list.reverse’ to put the elements in the correct order if necessary. Although this may require two traversals of the list, it is still linear in complexity, and it probably still runs faster than the Prolog code using difference lists.
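A minimal sketch of the accumulator pattern (invented for illustration, not from the guide; assumes the int and list modules are imported): cons each element onto the front of an accumulator, then reverse once at the end:

```mercury
:- pred positives(list(int)::in, list(int)::out) is det.

positives(Xs, Ys) :-
    positives_acc(Xs, [], RevYs),
    % A second traversal, but the whole operation is still linear.
    list.reverse(RevYs, Ys).

:- pred positives_acc(list(int)::in, list(int)::in, list(int)::out) is det.

positives_acc([], Acc, Acc).
positives_acc([X | Xs], Acc0, Acc) :-
    ( X > 0 ->
        % Cons onto the front of the accumulator instead of binding
        % the tail pointer of a difference list.
        positives_acc(Xs, [X | Acc0], Acc)
    ;
        positives_acc(Xs, Acc0, Acc)
    ).
```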
In most circumstances, the need for difference lists is negated by the simple fact that Mercury is efficient enough for them to be unnecessary. Occasionally they can lead to a significant improvement in the complexity of an operation (mixed insertions and deletions from a long queue, for example) and in these situations an alternative solution should be sought (in the case of queues, the Mercury library uses the pair of lists proposed by Richard O’Keefe).
8 Determinism
The Mercury language requires the determinism of all predicates exported by a module to be declared. The determinism of predicates that are local to a module may be declared but don’t have to be; if they are not declared, they will be inferred. By default, the compiler issues a warning message where such declarations are omitted, but if you want to use determinism inference, you can disable this warning using the ‘--no-warn-missing-det-decls’ option.
Determinism checking and inference is an undecidable problem in the general case, so it is possible to write programs that are deterministic, and have the compiler fail to prove the fact. The most important aspect of this problem is that the Mercury compiler only detects the clauses of a predicate (or the arms of a disjunction, in the general case) to be mutually exclusive, allowing the execution of at most one disjunct at runtime, if the clauses or disjuncts each unify the same variable (or a copy of that variable) with distinct functors, with these unifications all taking place before the first call in the clause or disjunct. For such disjunctions, the Mercury compiler generates a switch (see the earlier section on indexing). If a switch has a branch for every functor in the type of the switched-on variable, then the switch guarantees that exactly one of its arms will be executed. If all the arms are deterministic goals, then the switch itself is deterministic.
The Mercury compiler does not do any range checking of integers, so code such as:
```
factorial(0, 1).
factorial(N, F) :-
    N > 0,
    N1 is N - 1,
    factorial(N1, F1),
    F is F1 * N.
```
would be inferred to be “nondeterministic”. The compiler would infer that the two clauses are not mutually exclusive, because it does not know about the semantics of ‘>’/2, and it would infer that the predicate as a whole could fail because (a) the unification of the first argument with 0 can fail, so the first clause is not guaranteed to generate a solution, and (b) the call to ‘>’/2 can fail, and so the second clause is not guaranteed to generate a solution either.
The general solution to such problems is to use a chain of one or more if-then-elses.
```
:- pred factorial(int::in, int::out) is det.

factorial(N, F) :-
    ( N < 0 ->
        unexpected($pred, "negative N")
    ; N = 0 ->
        F = 1
    ;
        N1 = N - 1,
        factorial(N1, F1),
        F = F1 * N
    ).
```
The unexpected predicate is defined in the require module of the Mercury standard library. Calls to it throw an exception, and unless that exception is caught, it aborts the program. The term $pred is automatically replaced by the compiler with the (module-qualified) name of the predicate in which it appears.
9 All-solutions predicates
Prolog’s various different all-solutions predicates (‘findall/3’, ‘bagof/3’, and ‘setof/3’) all have semantic problems. Mercury has a different set of all-solutions predicates (‘solutions/2’, ‘solutions_set/2’, and ‘unsorted_solutions/2’, all defined in the library module ‘solutions’) that address the problems of the Prolog versions. To avoid the variable scoping problems of the Prolog versions, rather than taking both a goal to execute and an aliased term holding the resulting value to collect, Mercury’s all-solutions predicates take as input a single higher-order predicate term. The Mercury equivalent to
```prolog
intersect(List1, List2, Intersection) :-
    setof(X, (member(X, List1), member(X, List2)), Intersection).
```
is
```mercury
intersect(List1, List2, Intersection) :-
    solutions(
        ( pred(X::out) is nondet :-
            list.member(X, List1),
            list.member(X, List2)
        ), Intersection).
```
Alternately, this could also be written as
```mercury
intersect(List1, List2, Intersection) :-
    solutions(member_of_both(List1, List2), Intersection).

:- pred member_of_both(list(T)::in, list(T)::in, T::out) is nondet.

member_of_both(List1, List2, X) :-
    list.member(X, List1),
    list.member(X, List2).
```
and in fact that is exactly how the Mercury compiler implements lambda expressions.
The current implementation of ‘solutions/2’ is a “zero-copy” implementation, so the cost of ‘solutions/2’ is independent of the size of the solutions, though it is proportional to the number of solutions.
Author(s): Ali, Liaqat; Jahankhani, Hamid; Jahankhani, Hossein
Title: Accessibility evolution tools comparison
Year of publication: 2007
Link to published version: http://www.uel.ac.uk/act/proceedings/documents/ACT07.pdf
ACCESSIBILITY EVOLUTION TOOLS COMPARISON
Liaqat Ali, Hamid Jahankhani, Hossein Jahankhani
Innovative Informatics Research Group, School of Computing and Technology
{Alil, hamid.jahankhani, h.jahankhani}@uel.ac.uk
Abstract: The Web is widely used as a delivery channel, and the importance of eAccessibility to digital resources is now widely acknowledged. By developing a series of guidelines and design standards, the W3C WAI has played an important role in achieving the goal of eAccessibility and in ensuring that Web resources can be accessed by people with special needs. Accessibility tools play a critical and important role in ensuring the accessibility of the Web, performing a static analysis of home pages or sites regarding their accessibility. This paper claims that, because of a lack of standardization, these tools often provide different results for the same tested Website according to their own interpretation, and that due to the lack of standard testing methods, eAccessibility is a difficult goal to achieve.
1. Introduction
The growth of the World Wide Web means that people with serious sight problems now have the opportunity to enjoy a wealth of information and services that was previously unavailable to them, from up-to-the-minute news and travel timetables to online shopping and banking. With the help of synthesised speech and Braille display technology, even completely blind people can use the Web. Braille is a system of raised dots which blind people can read with their fingers. Many blind and partially sighted people prefer particular types of information in Braille.
Louis Braille was born in 1809, at Coupvray, near Paris. In 1826, he was elected Professor at the Institution. Both as pupil and teacher he spent most of his leisure trying to find a system by which the blind could write in relief. One, which had been invented by M. Barbier, appeared the most promising. In 1825, he suggested embossing by means of a point method, the character containing 12 dots, 6 high and 2 wide, arranged in a rectangle. The character thus obtained was large and unwieldy, though capable of an almost unlimited number of combinations. Louis Braille cut Barbier's character in two and thus produced his well-known 3 by 2 cell. On this basis Braille was the first to devise a practical scheme for printing and writing in tangible form, suitable to the tactile capacity of all. This was in 1829. The figure below is also an image from 1829.
Figure 1, Braille of 1829. This is an image of Braille's original published alphabet, which involved the use of a horizontal rule that was later eliminated from the alphabet. It is scanned from a pamphlet originally published by the Royal National Institute for the Blind.
Usability is paramount for the success of web sites. The World Wide Web and its use are growing at a very fast rate with 100 million Web sites expected by 2002 (Jahankhani, 2002). Design for usability therefore is of principal importance. In today's age of pervasive computing, users have the ability to access information stored on powerful networks anywhere, anytime. Such things as personal digital assistants (PDAs), smart phones, wearable computers and other mobile devices give the user instant access to global information systems (Jahankhani 2002).
The challenge is to ensure that the information from these devices takes into account both the user's capabilities and his/her device. Therefore by adapting video, images, audio and text to both individual devices and individual user requirements will help to ensure that people with disabilities can take advantage of the benefits of pervasive computing.
Visually impaired people, ‘read’ web pages using software tools known as screen readers, which generate speech and/or refreshable Braille output. Even the simplest web pages generally feature images and use tables to format their navigation menus and content, while many others use JavaScript, animation and other technologies to - supposedly - make their navigation systems more user-friendly. This creates a number of problems for people with visual impairments, as they cannot see the images and their screen readers can have serious problems interpreting tables, animation and JavaScript. In many cases this renders entire sites unusable.
The W3C WAI plays an important role in making Websites on the internet usable by all. Web accessibility traditionally refers to the development of Web sites that are accessible to all users regardless of any disability. WAI has developed a series of guidelines and standards to ensure that Web resources can be accessed by people with special needs.
Now it is time to have a look at how far these standards have been implemented in the design of Web sites. If these standards and guidelines are followed, Websites can be made more accessible to a wider range of users with disabilities, and more objectives can be achieved in the public sector. The assessment and validation of Websites is an art. Different tools are available for the assessment and validation of websites, which is in itself a positive step towards eAccessibility. These tools play a critical and important role in ensuring the accessibility of the Web and perform a static analysis of home pages or sites regarding their accessibility. Unfortunately, due to the lack of standards and regulations, these tools often provide different results for the same website.
This paper aims to review different eAccessibility evaluation tools and to examine why the results of these tools differ. By comparing the results obtained by three different tools, the research also reports that, despite standards set by international agencies like the W3C, many websites still fall short of accessibility standards.
2. eAccessibility Testing and Validation Tools
The situation has improved significantly in regard to the web's compatibility with access tools over the last few years, such as screen readers, magnifiers, alternative keyboards and mouse systems. The enormous challenge facing the web is to make sure that all developers and designers follow accessibility guidelines in providing descriptions that optimize access for end users with disabilities. There has been a lot of discussion on different forums regarding eAccessibility on the part of web developers and designers. This has caused the development of a plethora of new software utilities, and the interesting part of these utilities is that all claim to automate the process of evaluating and/or repairing web pages.
The aim of all these utilities and tools is to assist developers and authors of HTML in pinpointing the changes needed in the HTML coding to make sure a high level of eAccessibility is achieved.
Two sets of standards are available to developers of evaluation and repair products pursuing the goal of eAccessibility. The first is the Web Content Accessibility Guidelines 1.0 from the Web Accessibility Initiative (WAI) of the World Wide Web Consortium (W3C), finalized in 1999, which provides a priority-based (Priority 1, 2 and 3) checklist of guidelines. This document is now used as a reference for the evaluation and assessment of eAccessibility and web-based resources.
In addition, Section 508 of the Rehabilitation Act Amendments of 1998 covers access to United States federal agencies, requiring them to make their electronic and information technology more accessible to people with special needs. An independent US federal agency, The Access Board, whose goal is to achieve the highest level of eAccessibility, has established a “Guide to the Section 508 Standards for Electronic and Information Technology.”
While these two standards provide the context for evaluation, assessment and repair, very little attention has been given to the role of authoring tools, such as HTML editors, in the design and development of accessible Web resources. An authoring tool is a mechanism that helps developers and authors of HTML who have limited knowledge of inclusive design practices. This absence of integrated authoring tools has led to the development of other products designed to examine the accessibility of a website after it has been designed and developed (Jahankhani, 2002). With a wide variety of these products available on the market, choosing the one responsible for determining how well a site accommodates the eAccessibility guidelines is really a difficult task. The reason behind this difficulty is the unavailability of any comprehensive reviews of these types of software.
Although all of these tools play a critical and important role in ensuring the accessibility of the Web, and perform a static analysis of home pages or sites regarding their accessibility, the testing and validation of a website is still very important. The two common types of HTML testers are validators and linters, and people new to eAccessibility should know the difference between them.
The main difference between a validator and a linter is that a validator checks a page against a published HTML specification for technical errors, whereas a linter checks a page for commonly made mistakes. It is often a good idea to use both as they can sometimes find different types of problems (Any Browser, 2006).
All the available tools for the assessment and validation of eAccessibility are in themselves a positive step towards the goal of a high level of eAccessibility, but using the right tool among them for developing and designing an accessible website is the most important task. A tool that can help to identify the inaccessible elements, rather than interfering with the website, should be selected. A key point to understand about accessibility tools is that they can only partially check the accessibility of websites through automation; human judgement and a manual check of the website are still required.
No automated accessibility evaluation tool can find all of your content's accessibility errors. Automated programs can only evaluate a few of the many possible accessibility issues that can arise in a particular Web site (WebAim 2006).
Some tools include prompting for alternative text, while others support certain HTML elements for increasing and ensuring the accessibility of the Web. The three basic concepts for the effectiveness of a tool are completeness, correctness and effectiveness (Brajnik 2004). A complete list of these tools is available at http://www.w3.org/WAI/ER/existingtools.html. W3C divides these tools into three sub-categories, i.e.
- Evaluation Tools
- Repair Tools
- Filter and Transform Tools (WAI, 2006).
The Evaluation tools are further divided into three sub-categories:
- General: tools that test for a variety of accessibility issues.
- Focused: tools that test for one or a limited set of accessibility aspects.
- Service: tools that run on an ongoing basis, such as proxies, Web services and monitors (WAI, 2006).
Tools can check the accessibility of a website against the Section 508 standard or the checkpoints of WCAG 1.0 Priority 1, 2 and 3. It is important to understand to what standard a website is accessible. The standard developed by W3C for accessible Web sites is prioritized according to impact on accessibility as follows:
Priority [1] or ‘A’ checkpoints are those that the developer of the Web page must satisfy to ensure that the page itself is accessible.

Priority [2] or ‘AA’ checkpoints are those that the Web developer should satisfy to ensure that certain groups will be able to access the information on the web page.

Priority [3] or ‘AAA’ checkpoints are those that the web developer may address to ensure that all content on the page is completely accessible (W3C, 2000).
Tools designed to evaluate web pages against WCAG 1.0 Priority 1, 2 and 3 display errors automatically, and they generate a variety of reports based on the results and analysis of the web page or web site.
There are now several different tools for the testing, assessment and validation of Websites, and they differ from one another in several dimensions. Some of them only do testing, while other tools can also fix a page. They differ from each other in terms of effectiveness, cost and reliability. The important thing is to evaluate the quality of these tools. For a typical Web developer trying to develop and design a better, more accessible Website, the role of these tools is critical. By evaluating and comparing the accessibility tools, Web developers and designers can make an appropriate selection. Such an evaluation will also foster competition between tool manufacturers and so improve the quality of the tools themselves.
The automated tools identify features of a Website that might cause it to fail in terms of its accessibility to disabled people. For example, if an image element in a Website does not contain the alt attribute, then the Website becomes an accessibility failure, because the page cannot be accessed through a speaking browser.
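To make the point concrete (a made-up fragment, not taken from the paper):

```html
<!-- Inaccessible: a screen reader has no text to announce. -->
<img src="logo.gif">

<!-- Accessible: the alt attribute supplies a text equivalent. -->
<img src="logo.gif" alt="University of East London logo">
```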
US federal agencies and corporations are spending millions of dollars on such tools that claim to test web sites for accessibility (Thatcher, 2006). There are over 30 automated tools (Ivory et al, 2003). These include Accessibility Check, Accessibility Wizard, A-Prompt ATRC Web Accessibility Checker, Bobby WebXACT WatchFire, EvalAccess, Hera, Hermish, HiSoftware AccVerify Cynthia Says, Silvinha Accessibility Validator and Repair Tool, Site, Page and Accessibility Valet Demonstrator, TAW Online Accessibility Tool, UsableNet LIFT and the W3C HTML Validator (World Wide Web Consortium). Some of these tools are commercial, while others provide free online assessment of a Website.
Unfortunately, there is no standardization of these tools, and they even provide different results for a single selected Website according to their own interpretation. This creates another problem: a Web developer cannot decide which tool to use for developing a Website to the standard provided by W3C WCAG 1.0. To demonstrate this, we selected three of the existing tools (Bobby, HiSoftware AccVerify Cynthia Says and Hermish) and checked the home pages of 256 UK and US university Websites, i.e. 128 from the UK and 128 from the USA. All 256 Websites were checked with these three tools. The following table 1 shows the total number of tests conducted to find out the eAccessibility level of the university websites and to demonstrate the divergent results of these accessibility tools.
| Name of Accessibility Tool | Total No. of Universities Websites checked for Accessibility (UK) |
|---|---|
| Hermish | 28 |
| Bobby | 28 |
| Cynthia Says | 28 |
Table 1, Total Number of Conducted Accessibility Tests
The analysis found that the accessibility results of these tools differ from one another. Before going further, here is a short introduction to each of the tools.
3. Bobby WebXACT WatchFire
Bobby is one of the best-known tools for the assessment and validation of websites, and for bringing a Website up to the required standard of the Web Content Accessibility Guidelines (WCAG 1.0) and Section 508 of the US Rehabilitation Act. It is especially designed for small Websites, helping to expose eAccessibility barriers page by page through the Bobby Spider across the whole Website, including readability by screen readers, animated elements, audio and video displays, and the provision of text equivalents for all images. It checks HTML against selected accessibility guidelines and then reports on the accessibility of each page.
The Bobby tool can spider local Web pages as well as Web pages behind an installed firewall, and is ideal for large-scale accessibility testing; it performs over 90 accessibility checks during the assessment and validation of the Website under test. It was first released in 1996, and in July 2002 WatchFire acquired it from the Centre for Applied Special Technology (CAST) and took over responsibility for its marketing and distribution. Originally Bobby was based on the Trace Research and Development Centre guidelines (Trace, 2006), but when WAI introduced the WCAG 1.0 guidelines, Bobby conformed to them. Bobby is now considered one of the best-known tools for testing against the accessibility standard defined by the WAI W3C.
A window based version, Bobby 5.0, incorporates the scanning and reporting functionality of WatchFire WebQA and includes the following enhancements:
- Spidering: Flash links, JavaScript parsing and execution, http(s), Session ID(s)
- Scalability: able to scan larger sites
- Reporting
- HTML Editor Integration
- Extensive Online Help: explains why certain errors are reported as issues (WatchFire, 2005)
Bobby has therefore been selected as one of the tools used to generate this report. The tool is available at [http://webxact.watchfire.com/](http://webxact.watchfire.com/)
4. HiSoftware AccVerify Cynthia Says
HiSoftware AccVerify Cynthia Says provides for the verification of the accessibility policy and standards required for Web sites. It allows users to define and conduct custom tests, provides complete HTML validation with custom and standards-based testing and test management, and also provides solutions and repair for Section 508 and WCAG Priority 1, 2 and 3. It is a very robust program with many features and options, and it also allows customized scripts to be created. It has extensive report generation capability, including statistics and graphics. The actual report cites the possible errors for the compliance standards, along with a pass or fail notation for each item of the checklist. The new version 3.0 also contains new features, such as a project management tool in which the user can identify the areas of the Website to be validated and repaired. Cynthia Says is a special edition of the AccVerify tool by HiSoftware, designed to identify errors related to the US Rehabilitation Act Section 508 standards and the W3C Web Content Accessibility Guidelines (WCAG 1.0). Its aim is to educate Website developers and designers about eAccessibility, and it validates one page at a time. The tool can be found at [http://www.hisoftware.com/acmmonitorsitewithxact](http://www.hisoftware.com/acmmonitorsitewithxact); see [http://www.ceb.wisc.edu/accessibility/hisoftware/AccVerify/accverifyquick.htm](http://www.ceb.wisc.edu/accessibility/hisoftware/AccVerify/accverifyquick.htm) for how to use the tool.
5. Hermish
Hermish is a free web accessibility tool designed to help you bring your web site up to the required standards outlined by the World Wide Web Consortium (W3C) and Section 508. Hermish can check the source code of a page and can report on online pages. A new Hermish tool is available to check the CSS and cross-browser compatibility of Websites. Hermish also checks Web pages for deprecated elements and attributes, and can provide a screen test report for visual elements. The Hermish test can identify any incorrectly placed attributes in HTML and XHTML. The CSS browser compatibility test checks the style against a table of known compatibility issues featuring several well-known browsers and platforms. The tool can be found at [http://www.hermish.com](http://www.hermish.com)
6. Example of different results of the tools
The following figures 2, 3 and 4 show the differing accessibility test results for Priority 1 of the same tested website, [www.barclays.co.uk](http://www.barclays.co.uk). As can be seen in figures 2 and 3, the Website achieved Level A conformance according to Bobby WatchFire and Hermish, but according to HiSoftware AccVerify Cynthia Says in figure 4, the same Website did not achieve Level A conformance. The testing criterion for all these tools is the same standard, as defined by WAI WCAG 1.0.
7. Priority One Result
The Level A conformance results obtained from the tools differ markedly from one another. According to Cynthia Says, 48% of the Websites achieved Level A conformance; Bobby reports 60%, while Hermish reports that 69% of the Websites achieved Level A conformance. This is illustrated in figure 5.
Figure 2, Bobby Accessibility Result
Figure 3, Hermish Accessibility Result
Figure 4, HiSoftware Accessibility Result
Figure 5, Priority 1 result of WCAG 1.0
8. Priority 2 Result
For the higher AA conformance level, the result obtained from Cynthia Says for the tested Websites is only 9%, and Bobby reports that 11% of the Websites achieved AA conformance, but with a huge difference Hermish reports that 80% of the Websites achieved the AA conformance level. See figure 6.
Figure 6, Priority 2 result of WCAG 1.0
9. Priority 3 Result
For the AAA conformance level, according to Bobby only 3% of the tested Websites achieved AAA conformance, while Cynthia Says and Hermish both show that 36% of the tested Websites achieved the AAA conformance level. The following figure illustrates this.
Figure 7, Priority 3 result of WCAG 1.0
10. Conclusion
The growth of the World Wide Web means that people with serious sight problems now have the opportunity to enjoy a wealth of information and services that was previously unavailable to them. Automated tools have the potential to support designers and to make Web pages accessible. There are differences between the development and maintenance of Websites, but all automated accessibility tools can play a significant and positive role in the development of these websites. Unfortunately, because these automated tools are not standardized, each tool reports accessibility according to its own interpretation. The data acquired from the comparison of UK and USA university Websites using three different tools shows different accessibility levels for these Websites. The standard against which these tools check Websites is the same: each of them checks the accessibility of Websites according to the defined standard of WAI WCAG 1.0, yet the divergent results show the ineffectiveness of these tools. The estimation of the results shows that the overall accessibility of the tested websites for Level A, AA and AAA conformance reported by the Bobby tool is 37% different from the Hermish tool and 6% different from Cynthia Says, and vice versa. The overall difference between the Hermish and Cynthia Says results is calculated as 31%, which shows that these tools need to be updated to reflect the state of the art of the available technology. Although the achievement of total accessibility is genuinely difficult because of problems such as different disabilities, language barriers, and hardware and software inconsistencies, the adoption of these tools is also limited because of limited awareness of the benefits of accessibility. The need to standardize these tools is now vital. The World Wide Web Consortium can play an important role in standardizing these tools. A further thorough review of other existing tools is required to bring these tools into a standard format.
Acknowledgements
This research is supported by a grant from Dr. Wali Muhammad Trust and High Commission for Pakistan in London UK.
References
Thatcher (2006), Web Accessibility testing http://www.jimthatcher.com/testing.htm
G/MAX: An Object-Oriented Framework for Flexible and Efficient Multi-Protocol Communications
Donald F. Box
Tatsuya Suda
Technical Report 93-39
Notice: This Material may be protected by Copyright Law (Title 17 U.S.C.)
This material is based upon work supported by the National Science Foundation under Grant No. NCR-8907909. This research is also in part supported by University of California MICRO program.
G/MAX: An Object-Oriented Framework for Flexible and Efficient Multi-Protocol Communications
Donald F. Box and Tatsuya Suda
Department of Information and Computer Science.
University of California, Irvine,
Irvine, CA 92717-3425
(714) 725-3097 (voice)
(714) 856-4056 (fax)
dbox@ics.uci.edu, suda@ics.uci.edu
Abstract
This paper describes a new architecture for high performance distributed applications and a supporting framework. This architecture applies object-oriented design and implementation techniques to build a framework for platform-independent distributed application specification and implementation using existing programming languages and operating systems. It utilizes an efficient and extensible layering architecture that allows new abstract data types, new presentation-layer protocols, and new interprocess communication mechanisms to be added as they become necessary. Experimental results are presented demonstrating that the multiple layers of abstraction used do not compromise efficiency.
I Introduction
Developing high performance distributed applications that must communicate with a diverse range of remote entities is a non-trivial task due to the complexity and heterogeneity of interapplication communication mechanisms and their interfaces. Adding distribution to existing applications can result in an inordinate amount of reengineering due to the lack of high-level support for distribution in most traditional environments. There has been considerable research in the areas of both distributed operating systems[1,2,3,4,5,6] that seek to provide distributional transparency to the designer, as well as programming languages for distributed application development[7,8,9,10,11] that offer language constructs to facilitate distribution. However, these systems typically are not designed to interoperate with the wide variety of protocols or data formats that are used in a global internet environment.
The emergence of large scale high speed networks is shifting the performance bottleneck up from the communication substrate to the actual bandwidth offered to the user. This bottleneck is due largely to presentation formatting that must be performed to address heterogeneous internal data representation. [12] presents an architectural approach to reducing the impact of this processing based on Application Level Framing and Integrated Layer Processing. This approach benefits not from eliminating presentation formatting, but from (a) pipelining multiple processing stages to reduce unnecessary data movement and (b) allowing the application to process incoming data in application-specified units, thus reducing the penalty (at the presentation layer) for lost or misordered packets.
This paper describes the G/MAX system, a framework for high performance distributed application design and implementation. G/MAX is inspired by the concepts proposed in [12], and seeks to provide flexible and efficient application-based communication architecture. G/MAX provides language-level constructs and support for efficient and transparent distribution of application data objects. We are currently implementing and experimenting with this framework using the C++ programming language, which was chosen as the implementation language for its efficiency, its compatibility with the large body of existing C language code, and its support for object-oriented programming. The initial system is developed using traditional communication mechanisms (e.g., sockets[13], Sun RPC[14], and XTI[15]), which will ultimately be replaced by the ADAPTIVE system[16] to allow finer grain control of the actual transmission of data objects. This paper focuses on the Presentation layer of the system, and describes in detail the abstractions used for both presentation objects and communication medium convergence.
II Design Goals
The framework described in this paper is designed to satisfy the following goals:
**Basic Encoding Rule Independence and Transparency:** Due to variances in processor architectures, operating systems, programming languages and their compilers, data (objects) that must be transported to a foreign host must be encoded into a format that all communicating parties can recognize. Under the OSI Reference Model[17], this task is the responsibility of Layer 6, the Presentation Layer. Presentation protocols are traditionally implemented using a data definition language for humans to specify the data formats to be exchanged. These data definitions are then compiled according to a set of basic encoding rules (BER) that specify the exact binary representation of each data type. For maximum flexibility, our framework allows Presentation Layer services to be transparently selected either for compatibility with existing applications (e.g., ASN.1[18] for compatibility with OSI applications) or for the best match to new applications (e.g., XDR with Multimedia extensions for bandwidth/processing intensive applications). A single object can be transported using multiple encoding schemes, with the correct one for a given end-to-end association selected automatically via strong typing and function/operator overloading. This allows a communicating entity to maintain a single internal data format that is most efficient for local processing, while using ASN.1 to communicate information to network management applications and a more efficient format for more time-critical communications.
**Object/Task Location Independence and Transparency:** The emergence of distributed object management systems and languages[19,8,20,21] has shown that using a distributed object-oriented paradigm is a powerful and expressive way to design and implement distributed applications. Our framework provides distributed object management facilities by augmenting objects with the necessary member functions to transparently or explicitly designate the location of data members and the constituent operations performed. The actual location of various application objects can be transparently selected by the application designer to match the communication characteristics of the application. Object location can be explicitly designated either at the time of object instantiation, or during the object's lifetime via a single member function. Finer-grain control of object location and migration can be specified by designating member functions or data members for remote invocation/instantiation, allowing the application designer to distribute a single object across multiple locations.
**Communication Substrate Independence and Transparency:** There is presently a very large number of communication substrates available to the distributed system designer (e.g., TCP[22], OSI-TP4[23], VMTP[24], NETBLT[25], XTP[26], etc.), with new protocols on the horizon (e.g., Bellcore's TP++, OSI HSTP). Each of these protocols provides varying levels of performance and types of service. To take advantage of advances in network technology, it is essential that applications sufficiently insulate themselves from idiosyncrasies of a given substrate without unduly reducing efficiency. Our framework seeks to decouple object transmission/reception from the underlying communication subsystem. This allows the same code to be used portably across many different communication subsystems without regard to the selected substrate. The framework provides a minimal yet functional base interface to basic communication services, while allowing access to substrate-specific features, functions and formats in an efficient yet isolatable manner.
**Efficient yet Robust “Higher Layer” Protocol Services:** By designing the layering architecture for both transparency and efficiency, protocol layers which were previously considered bottlenecks in distributed applications can now be used in high performance systems. Studies have shown that presentation layer processing is a major bottleneck in network performance[12,27], due to both the complexity of the processing involved and the additional data movement incurred from translating data between formats. Our framework addresses both of these issues:
1. **Complexity** — The fundamental data types used in a presentation protocol are hand-coded and inlined to yield a highly efficient translation. Additionally, every built-in data type has a hand-tuned *type conversion* operator to allow efficient processing of built-in types. As composite data types are directly composed of the fundamental or built-in types, their translations are efficient as well. However, implementors are free to experiment with and hand-tune a given composite object’s encoding and/or decoding.
2. **Redundant Data Copying** — The entire “data path” of the framework is designed to allow conversion-on-copy operations, scatter-read/gather-write, memory-mapped I/O, and “piping” of protocol processing operations. Additionally, we are experimenting with alternatives to the traditional socket interface to further reduce the need for copying.
**Streamlined Development Process:** Conventional systems require the designer to maintain a data description in a language other than the language being used to develop the application. Our framework allows designers and implementors to specify objects directly in the implementation language (*e.g.*, C++), without requiring an additional data description language or stub compiler. The fundamental data types and formats are precisely defined within the framework specification. This allows formats to be expressed unambiguously, while allowing the development cycle to be streamlined by using rapid prototyping techniques.
**Medium Independence and Transparency:** The class libraries used to encode data objects can easily be combined with the existing C++ *iostream* class libraries currently being standardized by ANSI. This allows objects to be stored in a platform-independent format with no additional implementation effort. It also allows persistent objects to be “played out” over a communication channel by an application that is unaware of the underlying format simply by transmitting the contents of a file. This capability is also useful for debugging purposes, as an entire communication session can be captured to a file for later examination.
---
1 The *iostream* library is the C++ analog to the C programming language’s *stdio* library. However, it offers the advantages of being type-safe and extensible to encompass new data types and I/O devices.
Figure 1 shows the layering model used in our architecture. The Data Transport and Media Convergence layers correspond to the Communication Substrate Dependent level, as shown in Figure 2. The Presentation and Distribution layers correspond to the Communication Substrate Independent level, as shown in Figure 3. The following is a description of each layer.
**Data Transport Layer**
The Data Transport Layer provides the basic local and remote interprocess communication channel. This layer represents both the IPC mechanisms and their constituent Application Programmatic Interfaces (APIs). It is assumed that each underlying IPC mechanism provides either (1) a basic duplex data stream with either connection-oriented or connectionless semantics or (2) a shared memory interface with support for mapping a memory segment into and out of an address space.
**Network Subsystems:** The remote interprocess communication substrate. The basic connection-oriented network service is expected to provide an error-free, in-order delivery of bytes (i.e., TCP[22] or equivalent). The basic connectionless network service is expected to simply provide a best effort delivery of datagrams (i.e., UDP[28] or equivalent). Additionally, more diverse classes of network services can also be supported in this model. For example, ADAPTIVE[16] provides a multi-stream transport substrate that can be flexibly and adaptively configured to provide diverse grades of service to multimedia applications.
The Network Subsystem Layer also includes the APIs to network services that allow user-space applications to access data transport operations in a protocol independent manner. Supported network APIs include BSD sockets, System V TLI, POSIX XTI, the ADAPTIVE API, x-kernel[29], NetBIOS[30], and the WINSOCK[31] library. Each of the interfaces provides both communication operations (e.g., open, close, send, recv) and addressing/naming services (e.g., address formats, name resolution and registration).
Figure 2: Communication Substrate-Dependent Level
**Media Convergence Layer**
The Media Convergence Layer provides a consistent, buffered interface to Data Transport services. It provides a basic data source/sink interface for higher layer subsystems, and uses the underlying Data Transport services to drain or replenish its internal buffering layer.
**SAP Layer:** The collection of uniform Service Access Points (SAPs) that provide a consistent interface to diverse interapplication communication services. Each SAP provides an *impedance match* between the native API provided by a given communication substrate and the basic communication service abstraction required by higher layer subsystems. There are two primary classes of SAPs, those based on duplex communication channels and those based on shared memory. SAPs based on duplex channels are required to support read, write, and connection management operations. SAPs based on shared memory must support basic attachment and detachment operations. Both classes of SAP must support a SAPAddress that can represent a communications endpoint for a given communication mechanism. This separation of addressing from basic communication operations allows a given communication mechanism (e.g., TCP) to utilize several possible APIs (e.g., sockets, XTI) and still maintain a single abstract SAPAddress format, thus decoupling the mechanism from the API. The uniformity of the SAP abstraction decouples the Communication Substrate from higher layer client subsystems. SAPs currently provided include socketSAP, TLISAP, ADAPTIVESAP, and SHMSAP (Shared Memory via mmap). SAPAddresses include TCPAddress, UDPAddress, UNIXDAddress (UNIX Domain Sockets), ADAPTIVEAddress, and SHMAddress.
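To make the SAP abstraction concrete, here is a minimal C++ sketch of the duplex-channel interface described above. It is an illustration under stated assumptions, not the original G/MAX source; the member names and signatures are ours.

```cpp
// A minimal sketch of the duplex-channel SAP abstraction (illustrative names).
#include <cstddef>

class SAPAddress {                  // abstract communications endpoint
public:
    virtual ~SAPAddress() {}
};

class SAP {                         // uniform Service Access Point
public:
    virtual ~SAP() {}
    // connection management
    virtual int open(const SAPAddress& local)           = 0;
    virtual int connect(const SAPAddress& remote)       = 0;
    virtual int close()                                 = 0;
    // duplex data transfer
    virtual long read(char* buf, std::size_t n)         = 0;
    virtual long write(const char* buf, std::size_t n)  = 0;
};

// A concrete SAP wraps one native API: a socketSAP, for example, would
// implement read/write via the BSD recv/send calls while exposing only
// this interface, so higher layers never see the underlying API.
```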
**buf Layer:** The collection of transparent buffer managers that provide an efficient buffering scheme to the SAPs described above. The buf layer is necessary to reduce the number of system calls needed to send a composite object (i.e., an object with multiple data members) and to minimize the amount of redundant data copying. The buf layer is based on, and is interoperable with, the C++ iostream library[32,33], which provides two sets of abstractions:

1. streambuf — the abstraction for a consumer/producer of bytes. To extend the iostream library to include a new I/O device or interface, one need only supply a streambuf interface to the device (see the sketch below), and combine it with four specific classes via object composition² to allow existing classes to read and write to it automatically. The iostream library that accompanies the AT&T distribution of C++ provides streambuf interfaces to files and in-core memory.

2. iostream — the abstraction for formatted insertion and extraction of objects into/from a stream. The base classes ostream and istream each provide the insertion (output) and extraction (input) operators (<< and >> respectively) for each of the built-in data types supported by the language (e.g., char, int, float). istreams and ostreams must be combined with a streambuf to provide a usable stream (e.g., istream + filebuf = ifstream, a stream that extracts objects from a file). iostreams are not only extensible with respect to the devices they can support, but also with respect to the types of objects they can insert or extract. User-created data types (classes) can define their own input and output operations by overloading the insertion and extraction operators to support the new data type.
Each available communication subsystem has a corresponding SAP and buf that accesses its services (e.g., socketbuf, tlibbuf, adaptivesap, shmbuf). These bufs can then be combined with the standard istream and ostream classes to provide a formatted I/O channel, or with a new stream class (described below) to provide an encoded I/O channel.
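The streambuf extension mechanism from point 1 above is easiest to see in code. The following is a hedged sketch using the modern std::streambuf interface (the framework itself predates standard C++); the logbuf "device" is hypothetical, and a real socketbuf would instead drain its buffer with send() and replenish it with recv().

```cpp
// A minimal sketch, assuming the modern std::streambuf interface.
#include <cstdio>     // EOF
#include <iostream>
#include <string>

class logbuf : public std::streambuf {
    std::string log_;
protected:
    // Called for each character, since no put buffer has been set up.
    int overflow(int c) override {
        if (c != EOF) log_.push_back(static_cast<char>(c));
        return c;
    }
public:
    const std::string& contents() const { return log_; }
};

int main() {
    logbuf buf;
    std::ostream out(&buf);                 // ostream + logbuf = usable stream
    out << "objects sent: " << 42 << '\n';  // built-in insertion operators work
    std::cout << buf.contents();
}
```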
---
2 There are two primary mechanisms for object reuse, composition and inheritance. Composition expresses has-a relationships, where object A contains an instance of object B. Inheritance expresses is-a relationships, where object A is a (specialized) instance of object B.
**Presentation Layer**
The Presentation Layer is responsible for resolving differences in data representations between heterogeneous host architectures. It accomplishes this by translating local internal data formats into an external format that can be interpreted by the remote entity. It typically accomplishes this via one of two means:
1. *Explicit Typing* — each data object is tagged with a type identifier field that specifies the data type of the object in transit. It can then be followed by a length field that indicates the remaining number of octets (or fundamental data units). These two fields are then followed by the actual data octets. This is known as a T-L-V scheme (Tag-Length-Value) and is used as the basis for the Basic Encoding Rules of OSI ASN.1. This approach is in contrast to:
2. *Implicit Typing* — it is assumed that the receiver of the data is aware of exactly what type of data object is coming, therefore the tag field is redundant at best. This is the approach taken by XDR, a protocol that is designed to take advantage of regular data alignment and hardware-dictated formats.
[34] contains a comparison of three well-known presentation protocols (XDR, ASN.1, and Apollo NDR). The authors conclude that T-L-V encodings are more general and potentially more bandwidth efficient, yet can be more complex to process, while fixed-format encodings such as XDR are more efficient to process, only slightly less efficient with respect to bandwidth, and can provide T-L-V functionality if necessary.
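To make the contrast concrete, the following hedged sketch encodes a 32-bit integer both ways, assuming XDR's 4-byte big-endian representation; the tag value and helper names are illustrative, not taken from any standard.

```cpp
// A sketch contrasting the two presentation-encoding styles (not G/MAX code).
#include <arpa/inet.h>   // htonl
#include <cstdint>
#include <vector>

enum Tag : uint8_t { TAG_INT32 = 0x02 };   // illustrative tag value

// Explicit (T-L-V) typing: tag and length precede the value octets.
void encode_tlv_int(std::vector<uint8_t>& out, int32_t v) {
    out.push_back(TAG_INT32);              // T: type identifier
    out.push_back(4);                      // L: number of value octets
    uint32_t be = htonl(static_cast<uint32_t>(v));
    const uint8_t* p = reinterpret_cast<const uint8_t*>(&be);
    out.insert(out.end(), p, p + 4);       // V: the data octets
}

// Implicit typing (XDR-style): the receiver already knows an int32 is
// next, so only the 4 value octets are transmitted.
void encode_xdr_int(std::vector<uint8_t>& out, int32_t v) {
    uint32_t be = htonl(static_cast<uint32_t>(v));
    const uint8_t* p = reinterpret_cast<const uint8_t*>(&be);
    out.insert(out.end(), p, p + 4);
}
```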
---
**Figure 3: Communication Substrate-Independent Level**
stream Layer: The stream layer is used to bind the various coding schemes listed below to an I/O channel (via its corresponding buf). For a given encoding scheme Z, an izstream and ozstream are implemented, providing at least the basic insertion or extraction operators for the built-in data types. Additionally, insertion or extraction operators will be provided for the corresponding GEncode class hierarchy that defines the basic data types used by the encoding scheme. In addition to providing the capability to statically bind an encoding scheme (stream) to an I/O device (buf) via object composition, stream manipulators⁴ are provided to allow encoding schemes to be switched on the fly. By inserting (or extracting) a z_on manipulator into a stream, the previous formatting/encoding scheme is suspended and replaced with the z encoding. Inserting (or extracting) a z_off manipulator restores the original formatting/encoding scheme.
**GEncode/XDR:** GEncode/XDR is a class library of primitive base classes which correspond directly with the standard eXternal Data Representation (XDR)[35], as shown in Figure 4. This provides a set of Basic Encoding Rules that allow objects to be shared across diverse host platforms. For each built-in data type (e.g., char, int, double) a type-conversion operator is provided to convert between language/compiler dependent types and formats and their corresponding GEncode/XDR base classes. By leveraging C++'s type management mechanisms, the presence of the GEncode layer can be completely transparent to the application programmer. GEncode also supports collection classes (e.g., Lists, Dictionaries, Sets), C++ references, and pointers.

**GEncode/ASN.1:** An OSI Abstract Syntax Notation One version of GEncode/XDR. GEncode/ASN.1 is more complex than GEncode/XDR, as it must address overflow issues for all data types (i.e., reading an 8 octet integer into a 4 octet long). Also, it is difficult to directly support the ASN.1 notion of Set using only C++ constructs.

---

⁴ Manipulators are “functions” that can be inserted or extracted into/from a stream. Inserting/extracting a manipulator has the effect of calling the manipulator’s corresponding function with the target stream as the function’s first argument. Manipulators allow otherwise complex encoding/formatting expressions to be written as a simple series of insertions/extractions.
**GEncode/MAX:** A set of primitive base classes that represent Multimedia Activity eXtensions. These classes allow multimedia objects and basic application activities to be represented in a host platform independent manner. The Multimedia eXtensions we are currently implementing include support for

- 8KHz, 8 bit μ-law PCM audio
- 44.1KHz 16 bit linear PCM audio
- 44.1KHz 16 bit linear PCM audio (multi-channel)
- Indexed and Direct Color Pixmaps
- JPEG Still Image
- MPEG Motion Image

The Activity eXtensions we are currently incorporating include non-blocking and asynchronous remote procedure calls, C++ pointer-to-member-function semantics, and language-independent procedure name binding.
**Distribution Layer**

From the Presentation layer down, the support for distribution consists primarily of efficient mechanisms for copying objects to and from heterogeneous systems. The Distribution layer is the layer that creates an infrastructure for transparently migrating objects both with and without explicit initiation from the programmer. The application programmer can simply specify the location where the object should be located (if desired) and can then access the object as if it were located in the local address space. The Distribution layer consists of the following two sublayers:
**Location Management Layer:** The Location Management layer orchestrates the migration of objects based on both explicit (i.e., the object's existOn member function is explicitly invoked) and implicit (i.e., a member function declared as remote is invoked) events. Object locations are managed through the use of:

1. **Distributed Instantiation** — a technique whereby, by overloading the language's `new` and `delete` operators, objects can be instantiated on remote hosts simply by passing an additional argument to the `new` operator (see the sketch after this list). Calls to `new` without this parameter are routed to the standard `new` operator that allocates the object locally. The dereference operators (-> and *) automatically dispatch member calls and accesses to the appropriate host (local or remote).

2. **Distributed Collection Classes** — a set of SmallTalk-like collection classes that allow a collection of objects to be distributed across the network. Distributed Iterators are used to dispatch member function calls to multiple objects in a collection at once. Additionally, these classes can work in tandem with the ADAPTIVE system to perform prefetching of related objects to compensate for high latency environments. When used with ADAPTIVE, these classes also allow application-based delivery of objects to the application (e.g., potentially out-of-order, via upcalls, etc.).
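As promised above, here is a hedged sketch of the distributed-instantiation technique. Host and RemoteHeap are hypothetical helper names; only the operator-overloading pattern itself is taken from the text.

```cpp
// A sketch of distributed instantiation via an overloaded operator new.
// Host, RemoteHeap::allocate, and Account are illustrative names.
#include <cstddef>
#include <new>

struct Host { const char* name; };

struct RemoteHeap {
    // Allocates storage for the object on the given host and returns a
    // local proxy address (mechanism elided in this sketch).
    static void* allocate(std::size_t n, const Host& h);
};

class Account {
public:
    // Ordinary allocation: falls through to the global operator new.
    static void* operator new(std::size_t n) { return ::operator new(n); }
    // Placement-style overload: the extra argument selects remote allocation.
    static void* operator new(std::size_t n, const Host& h) {
        return RemoteHeap::allocate(n, h);
    }
    void deposit(long amount);
};

// Usage:
//   Account* local  = new Account;                     // local heap
//   Account* remote = new (Host{"serverA"}) Account;   // instantiated remotely
// A smart-pointer wrapper overloading -> would then dispatch deposit()
// to whichever host the object lives on (matching operator delete omitted).
```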
**Remote Delegation Layer:** The Remote Delegation layer manages the execution of application objects' member functions and arbitrates multiple accesses to a single object. By using a technique called remote delegation, an object's member functions are automatically invoked on the proper host system without programmer intervention. This is accomplished via two mechanisms: (1) overloading the dereference operators (-> and *) to transparently dispatch member function calls, and (2) passing an additional remote parameter to the member function, which allows member functions to be executed on arbitrary hosts, not just the local host or the host on which the object actually exists.
IV Object/Class Relationships
As the previous section describes, the G/MAX system consists of multiple hierarchies of C++ classes. This section provides concrete examples using the G/MAX system to illustrate the relationships among these class and object hierarchies. Figure 5 illustrates one vertical slice of the system. The configuration shown uses the BSD socket facilities for IPC, and the Sun XDR encoding for presentation formatting. The xdrstream and sockbuf classes can bind the XDR protocol to the underlying communication channel either *statically* through composition and inheritance, or *dynamically* via the use of stream manipulators.
Figure 6 shows the detailed class and object hierarchy used to compose an xsockstream, a static binding of XDR to a buffered socket. The has-a relationships are via object composition, and the is-a relationships are via inheritance. The sockbuf uses its socksap data member as a uniform interface to the BSD send and recv calls. The sockstreambase acts as a virtual base class for all stream classes that will use sockbuf services. sockstreambase adds a consistent mechanism for error handling and exports the sockbuf's connection management interface to derived stream classes. The classes ixdrstream and oxdrstream provide the insertion (<<) and extraction (>>) operators for both the set of built-in types (e.g., int, float) and the set of GEncode/XDR (e.g., SignedInteger, FloatingPoint) classes.

The classes just described are used in both the static and dynamic cases. To statically bind the XDR encoding to a buffered socket, the classes ixsockstream, oxsockstream, and xsockstream are used. ixsockstream is a class that is derived from both sockstreambase and ixdrstream. Using multiple inheritance, the new subclass inherits the behavior and interfaces of both superclasses (e.g., the communication management is inherited from sockstreambase, and the XDR decoding is inherited from ixdrstream). It is this relationship that allows the encodings for a given data type to be defined exactly once (for ixdrstream) and to be inherited by all subsequent ix*streams, irrespective of the underlying communication mechanism. The oxsockstream is derived in a similar manner.

Finally, to create a class capable of both sending and receiving XDR encoded data, the class xsockstream is composed of both an ixsockstream and an oxsockstream. It should be noted that for each xsockstream there is exactly one sockbuf. This is because sockstreambase is a *virtual base class* of the classes ixsockstream and oxsockstream.
---
Virtual base classes allow multiple inheritance hierarchies to safely take the form of a DAG as well as a tree. If class A is a virtual base of classes B and C, and classes B and C are bases of class D, D will only have one instance of class A. If class A were a non-virtual base, objects of type D would have two instances of A, one for B and one for C.
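A compact sketch of that hierarchy, assuming inheritance is used to combine the two directions (which is consistent with the virtual-base explanation above); all bodies are elided:

```cpp
// A sketch of the xsockstream composition described in the text.
class sockbuf { /* buffered socket, built on a socksap */ };

class sockstreambase {            // error handling + connection management
protected:
    sockbuf buf_;                 // exactly one sockbuf per stream
};

class ixdrstream { /* operator>> for built-in and GEncode/XDR types */ };
class oxdrstream { /* operator<< for built-in and GEncode/XDR types */ };

// Virtual inheritance ensures ixsockstream and oxsockstream share a single
// sockstreambase (and hence a single sockbuf) inside an xsockstream.
class ixsockstream : public virtual sockstreambase, public ixdrstream {};
class oxsockstream : public virtual sockstreambase, public oxdrstream {};
class xsockstream  : public ixsockstream, public oxsockstream {};
```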
Listing 1 demonstrates the steps necessary to encode a user defined type using xdrstreams. In the class declaration file (foo.h), one preprocessor macro declares the insertion and extraction operators and grants them friend status. As the operators are not actually member functions, friend status is necessary to access the class's private and protected data structures. The class definition file (foo.cp) requires a single macro to actually implement the insertion and extraction operators. Preprocessor macros are used to avoid the overhead of an additional preprocessor step (G/MAX macros are expanded by the standard C preprocessor). The form of the macros used is consistent enough to warrant a dedicated preprocessor; however, macros are used while the system is being developed to reduce initial development time. The use of macros also provides flexibility, as the programmer can choose to implement the insertion and extraction operators by hand, allowing selective transmission of each data member. Additionally, when used with the ADAPTIVE system, the programmer will be able to explicitly specify both what sub-stream a given data member will be transmitted on and what mechanisms will be used to send the member.
Listing 1: foo.h/foo.cp
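Listing 1 itself did not survive extraction in this copy, so the following is a hedged reconstruction of its shape from the description above. The macro names and the stand-in stream classes are assumptions; only the pattern (declare-with-friend in foo.h, implement member-by-member in foo.cp) comes from the text.

```cpp
// Minimal stand-ins so the sketch compiles; in G/MAX these come from the
// framework, and Opaque stands for a fixed-length opaque GEncode type.
struct oxdrstream { template<class T> oxdrstream& operator<<(const T&); };
struct ixdrstream { template<class T> ixdrstream& operator>>(T&); };
struct Opaque { char bytes[64]; };

// foo.h -- one macro declares the operators and grants them friend status.
#define GMAX_STREAMABLE(T)                                    \
    friend oxdrstream& operator<<(oxdrstream&, const T&);     \
    friend ixdrstream& operator>>(ixdrstream&, T&);

class foo {
    GMAX_STREAMABLE(foo)        // operators may access private members
    long   count;
    float  ratio;
    Opaque data;                // fixed-length opaque member
};

// foo.cp -- one macro implements both operators, inserting/extracting each
// data member in declaration order.
#define GMAX_IMPLEMENT_STREAMABLE3(T, m1, m2, m3)             \
    oxdrstream& operator<<(oxdrstream& s, const T& o)         \
        { return s << o.m1 << o.m2 << o.m3; }                 \
    ixdrstream& operator>>(ixdrstream& s, T& o)               \
        { return s >> o.m1 >> o.m2 >> o.m3; }

GMAX_IMPLEMENT_STREAMABLE3(foo, count, ratio, data)
```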
Listing 2 demonstrates the use of sockstreams both statically and dynamically bound to the XDR format. The object nstr is a sockstream that uses the standard istream and ostream classes that accompany the standard iostream library. Objects sent via nstr would be formatted as ASCII text. The object xnstr is an xsockstream that uses the ixdrstream and oxdrstream classes that use XDR for data encoding. The use of C++'s strong typing and operator overloading allows the appropriate insertion or extraction operator to be selected based on the type of stream being used.
As xnstr is statically bound to XDR, all objects inserted into (sent to) it will be formatted using XDR. To send objects via nstr using XDR, the xdr_on manipulator must be used to transform the type of nstr from sockstream to xsockstream. This transformation is temporary, and can be reversed either by an accompanying xdr_off manipulator or upon complete evaluation of the expression. Due to the strong typing in C++, unbalanced xdr_on and xdr_off manipulators are detected at compile time. As xnstr is statically bound to XDR, no manipulator is necessary. The use of the flush manipulator in both statements is needed to force the stream to send the buffered data.
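Listing 2 is likewise missing from this copy; a hedged reconstruction of the usage it describes follows. The class and manipulator names (sockstream, xsockstream, xdr_on, xdr_off, flush, foo) come from the text, "peer" is an assumed connection argument, and the framework definitions are not reproduced, so this is a fragment rather than a complete program.

```cpp
// A hedged reconstruction of the Listing 2 usage described above.
sockstream  nstr(peer);    // standard iostream formatting: ASCII text
xsockstream xnstr(peer);   // statically bound to the XDR encoding

foo f;
xnstr << f << flush;                       // always XDR-encoded
nstr  << xdr_on << f << xdr_off << flush;  // temporarily retype nstr to XDR
nstr  << f << flush;                       // back to ASCII formatting
```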
V Experimental Results
This section describes the results from a series of benchmarks of the system described in the previous section compared to several alternative solutions. The experiments were designed to illustrate the performance of the Presentation and Media Convergence layers described above. The first set of benchmarks measures the presentation formatting costs relative to memory bandwidth. The second set measures the end-to-end throughput when moving data between two hosts. Both sets of measurements were taken using Sun SPARCstation 2 workstations on an idle Ethernet.
All tests were performed using the class `foo` that appears in the previous examples. The first five data members were held constant, occupying 24 bytes when encoded. The sixth member ranged from 16 bytes to 4K bytes and was represented as a fixed-length opaque data object. This mix was chosen to exercise a variety of data conversions as well as allow some opaque data transfer.
**Processing Costs vs. Memory Bandwidth**
As has been described in this paper, there are many classes and objects that must participate in the transmission or reception of a given object. This series of measurements compares the processing costs of using XDR streams to Sun's XDR library. Additionally, a version that simply copies an equivalent number of bytes of data from one location to another is included for comparison. The test program XDR-encoded an array of up to 1024 `foo` objects into a 1MB array of memory. The decoding of the array was measured as well, and the results of encoding and decoding were averaged. The behavior of the test program is similar to the behavior of a multimedia application that might read data from a codec and write/encode it to either the playout buffer or a network connection.

**Figure 7: Processing vs. Copying Costs**
Figure 7 illustrates the impact of formatting cost on throughput. When the data size is sufficiently large, the raw copy throughput approaches the memory bandwidth of the host. While both encoding mechanisms were less efficient than no processing at all, the relative performance of the two implementations varies by only +/- 6.3%. As the size of the opaque data member increases, the throughput of the two schemes approaches that of no processing whatsoever.

This result demonstrates that the processing costs incurred by the additional flexibility and abstraction of xdrstreams are only marginally greater than those incurred by the SunRPC library.
**End-to-end Performance**
To observe end-to-end performance, both the xdrstream and sockbuf/socksap classes are used. To reduce the system call overhead incurred per object transmission and reception, objects are passed to and from the kernel eight at a time. A larger number was not chosen, in order to limit the latency introduced by buffering. This required exactly one line of application code for the sockbuf, and was hand-coded for the Sun XDR and raw cases.
As Figure 8 illustrates, the xdrstream outperforms the Sun implementation by an average of 3.4%, even though the Sun implementation's XDR processing alone was on average 1.2% faster than xdrstreams. This is due to the fact that the sockbuf/xdrstream combination is more tightly coupled than the hand-coded buffering scheme and Sun XDR, and thus (a) the compiler is able to make better optimizations, and (b) the behavior of the sockbuf is better suited to receiving partial data objects.
VI Conclusions
This paper has outlined a new architecture for the design and implementation of high performance distributed applications. We have verified experimentally that despite the additional flexibility they offer the application designer, the abstractions chosen for the Presentation and Media Convergence layers implemented thus far impose no performance penalty over production systems currently used. We are currently implementing the ADAPTIVE system as the Data Transport service, and are experimenting with a prototype of the Distribution layer described above which should offer higher application throughput than traditional techniques.
VII References
Open Archive TOULOUSE Archive Ouverte (OATAO)
OATAO is an open access repository that collects the work of Toulouse researchers and makes it freely available over the web where possible.
This is an author-deposited version published in: http://oatao.univ-toulouse.fr
Eprints ID : 13145
To cite this version: Touei, Antoine and Broisin, Julien and Sibilla, Michelle. Reconfiguration patterns for Goal-Oriented Monitoring Adaptation. (2014) In: The 6th International Conferences on Pervasive Patterns and Applications (PATTERNS 2014), 25 May 2014 - 29 May 2014 (Venise, Italy).
Any correspondence concerning this service should be sent to the repository administrator: staff-oatao@listes-diff.inp-toulouse.fr
Reconfiguration Patterns for Goal-Oriented Monitoring Adaptation
Antoine Touieir, Julien Broisin, Michelle Sibilla
IRIT, Université Paul Sabatier
Toulouse, France
Email: {touieir, broisin, sibilla}@irit.fr
Abstract—This paper argues that autonomic systems need to make their distributed monitoring adaptive in order to improve their “comprehensive” resulting quality, meaning both the Quality of Service (QoS) and the Quality of Information (QoI). In a previous work, we proposed a methodology to design monitoring adaptation based on high level objectives related to the management of quality requirements. One advantage of adopting a methodological approach is that monitoring reconfiguration is conducted through a consistent adaptation logic. However, eliciting the appropriate quality goals remains an area to be investigated. In this paper, we tackle this issue by proposing monitoring adaptation patterns falling into reconfiguration dimensions. Those patterns aim at facilitating the design of adaptations of the monitoring behavior of the whole set of distributed monitoring modules that are part of autonomic systems. The utility of those patterns is illustrated through a case-study dealing with monitoring adaptation based on high level quality objectives.
Keywords—Quality requirements; adaptive monitoring; autonomic systems; goal-oriented adaptation.
I. INTRODUCTION
Autonomic systems, which are implemented by virtue of their four characteristics of self-configuration, self-optimization, self-healing and self-protection, serve the ultimate objective of being self-managed in order to achieve high level objectives [1]. These objectives are strongly related to the quality level provided by autonomic systems. When large and complex systems are targeted, the self-management characteristic (self-*) is a key issue to deal with. Basically, self-management is thought of as the auto-adaptation capability that brings the system to an absolute or preferable state. Concretely, the four self-* characteristics are realized by implementing the Monitoring, Analyzing, Planning, Executing - Knowledge (MAPE-K) loop modules. This implementation is either embedded within a resource, or distributed over several resources.
In the MAPE-K loop, the monitoring module plays a crucial role: if it exposes poor information, wrong decisions might be taken by the analyzing & planning modules. Therefore, autonomic systems need to ensure the quality of the information (e.g., correctness, freshness, timeliness, accuracy, etc.) exposed by the distributed monitoring modules. Moreover, within autonomic systems, monitoring is usually QoS-oriented: the services implemented by the functional system must respect the required QoS level that is determined through the Service Level Agreements (SLAs) agreed with clients. Since the management system (managing the functional system) may allow the QoS specification to be renegotiated or modified afterward, the monitoring system has to adapt its behavior according to these new requirements and constraints.
To summarize, the monitoring of autonomic systems has to be capable of configuring the underlying mechanisms carrying the monitoring functions (e.g., measuring, gathering, calculating, evaluating, filtering, etc.) starting from QoS specification, as well as reconfiguring those mechanisms based on quality requirements.
Most of the time, reconfiguration is carried out through ad-hoc logic, proposing solutions for particular scenarios dealing with specific issues. This approach is not suitable for reuse in other scenarios, and it also does not satisfy high level objectives extending over the whole scale of the autonomic system. To overcome these issues, we adopted a Requirements Engineering methodology to design monitoring adaptation; it starts from high level goals, and ends up with the configuration of monitoring mechanisms [2].
The key question now is: how can goals representing the “starting point” for deriving monitoring (re)configuration be identified? In other words, how do we answer reconfiguration questions such as: why delay launching some monitoring mechanisms? Why substitute remote agents? How should alarms be aggregated? What determines the monitoring of this set of metrics and not another one? Why exchange metrics among distributed management entities? This paper deals with these questions by proposing monitoring adaptation patterns that assist human administrators in designing meaningful adaptations and thus increase the overall quality of autonomic systems.
The work presented here relies on both a 3-layered adaptive monitoring framework [3][4][5] and our goal-oriented adaptation methodology [2]. We pursue this research by focusing on adaptation patterns dedicated to the identification of high level goals, together with their refinement. The paper is organized as follows: the next section gives an overview of the studied monitoring framework; the monitoring adaptation patterns are discussed in Section III; and then applied to a case-study in Section IV; before concluding, Section V enumerates other monitoring adaptation approaches and points out their weaknesses.
II. THE STUDIED ADAPTIVE MONITORING FRAMEWORK
Our approach is based on a 3-layered framework [3][4][5] illustrated in Figure 1, and defines three fundamental capabilities required to control monitoring: being configurable, adaptable and governable.
The **configurability layer** relies on the Distributed Management Task Force (DMTF) Common Information Model (CIM) standard. In addition to the managed resources, this low level layer aims at representing the metrics [6] and the gathering mechanisms [3] that are required to monitor both the QoS provided by the functional system and the QoI of the monitoring system itself; this layer deals with both mechanisms. The **adaptability layer** provides an interface encapsulating operations to be applied on the lower layer models. Thus, the behavior of the underlying monitoring mechanisms will be reconfigured during runtime by invoking these operations. Finally, the **governability layer** is the top level layer representing the “intelligence” of the monitoring adaptation. To express the quality requirements, it uses Event/Condition/Action (ECA) policies to describe when and how adaptation should take place, that is, in which contexts those operations of the adaptability layer should be invoked.
We exploit Requirements Engineering (RE) to propose a monitoring adaptation methodology, and to build the configurability and adaptability models [2]. RE iterates activities of “eliciting, evaluating, documenting, consolidating and changing the objectives, functionalities, assumptions, qualities and constraints that the system-to-be should meet based on the opportunities and capabilities provided by new technologies” [7]. Keep All Objectives Satisfied (KAOS) is adopted as the RE goal-oriented method, due to its formal assertion layer that proves correctness and completeness of goals [8]. In KAOS, the system-to-be is divided into various models. The **Goals Model** determines the objectives to be realized through that system (e.g., minimizing monitoring cost). The **Agents Model** comprises the agents (e.g., human, automated component) responsible for realizing the refined elicited goals. Notice that the term **Agents** in networks and systems management represents entities responding to management requests coming from other management entities called **Managers**; therefore, the term **Agent** in RE has a different meaning. The **Operations Model** deals with the internal operations to be carried out by agents (e.g., updating a polling period). The **Object Model** identifies the system-to-be objects (e.g., entities, agents, relationships).
Therefore, based on KAOS, our methodology identifies the high level quality objectives the monitoring framework pursues. By iterating a refinement process, we finally identify what are called **leaf goals** or **requirements** (see Figure 1). Once the leaf goals are determined, both the policies (to be inserted into the governability layer) and the agents (invoking operations of the adaptability layer) can be recognized. Thus, monitoring system adaptation is automatically handled. However, human administrators have to manually identify the leaf goals according to the high level objectives they want to reach. To facilitate this task, we conducted an investigation into the monitoring aspects that could be subject to adaptation. As a result, we identified various leaf goals belonging to four dimensions (i.e., Spatial, Metric, Temporal, Exchange) [2]. In the next section, we propose monitoring adaptation patterns falling into those dimensions.
III. MONITORING ADAPTATION PATTERNS
With regard to the refinement process, besides the basic AND/OR-decompositions, we rely on some predetermined correct and complete refinement patterns that have been proved mathematically [9]. Those patterns refine **Achieve** goals of the form $P \Rightarrow \diamond Q$ (see Table 1), and are written with the classical Linear Temporal Logic (LTL) operators $\diamond$, $\square$ and $\mathcal{W}$, meaning some time in the future, always in the future, and always in the future unless, respectively. Starting from a given goal ($P$), the **milestone pattern** identifies one or more intermediate goals ($R$, ...) that must be reached in order before reaching the ultimate one ($Q$). The **case pattern** instead identifies a set of different and complete cases ($P_1$, $P_2$) for reaching final goals ($Q_1$, $Q_2$) that OR-decompose the ultimate goal ($Q$). Finally, the **guard pattern** requires the recognition of a specific condition ($R$) before achieving the ultimate goal ($Q$).
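Since Table 1 is not reproduced here, the following is a sketch of the milestone pattern in LTL, assuming the standard KAOS formulation; the case and guard patterns are written in the same style:

```latex
% Milestone refinement of an Achieve goal P => <>Q:
% reaching the intermediate milestone R first entails the parent goal.
\begin{align*}
\text{subgoal 1:}\quad    & P \Rightarrow \diamond R\\
\text{subgoal 2:}\quad    & R \Rightarrow \diamond Q\\
\text{refined goal:}\quad & P \Rightarrow \diamond Q
\end{align*}
```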
To clarify the exploitation contexts, the pattern goals and requirements, as well as some application situations, our pattern structure encompasses three parts: context, pattern refinement, and examples. Notice that we focus on adaptation actions taken at the autonomic manager side only; investigating adaptations at the agent side is out of scope. In addition, the patterns are refined using the KAOS graphical language [7].
#### A. Exchange Dimension Pattern
**Context.** Relying on the IBM blueprint reference architecture [10], autonomic systems can distribute self-management (MAPE) loops over multiple collaborating autonomic managers, each responsible for managing a particular scope of managed resources. Patterns belonging to this dimension are useful to overcome metrics gathering/delivering problems. Such problems may manifest in the metrics values, in the reliability of communication between information sources and destinations, or even in their trustworthiness.
**Pattern Refinement.** Communications inside autonomic systems can be classified according to the entities involved in the information exchange (i.e., managers, agents, shared databases). We therefore identify three communication classes: Manager-2-Agent, Manager-2-Manager, and Manager-2-Shared Database (see Figure 1). Besides identifying communication classes, we need to deal with the pull and push communication modes. In pull, the entity needing information solicits the one possessing it, which responds with the queried information; in push, the entity possessing the information reports it to other entities. Combining the push and pull modes with the communication classes, we use the case pattern for the first two refinement levels to cover all possible cases (see Figure 2).
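These classes and modes can be captured by a small data model. The following Java sketch is purely illustrative (all type and value names are ours) and also shows the substitution leaf goals introduced in the next paragraph, operating on the (source, protocol, destination) triplet.

```java
// Hypothetical data model for the communication classes and modes.
enum CommunicationClass { MANAGER_TO_AGENT, MANAGER_TO_MANAGER, MANAGER_TO_SHARED_DB }
enum Mode { PULL, PUSH }

record ExchangeChannel(CommunicationClass cls, Mode mode,
                       String source, String protocol, String destination) {

    // Substitution leaf goals: each returns a channel with one element of
    // the (source, protocol, destination) triplet replaced.
    ExchangeChannel substituteAgent(String newSource) {
        return new ExchangeChannel(cls, mode, newSource, protocol, destination);
    }
    ExchangeChannel substituteProtocol(String newProtocol) {
        return new ExchangeChannel(cls, mode, source, newProtocol, destination);
    }
    ExchangeChannel substituteDestination(String newDest) {
        return new ExchangeChannel(cls, mode, source, protocol, newDest);
    }
}

class ExchangeDemo {
    public static void main(String[] args) {
        ExchangeChannel ch = new ExchangeChannel(
                CommunicationClass.MANAGER_TO_AGENT, Mode.PULL,
                "agent-vm42", "SNMP", "manager-1");
        System.out.println(ch.substituteProtocol("NETCONF"));
    }
}
```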
Based on the triplet (Information Source, Communication Protocol, Information Destination), the Manager-2-Agent pull mode is OR-decomposed into the Substitute Agent and Substitute Protocol leaf goals, while Substitute Protocol and Substitute Destination OR-decompose both Manager-2-(Manager/Shared DB) push modes. Besides, the Activate/Deactivate Polling & Exporting leaf goals are elicited to launch and stop polling and exporting.
Notice that in both Manager-2-(Manager/Shared DB) pull mode communications, the manager responding to requests is considered an agent (because it is the information source); this case is therefore identical to the Manager-2-Agent pull mode. Moreover, adaptation actions related to the Manager-2-Agent push mode are not treated because they need to be handled at the agent side.
**Examples.** This pattern is suitable for the following cases: (1) Increasing the accuracy or precision of pulled/pushed metrics values by replacing the information source. (2) Querying more available agents, or blocking fake agents trying to join the distributed management system. (3) Securing the communication between information sources and destinations. (4) Modifying the information destination when changing the topology of collaborating autonomic managers.
#### B. Metric Dimension Pattern
**Context.** The main idea behind building autonomic systems is to delegate decisions that human administrators used to make to the autonomic systems themselves. Thus, to make "wise" decisions, the monitoring system needs to instrument specific metrics that can be activated/deactivated according to the management needs at runtime. Patterns belonging to this dimension are useful to control the trade-off between constructing more knowledge and monitoring only the information that is necessary for management.
**Pattern Refinement.** Metric instrumentation must be thought out at the level of the whole management system. In other words, a given autonomic manager can activate/deactivate the instrumentation of particular metrics, but deactivating metrics on that manager does not necessarily mean that those metrics are "abandoned": they may be transferred to another collaborating autonomic manager on which they are activated. These two cases OR-decompose the first refinement level (see Figure 3).
Regarding metrics manipulation inside an autonomic manager, the second refinement level uses the case pattern to cover the metric classes. Our research exploits both the CIM Metric Model, which classifies metrics into Base, Discrete & Aggregation, and our mathematical extension [6], which classifies base metrics into Resource, Measurable & Mathematical. Each of these classes is OR-decomposed using the Add Aspects and Remove Aspects leaf goals. On the other hand, the transfer of metrics among autonomic managers is refined through the milestone pattern: metrics are first activated on the collaborating manager (Add Aspects in Figure 3, as Subgoal 1 in Table I), and then removed from the delegating one (Remove Aspects, as Subgoal 2).
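The ordered two-step transfer can be sketched as follows; the interfaces are hypothetical. The milestone ordering guarantees that a transferred metric is never left inactive on both managers at once.

```java
// Hypothetical interface to a manager's metric-instrumentation operations.
interface MetricManager {
    void addAspects(String metricDefinition);     // Subgoal 1
    void removeAspects(String metricDefinition);  // Subgoal 2
    boolean isActive(String metricDefinition);
}

class MetricTransfer {
    // Milestone-ordered transfer: activate on the collaborating manager
    // first, and only then remove from the delegating one.
    static void transfer(String metric, MetricManager from, MetricManager to) {
        to.addAspects(metric);
        if (to.isActive(metric)) {      // milestone reached
            from.removeAspects(metric);
        }
    }
}
```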
It is worth noting that the previously mentioned aspects represent "metric definitions" rather than "metric values". The former encompass attributes related to the nature of the metric (e.g., data type, unit), whereas the latter describe the instrumented values and their relevant contexts. For further information, the reader is referred to the DMTF Base Metric Profile [11].
**Examples.** This pattern can be applied in the following cases: (1) Performing troubleshooting, or applying root-cause-analysis algorithms, since they require the instrumentation of additional metrics. (2) Modifying the hierarchical topology of the management system by instrumenting aggregated metrics to be exported to other managers or shared DBs. (3) "Engineering" the distribution of monitored metrics among autonomic managers.
#### C. Spatial Dimension Pattern
**Context.** As mentioned earlier, in an autonomic system each manager is responsible for managing a set of managed resources. In many cases, the number of users consuming the autonomic system services may oscillate rapidly, or even become quite large. Thus, managed resources may be joined or withdrawn at runtime. Patterns belonging to this dimension are useful to react to important changes in the scope of managed resources.
**Pattern Refinement.** Management of autonomic systems is orchestrated by the collaboration of multiple autonomic managers, each of which can act on its own perimeter as well as on the perimeters of its collaborating peers. Thus, the first refinement level uses the case pattern to cover these two cases (see Figure 4).
Acting on its own perimeter is OR-decomposed using the Expand and Shrink Monitoring Perimeter leaf goals. Acting on others' perimeters, instead, is refined using the case pattern into deploying a new manager or soliciting an existing one. In the first case, deploying a new manager is refined using the milestone pattern into launching the manager (Launch Delegated Manager in Figure 4, as Subgoal 1 in Table I) and then delegating the perimeter (Delegation, as Subgoal 2). In turn, the delegation goal is also refined through the milestone pattern into joining the delegated perimeter on the delegated manager (Expand Perimeter, as Subgoal 1) and then deleting this perimeter from the delegating manager (Shrink Perimeter, as Subgoal 2).
In the second case, where an existing manager is solicited, the refinement also uses the milestone pattern: the whole perimeter is first delegated to the delegated manager (Delegation, as Subgoal 1), and the delegating one is then shut down (Shutdown Delegating Manager, as Subgoal 2).
**Examples.** This pattern is suitable for the following cases: (1) Load balancing of monitoring among autonomic managers. (2) Supporting scalability of autonomic systems. (3) Minimizing the overall monitoring load in terms of dedicated monitoring entities.
#### D. Temporal Dimension Pattern
**Context.** Temporal aspects are decisive factors in adapting the monitoring behavior. Notice that the previous patterns were explained without time considerations, but they in fact imply some temporal aspects. Patterns belonging to this dimension are useful either to overcome temporal violations and scheduling problems, or to tune the analysis of the instrumented metrics.
**Pattern Refinement.** Regarding information exchange, we again use the case pattern to represent the same cases identified in the Exchange dimension. Obviously, dealing with the temporal aspects of information exchange means that the exchange is done iteratively and not only once. Thus, the Manager-2-Agent case is OR-decomposed into periodic poll, and both Manager-2-(Manager/Shared DB) cases are OR-decomposed into periodic export (see Figure 5). Note that the Manager-2-Agent push mode and the Manager-2-(Manager/Shared DB) pull mode are not mentioned, for the reasons explained in Section III-A.
We distinguish two levels of temporal granularity: the fine-grained level deals with an individual polling (exporting), whereas the coarse-grained level addresses a collective polling (exporting). Based on this distinction, we identify six leaf goals OR-decomposing periodic poll (export), namely: Update Polling (Exporting) Period to update the frequency of a given polling (exporting), Align Polling (Exporting) to launch a set of synchronized parallel pollings (exported metrics) at the same time, and Malign Polling (Exporting) to launch pollings (exported metrics) according to a given/adjustable offset.
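As an illustration, the Update Polling Period and offset-based (Malign) leaf goals could be realized on top of a standard scheduler. The following Java sketch is a simplified assumption, not an actual implementation of our framework.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.TimeUnit;

// Hypothetical realization of the Update Polling Period and Malign
// (offset-based) leaf goals on top of a standard Java scheduler.
class PollingScheduler {
    private final ScheduledExecutorService executor =
            Executors.newSingleThreadScheduledExecutor();
    private ScheduledFuture<?> current;

    // Cancel the running schedule and restart it with a new period and
    // offset; the offset implements the Malign Polling leaf goal.
    synchronized void updatePollingPeriod(Runnable poll, long periodMs, long offsetMs) {
        if (current != null) current.cancel(false);
        current = executor.scheduleAtFixedRate(
                poll, offsetMs, periodMs, TimeUnit.MILLISECONDS);
    }

    public static void main(String[] args) throws InterruptedException {
        PollingScheduler s = new PollingScheduler();
        Runnable poll = () -> System.out.println("poll agent metrics");
        s.updatePollingPeriod(poll, 3_000, 0);      // first time-slot: every 3 s
        Thread.sleep(10_000);
        s.updatePollingPeriod(poll, 30_000, 500);   // second time-slot: every 30 s
        Thread.sleep(5_000);
        s.executor.shutdownNow();
    }
}
```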
Regarding metrics calculation, we identify the case of modifying the temporal interval covered by the metric value. Indeed, the validity of a metric value that is not instantaneous (e.g., throughput) is equal to the temporal interval over which that value was measured. The case pattern is therefore used twice to cover all the metric classes previously mentioned. We refine only the measurable, mathematical & aggregation metrics, because time is meaningful in their calculation, unlike the other classes. Thus, at the fourth refinement level, we OR-decompose measurable & mathematical metrics using Update Time Scope Interval, whereas Update Time Series Interval OR-decomposes aggregation metrics.
**Examples.** This pattern is suitable for the following cases: (1) Controlling (e.g., relaxing, stressing) the monitoring load on autonomic managers, network paths among autonomic managers and shared DBs, as well as remote agents. (2) Tuning temporal parameters of metrics analysis.
Notice that all the previous patterns can be updated and enriched in order to integrate new monitoring adaptation actions. For instance, we can address temporal aspects of alarm filtering by delaying the delivery of redundant alarms [12], as well as alarm correlation by modifying the stream interval time during which Complex Event Processing engines (e.g., Esper & Drools) perform correlations. OR-decompositions into Update Waiting Time & Update Window Time could be used for these two cases, respectively.
### IV. Case Study
**Context.** Our scenario takes place in a cloud data center hosting a large number of virtual machines (VMs) and providing its clients with continuous monitoring of the enforced SLA metrics. Each VM integrates an agent providing predetermined metrics reflecting VM healthiness. In most large-scale systems, distributed agents periodically push their metrics; in our case, agents push those metrics every 10 seconds to specific pre-configured autonomic managers. To facilitate the case study, we assume that the studied SLA template encapsulates the same metrics pushed by the agents. Besides, this SLA template distinguishes two time-slots: metrics are to be refreshed at the client side with a freshness falling into the range of 3-6 seconds during the first time-slot, and into a range of 30-40 seconds during the second one. The SLA metrics values are instrumented and delivered automatically through polling and exporting, respectively. Once a new SLA is enforced, the autonomic managers use the pull mode to collect VM metrics with the lowest freshness value (3 seconds).
**Objectives.** Human administrators identify two high-level goals to be satisfied during the monitoring system runtime: *Respect Metrics Freshness* makes sure that SLAs are monitored appropriately, and *Minimize Monitoring Cost* aims at limiting the resources dedicated to monitoring as much as possible.
**Patterns.** Several patterns can be exploited to deal with the first objective. During the first time-slot, we use the *temporal pattern* to relax polling & exporting by updating their periods (*Update Polling & Exporting Period* in Figure 6) with respect to the highest freshness bound (6 seconds). If the delivering freshness violates that bound, this would be the result of an overloaded manager [2]; we thus apply the *spatial pattern* as a second alternative, and consequently a new autonomic manager is deployed to assist the overloaded one (*Launch Delegated Manager, Expand Perimeter & Shrink Perimeter*). As a third alternative, in case the overloaded autonomic manager monitors non-SLA metrics (*e.g.*, physical server healthiness), the *metric pattern* can be applied to transfer them to another manager in order to relieve the first one (*Add & Remove Aspects*). Since the second time-slot freshness (30-40 seconds) is greater than the agents' push period (10 seconds), there is no need to poll metrics, nor to export all received metrics. Rather, we apply the *temporal pattern* to update the exporting period from the 3-6 second range to the 30-40 second range (*Update Exporting Period*). This adaptation necessitates another one belonging to the exchange pattern, to stop the pollings launched during the first time-slot (*Deactivate Polling*).
The second objective is refined using the *spatial pattern* in order to shut down recently deployed managers during the first time-slot. Thus, an underloaded manager delegates its whole perimeter to another one and shuts itself down (*Expand Perimeter, Shrink Perimeter & Shutdown Delegating Manager*). During the second time-slot, autonomic managers already deliver to clients around one-third of the metrics pushed by agents, so no adaptation actions need to be taken with regard to minimizing monitoring resources.
Autonomic managers are able to adapt their monitoring only if they recognize adaptation stimuli. Therefore, we exploit the *guard pattern* to apply adaptation actions (*Adaptation* in Figure 6, as Subgoal 2 in Table I) in response to a specific stimulus (*Guard*, as Subgoal 1), while maintaining the current monitoring behavior unless adaptation takes place (*Unless*, as Subgoal 3).
### V. Related Work
In this section, we align our approach of adapting monitoring using goal-oriented dimensional patterns with other existing trends focusing on the monitoring of QoS in autonomic systems [13][14][15][16][17][18].
In order to manage QoS, autonomic systems apply adaptation actions. In many cases, for instance [13], this adaptation does not concern the monitoring system itself but is applied to the managed system services and infrastructure (*i.e.*, reconfiguring resource allocation). Certainly, this adaptation results in increased quality, but this way the knowledge of the management system will not exceed a "maximum ceiling", and management will be limited in its ability to treat new situations.
Monitoring more metrics or managed resources is addressed in [14][16][18], either to deal with changes in the managed scope, to operate a "minimal" monitoring that can be extended in case of SLA violations, or to adapt monitoring to meet SLA modifications. Indeed, it is important to scale monitored metrics and resources up and down, but it is not clear whether this capability could be applied in other scenarios for other objectives and, if so, how that would be feasible.
Runtime deployment of monitoring resources (*i.e.*, managers, probes) is discussed in [14][15][17], either to integrate monitoring into the SLA management life-cycle of large-scale systems, to replace failed managers, or to monitor metrics concerning particular paths or segments. Here also, besides the undeniable gains of deploying monitoring resources at runtime, it is not clear how system administrators can orchestrate the monitoring adaptation (*i.e.*, planning & executing) of the distributed monitoring among several collaborating managers.
Inspired by the autonomic computing reference architecture proposed in [10], patterns regarding the distribution of the MAPE loop modules were proposed in [19][20]. Those patterns are useful in terms of design reuse as well as for clarifying the application contexts and benefits, but they mainly target the deployment of the monitoring modules rather than the monitoring behavior itself. In addition, they do not treat monitoring adaptation with regard to quality requirements.
### VI. Conclusion & Perspectives
We proposed a goal-oriented approach for designing self-managed monitoring in autonomic systems. This approach assists human administrators in adapting the monitoring system behavior with regard to quality requirements: monitoring is configured starting from a quality specification (e.g., an SLA) and reconfigured based on adaptation patterns that are exploited to achieve high-level quality objectives. We designed four monitoring adaptation patterns according to dimensions that represent the various aspects on which adaptation actions can apply. Each dimension thus represents a "starting point" for eliciting monitoring goals that are refined until reaching leaf goals.
As perspectives, we are currently investigating how monitoring adaptations could influence stability at the scale of the whole autonomic system when many overlapping adaptation leaf goals are applied over several autonomic managers. In addition, agent-side adaptations need to be investigated and orchestrated with those applied at the autonomic manager side.
### References
---
Abstract
This paper describes an environment for internet-based collaboration in the field of design and test of digital systems. Automatic Test Pattern Generation (ATPG) and fault simulation tools at the behavioral, logical and hierarchical levels, available at geographically different places and running under the virtual environment of the MOSCITO system, are presented. Interfaces between the integrated tools, and also to commercial design tools, were developed. The tools can be used separately, or in multiple applications within different design and test flows. The functionality of the integrated design and test system was verified in several collaborative experiments over the internet by partners located at geographically different sites.
1. Introduction
In the field of digital design, system-on-chip (SoC) technology is becoming state-of-the-art. The design of SoCs raises a lot of EDA problems: HW/SW codesign, high-level synthesis, testability evaluation, test pattern generation, fault simulation, and physical defect analysis with respect to the whole system to be integrated. Usually, not all the needed EDA tools are available to a designer at his working site.
The Internet opens a new dimension and offers new opportunities for using tools from different sources. The basic idea of this paper is to exploit Internet-based tool integration. For that purpose, several design and ATPG tools implemented at geographically different places were successfully integrated into the new virtual environment MOSCITO [1,2]. The essential features of this integration environment were experimentally validated, and the results obtained are presented as well.
The paper is organized as follows. The MOSCITO system is described in Section 2. The functionality of the environment, via tool descriptions, is given in Section 3. Experimental results obtained by cooperative use of the environment are shown in Section 4.
2. MOSCITO
2.1. General Concept of MOSCITO
Starting from the idea of connecting tools via the Internet to form an appropriate workflow for solving dedicated design problems, the MOSCITO system was developed and implemented at IIS/EAS.
The MOSCITO system works as an Internet-based multi-agent system which can be controlled and observed by the user's front-end program, the MOSCITO desktop. Over the last years, several approaches for coupling tools and providing services via the Internet were developed [19]-[24]. Powerful middleware such as CORBA, Java RMI, COM/DCOM or HLA is available today for implementing such distributed infrastructures. MOSCITO is mainly based on the general ideas of these approaches, but it uses a very small, pure Java-based implementation. Some reasons for this design decision are:
---
1 This work was partially supported by EC Copernicus project VILAB, the Swedish National Board for Industrial and Technical Development (NUTEK) and Swedish Academy of Engineering Sciences, and the Estonian Science Foundation (Grants No 3658 and 4003), and by the Slovak VEGA project grant 26091/20.
- CORBA is very complex. It supports components which can be implemented in different programming languages. In MOSCITO this feature is not necessary, because each tool is encapsulated in a Java wrapper class, the MOSCITO Agent.
- COM/DCOM is available on Microsoft platforms only. MOSCITO is needed at least on Solaris, Linux and Windows systems.
- The main focus of HLA is the coupling of simulators. MOSCITO is not restricted to this application area.
- In some systems only a white-box integration of tools is possible. The MOSCITO Agent interface is targeted to support black-box integration, so commercial tools can also be encapsulated with low effort.
- Systems like [22] are based on very fine-grained, powerful workflow concepts. MOSCITO uses very simple, dataflow-based workflows which are controlled by the MOSCITO Agents themselves.
The main emphasis in the MOSCITO tool integration was put on the following aspects:
- Encapsulation of design tools and adaptation of the tool-specific control and data input/output to the MOSCITO framework
- Communication between the tools for data exchange to support distributed, Internet-based work.
- Uniform graphical user front-end program for the configuration of the tools, the control of the whole workflow and the visualization of result data.
Moreover, an important goal is to provide the functionality of a tool to a potential user as a service in a local area network (LAN). This approach is similar to the Application Service Provider (ASP) idea or the recent approach of Web Services.
In the present system the following tools have been integrated in MOSCITO:
- Interface to system-level HW/SW co-design environment (C2VHDL code migration tool) [3]
- Behavioral level Automated Test Pattern Generation (ATPG) [4]
- High-level synthesis system CAMAD [5]
- Interface from RTL VHDL to hierarchical ATPG
- Interfaces from low-level EDIF and ISCAS formats to ATPGs and fault simulators
- Hierarchical ATPG DECIDER [6,7]
- Logic level fault simulation and test generation tools Turbo-Tester [8,9]
- Tst2Alb - a data converter between ATPG tools
- DefGen - ATPG for IDDQ and voltage testing of digital circuits [10,11].
- ALB - an automatic fault library builder [12].
The listed tools can act as MOSCITO agents, and each of them provides a demanded service. Users are empowered to combine all the services into a problem-specific workflow. That means the needed tools do not have to be installed on the user's local computer. Due to that fact, the user's effort for installation, configuration and maintenance of software is drastically reduced. Furthermore, specialized tools can be executed on their native platform with high performance (e.g., a supercomputer with fast CPUs and large memory, or a workstation cluster), so the entire workflow speeds up. Facilitating remote computing in this way is important for applications with huge amounts of computing time, e.g., fault simulation as well as test pattern generation.
The MOSCITO framework was implemented in JAVA and can run on different computing platforms. The only prerequisite is an installed Java Virtual Machine. At the moment, MOSCITO is used on SUN workstations (Solaris) and on PCs (Microsoft Windows and LINUX).
2.2. Software architecture
MOSCITO consists of three software layers: kernel layer, interface layer, and user extensions.
The kernel provides functionality for basic object and data management, file handling, XML processing, and communication. Since MOSCITO is an open system, a special interface layer provides programming interfaces for the integration of new tools, new workflows and appropriate viewers, such as for diagrams, plain text and images. Each interface is represented by a Java class which contains the basic functionality. The user only needs to extend this class and can implement his own extension. A large number of templates and example implementations helps the user to integrate a new tool or workflow in less than one or two days.
2.3. Tool encapsulation
For the integration of tools with MOSCITO a sophisticated agent interface was introduced. A tool is embedded into a MOSCITO agent for:
- adapting the input data to the embedded tool,
- converting the tool-specific data (simulation results, logfiles, test vectors),
- mapping the control information to the embedded tool and the transfer and conversion of status information (warning and error messages) to be submitted to the user.
For embedding programs into a MOSCITO agent there are three ways:
- Integration of the entire program: the software has to be runnable as a batch job (e.g. ATPG). In this way the integration of commercial tools is possible.
- Embedding of a library via the Java Native Interface (JNI): e.g. C, C++ or FORTRAN routines can be embedded.
- Direct integration of Java-classes and applications, respectively, in particular for software in JAVA.
Encapsulation of the tools as MOSCITO agents guarantees a uniform interface to the framework. All tool-specific details are aggregated in a special agent description file. This file is necessary to create tool-specific dialogs for the configuration of the tool via the front-end program.
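As an illustration of black-box integration, the following simplified Java sketch shows how an agent could wrap a batch tool. The base class, command-line flags and method names are invented for the example and do not reflect the actual MOSCITO API.

```java
import java.io.IOException;
import java.nio.file.Path;

// Hypothetical agent base class (the real MOSCITO interface differs).
abstract class Agent {
    abstract void run(Path input, Path output) throws Exception;
}

// Black-box encapsulation: the agent adapts input data, invokes the
// external tool as a batch job, and reports failures to the framework.
class BatchToolAgent extends Agent {
    private final String toolBinary;

    BatchToolAgent(String toolBinary) { this.toolBinary = toolBinary; }

    @Override
    void run(Path input, Path output) throws IOException, InterruptedException {
        Process p = new ProcessBuilder(toolBinary,
                "-in", input.toString(), "-out", output.toString())
                .inheritIO()
                .start();
        int exit = p.waitFor();
        if (exit != 0) throw new IOException(toolBinary + " failed: " + exit);
    }
}
```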
2.4. Communication
The implementation of the tool communication is based on TCP/IP-sockets. The tools can be executed on different computers or on different computing platforms (e.g. UNIX, Windows). All we need for communication is a LAN or Internet access. To minimize the implementation effort for parsers, translators and converters, the format for all data transmitted in MOSCITO was set to a special XML format, the MOSCITO Markup Language (MoscitoML).
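The following self-contained Java sketch illustrates the principle of socket-based exchange of an XML payload between two endpoints. The XML element names are invented; the real system uses MoscitoML.

```java
import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.io.PrintWriter;
import java.net.ServerSocket;
import java.net.Socket;
import java.nio.charset.StandardCharsets;

// Minimal demonstration of TCP/IP-socket communication carrying XML.
class XmlSocketDemo {
    public static void main(String[] args) throws Exception {
        try (ServerSocket server = new ServerSocket(0)) {
            int port = server.getLocalPort();

            // Sender: connect and transmit one XML message.
            Thread sender = new Thread(() -> {
                try (Socket s = new Socket("localhost", port);
                     PrintWriter out = new PrintWriter(
                             s.getOutputStream(), true, StandardCharsets.UTF_8)) {
                    out.println("<moscito><testVectors count=\"3\"/></moscito>");
                } catch (Exception e) { throw new RuntimeException(e); }
            });
            sender.start();

            // Receiver: accept the connection and read the message.
            try (Socket client = server.accept();
                 BufferedReader in = new BufferedReader(new InputStreamReader(
                         client.getInputStream(), StandardCharsets.UTF_8))) {
                System.out.println("received: " + in.readLine());
            }
            sender.join();
        }
    }
}
```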
2.5. Graphical User Interface (GUI)
To offer a uniform and consistent concept for the user interaction the MOSCITO system has been provided with a graphical front-end with the following functionality:
- The problem description including all data can be read in from a MOSCITO project file.
- Workflows can be chosen from a set of predefined flows for the specific problem.
- A browser supports the choice of agents (tools) needed for the solution of the problem from the set of available services.
- With buttons for start, pause, resume and stop the workflow can be controlled by the user.
- A console window collects all messages from the running tools and allows the observation of proper operation or troubleshooting, respectively.
- The visualization module MOSCITO Scope supports the display of all result data (test vectors, statistic information).
The graphical front-end aims at using design tools via the Internet in a simple and efficient manner. Currently, the front-end is available as a JAVA application and has to be installed together with the MOSCITO software.
2.6. Internet-based usage
At first, it is necessary to start one MOSCITO server on each host belonging to a domain of services. After that, an administrator has to register one or more MOSCITO agents so that they are available as remote services via LAN or Internet. A user can then start the MOSCITO front-end program (GUI), browse through the registered agents, and select, configure, and initiate the appropriate workflow and the needed agents. MOSCITO automatically calls remote tools and establishes direct connections between the tools for data transfer. Furthermore, the GUI allows the user to control and observe the data processing provided by a certain workflow. Result data are transmitted to the front-end and displayed by appropriate viewers. Finally, MOSCITO closes the connections between all remote tools and organizes their correct termination.
2.7. Tool environment
To validate the MOSCITO system and to collect experience from using it in real-life applications, an experimental tool environment for design and test pattern generation (Fig. 1) was developed and mapped to a MOSCITO workflow. In the following, the functionality of the tools is explained in detail.
Design information can be generated in different ways: from VHDL files processed by commercial or experimental high-level or logic synthesis systems, or provided manually by schematic editors. The gate-level design is presented in the EDIF format. In university research practice, the ISCAS benchmark families with a dedicated ISCAS format are widely used. For linking test generation and fault simulation tools with all the needed formats, different translators and interfaces were developed (Blocks 5, 6, 10, 11 in Fig. 1). These interfaces make it possible to design a circuit at one geographical site, generate test patterns at another site, and analyze the quality of the patterns at a third site. In such a way, joint experiments were carried out in the field of defect-oriented test, where high-level synthesis was performed in Germany and Sweden, defect-level analysis for complex gates was performed in Poland, and defect-oriented fault simulation and test generation were carried out in Slovakia and Estonia.
3. Tool descriptions
3.1. Interface to system-level HW/SW co-design
The initial entry to the testability driven design flow is represented by a system-level HW/SW co-design backend tool (Block 1 in Fig. 1).
3.2. Behavioral ATPG
A lot of research has been devoted to solving the test generation problem for gate-level circuits. By working at this level of abstraction, an ATPG can generate high-quality tests but is computationally expensive in the case of large circuits. We have developed a technique to generate tests at the behavioral level; the ATPG algorithm therefore works directly on the behavioral specification of the digital circuit to be designed (Block 2 in Fig. 1).
Besides the reduction of complexity, such a behavioral ATPG technique gives the designer an opportunity to perform design for testability already in the early design stages, since testability evaluation can be based on the ATPG results obtained directly from the behavioral specification.
In the proposed approach, decision diagrams (DDs) [5,6] are used to model the behavioral specification, which is originally given in VHDL. One important advantage of modeling the behavior with DDs is that DDs can capture a digital design at different abstraction levels, and therefore a hierarchical test generation approach can also be developed. For every internal variable and primary output of the design, a data-flow DD is generated; terminal nodes of the DD represent arithmetic expressions. In addition, a DD describing the control-flow is generated. The control-flow DD describes the succession of statements and the branch activation conditions.
There are two types of tests considered in the current approach. One set targets the nonterminal nodes of the control-flow DD (conditions for branch activation), and the second set aims at testing the operators depicted in the terminal nodes of the data-flow DD.
The test generation task is formulated as follows: tests are generated sequentially for each nonterminal node of the control-flow DD. Symbolic path activation is performed and functional constraints are extracted. Solving the constraints gives the path activation conditions needed to reach a particular segment of the specification. In this way, test generation is formulated as a constraint-satisfaction problem, which is solved by a constraint solver based on SICStus Prolog [4].
To test the operations in the terminal nodes of the data-flow DD, different approaches can be used. In our work, we use mutation testing [13] to generate tests for the operations at the terminal nodes. For the nonterminal nodes of the control-flow DD, conformity tests are applied; these target errors in branch activation.
3.3. High-level synthesis system CAMAD
The CAMAD high-level synthesis system (Block 3 in Fig.1) is built around an internal design representation, called ETPN (Extended Timed Petri Net), which has been developed to capture the intermediate results during the high-level synthesis process. The use of Petri nets provides a natural description of concurrency and synchronization of the operations and processes of a hardware system. It gives thus a natural platform to represent and manipulate concurrent processes of VHDL specifications.
ETPN is used as a unified design representation which allows the synthesis algorithm to employ an iterative improvement approach to carry out the synthesis task. The basic idea is that once the VHDL specification is translated into the initial design representation, it can be viewed as a primitive implementation. Correctness-preserving transformations can then be used to successively transform the initial design into an efficient implementation. CAMAD integrates the operation scheduling, data path allocation, control allocation and, to some degree, module binding sub-tasks of high-level synthesis. This is achieved by developing a set of basic transformations of the design representation which deals simultaneously with partial scheduling and local data path/control allocation. An optimization algorithm is then used to analyze the (global) design and select transformations during each step of the iterative improvement process.
Fig. 4 illustrates the basic structure of the CAMAD system. The first step of CAMAD is to map the VHDL specification into ETPN and to perform automatic parallelism extraction. After the transformation steps a RTL hardware implementation is generated which consists of a data path netlist and a controller specified in the form of a finite state machine. The final RTL implementation is converted into structural VHDL which, as well as the input system specification, can be simulated for verification.
Fig. 4. Overview of CAMAD
3.4. Logic-level ATPG tools
The Turbo Tester ATPG software (Block 8 in Fig.1) consists of a set of tools (Fig.5) for solving different test related tasks by different methods and algorithms:
- test pattern generation by deterministic, random and genetic algorithms
- test optimization (test compaction)
- fault simulation and fault grading for combinational and sequential circuits
- defect-oriented fault simulation and test generation
- multi-valued simulation for detecting hazards and analyzing dynamic behaviour of circuits
- testability analysis and fault diagnosis.
Fig.5. A set of low-level ATPG tools Turbo-Tester
All the Turbo Tester tools operate on the model of Structurally Synthesized Binary Decision Diagrams (SSBDD) [8,9]. The tools run at the structural logic level. Two possibilities are available: gate-level and macro-level. In the second case, the gate network is transformed into a macro network where each macro represents a tree-like sub-network. Using the macro-level helps to reduce the complexity of the model and to improve the performance of the tools. The fault model used is the traditional stuck-at model. However, the fault simulator and test generator can also be run in a defect-oriented mode, where defects in the library components are taken into account. In this case, additional input information about defects, in the form of defect tables for the library components, is needed.
3.5. Hierarchical ATPG DECIDER
In addition to the gate-level tools, a hierarchical test generation system DECIDER (Block 7 in Fig.1) has been developed and linked to MOSCITO. DECIDER includes a Register-Transfer Level (RTL) VHDL interface for importing high-level design information, and also an EDIF interface for importing gate-level descriptions of logic.
The ATPG uses a top-down approach with a novel method of combining random and deterministic techniques. Tests are generated for each functional unit (FU) of the system separately. First, a high-level symbolic test frame (test plan) is created for testing the given FU by deterministic search. As a result, a symbolic path for propagating faults through the network of components is activated and the corresponding constraints are extracted. The frame then adopts the role of a filter between the random TPG and the FU under test. If the filter does not allow finding a random test with 100% fault coverage for the component under test, another test frame is chosen or generated in addition to the previously created ones. In such a way, the following main parts of the ATPG are used alternately: a deterministic high-level test frame generator, a random low-level test generator, a high-level simulator for transporting random patterns to the component under test, and a low-level fault simulator for estimating the quality of the random patterns.
These test patterns are the input stimuli for the RTL design. Since the test generation implements also high-level fault models, we do not know the precise gate-level stuck-at fault coverage of these tests. Therefore, the test patterns have to be converted in order to correspond to the stimuli for the gate-level netlist of the entire design. This is required for gate-level fault simulation in order to measure the quality of tests.
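The alternation between the deterministic and random parts described above can be summarized by the following simplified Java sketch. All interfaces are hypothetical abstractions of the DECIDER components, and the retry bound is an arbitrary choice for the example.

```java
import java.util.List;

// Hypothetical abstractions of the DECIDER components.
interface TestFrameGenerator { List<String> nextFrame(String fu); }        // deterministic
interface RandomTpg { List<int[]> randomPatterns(List<String> frame); }    // random
interface FaultSimulator { double coverage(String fu, List<int[]> tests); }

class HierarchicalAtpg {
    // Try successive test frames until the filtered random patterns reach
    // full fault coverage of the functional unit under test.
    static List<int[]> generate(String fu, TestFrameGenerator frames,
                                RandomTpg rnd, FaultSimulator sim) {
        List<int[]> best = List.of();
        for (int attempt = 0; attempt < 10; attempt++) {
            List<int[]> candidate = rnd.randomPatterns(frames.nextFrame(fu));
            if (sim.coverage(fu, candidate) >= 100.0) return candidate;
            best = candidate;
        }
        return best; // best effort if 100% coverage is not reached
    }
}
```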
3.6. Defect-oriented ATPG
The DefGen ATPG system (Block 12 in Fig. 1) is a hierarchical ATPG for combinational circuits for IDDQ and/or voltage testing. Random and deterministic TPG algorithms and a fault simulator are involved in the ATPG. The TPG process uses the functional fault model and runs over the functional test set specified for each functional cell of a CUT structure. The deterministic TPG techniques are based on justification and propagation of the predefined test patterns for each cell in a circuit. The functional test set for each cell is called a list of fault conditions, and it is part of the fault conditions library of DefGen. These lists can be created, e.g., from a defect analysis of circuit cells at the low level, or can be specified by the designer with regard to the fault model used for the investigated cells. The input format for circuit description is the language of the ISCAS'85 benchmark circuits. The EDIF-ISCAS translator from Turbo Tester (Block 8 in Fig. 1) can be used as the interface to DefGen.
An Automatic Fault Library Builder (ALB) has been developed and implemented for finding optimal functional patterns for the cells in the CUT structure [14]. The test patterns are generated from different defect/fault tables for a selected cell, and they form the fault conditions library of the DefGen ATPG system. Defect tables have been created for several combinational standard gates (e.g., from the 0.8 µm CMOS library), and lists of optimal patterns have been generated by ALB for the ATPG experiments.
The Test Pattern Generation (TPG) technique at higher levels of abstraction rests upon a functional fault model and the physical-defect-to-functional-fault relationships, given in the form of a defect coverage table at the lower level. Each table (one per cell) includes the following information:
- list of all possible faults (shorts, bridges between nonequipotential conducting paths, resulting in a short circuit caused by physical defects);
- erroneous functions performed by the faulty gate;
- list of input patterns detecting physical defects.
- (optional) probabilities of occurrence of the physical defects.
The lists of erroneous functions and test patterns were generated by electrical simulations at the transistor level at WUT [15]. Probabilities of defect occurrence were calculated by probabilistic layout analysis at the physical level, taking into account defect density and size distribution.
4. Experiments
The described environment has been tested in the frame of the European project VILAB by the partners IIS/EAS and TUD (Germany), IIN (Slovakia), LIU (Sweden), and TTU (Estonia), with input from WUT (Poland), for several designs according to the following general algorithm (references to the exploited tools in Fig. 1 are given in parentheses):
1. The high-level designs were produced in the HW/SW codesign environment (1) or by high-level synthesis system (3).
2. The high-level testability was evaluated at the early design stage by behavioral level ATPG (2).
3. The user evaluates the quality of his own functional test for the new design (4) by using Turbo Tester fault simulator (6,8). Alternatively, he can also make use of other university fault simulators (13).
4. If the results are acceptable (test has obtained the demanded quality) go to END, else go to Step 5.
5. The user can work with the implemented ATPGs 7, 8 or 12.
6. If the stuck-at-fault model is accepted, the user can work with the ATPGs 7, 8 or 12. If the circuit is a simple sequential or combinational one (e.g. only FSM without data-path) go to Step 7. If the circuit consists of the control- and data-paths the user can work with the hierarchical ATPG (5,6,7). In this case, both the RTL description from high-level synthesis system (3) and the gate-level description from logic synthesis system (4) are needed. If the results are acceptable (test has the needed quality), go to END, else go to Step 7.
7. The user can work with the gate-level ATPG (6,8). If the results are acceptable (test has the needed quality), then go to END, else go to Step 8.
8. The testability should be now improved by redesign. Some flip-flops can be included, for example, into the scan-path. For testing the new full or partial scan-path design the user can work again with the gate-level ATPG (4,6,8).
9. Depending on the results, step 8 can be repeated until the demanded test quality has been obtained.
10. END.
This environment has been utilized for research purposes. For example, the performance of the hierarchical ATPG (7) was compared against the existing university tools GATEST [16] and HITEC [17]. For that, translator 10 was necessary. The results of the comparison of the different ATPGs are given in Table 1.
<table>
<thead>
<tr>
<th>Circuit</th>
<th>DECIDER</th>
<th>GATEST</th>
<th>HITEC</th>
</tr>
</thead>
<tbody>
<tr>
<td>GCD</td>
<td>91.0</td>
<td>3.4</td>
<td>92.2</td>
</tr>
<tr>
<td>Mult 8x8</td>
<td>79.4</td>
<td>13.6</td>
<td>77.3</td>
</tr>
<tr>
<td>Dileg</td>
<td>95.8</td>
<td>15.8</td>
<td>96.0</td>
</tr>
</tbody>
</table>
Table 1. Experimental results for hierarchical ATPG
Some experiments have been performed with the ATPG tools running at the Tallinn Technical University and the Institute of Informatics of the Slovak Academy of Sciences using defect tables created at the Warsaw Technical University separately.
<table>
<thead>
<tr>
<th rowspan="2">Circuit</th>
<th colspan="2">Defect coverage for OR-type shorts, %</th>
</tr>
<tr>
<th>Counted defects</th>
<th>Probabilistic defects</th>
</tr>
</thead>
<tbody>
<tr><td>C17</td><td>92.59</td><td></td></tr>
<tr><td>C432</td><td>99.38</td><td></td></tr>
<tr><td>C499</td><td>92.80</td><td></td></tr>
<tr><td>C880</td><td>95.95</td><td></td></tr>
<tr><td>C1355</td><td>93.42</td><td></td></tr>
<tr><td>C1908</td><td>92.91</td><td></td></tr>
<tr><td>C3540</td><td>94.21</td><td></td></tr>
<tr><td>C5315</td><td>94.71</td><td></td></tr>
<tr><td>C6288</td><td>92.59</td><td></td></tr>
</tbody>
</table>
Table 2. Data of defect-oriented fault simulation
The purpose of the experiments was to compare the quality of 100% stuck-at test patterns in relation to physical CMOS defects. For that, we used the data produced at WUT by probabilistic analysis of physical defects for a restricted library of complex gates. Then we resynthesized the ISCAS'85 circuits using only components from the analysed library. The problem investigated was to determine how good 100% stuck-at tests are at detecting physical defects in complex gates. The results in Table 2 show the low quality of 100% stuck-at tests in detecting real physical defects.
The new result of these experiments was to show that the quality of tests in terms of defect coverage is higher when the defect probabilities are not taken into account. From this we can conclude that the traditional methods of test coverage measurement, based on simply counting undetected defects, where all the faults are assumed to have the same probability, tend to give overestimated quality measures.
5. Summary
In this paper, an Internet-based environment built on the MOSCITO system [18] is presented. The environment focuses on providing SW/HW codesign and high-level and logic-level design flows with test pattern generation and fault simulation at the behavioral, register-transfer, gate and physical-defect levels. The main effort was put on linking together test generators and fault simulators with varying functionalities and diverse fault models, available at geographically different sites. The system provides interfaces and links to commercial design environments and also to other university tools. The functionality of the integrated design and test system was verified with several benchmark circuits and different design and test flows. Furthermore, the authors believe that the MOSCITO architecture is powerful enough to solve similar problems in other application areas of automated system design. Future work will continue in this direction.
References
---
Automated Analysis of Industrial Workflow-based Models
Mario Cortes-Cornax
Univ. Grenoble Alpes, CNRS, Grenoble INP, LIG, F-38000 Grenoble France
Adrian Mos
Naver Labs Europe, Meylan, France
Ajay Krishna
Univ. Grenoble Alpes, Inria, CNRS, Grenoble INP, LIG, F-38000 Grenoble France
Gwen Salaün
Univ. Grenoble Alpes, CNRS, Grenoble INP, LIG, F-38000 Grenoble France
ABSTRACT
Modelling and governance of business processes are important concerns in companies all over the world. By better understanding business processes, different optimizations are made possible, concretely resulting in potential efficiency gains, cost reductions and improvements in agility. The use of formal specification languages for the modelling of business processes paves the way for different kinds of automated analysis. Such analysis can be used to infer properties of the modelled processes that can be used to improve their design. In this paper, we explore two important classes of verification, namely the verification of behavioural properties using model checking techniques and data-based analysis using SAT solving. These verifications are fully automated using tools such as the CADP verification toolbox and the Z3 solver. We illustrate our approach on a real-world case study.
CCS CONCEPTS
• Applied computing → Business process modeling; • Software and its engineering → Formal software verification; • Theory of computation → Logic and verification;
KEYWORDS
Business process management, SAT, Validation, Data
1 INTRODUCTION
A business process is a collection of structured activities fulfilling a precise goal. Business process management is of prime importance in companies, because they have realized that, by modelling and then mastering their own processes, several improvements can be achieved, resulting in time and money savings. Although modelling is necessary to make those optimizations possible, it is unfortunately not enough. Beyond process modelling, there is a need for formal checking that automatically allows one to analyze a process under development or already deployed, and to detect whether this process satisfies some precise functional or non-functional requirements. Such verification techniques are helpful to identify possible bottlenecks, missing services or incorrect behaviours. This may then lead to the refinement of the process being analyzed.
In this work, we chose to consider Mangrove [18] for modelling purposes. Mangrove is a meta-model that allows domain specific process languages to be mapped to standard workflow languages, making it easier for non-technical users to design business processes in an intuitive way. Mangrove was used for modelling realistic processes at Xerox as we will show in this paper with a case study.
Given a Mangrove model, we study two kinds of analysis: behavioural analysis and data-based analysis. The first aims at verifying whether the model satisfies a certain temporal property (e.g., a certain task is never executed after another one); model checking techniques are used for this analysis. Another kind of behavioural analysis compares two versions of a process (before and after a modification, for instance). This is useful to check whether an evolution of a given process respects a certain behaviour (for example, that a new functionality is present and occurs when expected); this is achieved using equivalence checking techniques. The data-based analysis looks at the control-flow graph that can be derived from the Mangrove model and checks whether all parts of the process are reachable. This check is achieved using static analysis of the workflow. Both kinds of verification are automated using existing tools (the CADP toolbox [10] and the Z3 solver [2], respectively). Model transformations from Mangrove are developed in order to fill the gap between the industrial process models and the input languages of those tools.
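As a flavour of the data-based analysis, the following sketch uses the Z3 Java bindings (com.microsoft.z3, assuming a recent version where Context is auto-closeable) to decide whether a data-guarded flow can ever be enabled. The guard shown is an invented example, not taken from the case study.

```java
import com.microsoft.z3.Context;
import com.microsoft.z3.IntExpr;
import com.microsoft.z3.Solver;
import com.microsoft.z3.Status;

// A branch guarded by "amount > 100 && amount < 50" is unreachable
// because its condition is unsatisfiable.
class ReachabilityCheck {
    public static void main(String[] args) {
        try (Context ctx = new Context()) {
            IntExpr amount = ctx.mkIntConst("amount");
            Solver solver = ctx.mkSolver();
            solver.add(ctx.mkGt(amount, ctx.mkInt(100)));
            solver.add(ctx.mkLt(amount, ctx.mkInt(50)));
            // UNSATISFIABLE means no data value can enable this flow:
            // the guarded part of the process is dead.
            System.out.println(solver.check() == Status.UNSATISFIABLE
                    ? "branch unreachable" : "branch reachable");
        }
    }
}
```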
The rest of this paper is organized as follows. Section 2 introduces the Mangrove meta-model. Sections 3 and 4 present the behavioural verification techniques and the data-based analysis, respectively. Section 5 illustrates our approach on an industrial case study. Section 6 reviews related work and, finally, Section 7 concludes the paper.
2 PROCESS DEFINITION MODELS
Mangrove [18] is a generic process description meta-model that relies on the Eclipse Modelling Framework. It unifies business processes and Service Oriented Architecture (SOA). It provides behavioural support to a domain definition in order to define the necessary steps in a process, focusing on preserving the connection between the common elements of business processes and architectural constructs such as services. It facilitates domain-specific design by establishing mappings between domain-related concepts and process activities.
In the approach supported by Mangrove and used in the scenario illustrated in the case study, processes are defined using a combination of inter-related meta-models. The first one, the Domain Meta-model (DomainMM), is mainly used to specify domain-specific behaviour (i.e., activity types) and data. There is one instance of this per application domain. For example, our case study deals with the Document processing domain, which includes behavioural activities such as scanning, optical character recognition (OCR) and several kinds of quality controls.
1 This work was carried out when Mario Cortes-Cornax and Adrian Mos were working at the former Xerox Research Center Europe (XRCE) in Meylan (France), which became Naver Labs.
The second meta-model, the Common Meta-model (CommonMM), is used to define the actual processes and their flow of control, including connections to services. It can be seen as a simplified version of BPMN that connects behaviour (process flows, conditions) and domain definitions (types of behaviour elements, forms and data). A specific implementation of the CommonMM is derived from the Mangrove open-source project.
In this paper, we focus on process modelling (supported by the Mangrove CommonMM). The verification approach presented in the next sections deals with the behavioural description of these processes. The left-hand side of Figure 1 gives some excerpts of processes. Notice that the notation relies on steps and conditional sequence flows, whereas gateways (i.e., control flow) are implicitly defined. Mangrove models are transformed to the Process Intermediate Format (PIF), which is the input of the analysis tools. PIF and the transformation patterns are described in the next section.
3 BEHAVIOURAL VERIFICATION
In this section, a model transformation from Mangrove to the Process Intermediate Format (PIF) is first presented, which enables behavioural analysis using the CADP toolbox.
3.1 From Mangrove to PIF
Process Intermediate Format (PIF) [21] is a pivot model for workflow-based notations. A transformation from Mangrove to PIF is proposed in this work, because PIF is supported by the automated analysis techniques that are presented later in this section. The PIF scheme is described in Figure 2, which illustrates its main concepts in a meta-model.
As mentioned before and illustrated in Figure 1, the key elements of the Mangrove meta-model that we focus on are Steps and Conditional Sequence Flows. Mangrove simplifies convergence and divergence of paths by eliminating gateways. Instead of gateways, Mangrove relies on conditions to determine the flow. Condition expressions are evaluated for each flow and tokens are sent across if the condition expression evaluates to true. Based on the evaluation of condition expressions, Mangrove flows can be transformed into PIF gateways. PIF captures the workflow as a set of Nodes and Sequence Flows. Nodes are further classified by their type attribute. Gateways and Tasks are the most commonly used node types. Gateways are further classified as Join and Split gateways depending on whether they converge or diverge.
Mangrove to PIF transformation patterns are shown in Figure 1. Each Step in Mangrove is translated as a Task in PIF. Mangrove models identify outgoing flows from a Step as Source transitions and incoming flows as Target transitions. The Mangrove to PIF transformations of events and gateways are the following:
**Start Event.** If there is no target transition for a step, then that Step behaviour is equivalent to a Start event followed by a Task.
**End Event.** If there is no source transition for a step, then that Step behaviour is equivalent to a Task followed by an End event.
**XOR Split Gateway.** When a step has multiple source transitions and each transition has a condition associated with it, all the condition expressions are evaluated using SAT techniques. If these expressions can only hold as an XOR clause (CondX ⊕ CondY), which means that they are mutually exclusive, then the transformation is identified as an XOR split.
Figure 2: PIF meta-model
**OR Split Gateway.** This is similar to the XOR pattern in that all the condition expressions of the source transitions are evaluated. However, in this case, their evaluation shows that the conditions are not mutually exclusive. They evaluate to true as an OR clause (CondX ∨ CondY), transforming to an OR split.
**AND Split Gateway.** This pattern is straightforward. If there are multiple source transitions for a step and none of them has a condition expression associated with it, then the behaviour is similar to an AND split gateway.
Merge patterns can exist if there are multiple target transitions for a Step. These transitions are transformed to Join gateways in PIF. The type of Join gateway is determined by the preceding Split gateway in the workflow. If the split preceding the join is an exclusive split (XOR split), then the ensuing merge pattern is identified as an **XOR Join Gateway** by a stack mechanism. Similarly, the merges corresponding to an OR split and an AND split are treated as an **OR Join Gateway** and an **AND Join Gateway**, respectively. An AND Join Gateway synchronises on all incoming flows before proceeding with the outgoing flow.
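To make the split classification concrete, here is a minimal sketch, assuming Z3's Python bindings and two invented flow conditions (the variable and thresholds are hypothetical, not taken from the paper's models). Checking that the two conditions cannot hold together is what distinguishes an XOR split from an OR split:

```python
# Illustrative sketch: classify a two-branch split as XOR or OR by
# checking with Z3 whether the two flow conditions are mutually exclusive.
from z3 import Int, Solver, And, unsat

exp = Int('exp')            # hypothetical process variable
cond_x = exp >= 100         # condition on the first outgoing flow
cond_y = exp < 100          # condition on the second outgoing flow

solver = Solver()
solver.add(And(cond_x, cond_y))
if solver.check() == unsat:
    print("conditions are mutually exclusive: XOR split")
else:
    print("conditions can hold simultaneously: OR split")
```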
When a model-to-model transformation is performed, the question of semantics preservation arises. As Mangrove does not propose a formal semantics, this correspondence is difficult to establish. However, since PIF semantics relies on the LNT process algebra [14, 21], our approach allows us to give an implicit semantics to Mangrove by translation to PIF.
### 3.2 Behavioural Analysis
PIF is supported by the VBPMN platform [14, 21], which connects PIF to the CADP toolbox. This is achieved by translating a PIF process into LOTOS New Technology (LNT) code, one of the input specification languages of the CADP toolbox [10]. The CADP toolbox is used for generating an LTS (Labelled Transition System), which describes all the executions of the workflow starting from the initial state, using a set of states, transitions and labels. Labels correspond to the steps in the Mangrove model. CADP is also used for verifying the following two classes of properties on LTS models obtained from Mangrove process models:
- **Functional verification** aims at checking properties of interest such as the existence of deadlock/livelock states or the satisfaction of safety and liveness properties. In the latter case, since the properties depend on the input process, they have to be provided by the analyst, who can reuse well-known patterns for properties such as those presented in [8].
- **Process comparison** takes as input two process models, a comparison relation and possibly additional parameters for the relation. Several evolution relations can be used. Conservative evolution ensures that the observational behaviour is strictly preserved. Inclusive evolution ensures that a subset of a process behaviour is preserved in a new version of it. Selective evolution allows one to focus on a subset of the process tasks. It is also possible to have VBPMN work up to a renaming relation over tasks. If the two input process models do not fulfill the constraints of the chosen evolution relation, a counterexample indicating the source of the violation is returned. This helps the process analyst in supporting the refinement into a correct evolved version of a process model.
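For intuition, inclusive evolution can be approximated on small examples by a bounded trace-inclusion check. The sketch below is purely illustrative, with an invented three-task workflow and an invented added task; VBPMN itself relies on CADP's equivalence checking and concurrency theory, not on code like this:

```python
# Naive "inclusive evolution" check on two tiny, hand-written LTSs:
# every trace of the old process must be a trace of the new one,
# once the newly added task ("Receive") is hidden.
old_lts = {0: [("Scan", 1)], 1: [("OCR", 2)], 2: []}
new_lts = {0: [("Receive", 1)], 1: [("Scan", 2)], 2: [("OCR", 3)], 3: []}

def traces(lts, state=0, prefix=(), limit=6):
    """Enumerate all traces from `state`, bounded in length by `limit`."""
    yield prefix
    if len(prefix) < limit:
        for label, nxt in lts[state]:
            yield from traces(lts, nxt, prefix + (label,), limit)

def inclusive(old, new, hidden=("Receive",), limit=6):
    projected = {tuple(l for l in t if l not in hidden)
                 for t in traces(new, limit=limit)}
    return set(traces(old, limit=limit)) <= projected

print(inclusive(old_lts, new_lts))   # True: old behaviour is preserved
```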
### 4 DATA-BASED ANALYSIS USING SAT
In the previous section, model checking and equivalence checking techniques were proposed to verify workflow behaviour. The LTS generated using these techniques is an over-approximation consisting of all possible executions; it is not an exact representation of the workflow behaviour. This is because the verification does not take into account the data information encoded in the workflows, which would make certain execution paths invalid. Workflow models like Mangrove and BPMN encode conditions in sequence flows. If these conditions are evaluated, we can easily identify whether the corresponding execution path is feasible. As these models can be viewed as a control flow graph, a static analysis of the model can be performed by encoding condition expressions as satisfiability constraints. Satisfiability (SAT) and satisfiability modulo theories (SMT) [3, 4] can then be applied to check whether the execution paths are satisfiable. Satisfiability theory is based on solving propositional formulas: a formula is satisfiable if there exists an assignment of values under which it evaluates to true. We opted to build satisfiability constraints, rather than evaluating condition expressions step by step programmatically, as this allows us to leverage the power of high-performance SMT solvers like Z3 [2] to solve large sets of condition expressions. Note that we already used these techniques in Section 3 in order to check whether a split pattern transforms to an XOR or an OR split gateway.
Figure 3 shows a Mangrove workflow with two instance variables age and expense (exp for short) and their condition expressions. Using model checking techniques, we can generate all possible executions without considering those conditions. Suppose the dataset under consideration has expense values in the range \(124 < \text{exp} < 679\); we can then refine the earlier over-approximate model by discarding the bottom half of the workflow. This optimisation might not seem significant for small workflows, but for large workflows, especially those with many OR gateways whose behaviour becomes too complex, refining the model helps to understand it better. Another benefit of refining is that we can eliminate "false negatives" in verification (i.e., we might otherwise detect anomalies in parts of the workflow that would never be executed for the given dataset). Respecting the execution semantics, the data in Figure 3 can be expressed as a propositional formula as follows:
\[
((\text{exp} \geq 100) \land (\text{age} > 30 \lor \text{age} = 35 \lor \text{age} < 40)) \lor \neg(\text{exp} \geq 100)
\]
Figure 3: Workflow with data
In our approach, the input dataset is added as an initial constraint. Starting from the first Step in the workflow, constraints are incrementally added and removed as condition expressions are encountered. As the formula is updated, its satisfiability is checked: if the result is satisfiable, we proceed further; otherwise, the state is marked as infeasible and we backtrack to see whether any other diverging path is satisfiable. Once we have identified the infeasible flows, we generate an updated PIF model, which can serve as a new input for behavioural verification (Section 3).

The constraint building process can be illustrated using an input dataset. For the input dataset \(\text{exp} = [125, 678]\) and \(\text{age} = [45, 90]\), after the execution of the Start Event, the following propositional formula would be generated:
\[
((\text{exp} \geq 125 \land \text{exp} \leq 678) \land (\text{age} \geq 45 \land \text{age} \leq 90)) \quad \text{(i)}
\]
Further, as we traverse the Exclusive gateway conditions, we obtain another constraint in addition to formula (i). The two resulting formulas are as follows:
\[
((\text{exp} \geq 125 \land \text{exp} \leq 678) \land (\text{age} \geq 45 \land \text{age} \leq 90) \land (\text{exp} \geq 100)) \quad \text{(ii)}
\]
\[
((\text{exp} \geq 125 \land \text{exp} \leq 678) \land (\text{age} \geq 45 \land \text{age} \leq 90) \land (\text{exp} < 100)) \quad \text{(iii)}
\]
Clearly, formula (iii) is not satisfiable, thus we can identify Task 2 as infeasible for the input dataset. We can then build on formula (ii), which is satisfiable, and derive the following propositional formulas:
\[
((\text{exp} \geq 125 \land \text{exp} \leq 678) \land (\text{age} \geq 45 \land \text{age} \leq 90) \land (\text{exp} \geq 100) \land (\text{age} > 30)) \quad \text{(iv)}
\]
\[
((\text{exp} \geq 125 \land \text{exp} \leq 678) \land (\text{age} \geq 45 \land \text{age} \leq 90) \land (\text{exp} \geq 100) \land (\text{age} = 35)) \quad \text{(v)}
\]
\[
((\text{exp} \geq 125 \land \text{exp} \leq 678) \land (\text{age} \geq 45 \land \text{age} \leq 90) \land (\text{exp} \geq 100) \land (\text{age} < 40)) \quad \text{(vi)}
\]
Similarly, if we solve (iv), (v) and (vi) for satisfiability, only (iv) is satisfiable, so we can mark Task 4 and Task 5 as infeasible. The proposed incremental approach can be extended to large workflows. Here, we have illustrated our approach using an input dataset. We can also check the feasibility of execution of Steps in Mangrove by taking only the conditions as constraints (infinite domains). By adding the input dataset as a constraint, we are simply restricting the domain of the instance variables to a finite range.
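The incremental constraint building described above maps naturally onto the push/pop interface of an SMT solver. The following is a minimal sketch, assuming Z3's Python bindings and replaying the Figure 3 example rather than a full PIF traversal:

```python
# Incremental feasibility check for the Figure 3 example: the dataset is
# the initial constraint (formula (i)); each flow condition is pushed,
# checked, and popped, mirroring the backtracking traversal.
from z3 import Int, Solver, And, sat

exp, age = Int('exp'), Int('age')

solver = Solver()
solver.add(And(exp >= 125, exp <= 678), And(age >= 45, age <= 90))

def feasible(condition):
    """Push a flow condition, check satisfiability, then backtrack."""
    solver.push()
    solver.add(condition)
    result = solver.check() == sat
    solver.pop()
    return result

# Exclusive gateway: only the exp >= 100 branch survives (formulas ii/iii).
print(feasible(exp >= 100))   # True
print(feasible(exp < 100))    # False -> Task 2 infeasible

# Commit to the satisfiable branch and test the next split (iv)-(vi).
solver.add(exp >= 100)
print(feasible(age > 30))     # True
print(feasible(age == 35))    # False -> Task 4 infeasible
print(feasible(age < 40))     # False -> Task 5 infeasible
```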
5 CASE STUDY
In this section, a real-world Mangrove model is described and we illustrate how automated analysis can be performed using our approach. The context of this (simplified) use case is the document treatment process, which aims at scanning physical documents submitted by clients, extracting information from them using optical character recognition (OCR), and sending the results back to the clients after treatment. The OCR process is illustrated in Figure 4. When the physical document is scanned, a numerical image of the document is created. The OCR transforms the image into actual text, numbers or images. It is also able to identify the main sections of the document. Finally, the keyword extraction identifies important information based on predefined rules (e.g., the most repeated word, all the numbers followed by a euro symbol, or names).

Now let us focus on the whole process, introduced in Figure 5. When a document is received, it is scanned and then goes through an OCR analysis. After the OCR, a control is performed in order to verify the treatment quality, relying on a predefined quality indicator \(q_{Ind}\). This quality indicator is defined as a process variable, which is set by the OCR's output. It aggregates information about the brightness of the document, the number of extracted sections and the confidence in the extracted amounts, and scales it from 0 to 100. Three possible thresholds lead to exclusive paths: 1) if \(q_{Ind} < 75\) then the OCR result is rejected; the rejection is logged and the process loops (at most twice) to scan again; 2) if \(q_{Ind} \geq 96\) then the quality is considered sufficient to execute a parameterized keyword extraction and finally transfer the scanned file, as well as the extracted information, back to the client; and 3) if \(q_{Ind} > 75\) and \(q_{Ind} < 96\) then a video coding process is performed, consisting of a manual verification and validation of the extracted information. A worker verifies on a screen whether the extracted information actually corresponds to the text or numbers of the scanned document. After these manual steps, the keyword extraction and transfer can be performed. Note that the expected quality of service is negotiated with the client through service level agreements.

Figure 6 proposes an evolution of the process in Figure 5. The first process has to be improved by introducing a rejection treatment just after reception. Indeed, some documents can be automatically rejected before performing the whole process (e.g., non-registered types of documents). This treatment, together with a consistent notification, avoids useless effort that would otherwise only be detected once the process has completed. Note that there are three levels of rejection after an automatic check: 1) if \(rej = 0\) then the document is not rejected; 2) if \(rej = 1\) then there is an immediate rejection; and 3) if \(rej = 2\) then a manual verification is needed, which leads either to the normal path (Scan Document) or to a rejection. We will show on these two versions of the Mangrove process how evolution and property verification, as well as data-based analysis, are useful.
**Behavioural verification.** Processes 1 (Figure 5) and 2 (Figure 6) can be compiled using CADP into LTS models, as shown in Figures 7 and 8. Given a process, the corresponding LTS exhibits all possible executions of that process. The generated LTS takes into account the conditions involved. Since the multiple source transitions have mutually exclusive conditions, the generated LTS has diverging paths. The three exclusive paths are present in the LTS (Figure 8) after \(TreatRejection\). When checking for evolution and comparing these models, we first note that the extended workflow does not strictly preserve the previous execution scenarios. The behavioural analysis returns a counterexample, as shown in Figure 9, which indicates a path highlighting the difference between the two models. This information is useful, particularly when the models are large with plenty of possible execution scenarios. As far as automated analysis is concerned, various modes of evolution checking can be used depending on the user's needs [21].
The aforementioned LTSs are also used to automatically verify the reachability of one step from another in the process. As the process exhibits several possible paths and therefore executions, a designer may need extra help in order to validate a proposed evolution. For instance, an important verification aims at checking that all the rejections produced in the second workflow are notified to the client. This can be checked using property-based functional verification. Figure 10 shows a path returned by the model checker where this property is violated. By taking a closer look at the path, the analyst can see that, after a rejection, the path ends with a successful transfer of files. The analyst can then either decide that a rejection notification was not necessary in that case, or update the workflow to avoid an execution where a rejection is decided yet the behaviour terminates successfully.
**Data-based analysis.** The verification has focused so far on the workflow behaviour. In addition, SAT-based analysis can be performed to look more carefully at data (conditions) and further optimize the execution scenarios. In the process given in Figure 6, it is possible that a particular input dataset is of registered type (\(rej = 0\)) and of high quality (\(q_{Ind} \geq 96\)). For this dataset, SAT-based analysis would simplify the workflow behaviour to a much simpler LTS, as shown in Figure 11. The SAT analysis would find the following constraints to be unsatisfiable:
\[
((rej = 0 \oplus rej = 1 \oplus rej = 2) \land (q_{Ind} \leq 75))
\]
\[
((rej = 0 \oplus rej = 1 \oplus rej = 2) \land (q_{Ind} > 75 \land q_{Ind} < 96))
\]
It is worth noting that without executing the process workflow, through static analysis, we can identify infeasible paths. This information can be valuable for resource scheduling. In the second version of our workflow, given in Figure 6, the top part (video coding) and the bottom part (OCR rejection treatment) are not executed, and the corresponding resources can thus be freed.
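The check itself is a direct unsatisfiability query. Below is a hedged sketch, assuming Z3's Python bindings and assuming the dataset constraints (\(rej = 0\), \(q_{Ind} \geq 96\)) are conjoined with the path conditions above, as in Section 4:

```python
# Case-study feasibility check: under the dataset constraints, both the
# video-coding path and the OCR-rejection path are unsatisfiable.
from z3 import Int, Solver, And

rej, q_ind = Int('rej'), Int('qInd')
dataset = And(rej == 0, q_ind >= 96)

for path in (q_ind <= 75, And(q_ind > 75, q_ind < 96)):
    solver = Solver()
    solver.add(dataset, path)
    print(solver.check())     # unsat for both paths
```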
6 RELATED WORK
Several works have focused on providing formal semantics and verification techniques for business processes using Petri nets, process algebras, or abstract state machines, see, e.g., [5–7, 11–13, 16, 17, 20, 22, 25, 26]. The main difference in this paper is that we do not only support automated analysis of specific properties but also other kinds of verification (comparison, data-based analysis).
As far as process comparison is concerned, in Chapter 9 of [23], the authors study the migration of processes and, from that point of view, define several notions of evolution, migration, and refactoring. We propose off-line analysis techniques and do not propose a solution for applying these changes at runtime. In [24], the authors address the equivalence or alignment of two process models. To do so, they check whether correspondences exist between a set of activities in one model and a set of activities in the other model. They consider Petri net systems as input and process graphs as the low-level formalism for analysis purposes. Their approach relies on the identification of regions (sets of activities) in each graph that can be matched with respect to an equivalence notion. ADDiff [15] proposes a semantic differencing operator for activity diagrams. As a result, ADDiff performs a semantic comparison and outputs a set of diff witnesses, each of which is an execution trace that is possible in the first activity diagram and not possible in the second. This solution uses search and fixpoint algorithms, whereas we rely on equivalence checking and concurrency theory principles.
As far as data-based analysis is concerned, in [9], the authors propose a translation of BPMN into logic with a special focus on data objects and data-based decision gateways. They provide new mechanisms to avoid structural issues in workflows such as flow divergence by introducing the notion of well-formed BPMN process. Their approach aims at avoiding incorrect syntactic patterns whereas we propose automated analysis at the semantic level.
[19] focuses on the analysis of choreography models. The main property of interest in that context is called conformance and aims at checking whether the distributed implementation and the choreography behave identically. The authors mainly focus on data description. Their approach supports choreographies extended with conditions and relies on SMT solving for conformance checking.
Decision Model and Notation (DMN) is a recent OMG standard for modelling decisions in an interchangeable format. DMN can be used within workflow-based notations for representing conditions. In [1], the authors propose a formal semantics of DMN decision tables, a notion of DMN table correctness, and algorithms for the detection of overlapping rules and missing rules. These algorithms have been implemented in the DMN toolkit and validated through empirical evaluation. Our modelling language for describing decisions differs from DMN since we have to handle infinite domains, justifying our choice of SMT solving.
7 CONCLUSION
This paper studies the crucial issue of business process modelling and analysis. As a modelling language, we have opted for a workflow-oriented domain-specific language (Mangrove), which is simple yet expressive enough to deal with the basic constructs of processes without considering implementation details. Regarding verification, we have addressed the question from a functional point of view. Two kinds of verification techniques are proposed for Mangrove, namely behavioural analysis and data-based verification. They are helpful in analysing the behaviour of a process model in order to identify possible errors such as violated properties or unreachable paths. The detection of such erroneous behaviours may lead to improvements in subsequent versions of the process. In order to automate these checks, we rely on model transformation and the reuse of existing verification frameworks implementing model/equivalence checking techniques and static analysis. Focusing on an industrial case study, we illustrate how our approach can be used in practice for formally comparing two versions of a process. Our main perspective for future work is to deal with non-functional properties such as execution time, cost analysis or optimal resource allocation.
REFERENCES
Abstract- Preliminary research on the development of an intelligent multimedia distributed platform hub (MediaHub) for the fusion and synchronisation of language and vision data is presented. Related research is reviewed and a potential new approach to decision-making within MediaHub based on Bayesian Networks is proposed. A system architecture, including a Dialogue Manager, Semantic Representation Database and Decision-Making Module, is outlined. Bayesian Networks will be employed in the decision-making process within the Decision-Making Module. Initial findings suggest that this will be a promising approach for MediaHub.
Keywords: intelligent multimedia, distributed systems, multimodal synchronisation, multimodal fusion, multimodal semantic representation, decision-making, Bayesian Networks.
I. INTRODUCTION
The area of intelligent multimedia has seen considerable research into creating user interfaces that can accept multimodal input. This has led to the development of intelligent interfaces that can learn to meet the needs of the user, in contrast to traditional systems where the onus was on the user to learn to use the interface. A more natural form of human-machine interaction has resulted from the development of systems that allow multimodal input such as natural language, eye and head tracking and 3D gestures [1] [2]. Considerable work has also been completed in the area of knowledge representation within multimodal systems, with the development of several semantic mark-up languages [3]. Efforts have also been made to integrate natural language and vision processing, and the main approaches in this field are described in [2].
The area of distributed computing has been exploited to create intelligent multimedia systems that are human-centred and directly address the needs of the user. DACS (Distributed Applications Communication System) [4] is a powerful tool for system integration that provides numerous features for the development and maintenance of distributed systems. Communication within DACS is based on simple asynchronous message passing, with additional extensions to deal with dynamic system reconfiguration during run-time. Other more advanced features include both synchronous and asynchronous remote procedure calls and demand streams.
A. Objectives of MediaHub
The principal aim of the research discussed here is to develop a distributed platform hub (MediaHub) for the fusion and synchronisation of multimodal information, specifically language and vision data. The primary objectives of MediaHub are to:
- Interpret/generate semantic representations of multimodal input/output.
- Perform fusion and synchronisation of multimodal data (decision-making).
- Implement and evaluate MediaHub, a multimodal platform hub with a potential new approach to decision-making.
In pursuing these three objectives, several research questions need to be answered. For example:
- Will MediaHub use frames for semantic representation, or will it use XML or one of its derivatives?
- How will MediaHub communicate with various elements of a multimodal platform?
- Will MediaHub constitute a blackboard or non-blackboard model for semantic storage?
- What mechanism will be implemented for decision-making within MediaHub?
MediaHub will be tested as a plug-in within an existing multimodal platform such as CONFUCIUS [5] using multimodal input/output data.
Next, in section 2, we will look at research related to the development of MediaHub. Then, in section 3, we will focus on multimodal semantic representation. Section 4 discusses decision-making within MediaHub. Section 5 presents the proposed system architecture of MediaHub, while section 6 discusses potential tools and future development of MediaHub.
II. RELATED RESEARCH
This section gives a review of related research that is relevant to the design and implementation of MediaHub. Section 2.1 provides a review of the area of distributed processing, whilst section 2.2 looks at existing multimodal distributed platforms.
A. Distributed Processing
Recent advances in the area of distributed systems have seen the development of several software tools for distributed processing. These tools are utilised in the creation of a range of distributed platforms.
The Open Agent Architecture (OAA) [6] is a general-purpose infrastructure for creating systems that contain multiple software agents. OAA allows such agents to be developed in different programming languages and run on different platforms. All agents interact using the InterAgent Communication Language (ICL). ICL is a logic-based declarative language used to express high-level, complex tasks and natural language expressions.
JATLite [7] incorporates a set of Java packages that enable multi-agent systems to be constructed using Java. JATLite provides a Java agent platform that uses the KQML (Knowledge Query and Manipulation Language) Agent Communication Language (ACL) [8] for inter-agent communication. KQML is a message format and message-handling protocol used to support knowledge sharing among agents.
.NET [9] is the Microsoft Web services strategy that allows applications to share data across different operating systems and hardware platforms. The web services provide a universal data format that enables applications and computers to communicate with one another. Based on XML, the web services allow communication across platforms and operating systems, irrespective of what programming language is used to write the applications.
CORBA [10] is a specification released by the Object Management Group (OMG). A major component of CORBA is the Object Request Broker (ORB), which delivers requests to objects and returns results back to the client. The operation of the ORB is completely transparent to the client, i.e. the client doesn’t need to know where the objects are, how they communicate, how they are implemented, stored or executed. CORBA uses the Interface Description Language (IDL), with syntax similar to C++, to describe object interfaces.
B. Multimodal Platforms
Numerous intelligent multimedia distributed platforms currently exist. With respect to these platforms, of particular interest to the design of MediaHub are their methods of semantic representation, storage and decision-making (fusion and synchronisation).
Ymir [11] is a computational model for creating autonomous creatures capable of human-like communication with real users. Ymir represents a distributed, modular approach that bridges between multimodal perception, decision and action in a coherent framework. The modules within Ymir are divided into four process collections. The Reactive Layer operates on relatively simple data. The Process Control Layer controls the global aspects of the dialogue and manages the communicative behaviour of the agent. The Content Layer hosts the processes that interpret the content of the multimodal input and generate suitable responses. The Action Scheduler within Ymir is used to coordinate appropriate actions. There are three main blackboards implemented in Ymir, and communication is achieved via message passing. The first blackboard, called the Functional Sketchboard, is primarily used for information exchange between the Reactive Layer and the Process Control Layer. The second blackboard is called the Content Blackboard. This deals with communication between the Process Control Layer and the Content Layer. The messages that are posted on the Content Blackboard are less time-critical than those on the Functional Sketchboard. The third blackboard is called the Motor Feedback Blackboard and is used to keep track of which part of a stream of actions is currently being planned or carried out by the Action Scheduler. Within the Ymir architecture, a prototype interactive agent called Gandalf has been created. Gandalf is capable of fluid turn-taking and dynamic sequencing.
CHAMELEON [12] is a platform for developing intelligent multimedia applications that makes use of DACS for process synchronisation and intercommunication. The hub of CHAMELEON consists of a dialogue manager and a blackboard. The role of the blackboard is to keep track of interactions over time, using frames for semantic representation. The architecture of CHAMELEON is shown in Fig. 1. CHAMELEON consists of ten modules, mostly programmed in C and C++, which are glued together by the DACS communications system. The blackboard and dialogue manager form the kernel of CHAMELEON. The blackboard stores the semantic representations produced by the other modules, keeping a history of all interactions. Communication between modules is achieved by exchanging semantic representations between themselves or the blackboard.
Fig.1. Architecture of CHAMELEON [12]
SmartKom [13] is a multimodal dialogue system that is being developed to help overcome the problems of interaction between people and machines. SmartKom focuses on developing multimodal interfaces for applications in the home, public and mobile domains. The system uses a combination of speech, gestures and facial expressions to facilitate a more natural form of human-computer interaction, allowing face-to-face interaction with its conversational agent Smartakus. For example, in the public domain, the user can allocate the task of finding a library to Smartakus.
MIAMM [14] is an abbreviation for Multidimensional Information Access using Multiple Modalities. The aim of the MIAMM project is to develop new concepts and techniques that will facilitate fast and natural access to multimedia databases using multimodal dialogues.
III. MULTIMODAL SEMANTIC REPRESENTATION
One of the central questions in the development of intelligent multimedia or multimodal systems is what form of semantic representation should be used. The term ‘semantic representation’ refers to the method employed to represent the meaning of media representation [3]. This semantic representation must support interpretation and generation, multimodal input and output and a variety of semantic theories. The representation may contain architectural, environmental and interactional information. Architectural information comprises the producer/consumer of the information, information confidence and input/output devices. Environmental representation contains timestamps and spatial information, whilst interactional information includes the speaker/user’s state. The majority of the work in multimodal systems employs either frames or XML as the method of semantic representation. A discussion will follow on both of these approaches.
A. Frames
A frame is a collection of attributes with associated values that represent some real world entity. Minsky [15] first introduced frames as a method of semantically representing situations in order to facilitate decision-making and reasoning. The idea of frames is based on human memory and the psychological view that, when faced with a new problem, humans select an existing frame (remembered framework) and adapt it to fit the new situation by changing appropriate details. Although frames have limited capabilities on their own, a frame system provides a powerful mechanism for encoding information to support reasoning and decision-making. Frames can be used to represent concepts, including real world objects, for example "the village of Dromore". The frames used to represent each concept have slots which represent the attributes of the concept. Frame-based methods of semantic representation are implemented in Ymir [11] and CHAMELEON [12].
Fig. 2. Example frame from CHAMELEON [12]
Fig. 2 shows an example of the frame semantic representation that is utilised in CHAMELEON. The example frame in Fig. 2 illustrates how speech and gesture input are represented using input frames in the CHAMELEON platform. Note that although the syntax and structure of frames vary from system to system, the basic idea of knowledge representation remains the same.
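To make the idea concrete, a frame can be sketched as a simple record of slots. The following Python dataclass is purely illustrative, with invented slot names rather than CHAMELEON's actual frame syntax:

```python
# Illustrative frame data structure: a concept with named slots, plus a
# fused frame combining speech and gesture input, as a hub would produce.
from dataclasses import dataclass, field

@dataclass
class Frame:
    concept: str                      # e.g. "speech-input" or "gesture-input"
    slots: dict = field(default_factory=dict)

speech = Frame("speech-input",
               {"utterance": "Whose office is this?", "time": 21.4})
gesture = Frame("gesture-input",
                {"kind": "pointing", "target": "office-42", "time": 21.5})

# A fused frame nests the per-modality frames under one concept.
fused = Frame("query", {"speech": speech, "gesture": gesture})
print(fused.concept, list(fused.slots))
```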
B. XML
Besides frames, the other most popular method of semantic representation in multimodal systems is XML (eXtensible Mark-up Language). XML, created by W3C (World Wide Web Consortium) [16], is a derivative of SGML (Standard Generalised Mark-up Language). XML was originally designed for use in large-scale electronic publishing but is now used extensively in the exchange of data via the web. XML documents contain both parsed and unparsed data, with the former being either mark-up or character data (data between a pair of start and end mark-ups). The mark-up encodes a description of the storage layout and logical structure of the document. A mechanism is provided within XML that allows constraints to be imposed on the storage layout and logical structure. The main purpose of XML is to provide a mechanism that can be used in the mark-up and structuring of documents. XML differs from HTML in that tags are only used within XML to delimit pieces of data. The interpretation of the data is left completely to the application that reads it. Another advantage of using XML is that it is possible to easily create new XML tags.
With respect to semantic representation, SmartKom [13] and MIAMM [14] both use an XML-based method of semantic representation. It is common that a derivative of XML is used for semantic representation. For example, SmartKom uses an XML-based mark-up language, M3L (MultiModal Markup Language), to semantically represent information passed between the various components of the platform. An example of M3L is shown in Fig. 3. The M3L code in Fig. 3 is used to present a list of TV broadcasts to the user in response to a user request. The exchange of information within MIAMM is also facilitated through a derivative of XML called MMIL (Multi-Modal Interface Language). Any programming language can manipulate data in XML, and a range of middleware technology exists for managing data in XML format.
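As a small illustration of that last point, the following sketch builds and reads back a semantic representation with Python's standard library; the tag names are invented for illustration and are not taken from M3L or MMIL:

```python
# Build an XML semantic representation, serialise it, and parse it back.
import xml.etree.ElementTree as ET

root = ET.Element("semantic", producer="speech-recogniser", confidence="0.92")
ET.SubElement(root, "utterance").text = "Whose office is this?"
ET.SubElement(root, "timestamp").text = "21.4"

encoded = ET.tostring(root, encoding="unicode")
print(encoded)

decoded = ET.fromstring(encoded)
print(decoded.find("utterance").text)   # Whose office is this?
```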
IV. DECISION-MAKING WITHIN MEDIAHUB
The aim of this research is to develop a multimodal platform hub (MediaHub) which will use a potential new approach to decision-making over language and vision data. We will now consider the types of decisions that MediaHub will be required to make. Essentially these can be divided into two main categories:
- Decisions relating to input
- Decisions relating to output
With regard to decisions concerning input, these can be further categorized into the following three areas:
- Determining the semantic content of the input.
- Fusing the semantics of the input (into frames). That is, fuse the semantics of the language input such as “Whose office is this?” with the visual input (i.e. the pointing information/data) [12].
- Resolving any ambiguity at the input.
An example of ambiguity at the input could be if the user points three times while saying “Show me the best possible route from Paul’s office to Glenn’s office” [12]. Here, synchronisation (e.g. using timestamps) could be used to determine which two offices the user is referring to. Another example could be in an industrial environment where a control technician points at two computer consoles saying “Copy all files from the ‘process control folder’ of this computer to a new folder called ‘check data’ on that computer.” In this example, synchronisation of the visual and audio input may be needed to determine which two computers the control technician is referring to. Resolving ambiguity at the input will be a key objective for the decision-making component of MediaHub.
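One plausible way to realise such timestamp-based synchronisation is to pair each deictic word with the gesture closest to it in time. The sketch below is hypothetical (the event streams and the nearest-timestamp rule are invented for illustration, not MediaHub's actual mechanism):

```python
# Pair each deictic word ("this", "that") with the pointing gesture
# whose timestamp is closest to the word's timestamp.
deictics = [("this", 12.3), ("that", 14.8)]            # (word, time in s)
gestures = [("console-A", 12.4), ("console-B", 14.7)]  # (target, time in s)

def resolve(deictics, gestures):
    bindings = {}
    for word, t_word in deictics:
        target, _ = min(gestures, key=lambda g: abs(g[1] - t_word))
        bindings[(word, t_word)] = target
    return bindings

print(resolve(deictics, gestures))
# {('this', 12.3): 'console-A', ('that', 14.8): 'console-B'}
```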
In relation to decisions at the output, synchronisation issues could arise in order to match, for example, a laser movement with a speech output. As is the case in CHAMELEON [12], a statement of the form “This is the best route from Paul’s office to Glenn’s office” may need to be synchronised with the laser output tracing the route between the two offices. A decision may also need to be made on what is the best modality to use at the output (i.e. language or vision?). For example, the directions from one office to another may be best presented visually using a laser, while a response to a user’s query may be better presented using natural language output. Another example could be when the driver of a car asks an in-car intelligent system for directions to the nearest petrol station. Here the system could respond by presenting a map to the driver or by dictating directions using speech output. The system response in this case would depend on whether or not the car was moving. That is, if the car is stopped in a lay-by, the response could be given to the user via the map. If however the car is moving (i.e. the driver’s eyes are pre-occupied on the road), then the system would respond using speech output.
Of course, there are numerous other possible decisions that will be needed in relation to multimodal input and output in MediaHub. Ultimately, the decisions required in MediaHub will depend on its application. The ideal scenario for a multimodal platform hub is that it will be capable of making all possible decisions that could be required in a multimodal system.
V. SYSTEM ARCHITECTURE
MediaHub will be an intelligent multimedia distributed platform hub for the fusion and synchronisation of language and vision data. MediaHub’s proposed architecture is shown in Fig. 4.
The key components of MediaHub are:
- Dialogue Manager
- Semantic Representation Database
- Decision-Making Module
The role of the Dialogue Manager is to facilitate the interactions between all components of the platform. It will act as a blackboard module, with all communication between components achieved via the Dialogue Manager. It will also be responsible for the synchronisation of the multimodal input and output.
The Semantic Representation Database in MediaHub will use an XML-based method of semantic representation. XML has been chosen due to its widespread use in the area of knowledge and semantic representation in intelligent multimedia. XML’s ease of use will allow it to be easily integrated into MediaHub.
The Decision-Making Module will employ an Artificial Intelligence (AI) technique to provide decision-making on language and vision data. Bayesian Networks and CPNs (Causal Probabilistic Networks) [17] are currently being investigated, to determine if they will be suitable for decision-making within MediaHub. It may also be possible to use other techniques such as Fuzzy Logic, Neural Networks, Genetic Algorithms or a combination of techniques to provide this functionality. With regard to multimodal input and output, existing input/output data structures will be assumed.
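To give a flavour of Bayesian decision-making for the in-car example from the previous section, here is a toy sketch; it is not based on HUGIN or MSBNx, and all probabilities and node names are invented for illustration:

```python
# A two-node Bayesian network: Moving -> Modality. We compute
# P(Modality = speech), optionally conditioning on observed evidence,
# by marginalising over the unobserved Moving node.
P_moving = {True: 0.7, False: 0.3}                 # prior P(Moving)
P_speech_given_moving = {True: 0.95, False: 0.10}  # P(speech | Moving)

def p_speech(moving_observed=None):
    """Return P(Modality = speech), given optional evidence on Moving."""
    if moving_observed is not None:
        return P_speech_given_moving[moving_observed]
    return sum(P_moving[m] * P_speech_given_moving[m] for m in (True, False))

print(p_speech())        # no evidence: 0.695
print(p_speech(True))    # car moving: speak the directions
print(p_speech(False))   # car stopped: show the map instead
```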
Fig. 5 illustrates the flow of data through MediaHub, with the semantic representation, decision-making and synchronisation processes delineated within the dashed rectangle. The circles represent the main processes within the hub. The multimodal input data is first parsed by suitable processing tools and is then passed on to the dialogue manager. The information is then semantically represented using an XML-based semantic representation language. The dialogue manager has the option of using the decision making database, though the data may simply be passed on to the synchronisation process, as indicated in the diagram. It is anticipated that the data flow and the MediaHub architecture will be constantly refined as the development of MediaHub progresses.
VI. POTENTIAL TOOLS AND FUTURE DEVELOPMENT
The development of MediaHub is in its infancy. Key decisions that lie ahead relate to the method of semantic representation, semantic storage, communication and decision-making. Several implementations of XML could be used by the Semantic Representation Database. Initially, XHTML + Voice may be a suitable choice, since it combines the vision capabilities of XHTML and the speech capabilities of VoiceXML. Other XML-based languages such as the Synchronised Multimedia Integration Language (SMIL) and EMMA (Extensible MultiModal Annotation mark-up language) will also be considered.
A major focus of the future development of MediaHub will be in the area of decision-making over multimodal data. The HUGIN software tool [17], a tool implementing Bayesian Networks as CPNs, will be investigated for its potential to provide MediaHub with decision-making capabilities. Hugin offers an API which is implemented in the form of a library written in the C, C++ and Java programming languages. The API can be used like any other library and can be linked to applications, allowing them to implement Bayesian decision-making. The Hugin API encloses a high performance inference engine that, when given descriptions of causal relationships, can perform fast and accurate reasoning. Whilst Hugin may be used for the development of MediaHub, Microsoft’s MSBNx [18] is also a viable option – particularly if the .NET framework is to be used as a distributed processing tool within MediaHub. Other software tools for implementing Fuzzy Logic, Neural Networks and Genetic Algorithms will also be considered.
VII. CONCLUSION
The objectives of MediaHub, in providing a distributed platform hub for the fusion and synchronisation of language and vision data, have been defined. A review of various existing distributed systems and multimodal platforms has given an insight into the recent advances and achievements in the area of intelligent multimedia distributed computing. The various existing methods of multimodal semantic representation, storage and decision-making, which will be of critical importance in the development of MediaHub, were also considered. The area of Bayesian Networks has been considered with regard to the possibility of using Bayesian decision-making in MediaHub. This provides a potential new approach to decision-making over language and vision data. In conclusion, this paper presents a summary of the motivation for, and future direction of, the development of MediaHub.
Fig. 5. Data flow in MediaHub
REFERENCES
A keyword search system using open source software
Jan Trmal
*Johns Hopkins University*
Guoguo Chen
*Johns Hopkins University*
Dan Povey
*Johns Hopkins University*
Sanjeev Khudanpur
*Johns Hopkins University*
Pegah Ghahremani
*Johns Hopkins University*
Published in: *Proceedings of Spoken Language Technology Workshop (SLT)*, 530-535.
ABSTRACT
This paper provides an overview of a speech-to-text (STT) and keyword search (KWS) system architecture built primarily on top of the Kaldi toolkit, and expands on a few highlights. The system was developed as part of the research efforts of the RADICAL team while participating in the IARPA Babel program. Our aim was to develop a general system pipeline which could be easily and rapidly deployed in any language, independently of the language script and of the phonological and linguistic features of the language.
Index Terms— Kaldi, spoken term detection, keyword search, speech recognition, deep neural networks, pitch, IARPA BABEL, OpenKWS
1. BACKGROUND
The IARPA BABEL program aims to achieve the capability to rapidly develop speech-to-text (STT) and keyword search (KWS) systems in new languages with limited linguistic resources—transcribed speech, pronunciation lexicon and matched text—with emphasis on conversational speech.
The four BABEL program participants were evaluated by NIST via two benchmark tests: on five development languages and on a surprise language revealed only at the beginning of the evaluation period. The development languages were Assamese, Bengali, Haitian Creole, Lao and Zulu, and the surprise language was Tamil. Eight additional teams worldwide participated in the surprise language evaluation.
The primary 2014 evaluation was on KWS performance using systems trained on an IARPA-provided limited language pack (LimitedLP) containing 10 hours of transcribed speech, a dictionary that covered words in the transcripts, 70 hours of un-transcribed speech for unsupervised training, and 10 hours of transcribed speech for development-testing. A secondary evaluation was on KWS performance using a full language pack (FullLP), in which transcripts and dictionary entries were provided for an additional 50 of the 70 hours of un-transcribed speech: total 60 hours transcribed.
The test data provided by NIST contained 15 hours of speech for each development language, 75 hours for the surprise language, and a list of ca 3000 keywords for each language. The primary KWS evaluation metric was actual term weighted value (ATWV), and the BABEL program goal for 2014 was to attain an ATWV of 0.30 in the LimitedLP training condition on all six languages.
This paper describes the system submitted to NIST by the JHU Kaldi team. It is expected to interest readers because the submitted system attained all the program goals, enabling the RADICAL team to achieve third place worldwide, and because 9 of the top 10 participants in the NIST evaluation used Kaldi components/recipes in their submitted system.
2. JHU KALDI SYSTEMS OVERVIEW
The Kaldi KWS system is comprised of LVCSR based lattice generation followed by OpenFST based indexing and keyword search. LVCSR systems based on four different acoustic models are used to decode and index the speech:
1. A subspace Gaussian mixture model (SGMM) of the type described in [1], trained discriminatively via boosted MMI (BMMI) [2].
---
1 The exact corpus identifiers are
- Assamese, IARPA-babel102b-v0.4;
- Bengali, IARPA-babel103b-v0.3;
- Haitian Creole, IARPA-babel201b-v0.2b;
- Lao, IARPA-babel203b-v3.1a;
- Tamil, IARPA-babel204b-v1.1b;
- Zulu, IARPA-babel206b-v0.1e.
Fig. 1. Schematic diagram of the JHU Kaldi systems described in Section 2 (with some novel components highlighted).
2. A deep neural network (DNN) with $p$-norm activation, as described in [3]. For the LimitedLP condition, a novel ensemble training method, described below, provides improved performance.
3. A model trained on bottleneck features (BNF) extracted from a DNN. The 42-dim bottleneck features are used in a tandem SGMM system, again trained discriminatively via BMMI.
4. A BNF model with semi-supervised training on 50 hours of un-transcribed speech (BNF-SEMISUP). The automatic transcripts were obtained using the LimitedLP SGMM and DNN models. BNF-SEMISUP was used only in the LimitedLP training condition.
5. A “sequence-trained” deep neural network, trained using a state-level minimum Bayes risk (DNN-SMBR) criterion. Sequence training did not improve DNN performance in the LimitedLP condition. DNN-SMBR was hence used only in the FullLP training condition.
All LVCSR systems use one of two pronunciation lexicons: the base lexicon for the appropriate (LimitedLP/FullLP) training condition, or an expanded lexicon generated as described in Section 4.1. Language models for all LVCSR systems are estimated from the appropriate training transcripts only, using the SRILM tools. This results in a total of $4 \times 2 \times 1 = 8$ STT decoding runs each in the LimitedLP and FullLP conditions for each language.
The Kaldi KWS pipeline is based on lattice-indexing as described in [4], and utilizes as its input the exact lattice generation method described in [5]. Two additional strategies are used to handle out of vocabulary (OOV) keywords:
1. One is to search for phonetically similar in-vocabulary words (i.e. proxy keywords) for each OOV keyword, as described in [6]. The novelty in the 2014 system beyond [6] is that due to the vastly increased vocabulary when using an expanded lexicon, proxy-based search has the opportunity to be more effective, but straightforward search becomes computationally prohibitive. Several optimizations, including lazy composition, breaking down the search into several steps, and intermediate pruning have been implemented to reduce the memory footprint and run time of the FSTs.
2. The other is a novel Poisson point process model (PPM), as described in [7]. This method bypasses most of the STT modules, using only the DNN outputs as its input, and is agnostic to the keyword being OOV.
To obtain the final submission to NIST, outputs from various individual systems are combined. For STT, lattice-level combination of 4 to 8 STT systems is performed, while for the KWS task, the ranked lists of several systems and KWS search-strategies are combined, as detailed in Section 6.
Code and scripts used for almost all results reported here are available via svn://svn.code.sf.net/p/kaldi/code/trunk/. Scripts under egs/babel/s5b were used to build the JHU Kaldi systems, and by other participants who submitted systems to NIST.
3. JHU KALDI INNOVATIONS IN 2014
A few notable innovations in the 2014 JHU Kaldi systems relative to the 2013 release (which was also made available via SourceForge under egs/babel/s5) are as follows.
1. All JHU Kaldi systems now use pitch and probability-of-voicing features along with PLPs. Their extraction procedure is described in [8], and their inclusion improves STT and KWS performance on all languages and in both training conditions.
2. All DNNs now use units with the novel $p$-norm activation function described in [3]. This results in a modest but consistent (1%-2% absolute) improvement in WER.
Table 1. Performance of normal versus ensemble training of a DNN. STT and KWS results are on the development sets of four representative languages, and use the NIST keywords.
<table>
<thead>
<tr>
<th rowspan="2">Language</th>
<th colspan="2">LimitedLP WER</th>
</tr>
<tr>
<th>normal</th>
<th>ensemble</th>
</tr>
</thead>
<tbody>
<tr><td>Tamil</td><td>76.9%</td><td>75.7%</td></tr>
<tr><td>Assamese</td><td>65.2%</td><td>63.8%</td></tr>
<tr><td>Bengali</td><td>67.6%</td><td>66.2%</td></tr>
<tr><td>Zulu</td><td>70.1%</td><td>68.7%</td></tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th rowspan="2">Language</th>
<th colspan="2">FullLP WER</th>
</tr>
<tr>
<th>DNN</th>
<th>DNN-SMBR</th>
</tr>
</thead>
<tbody>
<tr><td>Tamil</td><td>68.4%</td><td>67.4%</td></tr>
<tr><td>Assamese</td><td>53.4%</td><td>52.8%</td></tr>
<tr><td>Bengali</td><td>56.5%</td><td>56.4%</td></tr>
<tr><td>Zulu</td><td>60.3%</td><td>59.7%</td></tr>
</tbody>
</table>
Table 2. Performance of DNN versus DNN-SMBR (sequence) training. STT and KWS results are on the development sets of four representative languages, and use the NIST keywords.
An orthographic form is generated for each such new pronunciation using a “reverse” G2P system trained on the base lexicon. About 2 million such “word”+pronunciation entries are generated for each language. The language model treats them as unseen vocabulary items for purposes of probability assignment. The impact of this massive lexicon expansion is language-dependent, e.g. it makes no difference in Assamese and Bengali, but significantly improves ATWV for Zulu in the LimitedLP condition. Its impact is more pronounced if a language model with data-driven word classes is used (cf. Section 4.2).
4. **LEXICON CREATION & LANGUAGE MODELING**
We use the SRILM tools to build language models from the training transcripts. Several n-gram models with different smoothing methods and count cutoffs are built. The one with the lowest perplexity on the development data is retained — typically a Good-Turing 3-gram in the LimitedLP condition.
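As a concrete illustration of this selection step, the sketch below trains a small grid of candidate models with SRILM's `ngram-count` and keeps the one whose `ngram -ppl` perplexity on the development set is lowest. The file names and the particular grid are hypothetical; only the two SRILM tools and their standard flags are assumed.

```python
import re
import subprocess

# Hypothetical file names: train.txt holds training transcripts,
# dev.txt the development transcripts.
GRID = [
    {"order": 2, "extra": []},                          # Good-Turing is SRILM's default
    {"order": 3, "extra": []},
    {"order": 3, "extra": ["-kndiscount", "-interpolate"]},
]

def perplexity(lm_path, dev="dev.txt"):
    """Run `ngram -ppl` and parse the reported perplexity."""
    out = subprocess.run(["ngram", "-lm", lm_path, "-ppl", dev],
                         capture_output=True, text=True, check=True).stdout
    return float(re.search(r"ppl=\s*([\d.]+)", out).group(1))

best = None
for i, cfg in enumerate(GRID):
    lm_path = f"lm{i}.arpa"
    subprocess.run(["ngram-count", "-text", "train.txt", "-order",
                    str(cfg["order"]), "-lm", lm_path] + cfg["extra"],
                   check=True)
    ppl = perplexity(lm_path)
    if best is None or ppl < best[1]:
        best = (lm_path, ppl)

print("retained:", best)
```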
IARPA provided lexicons are used in all systems, with syllabic stress or tone converted into a “tag” attached to each phoneme in that syllable. Another tag indicates whether a phoneme is word-initial, word-final, etc. Questions concerning these tags are permitted during triphone clustering.
In addition to phonemes in the IARPA-provided lexicon3, four special phonemes are introduced: silence, noise, vocalized-noise and unknown-word. The first two are self-explanatory. The vocalized-noise phoneme models coughs, laughter, etc. while the unknown-word phoneme models out-of-vocabulary speech, such as unintelligible words and un-transcribed foreign words, etc.
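To make the tagging described above concrete, here is a minimal sketch, assuming a simple space-separated `word phone phone ...` lexicon format; the `_B`/`_I`/`_E`/`_S` word-position suffixes follow the convention used in Kaldi recipes (word-initial, word-internal, word-final, singleton):

```python
def add_position_tags(pron):
    """Append a word-position tag to each phoneme of a pronunciation."""
    if len(pron) == 1:
        return [pron[0] + "_S"]               # singleton: the word is one phoneme
    return ([pron[0] + "_B"]                  # word-initial
            + [p + "_I" for p in pron[1:-1]]  # word-internal
            + [pron[-1] + "_E"])              # word-final

# e.g. a hypothetical entry "cat k ae t" becomes "cat k_B ae_I t_E"
with open("lexicon.txt") as fin, open("lexicon_tagged.txt", "w") as fout:
    for line in fin:
        word, *pron = line.split()
        fout.write(" ".join([word] + add_position_tags(pron)) + "\n")
```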
4.1. **Lexicon Expansion to Enable OOV Keyword Search**
We developed a novel syllable-based lexicon expansion method, which is described next. The main idea is to automatically generate millions of distinct lexicon entries whose pronunciations are phonotactically plausible in that language. An OOV (key)word in the test speech will then have a good
---
3For some languages, such as Vietnamese in the 2013 NIST evaluation and Zulu in 2014, the IARPA-provided lexicon systematically re-labels a phoneme as one of two or more variants based on context. We found in such cases that it is beneficial to collapse such variants back into a single phoneme and let the data-driven triphone clustering step decide whether multiple variants are warranted.
chance of being decoded as a similar-sounding lexicon entry, obviating the need for a separate phonetic decoding pass or a separate subword index for OOV search. The word-lattices may be searched directly for the OOV keyword, with the proxy-based method of [6] to mitigate differences between the correct spelling (of the keyword) and the spelling generated during this automatic lexicon expansion.
We first use the IARPA lexicon to estimate an \(n\)-gram “language model” for syllable sequences that constitute words in the language; this requires a syllabified lexicon. Each pronunciation in the lexicon is treated as a “sentence” and the syllables that constitute the pronunciation are treated as atomic “words,” so that the syllable inventory becomes the “vocabulary” of this “language model.” Once this statistical language model has been estimated, it is used generatively to simulate new “sentences” in the language: each simulated “sentence” is the syllabic pronunciation of a potential word.
We discard syllable sequences that already exist in the IARPA lexicon, retaining only OOV syllable sequences. We also discard sequences comprised of very few phonemes. Up to 2 million of the remainder, sorted by their syllabic “language model” scores, are selected for addition to the lexicon.
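A minimal sketch of this generative step follows, under two simplifying assumptions not made in the paper: syllables are represented as tuples of phoneme strings read from a hypothetical syllabified lexicon, and a plain bigram model with random sampling stands in for the n-gram "language model" (the paper additionally sorts candidates by language model score before selection).

```python
import random
from collections import Counter, defaultdict

BOS, EOS = "<s>", "</s>"

def train_bigram(prons):
    """prons: list of pronunciations, each a list of syllables (phoneme tuples)."""
    counts = defaultdict(Counter)
    for p in prons:
        seq = [BOS] + list(p) + [EOS]
        for a, b in zip(seq, seq[1:]):
            counts[a][b] += 1
    return counts

def sample_word(counts, rng):
    """Simulate one 'sentence': the syllabic pronunciation of a potential word."""
    out, cur = [], BOS
    while True:
        successors = list(counts[cur].items())
        cur = rng.choices([s for s, _ in successors],
                          [c for _, c in successors])[0]
        if cur == EOS:
            return tuple(out)
        out.append(cur)

def expand(prons, n_target, min_phones=3, max_tries=10_000_000, seed=0):
    rng = random.Random(seed)
    known = {tuple(p) for p in prons}
    counts = train_bigram(prons)
    new, tries = set(), 0
    while len(new) < n_target and tries < max_tries:
        tries += 1
        cand = sample_word(counts, rng)
        # keep only OOV syllable sequences with enough phonemes
        if cand and cand not in known and sum(len(s) for s in cand) >= min_phones:
            new.add(cand)
    return new
```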
The last step is to generate an orthographic form for each selected syllable sequence. For this we resort to standard G2P techniques in reverse: we treat each phoneme on the pronunciation-side of the lexicon as a single orthographic character (grapheme), and each orthographic character on the word-side of the lexicon as a phoneme. We train a Sequitur G2P system [10] using the IARPA lexicon in reverse, as described above. We refer to it as the P2G system to remind readers that its input is a phoneme sequence (instead of a grapheme sequence), and its output is a sequence of characters (instead of phonemes). Once trained, the P2G system accepts each selected syllable sequence, viewed as a phoneme sequence, and generates the needed orthographic form.
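The training data for this reverse system is just the lexicon with its two sides swapped; a sketch (hypothetical file names, with the phoneme sequence joined into a single pseudo-word):

```python
# Build P2G training data by swapping the two sides of the lexicon:
# each phoneme acts as a "grapheme" and each spelling character as a "phoneme".
with open("lexicon.txt") as fin, open("lexicon_p2g.txt", "w") as fout:
    for line in fin:
        word, *phones = line.split()
        fout.write("_".join(phones)          # pseudo-"word": the pronunciation
                   + "\t"
                   + " ".join(word) + "\n")  # pseudo-"phonemes": the characters
```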
Since these orthographic forms are not seen in the language model training text, they are inserted into the language model as unseen unigrams, and are assigned the unigram probability of the unseen word (times the probability of their pronunciation under the syllabic language model).
For the NIST evaluation, there were two versions of each Kaldi decoding run described in Section 2, one with the base lexicon and one with the expanded lexicon described above (cf Figure 1). On the development data, the expanded lexicons provided some improvement in ATWV for some languages (e.g. Zulu), especially when used in conjunction with the proxy-based KWS method for OOV keywords, and negligible gain for other languages. We saw no degradation from their use in any condition on the development data.
However, NIST reported a (\(\approx 0.2\%\) WER) degradation in STT performance for languages where we saw negligible gains, while the languages that improved on the development data continued to do so on evaluation data. We expect that the degradation may be alleviated by tuning the total language model probability assigned to the new lexical entries.
### 4.2. Orthographic-Class Based Language Modeling
A shortcoming of the massive lexicon expansion of Section 4.1 is the arbitrary assignment of language model (LM) probabilities to the new words. Class-based LMs, especially those based on syntax or semantic word classes, are a good way to selectively assign different probabilities in different contexts to an otherwise indistinguishable set of unseen words. Our investigations in this direction are described next.
A major hurdle in the limited resource setting is that neither data-driven techniques (e.g. Brown clustering [11]) nor knowledge-based ones are feasible for creating word classes. Furthermore, “words” resulting from the automatic expansion are not guaranteed to be real words in the language. We therefore resort to simple, spelling-based clustering methods.
We created three such clusterings, estimated a class-based LM for each clustering, and linearly interpolated them with the baseline 3-gram LM and 2 other LMs:
1. a class-based LM, using the first three characters;
2. a class-based LM, using the first six characters;
3. a class-based LM, using the last three characters;
4. a skip bigram LM;
5. a word 3-gram LM whose absolute discounting parameters depend on the count level via a rational function.
Models 1-5 were implemented using Saarland University’s LSVLM toolkit. To map the resulting LMs to ARPA format, an artificial corpus of 30 million tokens was sampled using model 5. A trigram tree was constructed and probabilities of models 1-5 were written to the leaves of that tree.
This method is still under development/evaluation, but it already seems from the preliminary results in Table 3 on two languages (Zulu and Tamil) that the interpolated class-based LM provides a modest STT improvement, and a somewhat more significant KWS improvement, in both languages. For Tamil, model 2 had the largest contribution in all experiments. We note that the sampling/pooling steps in converting the LSVLM to ARPA format must be performed carefully.
To obtain the results in Table 3, we only rescored lattices generated by the DNN system (cf. Section 2) using the interpolated LM.
<table>
<thead>
<tr>
<th>Language</th>
<th>Lexicon</th>
<th>LM</th>
<th>WER</th>
<th>ATWV</th>
</tr>
</thead>
<tbody>
<tr>
<td>Zulu</td>
<td>basic</td>
<td>Word 3-gram</td>
<td>69.8%</td>
<td>0.26</td>
</tr>
<tr>
<td></td>
<td>expanded</td>
<td>Word+Class LM</td>
<td>68.5%</td>
<td>0.32</td>
</tr>
<tr>
<td>Tamil</td>
<td>basic</td>
<td>Word 3-gram</td>
<td>75.7%</td>
<td>0.21</td>
</tr>
<tr>
<td></td>
<td>expanded</td>
<td>Word+Class LM</td>
<td>75.3%</td>
<td>0.23</td>
</tr>
<tr>
<td></td>
<td>expanded</td>
<td>Word+Class LM</td>
<td>75.7%</td>
<td>0.20</td>
</tr>
</tbody>
</table>
Table 3. Performance of the LimitedLP DNN system with a basic v/s expanded lexicon and a basic v/s class-based LM on the respective development sets using NIST keywords.
Incorporating the new LM into first-pass decoding is likely to lead to further improvements.
5. POISSON POINT PROCESS MODELS FOR KWS
The point process model (PPM) for keyword search is a whole-word, event-based acoustic modeling and phonetic search technique [7, 12]. The PPM represents keywords as a set of time-inhomogeneous Poisson point processes, one process per phone. Therefore, if a PPM can be constructed for a keyword, and the speech is indexed with corresponding phonetic “events,” there is no OOV problem. We use either dictionary or G2P-based pronunciations to seed the keyword PPM, and the per-frame posterior probabilities generated by our p-norm DNN to construct the phonetic event index. Indexing is approximately 2× faster than real time, and the matching (search) is optimized so that it is extremely fast (≈ 400,000× real time). Each detection is assigned a PPM likelihood. The outstanding issue is the normalization of this likelihood across keywords to enable the setting of a global detection threshold. The performance of the PPM by itself is usually on par with other phonetic search systems, but it combines very well with the word-based systems, as shown in Table 4.
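To give a flavor of the scoring, rather than the exact model of [7], the sketch below evaluates the log-likelihood of a set of phonetic events under per-phone inhomogeneous Poisson rate functions that are piecewise constant over frames; the rate arrays would come from the keyword model and the events from thresholded DNN posteriors (all names here are illustrative).

```python
import numpy as np

def ppm_log_likelihood(events, rates, frame_shift=0.01):
    """Log-likelihood of an inhomogeneous Poisson point process:
       the sum over events of log(rate at event time), minus the
       integral of the rate over the candidate window.

    events: dict phone -> iterable of event times (seconds, relative
            to the start of the candidate window).
    rates:  dict phone -> numpy array of rates (events/second), one per frame.
    """
    ll = 0.0
    for phone, lam in rates.items():
        ll -= lam.sum() * frame_shift            # integral of a piecewise-constant rate
        for t in events.get(phone, ()):
            idx = min(int(t / frame_shift), len(lam) - 1)
            ll += np.log(max(lam[idx], 1e-10))   # guard against zero rates
    return ll
```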
6. SYSTEM COMBINATION FOR STT AND KWS
Our final submissions to NIST employ combination of several systems depicted in Figure 1 and described below.
6.1. System Combination for Speech to Text
The only system combination method used for the STT submission is the minimum Bayes risk (MBR) decoding method described in [13], which we view as a systematic way to perform confusion network combination (CNC) [14]. Note that it is nontrivial to perform MBR decoding when the vocabularies of the systems are vastly different. We therefore combine the STT outputs via MBR decoding\(^4\) of the 4 systems that use the base lexicon (cf Section 2), and separately the 4 that use the expanded lexicon. Table 5 shows a typical, modest reduction in STT errors from system combination.
6.2. System Combination for Keyword Search
System combination for KWS is a basic merging, for each keyword, of the ranked lists produced by the component KWS systems. Putative hits are aligned across systems based on proximity/overlap of time-spans, and the lattice posterior probabilities\(^5\) of aligned putative hits are averaged across the systems. If a putative hit does not appear in a system’s list, that system is assumed to have assigned it zero probability.
Specifically, if a putative hit has scores \(\{s_1, s_2, \ldots, s_N\}\) in the ranked lists of \(N\) independent KWS systems, where some of the \(s_n\)’s may be 0, the combined score of the hit is defined to be
\[
s_{\text{avg}} = \left(\frac{1}{N} \sum_{n=1}^{N} w_n s_n^p\right)^\frac{1}{p},
\]
where \(p\) and the weights \(w_n\) are determined empirically, and are typically found to be around \(p = 0.5\) and \(w_n = 1\). The ranked list after KWS system combination therefore is the union of the individual ranked lists sorted by \(s_{\text{avg}}\).
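A direct transcription of this rule (a minimal sketch; aligning putative hits across systems is assumed to have happened already, with absent hits entered as 0):

```python
def combine_scores(scores, weights=None, p=0.5):
    """Weighted power mean of per-system scores; absent hits contribute 0."""
    n = len(scores)
    w = weights if weights is not None else [1.0] * n
    return (sum(wi * si ** p for wi, si in zip(w, scores)) / n) ** (1.0 / p)

# e.g. a putative hit seen by two of three systems:
print(combine_scores([0.9, 0.0, 0.4]))  # ~0.28: p < 1 pulls it below the plain mean of 0.43
```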
Table 5 shows typical improvements from KWS system combination for the 8 word-indexed systems described in Section 2 and further combination with the PPM system described in Section 5.
7. NIST EVALUATION RESULTS
The primary development language submissions of the JHU Kaldi team in both the FullLP and the LimitedLP conditions were combinations of 4 to 9 systems as described above. The primary STT system was a combination of 4 STT systems with expanded lexicons, as described in Section 6.1, while the primary KWS system was a combination of 8 word-indexed systems with the PPM system, as described in Section 6.2.
The primary surprise language submission was a combination of the PPM system with 5 word-indexed KWS systems, each derived from an STT system with an expanded lexicon. Two of these STT systems entailed lattice rescoring with the interpolated class-based LM (cf Section 4.2). STT system combination was not performed for the surprise language due to some computational limitations.
Table 6 reports the official NIST evaluation of the primary STT and KWS systems, demonstrating that the ambitious BABEL goal of 0.30 ATWV in the LimitedLP condition is attainable in all five development languages and in the surprise language using the JHU Kaldi tools. Performance of other systems (that also used these open source tools) that were submitted to NIST further attests to the quality of the tools.
\(^4\)A system-specific offset determined empirically is applied to the language model weight for each system during decoding.
\(^5\)Averaging the lattice posteriors (without further normalization) was adequate when combining various Kaldi KWS systems. Combining further with non-Kaldi systems may benefit from normalizing scores within each ranked list before merging.
Table 5. Performance of STT and KWS system combination for Assamese on evaluation data, using NIST keywords.
<table>
<thead>
<tr>
<th>Language</th>
<th>LimitedLP WER</th>
<th>LimitedLP ATWV</th>
</tr>
</thead>
<tbody>
<tr><td>Assamese</td><td>60.6%</td><td>0.375</td></tr>
<tr><td>Bengali</td><td>62.1%</td><td>0.355</td></tr>
<tr><td>Haitian Creole</td><td>57.2%</td><td>0.433</td></tr>
<tr><td>Lao</td><td>54.7%</td><td>0.437</td></tr>
<tr><td>Zulu</td><td>67.1%</td><td>0.380</td></tr>
<tr><td>Tamil</td><td>—</td><td>0.313</td></tr>
</tbody>
</table>
Table 6. Official evaluation of STT and KWS performance of the JHU Kaldi system on NIST data using NIST keywords.
8. CONCLUSION
We have described the design and implementation of state-of-the-art STT and KWS systems using the Kaldi open source tools, and outlined some innovations and capabilities we have recently added to these tools. The STT performance is on par with the best systems, and the KWS performance is respectable. We hope that this information will enable further improvement and/or fruitful deployment of the tools.
9. REFERENCES
Heuristic Evaluation of TutorNOW
Evaluator #A: Lorena Huang Liu
Evaluator #B: Bruno De Martino
Evaluator #C: Kelsey Josund
Evaluator #D: Sanby Lee
Evaluator #E: Pat Boonyarittipong
1. Problem
The TutorNOW application provides an on-demand mechanism for students on college campuses to connect for the purposes of offering and receiving tutoring.
2. Violations Found
1. [H2-1. Visibility of system status] [Severity 1] [Found by: D, B]
On the screens for a tutee to find a tutor, it does not indicate where in the flow the user is. One solution would be to add a title at the top with text such as “Submit Tutor Request” or “What kind of tutoring do you need today?”
2. [H2-8. Aesthetic and minimalist design] [Severity 1] [Found by: D]
On the screen to fill out help that a tutee is requesting, a profile photo and name are listed at the very top as the first item. This is the user’s own profile, so it is not necessary to view this as he/she is filling out the information. This is a superfluous item that could be removed to reduce distractions.
3. [H2-3. User control and freedom] [Severity 3] [Found by: D, C, B, E]
On the screen where the user views a list of tutoring offers, the main button is to “Cancel Request.” However, this takes the user all the way back to the home screen. In case the user just wants to go back one screen and edit something about the request, there is no way to do that; they will need to fill out the entire request again. There should be an option to just go back one step and edit the request.
4. [H2-6. Recognition rather than recall] [Severity 2] [Found by: D, B]
If the user sees a list of offers and clicks on the tutor’s profile, he/she is taken to the profile page. At the bottom, there is a button to “Accept [tutor’s] offer” for the particular tutoring job. However, the price information for this particular job is not listed, meaning the user has to either remember how much the offer was, or click back to see it, thereby reducing the functionality of having a shortcut to accept directly on the profile page. One solution would be to list the price for this particular tutoring job next to the accept button or make it part of the button text, e.g. “Accept Josephine’s offer for $30!”
5. [H2-8. Aesthetic and minimalist design] [Severity 2] [Found by: D, A]
The tutor profile page lists Market Rates for different classes. This might be useful if a tutee is just browsing through different tutors and wants to submit a request for a particular tutor; however I encountered this information while clicking through to a tutor’s profile after receiving a specific quote from them. The information seems unnecessary to the current task that I am trying to complete, so it could be removed in this instance.
6. [H2-4. Consistency and standards] [Severity 2] [Found by: C, D]
On the screen with a list of tutoring quotes, the largest button is “Cancel Request” which is shown in large text at the bottom of the screen. My eye is immediately drawn to this, and on the previous page this is where user flow buttons like “Back” and “Continue” are listed. In order to move forward from this screen, I have to hunt for the “Accept Offer” button, which is much smaller than the “Cancel Request.” One suggestion would be to make the cancel button smaller so that it does not compete with the accept button, or take up the prime real estate on this page (since presumably it’s not the primary action you are hoping users to take).
7. [H2-1. Visibility of system status] [Severity 3] [Found by: C, D, E, B, A] Once I accept the offer, I am taken to a confirmation page for the tutoring job. However, if I click “Cancel” from this page, I am taken all the way back to the home screen. There is no confirmation that my tutoring job has been canceled, and I am not sure what the status of the tutoring job is, or where to check that. One suggestion would be to add a confirmation screen that indicates the tutoring job has been canceled, and perhaps a suggestion for what to do next (book another job?) It also seems a bit strange that there would be an option to cancel immediately after I booked a job; it seems likely that I might want to edit the job or maybe pressed “book” by accident, but completely canceling a job I just booked seems like an unlikely scenario.
8. [H2-4. Consistency and standards] [Severity 2] [Found by: C, E, D, B] On the confirmation page, it isn’t clear what “Start” means. Since the two options at the bottom were “Cancel” and “Start,” I assumed that I was supposed to click Start as soon as I had confirmed the job. I didn’t realize that “Start” meant that the tutoring session was about to start, and once I clicked it, there wasn’t a way to click back. One suggestion would be to remove the Start button completely from the tutee flow, and have the tutor be the one to indicate when a session is starting. In other contexts, it is usually the service provider who indicates when a session is starting, since they are the one charging (Uber drivers, therapists, etc.).
9. [H2-3. User control and freedom] [Severity 4] [Found by: C, E, D, B] If the user accidentally clicks the “Start” button when they didn’t mean to, there should be a quick “Back” button. Otherwise, they will be charged for the tutoring session when they didn’t mean to. Currently, the only option on this page is “Tutoring Complete,” which does not give the user much control.
10. [H2-3. User control and freedom] [Severity 3] [Found by: C, D, B] When the user clicks “Tutoring Complete,” the next screen shows that $25 has automatically been charged to his/her Venmo account. In situations where I am paying money, I would feel more comfortable if I had a chance to review first. Even though I am going to pay anyway, it gives a sense of control if I actively say yes, rather than having money automatically charged to my account (e.g. reviewing a bill in a restaurant before putting down your credit card, order confirmation pages for online shopping). Some applications such as Uber automatically charge to your account, but I would argue that the situation of taking a taxi is much more time constrained because you are usually hopping in and out in a rush. One suggestion would be to add a quick confirmation screen that says something like “Tutoring session completed! Confirm payment?” This would also prevent errors where the user accidentally presses a button and ends the tutoring session early.
11. [H2-5. Error prevention] [Severity 3] [Found by: C, D, B]
What happens if a user fills out a tutoring request, completes the session, but doesn’t have a Venmo account set up? Do they set this up at that point within the flow? Or are they prevented from filling out any tutoring requests until they link their Venmo account from the Payments flow? One suggestion would be to disable the “Be Tutored” button until Payments has been filled out.
12. [H2-3. User control and freedom] [Severity 2] [Found by: C, D, E]
After paying with Venmo, the only way forward from the payment screen is to “Review your experience with Josephine!” There is no way to go back directly to the home screen. Some applications such as Postmates or Uber also require you to rate your service before being able to book another service; however this takes control away from the user. There may be a business reason to limit the control of the user in this scenario; however there are also tradeoffs, especially since the review screen in this case is longer than typical. One suggestion would be to cut down on the amount of information required for the review.
13. [H2-2. Match between system and the real world] [Severity 1] [Found by: C, D]
The terminology on the home screen is a little confusing. The home screen is very simple, so it was easy to find what I wanted, and it was also clear from reading the write up what the flow was supposed to be. However, it might be a little more intuitive to name the buttons something like “Sign up to tutor” and “Find a tutor”/ “Hire a tutor” as those are closer to terminology that is commonly used in real life.
14. [H2-8 Aesthetic and minimalist design] [Severity 2] [Found by: E]
In a tutor’s profile screen, the “skills and endorsements” section includes information of questionable relevance and ambiguity. For example, it is not immediately clear to a tutee what it means to have 5 people endorsing Josephine for “Social Media” (5 people out of how many?). This section confuses users; it should be presented more clearly and moved downward, making room for more relevant content.
15. [H2-8 Aesthetic and minimalist design] [Severity 3] [Found by: E]
The “star” rating can be a misleading measure of a tutor’s quality. While Josephine could be a rockstar biology tutor and is highly rated, she could also be a poor psychology tutor at the same time. However, this fact is obscured by her high rating from tutoring biology, which would render the combined star rating unhelpful. Consider adding a breakdown of ratings by class/subject, as well as adding the number of raters in brackets following the stars. Many tutees would want to see this information first in order to identify high-quality tutors effectively, so it should be moved to be visible without further scrolling as well.
16. [H2-2 Match Between System and the Real World] [Severity 1] [Found by: C]
On first use, I have no idea what to put into the "I need help with: " space. Isn't that what the assignment description is for? It should specify that it wants the particular class, not “a problem set” or “biology”, which could logically go in that space as it is currently. This violates H2-2 because the real world is usually more explicit.
17. [H2-1 Visibility of System Status] [Severity 2] [Found by: C]
There’s no context for the user regarding the differences in pricing. Why do some tutors charge more than others? Why does the same tutor charge more for some subjects than others? It would be useful to see some explanation on the tutor profile regarding how prices are set. This violates H2-1 because it obscures pricing standards/calculations inherent in the system, leaving users to wonder what exactly is going on.
18. [H2-3 User Control and Freedom] [Severity 1] [Found by: C]
It is not clear whether the user is selecting the room to meet in or if that’s connected to the tutor or the subject. Should these all be options? If not, it should be very clear why the one or two that are offered are provided and not the other(s), and it should be very clear to the user why a specific location is chosen. This violates H2-3 because it takes control out of the hands of the user and is unclear.
19. [H2-1 Visibility of System Status] [Severity 2] [Found by: C]
The app prevents users from seeing what tutoring options are available until they have entered information about the class they need to be tutored in. If I’m a new user, I probably want to see what subjects have active tutors, just out of curiosity; similarly, if I’ve had a positive experience with some tutor in, say, CS, and now I’m taking physics, I may want to see if that same tutor can help me again rather than looking at all the tutors. It would be nice if there were a way to navigate straight to a list of available tutors for all subjects, or filtered by a subject more generally. This violates H2-1 because it does not make it clear how many options there are or how varied these options are, thus limiting a prospective tutee’s understanding of the system.
20. [H2-7 Flexibility and efficiency of use] [Severity 2] [Found by: E, B]
The tutor request screen lacks an option to load previously filled-in information. A tutee is likely to request multiple sessions with a tutor for the same class. Adding a button to select the class from historical data would save time for repeat users.
21. [H2-5 Error prevention] [Severity 4] [Found by: E, B]
The “confirmed” screen, after accepting an offer from the tutor, lacks an option to contact the tutor. The need to contact the tutor may arise in many cases, e.g. a tutee is kicked out of a room in Old Union and now needs to change her location.
22. [H2-8 Aesthetic and minimalist design] [Severity 2] [Found by: E]
The “review tutor” screen, after the tutoring session is completed, shows several “skills” of the tutor to endorse. These skills are irrelevant (C++, Java and Ruby on Rails have nothing to do with Biology) and not meaningful. There is also no way to endorse a new skill. Consider replacing the endorsement system with, perhaps, a rating system for each skill instead.
23. [H2-8 Aesthetic and minimalist design] [Severity 3] [Found by: E]
On the home screen, it is unclear why the “Payment” button is on it. Most people would set up the payment (Venmo link) only once. As a result, making the payment button as large and visible as the other two key functions (Tutor and Be tutored) doesn’t make much sense. The payment setting should be tucked away in a settings menu somewhere.
24. [H2-4 Consistency and standards] [Severity 1] [Found by: E]
In the tutor profile screen, the term “market rates” is used instead of “rates.” This differs from the tutor list screen and may leave some users unsure whether there is any difference between “market rates” and “rates.” The term “market rates” implies that the price is set by the market and is continuously changing. These terms should be clarified if there is any difference; if there is none, the same term should be used.
25. [H2-5 Error prevention] [Severity 4] [Found by: E]
The “tutoring in progress” screen doesn’t have a pause button. This could cause several problems because the charge is calculated directly from the length of the tutoring. If the start button is tapped by mistake or if the tutor needs to take a break (emergency phone call, etc.), the charge could be inaccurate, causing disagreements between the users. A pause button should be added to prevent the aforementioned issues.
26. [H2-7 Flexibility and efficiency of use] [Severity 3] [Found by: B]
The interface allows the user to choose whether he would like to be tutored “now” or within a range of times. Even though there is a clear goal of immediacy in this task, many college students have tight schedules and deadlines, which means they are able to predict when they will need and have time for tutoring. Consequently, users might want to be able to schedule tutoring ahead of time, to make sure they will be helped. However, the interface only supports same-day tutoring. Add a “set date and time” option to the second screen.
27. [H2-1 Visibility of system status] [Severity 2] [Found by: B]
The interface requires me to go through several pages in order to get a tutor: defining subject and location, then choosing my tutor (which possibly involves checking their profile), and then a confirmation page. It would be helpful to know where in the tutoring request process I am at any given point. Add a progress bar on top, which highlights which section I am in and what other sections are to come.
28. [H2-4 Consistency and standards] [Severity 2] [Found by: B]
On the confirmation page, I need to click the “Start” button to signal the start of my tutoring session. However, this action could be confusing to the user, since he might interpret it as a way to confirm that he wants the tutor to come. Moreover, this should likely not be a responsibility of the tutee, given that the session only starts when the tutor arrives, and given that the tutee already had to go through several pages in order to get a tutor. Instead, much like Uber, have a page saying “Tutor on his/her way”, and have the tutor be the one responsible for signaling the start of a session. When he does so, the tutee’s phone would go to the “Tutoring in progress” page.
29. [H2-4 Consistency and standards] [Severity 4] [Found by: B]
On both the tutoring details and tutoring confirmation pages, we see a map related to the location where the tutoring session would happen. However, there are no pins on the map specifying the exact location, which makes the map not very useful. Add a pin at the specified location.
30. [H2-7 Flexibility and efficiency of use] [Severity 3] [Found by: B, A]
On the tutor profile page, the user needs to scroll down through all the information in order to reach the “back” and “accept offer” buttons, which might be inefficient, particularly when a tutor has multiple reviews. Put the same buttons at the top of the screen, or add a sticky tab at the bottom with those buttons.
31. [H2-2 Match between system and the real world] [Severity 1] [Found by: B]
On the tutor profile page, the user sees previous reviews of the tutor. However, people tend to care not only about what people had to say, but also about who said it. Moreover, particularly on a college campus, one can expect that a lot of the people using the app and reviewing tutors would know each other. Thus, TutorNOW is not taking advantage of the more intimate experience it could provide. To do so, add the name of the author of each review, or make it possible for people to choose whether they review tutors anonymously or not.
32. [H2-2 Match between system and the real world] [Severity 2] [Found by: B]
On the tutor profile page, the user sees myriad information about the tutor. However, since the tutor is a student, some of the most important information the user might be interested in seeing is academic performance. What’s this tutor’s GPA? What grade did she get in the class I need help with? This would also be very useful for new tutors to get traction even though they don’t have any reviews yet. Add GPA and the grade in each class to tutors’ profile pages.
33. [H2-2 Match between system and the real world] [Severity 2] [Found by: B] I can only browse tutors’ profiles when I’m about to ask for help. However, it would be nice to be able to explore different tutors without wanting help, since I could explore the different classes they are taking, different rates, etc. Add “explore tutors” button.
34. [H2-4 Consistency and standards] [Severity 1] [Found by: A] The color scheme of the application seems to be a few grays and some red. Throughout the application, bright purple and bright orange are also present, not matching or consistent with the rest of the color scheme. Replace purple and orange with colors that fit better into the color scheme.
35. [H2-7 Flexibility and efficiency of use] [Severity 2] [Found by: A] In the page where the tutee enters information in order to request a tutor, the first two lines, i.e. subject and time are much more important than the description. However, the description box is much bigger and salient. Make the first two lines larger and description box smaller so that the tutee can see which information is more crucial.
36. [H2-5 Error prevention] [Severity 3] [Found by: A] The user could easily pinpoint the wrong location by tapping the map because exact locations are difficult to find in such a small map. Add a search option for locations so the user can easily find them without looking through the map.
37. [H2-7 Flexibility and efficiency of use] [Severity 3] [Found by: A] As far as I understand this app, it seems to only show tutors that match the time and location exactly. Chances are, tutors will not always be available in that manner. If there aren’t enough tutors that meet the exact criteria, also include similar times and nearby locations.
38. [H2-2 Match between system and the real world] [Severity 2][Found by: A] “Accept offer” does not seem to be an intuitive phrase when it comes to tutoring. It reminds me of job offers and it seems too formal to use in this kind of casual, peer-to-peer application. Find a more casual and intuitive way to phrase this action.
39. [H2-8 Aesthetic and minimalist design] [Severity 1] [Found by: A] In the end, once the person has been tutored, he or she is shown the option to review the tutor. The text in this button is quite lengthy and could be shortened from “Review your experience with Josephine” to “Review Josephine” or “Review Tutor.” This would make the button more aesthetically pleasing and the button clearer.
40. [H2-8 Aesthetic and minimalist design] [Severity 2] [Found by: A] The page that has the tutor review form includes the tutor’s school, major, and other information that is not necessary for a tutee that has already received tutoring services. Remove this information for a better usage of space.
41. [H2-8 Aesthetic and minimalist design] [Severity 3] [Found by: A]
The chunk of text on the payment screen is way too long for anyone to read, especially because the buttons are already quite self-explanatory. Put this information in a separate screen that’s only seen if someone clicks on a “Help” type of button.
3. Summary of Violations
<table>
<thead>
<tr>
<th>Category</th>
<th># Viol (sev 0)</th>
<th># Viol (sev 1)</th>
<th># Viol (sev 2)</th>
<th># Viol (sev 3)</th>
<th># Viol (sev 4)</th>
<th># Viol (total)</th>
</tr>
</thead>
<tbody>
<tr>
<td>[H2-1: Visibility of status]</td>
<td>0</td>
<td>1</td>
<td>3</td>
<td>1</td>
<td>0</td>
<td>5</td>
</tr>
<tr>
<td>[H2-2: Match Sys & World]</td>
<td>0</td>
<td>3</td>
<td>3</td>
<td>0</td>
<td>0</td>
<td>6</td>
</tr>
<tr>
<td>[H2-3: User Control]</td>
<td>0</td>
<td>1</td>
<td>1</td>
<td>2</td>
<td>1</td>
<td>5</td>
</tr>
<tr>
<td>[H2-4: Consistency]</td>
<td>0</td>
<td>2</td>
<td>3</td>
<td>0</td>
<td>1</td>
<td>6</td>
</tr>
<tr>
<td>[H2-5: Error Prevention]</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>2</td>
<td>2</td>
<td>4</td>
</tr>
<tr>
<td>[H2-6: Recognition not recall]</td>
<td>0</td>
<td>0</td>
<td>1</td>
<td>0</td>
<td>0</td>
<td>1</td>
</tr>
<tr>
<td>[H2-7: Efficiency of Use]</td>
<td>0</td>
<td>0</td>
<td>2</td>
<td>3</td>
<td>0</td>
<td>5</td>
</tr>
<tr>
<td>[H2-8: Minimalist Design]</td>
<td>0</td>
<td>2</td>
<td>4</td>
<td>3</td>
<td>0</td>
<td>9</td>
</tr>
<tr>
<td>[H2-9: Help Users with Errors]</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<td>[H2-10: Documentation]</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
<tr>
<td>Total Violations by Severity</td>
<td>0</td>
<td>9</td>
<td>17</td>
<td>11</td>
<td>4</td>
<td>41</td>
</tr>
</tbody>
</table>
4. Evaluation Statistics
<table>
<thead>
<tr>
<th>Severity</th>
<th>A</th>
<th>B</th>
<th>C</th>
<th>D</th>
<th>E</th>
<th># Violations</th>
</tr>
</thead>
<tbody>
<tr>
<td>Level 0</td>
<td>NA</td>
<td>NA</td>
<td>NA</td>
<td>NA</td>
<td>NA</td>
<td>0</td>
</tr>
<tr>
<td>Level 1</td>
<td>22%</td>
<td>22%</td>
<td>33%</td>
<td>33%</td>
<td>11%</td>
<td>9</td>
</tr>
<tr>
<td>Level 2</td>
<td>24%</td>
<td>41%</td>
<td>29%</td>
<td>29%</td>
<td>29%</td>
<td>17</td>
</tr>
<tr>
<td>Level 3</td>
<td>45%</td>
<td>55%</td>
<td>36%</td>
<td>36%</td>
<td>36%</td>
<td>11</td>
</tr>
<tr>
<td>Level 4</td>
<td>0%</td>
<td>75%</td>
<td>25%</td>
<td>25%</td>
<td>75%</td>
<td>4</td>
</tr>
<tr>
<td>Total (Levels 3 & 4)</td>
<td>33%</td>
<td>60%</td>
<td>33%</td>
<td>33%</td>
<td>47%</td>
<td>15</td>
</tr>
<tr>
<td>Total (All Levels)</td>
<td>27%</td>
<td>44%</td>
<td>32%</td>
<td>32%</td>
<td>32%</td>
<td>41</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Evaluator</th>
<th># Problems Found</th>
<th># Problems Remaining & Problem IDs</th>
</tr>
</thead>
<tbody>
<tr><td>A</td><td>11</td><td>3 shared with #B, #C, #D, #E → 8 (34, 35, 36, ... 41)</td></tr>
<tr><td>B</td><td>18</td><td>18 (most found)</td></tr>
<tr><td>C</td><td>13</td><td>6 shared with #B → 7 (6, 12, 13, 16, 17, 18, 19)</td></tr>
<tr><td>D</td><td>13</td><td>11 shared with #B & #C → 2 (2, 5)</td></tr>
<tr><td>E</td><td>13</td><td>7 shared with #B, #C, #D → 6 (14, 15, 22, 23, 24, 25)</td></tr>
</tbody>
</table>

Severity Ratings
0 - don’t agree that this is a usability problem
1 - cosmetic problem
2 - minor usability problem
3 - major usability problem; important to fix
4 - usability catastrophe; imperative to fix
Heuristics
[H2-1: Visibility of System Status]
- keep users informed about what is going on
[H2-2: Match Between System & Real World]
- speak the users’ language
- follow real world conventions
[H2-3: User Control & Freedom]
- “exits” for mistaken choices, undo, redo
- don’t force down fixed paths
[H2-4: Consistency & Standards]
[H2-5: Error Prevention]
[H2-6: Recognition Rather Than Recall]
- make objects, actions, options, & directions visible or easily retrievable
[H2-7: Flexibility & Efficiency of Use]
- accelerators for experts (e.g., gestures, kb shortcuts)
- allow users to tailor frequent actions (e.g., macros)
[H2-8: Aesthetic & Minimalist Design]
- no irrelevant information in dialogues
[H2-9: Help Users Recognize, Diagnose, & Recover from Errors]
- error messages in plain language
- precisely indicate the problem
- constructively suggest a solution
[H2-10: Help & Documentation]
- easy to search
- focused on the user’s task
- list concrete steps to carry out
- not too large
A Performance Analysis of Sequential Pattern Mining Algorithms
Karishma B Hathi, Jalpa A Varsur, Sonali P Desai, Sagar R Manvar
PG Student: CSE Department, B.H.Gardi College of Engg&Tech, Rajkot, Gujarat India
Abstract: Sequential pattern mining is a very important mining technique with wide applications. It has been found very useful in various domains such as natural disaster analysis, sales record analysis, marketing strategy, shopping sequences, medical treatment and DNA sequences. It discovers subsequences and frequent relevant patterns from the given sequences in a database. The first was the Apriori algorithm, which was put forward by the founders of the field themselves. Later, more scalable algorithms for difficult applications were developed, e.g. GSP, SPADE and PrefixSpan. In this paper, a survey of sequential pattern mining algorithms is given.
Keywords: Sequential Pattern, Sequence Database, Itemsets, Apriori, Pattern Growth.
INTRODUCTION
Sequential pattern mining is a very important concept in data mining, a further extension of the concept of association rule mining [1], and it has a huge range of real-life applications. It solves the problem of discovering the presence of frequent sequences in a given database [2]. Sequential pattern mining finds interesting sequential patterns in huge databases: it discovers frequent subsequences as patterns from a given sequence database. With large amounts of data continuously being collected and stored, various industries are becoming interested in mining sequential patterns from their databases. Sequential pattern mining is one of the most well-known methods and has wide applications including web-log analysis, customer purchase behavior analysis and medical record analysis [1]. In the retailing business, sequential patterns can be mined from the transaction records of customers. For example, having bought a camera, a customer comes back to buy a cover and an SD card the next time. The retailer can use such information to discover the behavior of the customers, to understand their interests, to satisfy their demands, and above all, to predict their needs. In the medical field, sequential patterns of symptoms and diseases exhibited by patients reveal strong symptom/disease complementary relations that can be a valuable source of information for medical diagnosis and preventive medicine. In web log analysis, the exploring behavior of a user can be obtained from member records or log files; for example, having viewed a web page on “Cards”, a user may next view “Birthday cards”. These sequential patterns, when acted upon, yield huge benefits and increase customer loyalty.
A. Basic Concepts of Sequential Pattern Mining [9]
1) Let I = {x1, . . . , xk} be a set of items, each perhaps associated with a set of attributes, such as value, price, profit, calling distance, period, etc. The value of item x on an attribute A is denoted by x.A. An itemset is a non-empty subset of items, and an itemset with k items is called a k-itemset.
2) A sequence α = <X1 · · · Xl> is an ordered list of itemsets. An itemset Xi (1 ≤ i ≤ l) in a sequence is called a transaction, a term that emerged from shopping sequences in transaction databases. A transaction Xi may have a special attribute, timestamp, denoted by Xi.time, which records the time when the transaction was executed. For a sequence α = <X1 · · · Xl>, we assume that Xi.time < Xj.time for 1 ≤ i < j ≤ l.
3) The number of transactions in a sequence is called the length of the sequence. A sequence of length l is called an l-sequence; for an l-sequence α we have len(α) = l. Moreover, the i-th itemset is denoted by α[i]. An item can occur at most once in an itemset, but can occur multiple times in different itemsets of a sequence.
4) A sequence α = <X1 · · · Xn> is called a subsequence of another sequence β = <Y1 · · · Ym> (n ≤ m), and β a super-sequence of α, if there exist integers 1 ≤ i1 < · · · < in ≤ m such that X1 ⊆ Yi1, . . . , Xn ⊆ Yin.
5) A sequence database SDB is a set of 2-tuples (sid, α), where sid is a sequence-id and α a sequence. A tuple (sid, α) in a sequence database SDB is said to contain a sequence γ if γ is a subsequence of α. The number of tuples in SDB containing γ is called the support of γ, denoted sup(γ). Given a positive integer min_sup as the support threshold, a sequence γ is a sequential pattern in SDB if sup(γ) ≥ min_sup. The sequential pattern mining problem is to discover the complete set of sequential patterns with respect to a given sequence database SDB and a support threshold min_sup. A small sketch of these last two definitions in code is given below.
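To make definitions 4) and 5) concrete, here is a minimal, runnable sketch in Java (ours, not from the paper; names are illustrative): a sequence is a list of itemsets, the subsequence test uses subset containment as in definition 4), and support counts the database sequences containing a given pattern.

```java
import java.util.*;

public class SupportCount {

    // Definition 4): alpha is a subsequence of beta if there exist
    // indices i1 < ... < in with alpha[k] a subset of beta[ik].
    // Greedy earliest matching is sufficient for this test.
    static boolean isSubsequence(List<Set<String>> alpha, List<Set<String>> beta) {
        int i = 0;
        for (Set<String> y : beta) {
            if (i < alpha.size() && y.containsAll(alpha.get(i))) i++;
        }
        return i == alpha.size();
    }

    // Definition 5): sup(gamma) = number of database sequences containing gamma.
    static int support(List<Set<String>> gamma, List<List<Set<String>>> sdb) {
        int sup = 0;
        for (List<Set<String>> alpha : sdb) {
            if (isSubsequence(gamma, alpha)) sup++;
        }
        return sup;
    }

    public static void main(String[] args) {
        List<List<Set<String>>> sdb = List.of(
            List.of(Set.of("camera"), Set.of("cover", "sdcard")),
            List.of(Set.of("camera"), Set.of("tripod")));
        List<Set<String>> gamma = List.of(Set.of("camera"), Set.of("cover"));
        System.out.println(support(gamma, sdb)); // prints 1
    }
}
```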
CLASSIFICATION OF SEQUENTIAL PATTERN MINING ALGORITHMS
In recent years many approaches to sequential pattern mining have been proposed; these studies cover a broad range of issues [11]. In general, there are two major concerns in sequential pattern mining.
1. The first, and most important, is to increase the performance, efficiency, and accuracy of the sequential pattern mining process.
2. The second is to extend sequential pattern mining to time-related constraints.
Sequential pattern mining algorithms are broadly classified into two groups:
a) Apriori based.
b) Pattern growth based.
**Fig.1 Classification of Sequential Pattern Mining Algorithm**
### a) Apriori Based Algorithms
The Apriori and AprioriAll algorithms form the basis for a family of algorithms that rely largely on the apriori property and use a join procedure to generate candidate sequences. The apriori property states that all non-empty subsets of a frequent itemset must also be frequent. This is also described as downward closure: if a sequence cannot satisfy the minimum support test, then all of its subsequences will also fail the test.
Key features of the apriori-based algorithms are [3]:
1) **Breadth-first search:** Apriori-based algorithms are described as breadth-first (level-wise) search algorithms because they construct all the k-sequences in the k-th iteration of the algorithm as they traverse the search space [15].
2) **Generate-and-test:** This approach is used by the very early algorithms from the initial research in sequential pattern mining. Algorithms that depend on this technique alone have inefficient pruning: they create a large number of candidate sequences and then test each one against the user-specified constraints, consuming a huge amount of memory in the early stages of mining.
3) **Multiple scans of the database:** This feature is undesirable because it requires a lot of processing time and I/O cost.
i) **GSP (Generalized Sequential Pattern)**, presented by Agrawal and Srikant [4], makes multiple passes over the data. This algorithm is much faster than the AprioriAll algorithm. GSP involves two steps: candidate generation and candidate pruning. It is not a main-memory algorithm: it generates only as many candidates as will fit in memory, and the support of each candidate is found by scanning the dataset. Frequent sequences among these candidates are written to disk, while candidates without minimum support are deleted [10]. The same step is iterated until every candidate has been counted. GSP finds all length-1 candidates (using one database scan) and orders them by their support value, ignoring those whose support < min_support. Then, for each level (i.e. sequences of length k), the algorithm scans the dataset to gather the support count of each candidate sequence and generates length-(k+1) candidates from the frequent length-k sequences using the apriori property. This step is continued until no frequent sequence or candidate can be found.
This algorithm has very effective scale-up properties with respect to the number of transactions per data sequence and the number of items per transaction. However, it is inefficient when mining large sequence databases with numerous or long patterns, since it generates a very large number of candidate sequences, and several scans of the database are needed because the length of each candidate grows by one at each database scan. A simplified, runnable sketch of the level-wise loop is given below.
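The following sketch (ours) shows the level-wise, generate-and-test structure just described, simplified to sequences of single items so the join step reduces to appending one frequent item; the real GSP join and prune over itemsets is more involved.

```java
import java.util.*;

public class GspSketch {

    // Subsequence test for single-item sequences (greedy matching).
    static boolean contains(List<String> seq, List<String> cand) {
        int i = 0;
        for (String s : seq) if (i < cand.size() && s.equals(cand.get(i))) i++;
        return i == cand.size();
    }

    // One "database scan" counting the support of a candidate.
    static int support(List<String> cand, List<List<String>> sdb) {
        int sup = 0;
        for (List<String> seq : sdb) if (contains(seq, cand)) sup++;
        return sup;
    }

    static List<List<String>> mine(List<List<String>> sdb, int minSup) {
        // Level 1: frequent single items, found in one scan.
        Set<String> items = new TreeSet<>();
        sdb.forEach(items::addAll);
        List<List<String>> frequent = new ArrayList<>();
        List<String> freqItems = new ArrayList<>();
        for (String it : items) {
            if (support(List.of(it), sdb) >= minSup) {
                frequent.add(List.of(it));
                freqItems.add(it);
            }
        }
        List<List<String>> all = new ArrayList<>(frequent);
        // Level k+1: extend each frequent k-sequence by one frequent item
        // (the simplified "join"), then scan the database to count support.
        while (!frequent.isEmpty()) {
            List<List<String>> next = new ArrayList<>();
            for (List<String> f : frequent) {
                for (String it : freqItems) {
                    List<String> cand = new ArrayList<>(f);
                    cand.add(it);
                    if (support(cand, sdb) >= minSup) next.add(cand);
                }
            }
            all.addAll(next);
            frequent = next;
        }
        return all;
    }

    public static void main(String[] args) {
        List<List<String>> sdb = List.of(
            List.of("a", "b", "c"), List.of("a", "c"), List.of("a", "b"));
        System.out.println(mine(sdb, 2)); // [[a], [b], [c], [a, b], [a, c]]
    }
}
```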
ii) **SPIRIT** - The basic concept behind this algorithm is to use regular expressions as a flexible tool for constraint specification [12]. It supports generic, user-specified regular-expression constraints on the mined patterns, allowing more powerful restrictions. There are various versions of the algorithm. The choice of regular expressions as a constraint specification tool rests on two valuable factors: first, regular expressions are a simple and natural syntax for specifying families of sequential patterns; second, they are expressive enough to specify a wide range of interesting pattern constraints.
iii) **SPADE** - Unlike horizontal-format methods such as GSP, SPADE converts the sequence database into a vertical format consisting of item id-lists [5]. The id-list of an item is the list of (sequence-id, timestamp) pairs recording the timestamps at which the item occurs in each sequence. Mining in this format is done by id-list intersection (a sketch of such a join is given below), and SPADE completes the mining in a total of three passes over the database. On the other hand, computation time is needed to convert the horizontal dataset into the vertical format, which may also require storage several times larger than the original sequence database.
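A minimal sketch (ours; names are illustrative) of the vertical-format idea: each item carries an id-list of (sequence-id, timestamp) pairs, and the support of a two-item sequence is obtained by a temporal join of two id-lists, with no rescan of the original database.

```java
import java.util.*;

public class IdListJoin {

    record Occ(int sid, int time) {}

    // Temporal join: keep (sid, ty) where x occurs in sid at some tx < ty,
    // i.e. the occurrences of "x followed by y".
    static List<Occ> join(List<Occ> xs, List<Occ> ys) {
        List<Occ> out = new ArrayList<>();
        for (Occ y : ys)
            for (Occ x : xs)
                if (x.sid() == y.sid() && x.time() < y.time()) { out.add(y); break; }
        return out;
    }

    // Support = number of distinct sequence-ids in the joined id-list.
    static long support(List<Occ> idList) {
        return idList.stream().map(Occ::sid).distinct().count();
    }

    public static void main(String[] args) {
        List<Occ> a = List.of(new Occ(1, 1), new Occ(2, 1));
        List<Occ> b = List.of(new Occ(1, 2), new Occ(2, 3), new Occ(3, 1));
        System.out.println(support(join(a, b))); // 2: <a b> occurs in sequences 1 and 2
    }
}
```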
iv) **SPAM** - SPAM combines ideas from GSP, SPADE, and FreeSpan [6]. This algorithm uses a vertical bitmap representation of the database, similar to the id-lists of the SPADE algorithm. The entire algorithm, together with its data structures, fits in main memory. For better performance SPAM traverses the search space in depth-first fashion. SPAM is similar to SPADE, but it uses bitwise operations in place of the regular and temporal joins; when SPAM and SPADE are compared, SPAM outperforms SPADE, while SPADE is more space-efficient than SPAM.
v) CloSpan- CloSpan (Closed Sequential Pattern Mining) mines only the frequent closed subsequences [6], that is, those having no super-sequence with the same support. When mining long frequent sequences, the performance of other algorithms decreases dramatically; CloSpan generates a smaller number of sequences than those algorithms.
vi) CMDS - CMDS (Closed Multidimensional Sequential Pattern Mining) is a combined method of closed itemset pattern mining and closed sequential pattern mining [13]. It consists of two steps:
- Fusion of closed sequential pattern mining with closed itemset pattern mining.
- Deletion of redundant patterns.
The number of patterns in CMDS is lower than the number of patterns in multidimensional sequential pattern mining, and the set of CMDS patterns covers the set of MDS patterns.
b) Pattern-growth Sequential Pattern Mining Algorithms
The pattern-growth algorithms appeared in the early 2000s as an answer to the problem of generate-and-test. The main idea is to avoid the candidate generation step altogether and to focus the search on a restricted portion of the initial database. In this kind of algorithm, the technique of search-space partitioning plays the main role. Such an algorithm begins by building a representation of the database to be mined, then determines how to partition the search space, and generates candidate sequences by growing from the initially mined frequent sequences. The first algorithms of this kind used projected databases: FreeSpan and PrefixSpan, with the latter being the most influential.
i) PrefixSpan- The PrefixSpan (Prefix-projected Sequential Pattern Mining) algorithm, presented by Jian Pei, Jiawei Han and Helen Pinto [7], is a projection-based sequential pattern mining algorithm. It performs better than apriori-style algorithms and SPADE (vertical data format). PrefixSpan finds the frequent items by scanning the sequence database once. The database is then projected into several smaller databases according to the frequent items. By recursively growing subsequence fragments in each projected database, we obtain the complete set of sequential patterns. The chief idea behind PrefixSpan is to discover patterns by employing a divide-and-conquer strategy; a sketch is given below. PrefixSpan does require more memory than the other algorithms, in the sense that it requires the creation and processing of a large number of projected sub-databases.
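The following is a runnable sketch (ours) of prefix projection on single-item sequences; the full PrefixSpan additionally handles itemsets and pseudo-projection optimisations.

```java
import java.util.*;

public class PrefixSpanSketch {

    static void mine(List<String> prefix, List<List<String>> projected,
                     int minSup, List<List<String>> out) {
        // Count each item once per (projected) sequence, in one scan.
        Map<String, Integer> count = new TreeMap<>();
        for (List<String> seq : projected)
            for (String it : new TreeSet<>(seq))
                count.merge(it, 1, Integer::sum);
        for (Map.Entry<String, Integer> e : count.entrySet()) {
            if (e.getValue() < minSup) continue;
            // Grow the prefix by the frequent item: a new pattern.
            List<String> grown = new ArrayList<>(prefix);
            grown.add(e.getKey());
            out.add(grown);
            // Project: keep the suffix after the item's first occurrence.
            List<List<String>> next = new ArrayList<>();
            for (List<String> seq : projected) {
                int i = seq.indexOf(e.getKey());
                if (i >= 0 && i + 1 < seq.size())
                    next.add(seq.subList(i + 1, seq.size()));
            }
            mine(grown, next, minSup, out); // divide and conquer
        }
    }

    public static void main(String[] args) {
        List<List<String>> sdb = List.of(
            List.of("a", "b", "c"), List.of("a", "c"), List.of("a", "b"));
        List<List<String>> patterns = new ArrayList<>();
        mine(List.of(), sdb, 2, patterns);
        System.out.println(patterns); // [[a], [a, b], [a, c], [b], [c]]
    }
}
```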
ii) FREESPAN- The FreeSpan algorithm reduces the candidate generation and testing cost of apriori while preserving its basic property [14]. In short, FreeSpan uses the frequent items to recursively project the sequence database into projected databases while growing frequent subsequences in each projected database. Every projection partitions the database and confines further testing to progressively smaller and more manageable units. A notable issue is that a considerable number of sequences can appear in more than one projected database, though the size of each projected database decreases with each iteration.
iii) WAP-MINE- WAP-MINE is a pattern-growth algorithm that applies a tree-structured mining technique to its WAP-tree data structure [8]. The sequence database is scanned twice to construct the WAP-tree from the frequent sequences together with their support values. A header table is maintained to point to the first occurrence of each item in a frequent itemset; it is later used to mine the tree for frequent sequences built upon their suffixes. Analyses have found WAP-MINE to be more scalable than GSP and to outperform it by a margin. Although the algorithm scans the database only twice and avoids generating the huge number of candidates of the apriori-based approach, WAP-MINE suffers from memory consumption, as it repeatedly reconstructs intermediate WAP-trees during mining.
COMPARATIVE STUDY OF SEQUENTIAL PATTERN MINING ALGORITHMS
Comparative analysis of sequential pattern mining algorithms is done on the basis of their important features. For comparison, sequential pattern mining algorithms are subdivided into two broad categories, namely Apriori-based and pattern-growth-based algorithms. The nine features used to categorize these algorithms are discussed first, and then the comparison is made for the following algorithms:
GSP: Generalized Sequential Patterns
SPADE: Sequential Pattern Discovery using Equivalence Classes
SPAM: Sequential Pattern Mining
PrefixSpan: Prefix-Projected Sequential Pattern Mining
WAP-MINE: Web Access Pattern Mining
Characteristics of Sequential Pattern Mining Algorithms are:
Apriori-Based vs. Pattern-Growth-Based
Apriori-based algorithms usually use a candidate generate-and-test type of approach, which utilizes the downward closure property:
if an itemset $\alpha$ is not frequent, then no superset of $\alpha$ can be frequent either. Pattern-growth algorithms use a more incremental approach to producing possible frequent sequences, following what might be called a divide-and-conquer strategy: they make projections of the database in an attempt to decrease the search space.
**BFS-Based Approach Vs. DFS-Based Approach** In a BFS approach, a level-by-level search is conducted to find the complete set of patterns, i.e. all children of a node are processed before proceeding to the next level. When using a depth-first search approach, on the other hand, all subsequences along a path are traversed before proceeding to the next one. The advantage of DFS over BFS is that DFS can reach large frequent sequences very quickly, so some expansions along the other paths in the tree can be skipped.
**Top-Down Search Vs. Bottom-Up Search** Apriori-based algorithms utilize a bottom-up search, enumerating every single frequent sequence. This implies that in order to produce a frequent sequence of length $i$, all $2^i$ subsequences have to be produced. It is easy to see that this exponential complexity limits all Apriori-based algorithms to finding only short patterns, since they implement only subset-infrequency pruning, eliminating any candidate sequence for which there exists a subsequence that does not belong to the set of frequent sequences. In a top-down approach, the subsets of sequential patterns can be mined by building the corresponding set of projected databases and mining each recursively, from top to bottom.
**Anti-Monotone Vs. Prefix-Monotone Property** The anti-monotone property states that every non-empty subsequence of a sequential pattern is itself a sequential pattern, while the prefix-monotone property states that if a sequence $\alpha$ fulfills the constraint, then every sequence having $\alpha$ as a prefix also fulfills the constraint.
### Table 1: Comparative study of sequential pattern mining algorithms
<table>
<thead>
<tr>
<th>Characteristics</th>
<th>GSP</th>
<th>SPADE</th>
<th>SPAM</th>
<th>PREFIX SPAN</th>
<th>WAP MINE</th>
</tr>
</thead>
<tbody>
<tr>
<td>Apriori Based</td>
<td>Yes</td>
<td>Yes</td>
<td>Yes</td>
<td>No</td>
<td>No</td>
</tr>
<tr>
<td>Pattern-growth Based</td>
<td>No</td>
<td>No</td>
<td>No</td>
<td>Yes</td>
<td>Yes</td>
</tr>
<tr>
<td>BFS-based Approach</td>
<td>Yes</td>
<td>No</td>
<td>No</td>
<td>No</td>
<td>No</td>
</tr>
<tr>
<td>DFS-based Approach</td>
<td>No</td>
<td>Yes</td>
<td>Yes</td>
<td>Yes</td>
<td>Yes</td>
</tr>
<tr>
<td>Top down search</td>
<td>No</td>
<td>No</td>
<td>No</td>
<td>Yes</td>
<td>Yes</td>
</tr>
<tr>
<td>Bottom up Search</td>
<td>Yes</td>
<td>Yes</td>
<td>Yes</td>
<td>No</td>
<td>No</td>
</tr>
<tr>
<td>Anti-Monotone Property</td>
<td>Yes</td>
<td>Yes</td>
<td>No</td>
<td>No</td>
<td>No</td>
</tr>
<tr>
<td>Prefix-Monotone Property</td>
<td>No</td>
<td>No</td>
<td>No</td>
<td>Yes</td>
<td>No</td>
</tr>
<tr>
<td>Regular-Expression Constraints</td>
<td>No</td>
<td>No</td>
<td>No</td>
<td>Yes</td>
<td>Yes</td>
</tr>
</tbody>
</table>
**Regular Expression Constraint** The complexity of regular expression constraints can be roughly measured by the number of state changes in their corresponding deterministic finite automata. A regular expression constraint has a good property called growth-based anti-monotonicity. A constraint is growth-based anti-monotone if it has the following property: any sequence fulfilling the constraint must be reachable by growing from some component which matches part of the regular expression. From the comparative study in Table 1, it is easy to see that the PrefixSpan algorithm uses a depth-first, top-down search, which is an efficient technique for finding frequent subsequences as sequential patterns in a large database. PrefixSpan also supports regular expression constraints as well as the prefix-monotone property, which makes the algorithm a clear choice for applying user-defined constraints so as to mine only the sequential patterns of interest; a small illustration follows.
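As a small illustration of regular expressions as pattern constraints, the sketch below (ours; the item encoding is illustrative) maps items to single letters and uses java.util.regex both to test a candidate pattern against a constraint and, via Matcher.hitEnd(), to test whether growing the candidate could still produce a match, which is the kind of check a SPIRIT-style algorithm can exploit for pruning.

```java
import java.util.regex.*;

public class RegexConstraint {
    public static void main(String[] args) {
        // Constraint: an 'a', then a 'b' or a 'c', then zero or more 'd's.
        Pattern constraint = Pattern.compile("a(b|c)d*");

        String candidate = "ab";          // the pattern <a b> so far
        Matcher m = constraint.matcher(candidate);
        boolean satisfies = m.matches();  // true: <a b> already matches
        // If the match failed only because input ran out, growing the
        // candidate might still satisfy the constraint.
        boolean extensible = satisfies || m.hitEnd();

        System.out.println(satisfies + " " + extensible); // true true
    }
}
```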
**CONCLUSION**
In this paper, we reviewed what sequential pattern mining is and the different types of algorithms for it. The concept, introduced in 1995 [2], has gone through astonishing advancement in only a few years. Initial work on this topic focused on improving the performance of the algorithms by using various data structures and representations. On this basis, sequential pattern mining algorithms are divided into two main groups: Apriori-approach-based algorithms and pattern-growth-approach-based algorithms. From our comparative study and from previous studies by different researchers on sequential pattern mining algorithms, it is found that the algorithms based on the pattern-growth approach are superior in terms of scalability, time complexity, and space complexity.
**REFERENCES**
[4] Jian Pei, Jiawei Han and Wei Wang, "Constraint-based sequential pattern mining: the pattern-growth methods", Journal of
Author Profiles:
1)Hathi Karishma is a student of Masters of Engineering in Computer Science and Engineering at B.H.Gardi College of Engineering and Technology, Rajkot, Gujarat, India. She is bachelors in Computer Science and Engineering. Her area of interest are Data Mining, Computer networking and Security. Contact:+91 9429810304
2)Desai Sonali is a student of Masters of Engineering in Computer Science and Engineering at B.H.Gardi College of Engineering and Technology, Rajkot, Gujarat, India. She is bachelors in Computer Science and Engineering. Her area of interest are Data Mining, Computer networking and Security.Contact:+91 9408966536
3)Varsur Jalpa is a student of Masters of Engineering in Computer Science and Engineering at B.H.Gardi College of Engineering and Technology, Rajkot, Gujarat, India. She is bachelors in Computer Science and Engineering. Her area of interest are Data Mining, Software Engineering and Information Security. Contact: +91 9925460983
4)Manvar Sagar is a student of Masters of Engineering in Computer Engineering at B. H. Gardi College of Engineering and Technology, Rajkot, Gujarat, India. He is bachelors in Information Technology. His area of interest are Data Mining,Computer networking and Security. Contact:+91 9586507454
Verifying an implementation of SSH
Erik Poll1* and Aleksy Schubert1,2**
1 Radboud University Nijmegen, the Netherlands
2 Warsaw University, Poland
Abstract. We present a case study in the formal verification of an open source Java implementation of SSH. We discuss the security flaws we found and fixed by means of formal specification and verification – using the specification language JML and the program verification tool ESC/Java2 – and by more basic manual code inspection. Of more general interest is the methodology we propose to formalise security protocols such as SSH using finite state machines. This provides a precise but accessible formal specification, which is useful not only for formal verification, but also for development, testing, and for clarification of the official specification in natural language.
1 Introduction
The past decade has seen great progress in the field of formal analysis of security protocols. However, there has been little work or progress on verifying actual implementations of security protocols. Still, this is an important issue, because bugs can make an implementation of a secure protocol completely insecure. A fundamental challenge here is posed by the big gaps between (i) the official specification of a security protocol, typically in natural language; (ii) any models of (parts of) the protocol developed for formal verification of security properties, e.g. using model checking; and (iii) actual implementations of the protocol. In an effort to bridge these gaps, we performed a case study in the formal specification and verification of a Java implementation of SSH. We considered an existing implementation, MIDP-SSH3, which is an actively maintained open source implementation for use on Java-enabled mobile phones. MIDP-SSH is a typical implementation in the sense that it is not written from scratch but based on an earlier one, re-using code from a variety of sources.
In order to express the properties to be verified for the source code, we used the Java Modeling Language (JML) [9]. JML is a specification language designed to describe properties of Java programs. It supports all the important features of the Java language, e.g. inheritance, subtyping, exceptions etc. JML is supported by a range of tools for dynamic or static checking; for an overview see [2]. We used the extended static checker ESC/Java2 [3], the successor of ESC/Java [4].
* Supported by the Sixth Framework Programme of the EU under the MOBIUS project FP6-015905.
** Supported by the Sixth Framework Programme of the EU under the SOJOURN project MEIF-CT-2005-024306.
This tool tries to automatically verify JML-annotated source code, using a weakest precondition calculus and an automated theorem prover.
The structure of the paper is as follows: Section 2 describes the informal code inspection carried out as the first stage of our analysis, and Section 3 describes the more formal approach taken after that. Section 1.1 below presents an overview of the approach taken in these two stages. We draw our conclusions and discuss possible future work in Section 4.
1.1 Methodology
After considering the security requirements of the application, our analysis of the implementation proceeded in several steps.
The first stage, described in Section 2, was an ad-hoc manual inspection of the source code. We familiarised ourselves with the design of the application, considered which parts of the code are security-sensitive, and looked for possible weaknesses. This led to the discovery of some common mistakes – or at least bad practices which should be avoided in security-sensitive applications.
The next stage, described in Section 3, involved the use of the formal specification language JML and the program verification tool ESC/Java2. Here we can distinguish two steps:
– The first step, discussed in Section 3.1, was the standard one when using ESC/Java2: we used the tool to verify that the implementation does not throw any runtime exceptions. For instance, the implementation might throw an ArrayIndexOutOfBoundsException due to incorrect handling of some malformed data packet it receives. This step revealed some bugs in the implementation, where sanity checks on well-formedness of the data packets received were not properly carried out. This would only allow a DoS attack, by making the SSH client crash on such a malformed packet. Of course, for an implementation in a type-unsafe language such as C, as opposed to Java, these bugs would be much more serious, as potential sources of buffer overflow attacks.
The process of using ESC/Java2 to verify that no runtime exceptions can occur, incl. the process of adding the JML annotations this requires, forces one to thoroughly inspect and understand the code. As a side effect of this we already spotted one serious security flaw in the implementation.
– The next step, discussed in Section 3.2, was to verify that the Java code correctly implements the SSH protocol as officially specified in RFCs 4250-4254 [16, 14, 17, 15]. This required some formal specification of SSH. For this we developed our own formal specification of SSH, in the form of a finite state machine (FSM) which describes how the state of the protocol changes in response to the different messages it can receive. This is of course only a partial specification, as it specifies the order of messages but not their precise format. Still, it turned out to be interesting enough, as we hope to demonstrate in this paper.
This last step of the verification is probably the most interesting. Firstly, we found that obtaining the finite state machine from the natural language description in the RFCs was far from trivial, and it revealed some ambiguities and unclarities. It is not always clear what the response to an unexpected, unsupported or simply malformed message should be: some of these may or should be ignored, but others must lead to disconnection. Secondly, verifying that the implementation meets this partial specification as given by the FSM revealed some serious security flaws in the implementation. In particular, the implementation is vulnerable to a man-in-the-middle attack, where an attacker can request the username and password of the user before any authentication has taken place and before a session key has been established.
A secure implementation should of course never handle such a request.
2 Stage 1: Informal, ad-hoc analysis
Prior to any systematic analysis of the application as discussed in the next section, we read the security analysis of the SSH protocol provided in the RFCs [16]. Then we extended the analysis to cover the issues closely related to the Java programming language and to the Java MIDP platform. We located the part of the source code which directly implements the protocol and tried to relate the results of the security analysis to the source code, but without trying to understand the logic of the implementation. In the course of these steps, we already spotted some (potential) security problems. Here is a description of the most important ones:
Weak/no authentication The SSH client does not store public key information for subsequent sessions: it will connect to any site and simply ask that site for its public keys, without checking this against earlier runs and asking the user to accept a new or changed public key. In other words, there is no real authentication before starting an SSH session. This is especially strange as the application stores certain session related information (i.e. host name, user name, and even password) in the MIDP permanent storage – record stores.
There is a countermeasure that allows the user to authenticate the server she or he is connecting to: the SSH client displays an MD5 hash of the server’s public key as ‘fingerprint’ of the server it connects to. The user can check to see if this MD5 hash has the right value. Of course, the typical user will not check this.
Note that unauthenticated key exchange is a well-known and common security mistake; it is for instance listed in [6]. This highlights the importance of programmers being aware of such common mistakes!
Poor use of Java access restrictions The implementation does not make optimal use of the possibilities that Java offers to restrict access to data, with the visibility modifiers, such as public and private, and the modifier final to make fields immutable.
For instance, the implementation creates an instance of the standard library class `java.util.Random` for random number generation. The reference to this object is stored in a `public` static field `rnd`. Untrusted code could simply modify this field, so that it for instance points to an instance of `java.util.Random` with a known seed, or to an instance of some completely bogus subclass of `java.util.Random` which does not produce random numbers at all. The field `rnd` should be `private` or `final` – or, better still, both – to rule out such tampering.
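As a sketch of the suggested fix (ours, not the MIDP-SSH code; the class and method names are illustrative), the field can be made both private and final, with access mediated by a method:

```java
import java.util.Random;

public class Randomness {
    // private: other classes can neither read nor replace the generator;
    // final: the field cannot be reassigned even from within this class.
    private static final Random rnd = new Random();

    // All access goes through a controlled entry point.
    public static int nextInt(int bound) {
        return rnd.nextInt(bound);
    }
}
```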
In all fairness, we should point out that for the current version of the MIDP platform the threat of some hostile application attacking the SSH client by changing its public fields is not realistic. A restriction of the MIDP platform is that at most one application – or `midlet`, as applications for the MIDP platform are called – is running at the same time, so a hostile application cannot be executing concurrently with the SSH midlet. Moreover, each time the SSH client is started it will initialise its fields from scratch. Still, such restrictions are likely to be loosened in the future, and the code of MIDP-SSH might be re-used in applications for other Java platforms where these restrictions do not apply.
A similar problem occurs with the storage of the contents of the P- and S-boxes. The class `Blowfish` in the implementation uses an array
```java
final static int[] blowfish_sbox = { 0xd1310ba6, ... };
```
This integer array is final, so the reference cannot be modified. However, the *contents* of the array are still modifiable. The field has default package visibility, which gives rather weak restrictions on who can modify it, as explained in [10], so hostile code could modify the S-boxes used by the SSH client, and at least mount a DoS attack. The field should really be `private`, and there is no reason why it cannot be.
Again, for the MIDP platform this is not really a threat, due to its restrictions discussed above.
Checking if access modifiers can be tightened need not be done manually, but can be automated, for instance using JAMIT\(^4\). The problems in the application suggest that systematic use of such a tool would be worthwhile.
**Control characters** One of the security threats mentioned in the security analysis is the scenario when a malicious party sends a stream of control characters which erases certain messages to lure the user into performing an insecure action. Although the SSH client does interpret some control characters, there is no operation to ensure that only safe control sequences appear on the user’s terminal.
**Downloading of the session information** The application implements functionality to download a description of an SSH session to execute. Such a description can contain the information about the user and a host name. The transfer of such information over the network in cleartext is an obvious compromise of the security as third parties can associate the login with the machine. Moreover,
\(^4\) See [http://grothoff.org/christian/xtc/jamit/](http://grothoff.org/christian/xtc/jamit/)
data downloaded in this way is not displayed to the user who requested it. In this way it is easy to mount a spoofing attack which forwards the user to a fake SSH server that steals the password.
3 Formal, systematic analysis using JML and ESC/Java2
The analysis using more formal methods consisted of two stages. The first stage was to verify that the implementation does not throw any runtime exceptions, e.g. due to null pointers, bad type casts, or accesses outside array bounds. The second one was to (partially) specify SSH, by means of a finite state machine, and verify that the implementation correctly implements this behaviour.
3.1 Stage 2: Exception Freeness
The standard first step in using ESC/Java2 is to check that the program does not produce any runtime exceptions. Indeed, often this is the only property one checks for the code. Although it is a relatively weak property, verifying it can reveal quite a number of bugs and can expose many implicit assumptions in the code. Just establishing exception freeness requires the formalisation of many properties about the code, as JML preconditions, invariants, and sometimes postconditions. For instance, invariants that certain reference fields cannot be null are needed to rule out NullPointerExceptions, and invariants that certain integer fields are not negative or have some maximum value are needed to rule out ArrayIndexOutOfBoundsExceptions.
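As a small illustration (ours, not taken from MIDP-SSH) of the kind of JML annotations this step requires, consider an invariant ruling out null dereferences and a precondition ruling out out-of-bounds access:

```java
public class PacketBuffer {
    private byte[] data = new byte[0];

    //@ invariant data != null;

    //@ requires 0 <= i && i < data.length;
    public byte get(int i) {
        // With the annotations above, ESC/Java2 can prove that this access
        // throws neither a NullPointerException nor an
        // ArrayIndexOutOfBoundsException.
        return data[i];
    }
}
```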
Like any verification with ESC/Java2, checking the absence of exceptions relies on the axiomatisation of Java semantics built into the tool and on specifications of any APIs used, e.g. for library calls such as System.arraycopy, which are given in a standard set of files with JML specifications for core API classes. Correctness of the results of ESC/Java2 relies on the correctness of this axiomatisation and these API specifications.
Trying to check that no runtime exceptions occur with ESC/Java2 revealed some bugs in the implementation, namely missing sanity checks on the well-formedness of data packets before these packets are processed. This means that the SSH client could crash with an ArrayIndexOutOfBoundsException when receiving certain malformed packets. Such Denial-of-Service attacks are discussed in the RFCs.
The process of using ESC/Java2 to check that no runtime exceptions can occur – incl. the adding of all the JML annotations this requires – forces one to thoroughly inspect and understand the code. As a side effect of this we spotted a serious security weakness in the implementation, namely that it does not check the MAC of the incoming messages, so it is vulnerable to certain replay attacks.
The whole process of proving exception freeness, including fixing the code where required, took about two weeks.
3.2 Stage 3: Protocol specification and verification
In addition to just proving that the implementation does not throw runtime exceptions, we also wanted to verify that it is a correct implementation of the client side of the SSH protocol, as specified in the RFCs. This requires some formal specification of SSH, of course.
**Formal specification of SSH as FSM** Unfortunately we could not find any complete formal description of SSH in the literature; the only formal description we could find [12] deals with just a part of the whole SSH protocol. Therefore we developed our own formal specification of SSH, in the form of a finite state machine (FSM) which describes how the state of the protocol changes in response to the different messages it can receive. This is of course only a partial specification, as it only specifies the order of messages and not their precise format. Still, this partial specification turned out to be interesting enough, as we hope to demonstrate in this paper.

**Fig. 1.** A simplified view of the FSM specifying the behaviour of the SSH client, without optional features described in the RFCs that are not supported, and ignoring the aspects described in Fig. 3. The names of the transitions are the same names used in the RFCs. Labels ending with ! are outputs of the client to the server, labels ending with ? denote inputs to the client.
It turns out that the SSH protocol involves about 15 kinds of messages and its session key negotiation phase has about 20 different states. One complication in defining an FSM describing the client-side behaviour of the protocol is that the SSH specifications present the protocol as a set of features which are partly obligatory and partly optional. An FSM that includes all these optional parts is given in Fig. 2. For simplicity, we focused our attention on those parts of the protocol that this particular implementation actually supports. This simplifies the overall behaviour, namely to that shown in Fig. 1. This behaviour corresponds to the left-most branch in the full specification of SSH given in Fig. 2.
Fig. 1 not only ignores options not implemented, but also includes an apparently common choice made in the implementation that is left open by the official specification. Section 4.2 of [17] states: “When the connection has been established, both sides MUST send an identification string”. This specifies that both client and server must send an identification string, but does not specify the order in which they do this. In principle, it is possible for both sides to wait for the other to send the identification string first, leading to deadlock. The MIDP-SSH implementation chooses to let the client wait for an identification string from the server (the transition VERSION? in Fig. 1) before replying with an identification string (the subsequent transition VERSION!). This appears to be the standard way of implementing this: OpenSSH makes the same choice. In fact, an earlier specification of SSH 1.5 [13, Overview of the Protocol] does prescribe this order; it is not clear to us why the newer specification [17] does not. Moreover, it is not clear if this is a deliberate underspecification or a mistake. Of course, one of the benefits of formalising specifications is that such issues come to light.
Fig. 1 does not tell the whole story, though. It only specifies the standard, correct sequence of messages, but does not specify how the client should react to unexpected, unsupported, or simply malformed messages. This is where much of the complication lies: some of these messages may or should be ignored, but others must lead to disconnection. Adding all the transitions for this to Fig. 1 (or, worse still, to Fig. 2) would lead to a very complicated FSM that is hard to draw or understand, and very easy to get wrong. We therefore chose to specify these aspects in a separate FSM, given in Fig. 3.
The SSH specification states that after the protocol version is negotiated, i.e. from the state WAIT_KEXINIT onwards, the client should always be able to handle a few messages in a generic way. Some of these messages should be completely ignored; some should lead to an UNIMPLEMENTED! reply, meaning the client does not support this message; some should lead to disconnection. This aspect is specified in a separate FSM: in the state WAIT_KEXINIT and any later state, the client should implement the additional transitions given in Fig. 3.
In Fig. 3 we use a few additional ad-hoc conventions to keep the diagram readable. FOREIGN_MSGS? stands for any message that is not explicitly known by the application. As noted above, all such messages should trigger the sending of the UNIMPLEMENTED message. Similarly, OTHER_KNOWN_MSGS? stands for any message that is known, but arrives in a wrong state – these messages lead to disconnection. This diagram is still a simplification, because in some states certain known messages should be ignored rather than lead to disconnection, but we do not have space to discuss these details here.
Fig. 2. Specification of the full client side behaviour of SSH, including all optional features. This diagram must still be extended with the additional aspects as given by the diagrams in Fig. 3. [NB the diagram above has been corrected and simplified from the original WITS’07 publication.]
Another ad-hoc convention is the labels `SshException?` and `IOException?`. These transitions represent two exceptional situations that can occur. Firstly, there is the possibility of an IO error (e.g. because the network or the server goes down), which is modelled by the `IOException?` transition. Secondly, there is the possibility that the incoming packet is of a known type but fails to meet the format specified in the RFCs (e.g. the value of the length field exceeds the size of the packet, or the MAC is incorrect), which is modelled by the `SshException?` transition. As you may have guessed, the names of these transitions are inspired by the Java exceptions used in the implementation.
**Discussion** The finite state machines specifying SSH are implicit in the natural language specifications given in the RFCs, but were not so easy to extract, and highlighted some unclarities. We already mentioned the issue that the description of the order of certain messages from client to server and back can be interpreted in several ways.
Whereas the names of various types of messages are well-standardised, and we use these in our diagrams, there is no explicit notion of state in the SSH specifications. So the names of the states in the diagrams are our invention. This lack of an explicit notion of state is a source of unclarity in the specification. In particular, [16, Sect. 9.3.5] asserts:
> If transmission errors or message manipulation occur, the connection is closed. The connection SHOULD be re-established if this occurs.
but it is hard to figure out which messages should be regarded as message manipulation at a given stage. The RFCs specify forbidden messages in several places, for example in [17, Sect. 7.1]:
Once a party has sent a `SSH_MSG_KEXINIT` message [...] , until it has sent a `SSH_MSG_NEWKEYS` message (Section 7.3), it MUST NOT send any messages other than: [...]
but it is not obvious that messages other than those listed should be considered as ‘manipulations’ at this stage.
It would be better if the information about which messages are allowed, can be ignored, or must lead to disconnection in a given state is available in a more structured way. Now this information is spread out over several places in the RFCs. An alternative to using FSMs might simply be a table of states and messages.
Another source of unclarity is the way the standard keywords are used in the specifications. There is an IETF standard which precisely defines the meaning of terms such as ‘MUST’, ‘MAY’, ‘RECOMMENDED’, and ‘OPTIONAL’ [1], but the SSH specification is not consistent in using these keywords. For example, [17, Section 4] says
Key exchange will begin immediately after sending this identifier.
which presumably means that it ‘MUST’ (and that any other behaviour ‘MUST’ be considered as manipulation and lead to disconnection?).
Finally, in [17, Section 6] we noted that it is not clear if a well-formed packet may have a zero-length payload section or if such a packet should always be treated as malformed, because it is impossible to determine its type, which is crucial for any handling of the packet. (The specification does not forbid such packets, but for instance OpenSSH treats them as an error and quits the client).
3.3 Verification of MIDP-SSH
Before we even attempted a formal verification that the MIDP-SSH correctly implements the specification as given by the FSMs, it was easy to see that the implementation was not correct: it did not correctly record the protocol state, and it accepted and processed many messages which following the FSMs should lead to disconnection. The prime example of this was that a request for username and password would be processed by the SSH client in any state.
Therefore we improved the implementation before attempting formal verification: we re-factored the code so the handling of each message was done by a separate method, we improved the recording of the protocol state, and we added case distinctions based on the protocol state to obtain the right behaviour in each state.
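A schematic sketch (ours, not the actual MIDP-SSH code) of this refactoring: one handler method per message type, each guarded by an explicit protocol-state field, so that a message arriving in a wrong state leads to disconnection as the FSM prescribes. WAIT_KEXINIT follows Fig. 1; the other state names, the handler, and the packet representation are illustrative.

```java
enum ProtocolState { INIT, WAIT_KEXINIT, KEYS_ESTABLISHED, AUTHENTICATED }

class SshClientSketch {
    private ProtocolState state = ProtocolState.INIT;

    // The flaw found was that a username/password request was processed
    // in any state; here it is only accepted once a session key exists.
    void onPasswordRequest(byte[] packet) {
        if (state != ProtocolState.KEYS_ESTABLISHED) {
            disconnect("password request received in state " + state);
            return;
        }
        // ... prompt the user and send the response ...
    }

    private void disconnect(String reason) {
        // close the connection and reset the state machine
        state = ProtocolState.INIT;
    }
}
```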
To verify that the software correctly implemented the finite state machine, we then used AutoJML\textsuperscript{5} [7], a tool that generates JML specifications (or Java code) from finite state machines. This tool had to be adapted to cope with our use of several state diagrams to express various aspects of the behaviour, i.e. with Fig. 3 expressing aspects of the behaviour that should be added to the overall behaviour in Fig. 1. (The alternative would have been to draw the very large finite state machine that would result from adding these aspects to the overall behaviour in Fig. 1.)
\textsuperscript{5} Available from \url{http://autojml.sourceforge.net}
We added the specifications generated by AutoJML to the source code and verified them using ESC/Java2. This revealed there were still errors in the (already improved) implementation, where certain methods handled incoming messages in a different way than prescribed by the FSM. Even though we were aware that the handling of exceptions is a delicate matter and paid particular attention to this, we still missed updates to the internal state variable in certain cases when the exceptions were thrown.
4 Conclusions
Now that there are various mature tools available to verify security properties of abstract security protocols, we believe it is time to tackle the next challenge, namely trying to verify the security of real implementations of such protocols.
This paper reports on an experiment to see if and how formal methods – in particular formal specification using finite state machines, the specification language JML, and the program checker ESC/Java2 – can be used to verify an existing Java implementation of SSH. In the end, we managed to verify the implementation in the sense that it never throws an exception (which is maybe more a safety property than a security property) and that it correctly implements the SSH protocol as specified by the finite state machines that we developed as a formalisation of the official SSH specifications. Along the way we found and fixed several security flaws in the code. Some of these were found as a direct consequence of the verification; some were found more as a side effect of having to thoroughly inspect and annotate the code to get it to verify. Using an extended static checker such as ESC/Java2 is a way to force a very thorough code inspection.
A general conclusion about our case study is that a formal specification of a security protocol that captures all or at least most of the complexities in some format that is readable to implementors is very useful. Given the complexity of real-life protocols, it is easy to get something wrong, as witnessed by the implementation we looked at. The specification of SSH as a finite state machine is formal, but still easy to understand by non-experts. (We are investigating other notations to use instead of finite state machines – more on that below.) We believe that providing such a description as part of official specification would be valuable, as it clarifies the specification and is also useful for development. Indeed, note that anyone who implements SSH will, as part of the work, have to implement a finite state machine that is described in the prose of the SSH RFCs and hence will have to re-do much of the work that we have done in coming up with the description of SSH as finite state machine.
The size of the SSH code we verified (just the code for the protocol, excluding the code for the GUI etc.) is around 4.5 kloc. The whole verification effort took about 6 weeks, including the time it took to understand and formalise the SSH specs, which was about 2 weeks. For widely used implementations of security protocols, say the implementation of SSL in the Java API, such an effort might be considered acceptable.
The second stage in our approach, ensuring the absence of runtime exceptions, can catch programming errors in the handling of individual messages, especially malformed ones. The third stage, verification of conformance to the FSM, can catch programming errors in the handling of sequences of messages, especially incorrect ones. Note that this complements conventional testing: testing – or, indeed, normal use of the application – is likely to reveal bugs in the handling of correctly formatted messages and correct sequences of such messages, but is less likely to reveal bugs in the handling of incorrectly formatted messages or incorrect sequences of messages, simply due to the limitless number of possibilities for this. So our approach may detect errors that are hard to find using testing.
A more practical issue is what the most convenient formalism or format for such finite state machines is, and which tools can be used to develop them. We developed our diagrams on paper and whiteboards, but with a large number of arrows this becomes very cumbersome without some ad-hoc conventions and abbreviations. Maybe a purely graphical language is not the most convenient in the long run. Given the complexity of real-life protocols, some way of separating different aspects into different finite state machines (as we have done with Fig. 1 describing the ‘normal’ scenario and Fig. 3 describing ‘other’ scenarios) seems important.
Related work An earlier paper [7] already investigated how a provably correct implementation could be obtained from an abstract security protocol for a very simple protocol. The AutoJML tool we used to produce JML specifications from the finite state machines can also produce a skeleton Java implementation. When developing an implementation for SSH from scratch, rather than examining an existing one as we did, this approach might be preferable. There are already efforts to generate code from abstract protocol descriptions, e.g. to generate Java code from security protocols described in the Spi calculus [11], or to refine abstract state machine (ASM) specifications to Java code [5].
Jürjens in [8] showed how to verify the security of UML models of security-related protocols. These UML models are at a level of abstraction similar to the one employed by our FSMs.
Future work It would be interesting to repeat the experiment we have done for other implementations and for other protocols, i.e. trying to formalise other protocols using FSMs or other formalisms, and using these to check implementations. Of course, for an implementation that is not in Java, but say in C or C++, we might not have program checkers like ESC/Java2. Still, a formalisation of a security protocol is not only useful for program verification, but also as an aid to the implementor, for human code inspection, and for testing. Indeed, model-based testing could be used to test whether an implementation of SSH conforms to our formal specification of the protocol.
In the end we only verified that the code correctly implements the protocol as described by the finite state machine, not that this protocol is secure, i.e. that it ensures authentication, integrity and confidentiality. Verifying that the full SSH protocol as described in Fig. 2 meets its security goals still seems an interesting challenge to the security protocol verification community.
As an alternative to using finite state machines, we are currently experimenting with a notation that is similar to the standard format used to describe security protocols, but extended with branching and jump statements. This then allows us not only to specify the normal protocol run, but also how any deviations from the normal protocol run have to be handled. Such a formalism may be more practical notation than a graphical one such as finite state machines, which can become unwieldy, and has the advantage of being closer to conventional formal notation for security protocols.
References
Lighting and Shading
Outline
- Global and Local Illumination
- Normal Vectors
- Light Sources
- Phong Illumination Model
- Polygonal Shading
- Example
Global Illumination
- Ray tracing
- Radiosity
- Photon Mapping
- Follow light rays through a scene
- Accurate, but expensive (off-line)
Local Illumination
- Approximate model
- Local interaction between light, surface, viewer
- Phong model (this lecture): fast, supported in OpenGL
- GPU shaders
- Pixar RenderMan (offline)
(Title slide: Normal Vectors, Light Sources, Phong Illumination Model [Angel Ch. 5])
Local Illumination
- Approximate model
- Local interaction between light, surface, viewer
- Color determined only based on surface normal, relative camera position and relative light position
- What effects does this ignore?
Outline
- Global and Local Illumination
- Normal Vectors
- Light Sources
- Phong Illumination Model
- Polygonal Shading
- Example
Normal Vectors
- Must calculate and specify the normal vector
- Even in OpenGL!
- Two examples: plane and sphere
Normals of a Plane, Method I
- Method I: plane given by \( ax + by + cz + d = 0 \)
- Let \( p_0 \) be a known point on the plane
- Let \( p \) be an arbitrary point on the plane
- Then \( n \cdot (p - p_0) = 0 \): the normal is orthogonal to every vector in the plane
- The plane equation gives \( n_0 = [a \ b \ c]^T \)
- Normalize to \( n = n_0/|n_0| \)
Normals of a Plane, Method II
- Method II: plane given by \( p_0, p_1, p_2 \)
- Points must not be collinear
- Recall: \( u \times v \) orthogonal to \( u \) and \( v \)
- \( n_0 = (p_1 - p_0) \times (p_2 - p_0) \)
- Order of cross product determines orientation
- Normalize to \( n = n_0/|n_0| \)
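As a concrete illustration of Method II, a minimal C++ sketch (the vec3 type and helper names are our own, not from the lecture):
```cpp
#include <cmath>

struct vec3 { float x, y, z; };

vec3 operator-(const vec3& a, const vec3& b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }

// cross product: orthogonal to both u and v
vec3 cross(const vec3& u, const vec3& v) {
    return {u.y * v.z - u.z * v.y,
            u.z * v.x - u.x * v.z,
            u.x * v.y - u.y * v.x};
}

vec3 normalize(const vec3& v) {
    float len = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
    return {v.x / len, v.y / len, v.z / len};
}

// normal of the plane through three non-collinear points;
// swapping p1 and p2 flips the orientation
vec3 planeNormal(const vec3& p0, const vec3& p1, const vec3& p2) {
    return normalize(cross(p1 - p0, p2 - p0));
}
```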
Normals of Sphere
- Implicit Equation \( f(x, y, z) = x^2 + y^2 + z^2 - 1 = 0 \)
- Vector form: \( f(p) = p \cdot p - 1 = 0 \)
- Normal given by gradient vector
\[
n_0 = \begin{bmatrix}
\frac{\partial f}{\partial x} \\
\frac{\partial f}{\partial y} \\
\frac{\partial f}{\partial z}
\end{bmatrix} = \begin{bmatrix}
2x \\
2y \\
2z
\end{bmatrix} = 2p
\]
- Normalize \( n_0/|n_0| = 2p/2 = p \)
Reflected Vector
- Perfect reflection: angle of incidence equals angle of reflection
- Also: $l$, $n$, and $r$ lie in the same plane
- Assume $|l| = |n| = 1$, guarantee $|r| = 1$
\[
\mathbf{l} \cdot \mathbf{n} = \cos(\theta) = \mathbf{n} \cdot \mathbf{r}
\]
\[
\mathbf{r} = \alpha \mathbf{l} + \beta \mathbf{n}
\]
Solution: $\alpha = -1$ and $\beta = 2 (\mathbf{l} \cdot \mathbf{n})$
\[
\mathbf{r} = 2 (\mathbf{l} \cdot \mathbf{n}) \mathbf{n} - \mathbf{l}
\]
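A minimal C++ sketch of this formula (our own vec3 type; note that here \( l \) points from the surface toward the light, which is why the fragment shader later applies a minus sign to GLSL's built-in reflect()):
```cpp
struct vec3 { float x, y, z; };

float dot(const vec3& a, const vec3& b) { return a.x * b.x + a.y * b.y + a.z * b.z; }
vec3 operator*(float s, const vec3& v) { return {s * v.x, s * v.y, s * v.z}; }
vec3 operator-(const vec3& a, const vec3& b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }

// perfect mirror reflection of the (unit) light vector l about the (unit) normal n
vec3 reflectAboutNormal(const vec3& l, const vec3& n) {
    return 2.0f * dot(l, n) * n - l;   // r = 2 (l·n) n - l
}
```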
Normals Transformed by Modelview Matrix
Modelview matrix $M$ (shear in this example)
Only keep linear transform in $M$ (discard any translation).
Normals Transformed by Modelview Matrix
When $M$ is a rotation, $M = (M^{-1})^T$, so rotations transform normals correctly without extra work
Normals Transformed by Modelview Matrix (proof of the $(M^{-1})^T$ transform)
Point $(x,y,z,w)$ is on a plane in 3D (homogeneous coordinates) if and only if
\[
a x + b y + c z + d w = 0, \text{ or } [a \ b \ c \ d] [x \ y \ z \ w]^T = 0.
\]
Now, let’s transform the plane by $M$.
Point $(x,y,z,w)$ is on the transformed plane if and only if
$M^{-1} [x \ y \ z \ w]^T$ is on the original plane:
\[
[a \ b \ c \ d] M^{-1} [x \ y \ z \ w]^T = 0.
\]
So, the equation of the transformed plane is
\[
[a' \ b' \ c' \ d'] [x \ y \ z \ w]^T = 0,
\]
where
\[
[a' \ b' \ c' \ d']^T = (M^{-1})^T [a \ b \ c \ d]^T;
\]
in particular, the normal $[a \ b \ c]^T$ is transformed by the inverse transpose of the linear part of $M$.
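On the host side, one convenient way to form this matrix is GLM's inverse-transpose helper (GLM is an assumption here; the lecture's own example code instead uses OpenGLMatrix::GetNormalMatrix() and a mat4 uniform):
```cpp
#include <glm/glm.hpp>
#include <glm/gtc/matrix_inverse.hpp>

// Normal matrix from the modelview matrix: keep the linear 3x3 part
// (discarding translation, as above) and take the inverse transpose.
glm::mat3 normalMatrix(const glm::mat4& modelView) {
    return glm::inverseTranspose(glm::mat3(modelView));
}
```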
Outline
- Global and Local Illumination
- Normal Vectors
- Light Sources
- Phong Illumination Model
- Polygonal Shading
- Example
Light Sources and Material Properties
- Appearance depends on
- Light sources, their locations and properties
- Material (surface) properties
- Viewer position
Types of Light Sources
- Ambient light: no identifiable source or direction
- Point source: given only by point
- Distant light: given only by direction
- Spotlight: from source in direction
- Cut-off angle defines a cone of light
- Attenuation function (brighter in center)
Point Source
- Given by a point \( p_0 \)
- Light emitted equally in all directions
- Intensity decreases with square of distance
\[
I \propto \frac{1}{|p - p_0|^2}
\]
Limitations of Point Sources
- Shading and shadows inaccurate
- Example: penumbra (partial “soft” shadow)
- Similar problems with highlights
- Compensate with attenuation
\[
\text{attenuation factor} = \frac{1}{a + bq + cq^2}, \qquad q = \text{distance } |p - p_0|, \quad a, b, c \text{ constants}
\]
- Softens lighting
- Better with ray tracing
- Better with radiosity
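As a sketch, the attenuation factor from this slide is a one-liner in C++ (the default constants here are illustrative, not prescribed values):
```cpp
// quadratic distance attenuation: 1 / (a + b q + c q^2)
float attenuation(float q /* distance to light */,
                  float a = 1.0f, float b = 0.0f, float c = 0.1f) {
    return 1.0f / (a + b * q + c * q * q);
}
```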
Distant Light Source
- Given by a direction vector \([x \ y \ z]\)
Spotlight
- Light still emanates from point
- Cut-off by cone determined by angle \( \theta \)
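A possible spotlight test in C++ (names are our own; the exponent models the "brighter in center" attenuation mentioned earlier):
```cpp
#include <cmath>

// lDotSpot = dot(-l, spotDir), where l is the unit vector from the surface
// point to the light and spotDir the unit direction the spotlight points in;
// cutoff is the cone half-angle in radians.
float spotFactor(float lDotSpot, float cutoff, float exponent) {
    if (lDotSpot < std::cos(cutoff))
        return 0.0f;                       // outside the cone: no light
    return std::pow(lDotSpot, exponent);   // brighter toward the center
}
```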
Global Ambient Light
- Independent of light source
- Lights entire scene
- Computationally inexpensive
- Simply add \([G_R \ G_G \ G_B]\) to every pixel on every object
- Not very interesting on its own. A cheap hack to make the scene brighter.
Outline
- Global and Local Illumination
- Normal Vectors
- Light Sources
- Phong Illumination Model
- Polygonal Shading
- Example
Phong Illumination Model
- Calculate color for arbitrary point on surface
- Compromise between realism and efficiency
- Local computation (no visibility calculations)
- Basic inputs are material properties and l, n, v:
- l = unit vector to light source
- n = surface normal
- v = unit vector to viewer
- r = reflection of l at p
(determined by l and n)
Phong Illumination Overview
1. Calculate each color channel (R, G, B) separately
2. Add contributions from each light source
3. Clamp the final result to [0, 1]
- Light source contributions decomposed into
- Ambient reflection
- Diffuse reflection
- Specular reflection
- Based on ambient, diffuse, and specular lighting and material properties
Ambient Reflection
\[ I_a = k_a L_a \]
- Intensity of ambient light is uniform at every point
- Ambient reflection coefficient \( k_a \geq 0 \)
- May be different for every surface and r,g,b
- Determines reflected fraction of ambient light
- \( L_a \) = ambient component of light source
(can be set to different value for each light source)
- Note: \( L_a \) is not a physically meaningful quantity
Diffuse Reflection
- Diffuse reflector scatters light
- Assumed to scatter light equally in all directions
- Called Lambertian surface
- Diffuse reflection coefficient \( k_d \geq 0 \)
- Angle of incoming light is important
Lambert's Law
Intensity of reflected light depends on the angle of the incoming light:
\[ I \propto \cos \theta \]
(Figure: a beam of cross-section \( d \) illuminates an area \( d \) head-on (a), but an area \( d/\cos\theta \) when it strikes the surface at angle \( \theta \) (b).)
Diffuse Light Intensity Depends On Angle Of Incoming Light
- Recall
\[ l = \text{unit vector to light} \]
\[ n = \text{unit surface normal} \]
\[ \theta = \text{angle to normal} \]
\[ \cos \theta = l \cdot n \]
\[ I_d = k_d L_d (l \cdot n) \]
- With attenuation:
\[ I_d = \frac{k_d L_d}{a + bq + cq^2} (l \cdot n) \]
\( q = \text{distance to light source,} \)
\( L_d = \text{diffuse component of light} \)
Specular Reflection
- Specular reflection coefficient \( k_s \geq 0 \)
- Shiny surfaces have high specular coefficient
- Used to model specular highlights
- Does not give the mirror effect (need other techniques)
Specular Reflection
- Recall
\[ v = \text{unit vector to camera} \]
\[ r = \text{unit reflected vector} \]
\[ \phi = \text{angle between} v \text{ and } r \]
\[ \cos \phi = v \cdot r \]
\[ I_s = k_s L_s (\cos \phi)^\alpha \]
- \( L_s \) is specular component of light
- \( \alpha \) is shininess coefficient
- Can add distance term as well
Shininess Coefficient
- \( I_s = k_s L_s (\cos \phi)^\alpha \)
- \( \alpha \) is the shininess coefficient
- Higher \( \alpha \) gives narrower curves
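- Illustrative arithmetic: with \( \phi = 30^\circ \), \( \cos\phi \approx 0.87 \), so \( (\cos\phi)^{1} \approx 0.87 \) but \( (\cos\phi)^{100} \approx 10^{-6} \); a large \( \alpha \) confines the highlight to directions where \( v \) nearly coincides with \( r \)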
Summary of Phong Model
- Light components for each color:
- Ambient \( (L_a) \), diffuse \( (L_d) \), specular \( (L_s) \)
- Material coefficients for each color:
- Ambient \( (k_a) \), diffuse \( (k_d) \), specular \( (k_s) \)
- Distance \( q \) for surface point from light source
\[ I = \frac{1}{a + bq + cq^2} \left( k_d L_d (l \cdot n) + k_s L_s (r \cdot v)^\alpha \right) + k_a L_a \]
\( l = \text{unit vector to light} \)
\( r = l \text{ reflected about } n \)
\( n = \text{surface normal} \)
\( v = \text{vector to viewer} \)
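A direct C++ transcription of this formula for one color channel and one light (a sketch; the max() clamps and the final clamp to [0, 1] follow the overview slide):
```cpp
#include <algorithm>
#include <cmath>

float phong(float ka, float kd, float ks, float alpha,   // material
            float La, float Ld, float Ls,                // light components
            float lDotN, float rDotV,                    // geometry terms
            float q, float a, float b, float c) {        // attenuation inputs
    float att = 1.0f / (a + b * q + c * q * q);
    float diffuse  = kd * Ld * std::max(lDotN, 0.0f);    // Lambertian term
    float specular = ks * Ls * std::pow(std::max(rDotV, 0.0f), alpha);
    float I = att * (diffuse + specular) + ka * La;      // ambient not attenuated
    return std::min(I, 1.0f);                            // clamp to [0, 1]
}
```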
BRDF
- Bidirectional Reflection Distribution Function
- Must measure for real materials
- Isotropic vs. anisotropic
- Mathematically complex
- Implement in a fragment shader
Outline
• Global and Local Illumination
• Normal Vectors
• Light Sources
• Phong Illumination Model
• Polygonal Shading
• Example
Polygonal Shading
• Now we know vertex colors
– either via OpenGL lighting,
– or by setting directly via glColor3f if lighting disabled
• How do we shade the interior of the triangle?
Polygonal Shading
• Curved surfaces are approximated by polygons
• How do we shade?
– Flat shading
– Interpolative shading
– Gouraud shading
– Phong shading (different from Phong illumination!)
Flat Shading
• Shading constant across polygon
• Core profile: Use interpolation qualifiers in the fragment shader
• Compatibility profile: Enable with glShadeModel(GL_FLAT);
• Color of last vertex determines interior color
• Only suitable for very small polygons
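For instance, in the core profile the effect of GL_FLAT is obtained with the `flat` interpolation qualifier (a sketch; variable names are ours, and a matching `flat out vec4 vertexColor;` is assumed in the vertex shader):
```glsl
#version 150
// `flat` disables interpolation: the fragment receives the color of the
// provoking (by default, last) vertex of the triangle.
flat in vec4 vertexColor;
out vec4 fragColor;

void main()
{
    fragColor = vertexColor;
}
```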
Flat Shading Assessment
• Inexpensive to compute
• Appropriate for objects with flat faces
• Less pleasant for smooth surfaces
Interpolative Shading
• Interpolate color in interior
• Computed during scan conversion (rasterization)
• Core profile: enabled by default
• Compatibility profile: enable with glShadeModel(GL_SMOOTH);
• Much better than flat shading
• More expensive to calculate (but not a problem)
Gouraud Shading
Invented by Henri Gouraud, Univ. of Utah, 1971
- Special case of interpolative shading
- How do we calculate vertex normals for a polygonal surface? Gouraud:
1. average all adjacent face normals
\[ n = \frac{n_1 + n_2 + n_3 + n_4}{|n_1 + n_2 + n_3 + n_4|} \]
2. use \( n \) for Phong lighting
3. interpolate vertex colors into the interior
- Requires knowledge about which faces share a vertex
Data Structures for Gouraud Shading
- Sometimes vertex normals can be computed directly (e.g. height field with uniform mesh)
- More generally, need data structure for mesh
- Key: which polygons meet at each vertex
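A sketch of such a computation on an indexed triangle mesh, in C++ (names are our own); the index buffer is exactly the "which polygons meet at each vertex" information:
```cpp
#include <cmath>
#include <vector>

struct vec3 { float x = 0, y = 0, z = 0; };
vec3 operator-(vec3 a, vec3 b) { return {a.x - b.x, a.y - b.y, a.z - b.z}; }
void operator+=(vec3& a, vec3 b) { a.x += b.x; a.y += b.y; a.z += b.z; }
vec3 cross(vec3 u, vec3 v) {
    return {u.y * v.z - u.z * v.y, u.z * v.x - u.x * v.z, u.x * v.y - u.y * v.x};
}
vec3 normalize(vec3 v) {
    float len = std::sqrt(v.x * v.x + v.y * v.y + v.z * v.z);
    return {v.x / len, v.y / len, v.z / len};
}

// accumulate each face normal into its three vertices, then normalize
std::vector<vec3> vertexNormals(const std::vector<vec3>& pos,
                                const std::vector<unsigned>& idx) { // 3 per triangle
    std::vector<vec3> n(pos.size());
    for (size_t i = 0; i + 2 < idx.size(); i += 3) {
        vec3 fn = cross(pos[idx[i + 1]] - pos[idx[i]],
                        pos[idx[i + 2]] - pos[idx[i]]); // unnormalized face normal
        n[idx[i]] += fn; n[idx[i + 1]] += fn; n[idx[i + 2]] += fn;
    }
    for (vec3& v : n) v = normalize(v); // average direction of adjacent faces
    return n;
}
```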
Phong Shading (“per-pixel lighting”)
Invented by Bui Tuong Phong, Univ. of Utah, 1973
- At each pixel (as opposed to at each vertex):
1. Interpolate normals (rather than colors)
2. Apply Phong lighting to the interpolated normal
- Significantly more expensive
- Done off-line or in GPU shaders (not supported in OpenGL directly)
Phong Shading Results
Michael Gold, Nvidia
Outline
- Global and Local Illumination
- Normal Vectors
- Light Sources
- Phong Illumination Model
- Polygonal Shading
- Example
Phong Shader: Vertex Program
```glsl
#version 150
in vec3 position; // input vertex position and normal, in world-space
in vec3 normal;
out vec3 viewPosition; // vertex position and normal, in view-space
out vec3 viewNormal;   // these will be passed to the fragment program (interpolated by hardware)
uniform mat4 modelViewMatrix;
uniform mat4 normalMatrix;
uniform mat4 projectionMatrix;
```
Phong Shader: Vertex Program
```glsl
void main()
{
  // view-space position of the vertex
  vec4 viewPosition4 = modelViewMatrix * vec4(position, 1.0f);
  viewPosition = viewPosition4.xyz;
  // view-space normal (normalMatrix = inverse transpose of the modelview matrix)
  viewNormal = normalize((normalMatrix * vec4(normal, 0.0f)).xyz);
  // final position in the normalized device coordinates space
  gl_Position = projectionMatrix * viewPosition4;
}
```
Phong Shader: Fragment Program
```glsl
#version 150
in vec3 viewPosition;  // interpolated from the vertex program
in vec3 viewNormal;
out vec4 c; // output fragment color
// declarations reconstructed; vec4 light/material types are an assumption
uniform vec3 viewLightDirection; // unit vector to the light, in view space
uniform vec4 La, Ld, Ls; // light ambient, diffuse, specular
uniform vec4 ka, kd, ks; // material ambient, diffuse, specular
uniform float alpha;     // shininess

void main()
{
  vec3 n = normalize(viewNormal); // re-normalize the interpolated normal
  // camera is at (0,0,0) after the modelview transformation
  vec3 eyedir = normalize(vec3(0, 0, 0) - viewPosition);
  // reflected light direction
  vec3 reflectDir = -reflect(viewLightDirection, n);
  // Phong lighting
  float d = max(dot(viewLightDirection, n), 0.0f);
  float s = max(dot(reflectDir, eyedir), 0.0f);
  // compute the final color
  c = ka * La + d * kd * Ld + pow(s, alpha) * ks * Ls;
}
```
VBO Layout: positions and normals
(Figure: the VBO memory layout for positions and normals.)
VAO code ("normal" shader variable)
```cpp
// During initialization:
// bind the VAO
glBindVertexArray(vao);
// bind the VBO "buffer" (must be previously created)
glBindBuffer(GL_ARRAY_BUFFER, buffer);
// get a handle to the "normal" shader variable
GLuint loc = glGetAttribLocation(program, "normal");
glEnableVertexAttribArray(loc); // enable the "normal" attribute
// set the layout of the "normal" attribute data
glVertexAttribPointer(loc, 3, GL_FLOAT, normalized, stride, offset);
```
Upload the light direction vector to GPU
```cpp
void display()
{
  // get a handle to the pipeline program
  GLuint program = pipelineProgram->GetProgramHandle();
  // get a handle to the viewLightDirection shader variable
  GLint h_viewLightDirection = glGetUniformLocation(program, "viewLightDirection");
  // set up the view matrix
  openGLMatrix->LookAt(ex, ey, ez, fx, fy, fz, ux, uy, uz);
  float view[16];
  openGLMatrix->GetMatrix(view); // read the view matrix
  // (continued on the next slide)
```
Upload the light direction vector to GPU
```cpp
  float lightDirection[3] = { 0, 1, 0 }; // the "Sun" at noon
  float viewLightDirection[3]; // light direction in the view space
  // the following line is pseudo-code:
  // viewLightDirection = (view * float4(lightDirection, 0.0)).xyz;
  // upload viewLightDirection to the GPU
  glUniform3fv(h_viewLightDirection, 1, viewLightDirection);
  // continue with model transformations
  openGLMatrix->Translate(x, y, z);
  ...
  renderBunny(); // render, via VAO
  glutSwapBuffers();
}
```
Upload the normal matrix to GPU
```cpp
// in the display function:
// get a handle to the program
GLuint program = pipelineProgram->GetProgramHandle();
// get a handle to the normalMatrix shader variable
GLint h_normalMatrix =
glGetUniformLocation(program, "normalMatrix");
float n[16];
matrix->SetMatrixMode(OpenGLMatrix::ModelView);
matrix->GetNormalMatrix(n); // get normal matrix
// upload n to the GPU
GLboolean isRowMajor = GL_FALSE;
glUniformMatrix4fv(h_normalMatrix, 1, isRowMajor, n);
```
Summary
- Global and Local Illumination
- Normal Vectors
- Light Sources
- Phong Illumination Model
- Polygonal Shading
- Example
|
{"Source-Url": "http://run.usc.edu/cs420-s19/11-lighting/11-lighting-6up.pdf", "len_cl100k_base": 4159, "olmocr-version": "0.1.53", "pdf-total-pages": 10, "total-fallback-pages": 0, "total-input-tokens": 30591, "total-output-tokens": 4844, "length": "2e12", "weborganizer": {"__label__adult": 0.0004742145538330078, "__label__art_design": 0.0041351318359375, "__label__crime_law": 0.00040340423583984375, "__label__education_jobs": 0.0008325576782226562, "__label__entertainment": 0.00020313262939453125, "__label__fashion_beauty": 0.00026679039001464844, "__label__finance_business": 0.00019752979278564453, "__label__food_dining": 0.0004725456237792969, "__label__games": 0.00136566162109375, "__label__hardware": 0.002521514892578125, "__label__health": 0.0006132125854492188, "__label__history": 0.0006852149963378906, "__label__home_hobbies": 0.0002460479736328125, "__label__industrial": 0.0008649826049804688, "__label__literature": 0.0003769397735595703, "__label__politics": 0.00023245811462402344, "__label__religion": 0.0008039474487304688, "__label__science_tech": 0.1448974609375, "__label__social_life": 0.00013136863708496094, "__label__software": 0.01806640625, "__label__software_dev": 0.82080078125, "__label__sports_fitness": 0.0004529953002929687, "__label__transportation": 0.000736236572265625, "__label__travel": 0.00041866302490234375}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 14669, 0.00871]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 14669, 0.75243]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 14669, 0.67654]], "google_gemma-3-12b-it_contains_pii": [[0, 547, false], [547, 2051, null], [2051, 3615, null], [3615, 4794, null], [4794, 6442, null], [6442, 8311, null], [8311, 9526, null], [9526, 11055, null], [11055, 13528, null], [13528, 14669, null]], "google_gemma-3-12b-it_is_public_document": [[0, 547, true], [547, 2051, null], [2051, 3615, null], [3615, 4794, null], [4794, 6442, null], [6442, 8311, null], [8311, 9526, null], [9526, 11055, null], [11055, 13528, null], [13528, 14669, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 14669, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 14669, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 14669, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 14669, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, true], [5000, 14669, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 14669, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 14669, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 14669, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 14669, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 14669, null]], "pdf_page_numbers": [[0, 547, 1], [547, 2051, 2], [2051, 3615, 3], [3615, 4794, 4], [4794, 6442, 5], [6442, 8311, 6], [8311, 9526, 7], [9526, 11055, 8], [11055, 13528, 9], [13528, 14669, 10]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 14669, 0.0]]}
|
olmocr_science_pdfs
|
2024-12-07
|
2024-12-07
|
0e3bc4cf7c9713d2b02a182f79a1a0a056a23082
|
Task Assignment Optimization with the Use of PESBAT Linear Programming Tool
Keywords: task assignment; assignment problem; process optimization; workload balancing; optimizing in Excel; Solver; linear programming
JEL Code: C61; M12; L86
1. Task assignment problems
Task assignment problems may be found in various spheres of business and science, such as transportation [Dekker, Spliet, 2016, p. 213], production management, computer science and education [Guler et al., 2015, p. 20]. Generalizing the definition given in the article by Lai, Yeh and Huang [2017, p. 115], it may be said that a task assignment problem (TAP) is a problem that requires matching a defined set of tasks to a known set of agents (e.g. factory workers, factories, production lines, processors, vehicles or warehouses) so that a certain goal is achieved. Example goals are: minimal time required to perform all the tasks, minimal costs, the best quality of the end product, maximum profits, the shortest route, etc. Each particular goal can usually be described as a function of several variables. Depending on that function, task assignment problems may be linear or nonlinear; this division also gives a general understanding
of the problem’s difficulty. Besides variables and a goal, task assignment problems often have a set of constraints. Basic constraints for a TAP are: each agent should be assigned to a certain task, and each task may be assigned to only one agent. There may also be a requirement to assign tasks from the same “task supplier” to different agents during each task assignment, or a requirement to assign to each agent a number of tasks such that all the tasks can be performed in a given amount of time. In different specific cases, task assignment problems have different constraints and goals, which determine the best algorithm or technique for solving the given problem. Task assignment problems may have several sources (Fig. 1).
- The number of tasks to assign
- The number of agents
- Diversity of tasks
- Diversity of agents (e.g. different productivity or available time)
- External regulations (e.g. traceability requirement)
- Impossibility to standardize the task assignment process (e.g. seasonality of tasks)
Fig. 1. Sources of task assignment problems
Source: own work.
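Although the article does not formalize it, the classical linear assignment problem underlying a TAP can be stated as a binary program (a standard textbook formulation, with notation of our choosing):
\[
\min \sum_{i=1}^{n} \sum_{j=1}^{n} c_{ij} x_{ij}
\quad \text{subject to} \quad
\sum_{j=1}^{n} x_{ij} = 1 \;\; \forall i, \qquad
\sum_{i=1}^{n} x_{ij} = 1 \;\; \forall j, \qquad
x_{ij} \in \{0, 1\},
\]
where \( x_{ij} = 1 \) means that task \( j \) is assigned to agent \( i \) and \( c_{ij} \) is the cost of that assignment.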
If it is not identified, is ignored or is solved incorrectly, a task assignment problem may result in several phenomena that are unfavorable for running a business (Fig. 2).
- Longer time of performing tasks
- Difficulties in controlling which task was assigned to a particular agent
- Biased task assignment (e.g. giving easier or more profitable tasks to agents favored by the manager)
- Establishing relations between agents and “task suppliers” (if the task is to assess the quality of a task supplier’s work, it may lead to a biased assessment)
- Impossibility to perform a task by an agent (e.g. in road transportation, if a route was assigned to a driver that cannot deliver goods fast enough due to EU drivers’ working hours regulations)
- Not assigning a number of tasks due to the human factor (e.g. a manager can forget about some tasks if not reminded)
Fig. 2. Phenomena that are unfavorable for running a business
Source: own work.
The above-mentioned phenomena cause disorder in business processes and negatively affect an enterprise’s revenue and reputation. Hence solving the task assignment problem and creating a procedure to assign tasks should have high priority for projects involving a large number of tasks and people. Task assignment can be supported by task management software that contains information about tasks, agents and constraints, and reminds the manager that there are tasks to be assigned. Such tools or platforms can be components/extensions of popular office suites used in enterprises.
2. Linear programming tools
Currently, there is a great number of optimization tools available, either commercial or free. Available tools are rarely dedicated to linear programming only; many of them also make it possible to solve or model non-linear problems. Such an optimization tool may be an independent application or a library of algorithms and procedures that may be used for developing software in a chosen programming language. Optimization tools may be divided into two groups: solvers and modeling environments. Solvers use a data model as input and return optimal or close-to-optimal results by applying several algorithms or methods to the input. Modeling environments, on the other hand, integrate human modelers and solvers by giving a tool for data model creation, importing data, creating instances of the problem based on the data model, engaging solvers and analyzing results [Fourer, 2017, p. 1].
More information on linear programming tools is available at www.mat.univie.ac.at/~neum/glopt/software_g.html and in a series of comprehensive surveys concerning linear programming software conducted by Robert Fourer [2017, p. 1].
3. Microsoft Excel Solver Add-in
Microsoft Excel Solver Add-in (Solver) is a popular optimization and what-if analysis tool. The tool was developed and is still supported by Frontline Systems, Inc., a software developer located in the United States of America. Solver is delivered as a part of Microsoft Excel; it can be installed directly from the MS Excel menu or from the CD where Microsoft Office or Microsoft Excel installation files are contained, and its usage does not require any additional costs. Solver was a part of Microsoft Excel in versions 2007, 2010, 2013 and 2016. It is also available for Excel 2011 and 2016 for Mac computers, Excel for iPad and iPhone, and Excel for tablets and phones using Android OS [Microsoft Support, p. 1].
Solver’s task is to process a given data model so that an optimal solution, or goal, is found. Finding the goal is done by changing the variables, which may be subject to several constraints. There can only be one goal for a model, and it has to be a cell which contains a formula. The formula’s result should depend on the variables’ values. Solver is capable of finding the minimum or maximum goal value for a given data model; it can also process the model so that the goal value equals a certain given number.
Solver has some limits on the variables and constraints that can be added to the problem that needs to be solved. The limit on variables is constant: the tool can analyze up to 200 variables. Limitations on constraints are as follows:
Using the basic Excel Solver, you can place constraints on up to 100 cells which are not decision variables. In addition, you can place constant upper and/or lower bounds on the decision variables, and you can place integer constraints on some or all of the decision variables [Frontline Systems, Inc., p. 1].
In the Solver which was a part of Microsoft Excel 2010 and later versions, there are six types of constraints that can be put on a cell or a range of cells: less than or equal to, equal to, greater than or equal to, binary, integer and all different. The right part of an equation for the first three types can be a number or a formula that returns a number.
Besides variables and constraints, a data model that will be processed by Solver should have one goal or target. It has to be a cell which contains a formula, and the formula’s result should depend on the variables’ values (Fig. 3).
Solver uses three methods to find optimal solutions for given problems [Frontline Systems, Inc., p. 2]. The first method is GRG Nonlinear, which is used for nonlinear optimization. Its code was developed by Leon Lasdon (University of Texas at Austin) and Alan Waren (Cleveland State University) and later enhanced by Frontline Systems, Inc.
The Simplex LP Method is used for linear programming. The method “uses the Simplex and dual Simplex method with bounds on the variables, and problems with integer constraints use the branch and bound method, as implemented by John Watson and Daniel Fylstra, Frontline Systems, Inc.”.
The Evolutionary solving method first appeared in Microsoft Excel 2010 version. This method can be used for non-smooth optimization. It uses a combination of genetic algorithm and local search methods, which were implemented by several professionals at Frontline Systems, Inc. This method is capable of finding near-optimal solutions for data models which use Excel IF and VLOOKUP functions.
Solver allows adjusting some of the parameters that will be taken into account while using a chosen algorithm to solve a given problem.
The features listed above make the Microsoft Excel Solver Add-in a suitable tool for simple what-if analyses and a way to search for solutions to less complex business problems that can be presented as a set of mathematical equations or inequalities.
The main reasons for choosing Solver as a tool to solve the task assignment problem are shown in Fig. 4.
4. Problem of assigning tasks for photoanalysts
This section contains a description of a task assignment process which takes place in QMK Sp. z o.o., a Polish consulting and analytical enterprise. There is also a presentation of a solution which was created using Solver and Visual Basic for Applications and helps to optimize the process described below.
QMK is based in Sopot, Poland, and employs 15 people. For one of its clients, QMK provides a photoanalysis service. QMK’s client possesses several thousand shelves with its products, which are installed at various stores all over Poland. The client’s staff visit all of these locations at least once a month (twice if needed) and take a series of photographs (usually 7 to 12) of each shelf. After the photos of a given shelf are taken, they are compressed into a package and sent to the client’s server. At the end of each working day, all packages are exported to one of QMK’s servers, so analysis of the received packages may begin the day after the photos are taken. The aim of the photoanalysis is to assess the quality of the client’s personnel’s work and to collect detailed data on the shelves’ stock (availability of all products and quantity of available ones), which shows the effectiveness of the sales force and point-of-sale materials. Data on the availability of the most recent POSM is also gathered. The collected data is later used to create diverse reports and ratings.
The current status of all received and processed packages is presented on a web platform, which is used for communication between the personnel of QMK and its clients. The assignment of received packages with the use of the web platform is shown in Fig. 5.
The table (Fig. 5) shows all packages that can be assigned to photoanalysts (further: PHA); the first column contains the client’s worker ID, and the other columns present information on the quantity of packages to assign, the number of packages with photos of particular types, the visit ID, the data of the assigned PHA and the package status. Packages with photographs of the shelves are grouped by shelf type and client’s worker. Assigning packages to a PHA is performed by selecting several checkboxes corresponding to packages of a given type sent by a given client’s worker, selecting a PHA at the bottom of the table and confirming the selection.
After the photoanalysis is performed, the results are published and clients have a possibility to check them. If complaints occur, the photoanalysis is improved and the final reports are prepared. The scheme of the photoanalysis process is presented in Fig. 6.
Specifics of the business process and features of the enterprise impose several restrictions on task assignment:
- packages should be assigned to PHA proportionally to their availability,
- packages of photographs of each type should be assigned proportionally to their share in all the packages available for assignment,
- throughout a month, packages sent by each particular client’s worker should be assigned to the maximum number of PHA in order to minimize the possibility of a biased assessment,
- packages of different types from the same client’s worker should be perceived as different tasks,
- each group of packages should be assigned to only one PHA during one assignment.
Besides the restrictions mentioned above, the problem of assigning tasks to PHA has another feature: differences in photoanalysts’ productivity are negligible. The package assignment problem, in this case, reduces to assigning each of the analysts a number of packages proportional to his or her work time. However, as mentioned in the previous section, the packages in the table are grouped by type and client’s worker, so it is impossible to assign single packages to a PHA. The packages of a given type to be assigned to a PHA (e.g. 15 packages of type B) have to be gathered by adding several packages of that type from different client’s workers (e.g. 3+1+7+4). The task is easy to perform for a small number of PHA and groups of packages, but assigning around 60 groups of packages of different sizes to a dozen PHA manually is time-consuming and may result in mistakes, due to which it becomes impossible to assign each analyst the number of packages proportional to their available time.
In order to have better control over each of these constraints and to optimize the process of task assignment, a PESBAT assignment tool using Microsoft Solver and VBA procedures was created.
5. Task assignment optimization with the use of PESBAT tool
PESBAT (Programming Enhanced Solver Based Assignment Tool) was developed with the use of Visual Basic for Applications and the Excel Solver Simplex LP method. The purpose of the tool is to assist the QMK manager in assigning packages with photographs from QMK’s client to photoanalysts.
The tool uses Microsoft Excel as its environment and framework. PESBAT consists of five spreadsheets containing formulas and tables with data, and several macros which prepare the data for further processing and assign packages to PHA.
The whole process of assigning packages with the use of PESBAT can be divided into three steps:
- data preparation,
- calculating quantities of packages of each type to be assigned,
- assigning the packages.
The tool uses the table shown in Fig. 5 as its input. After copying the table from the web platform and pasting it into the first spreadsheet, a macro is used to prepare the table for further usage. The structure of the macro is shown in Fig. 7. Spreadsheets “Step 1” and “Step 2” are shown in Fig. 8 and 9 respectively.
The second macro prepares the data for calculating quantities of packages to be assigned. The main steps implemented in the macro are:
- copying the table from spreadsheet “Step 2” to “Step 3”,
- adding a column with sums of all packages,
- adding sums of packages of each type,
- sorting the table by the “Sum” column in descending order.
After the third macro stops, a previously prepared table located in spreadsheet “Step 3” copies data concerning the sum of packages and sums of packages of each type and calculates the quantities of packages to be assigned to each of the PHA.
- Copying the table to the spreadsheet “Step 2”
- Deleting the columns that will not be used for package assignment, unmerging all cells and deleting empty rows
- Inserting zeros into empty cells at the intersections of client’s worker rows and package type columns
- Moving the data concerning packages created after second visits in stores under the section of data concerning first-visit packages
- Changing the height of rows and the width of columns so that it is easier to see the whole table
Fig. 7. The structure of PESBAT data preparation macro
Source: own work.
Fig. 8. PESBAT – spreadsheet “Step 1”
Source: own work.
Fig. 9. PESBAT – spreadsheet “Step 2”
Source: own work.
The calculations are performed by formulas embedded in each cell of the table. The formulas calculate the quantities mentioned above according to the restrictions: each PHA should get a number of packages that corresponds to his or her available time, and the sum of packages of each group assigned to a PHA should be proportional to the share of that group in the sum of all the packages. The calculated values are not integers, so the formulas round them. Sometimes, due to rounding, the sum of packages that may be assigned is not equal to the sum of the values calculated by the formulas, so these values need to be corrected manually. The second reason for correcting values manually is that sometimes it is not possible to assign the number of packages calculated by the formulas to each of the analysts, e.g. if one of the groups of packages has more packages than any of the PHA should get.
After the quantities of packages that need to be assigned are calculated, the main macro is enabled. Its function is to arrange the packages from the prepared table in such a way that each of the analysts gets the number of packages that was calculated by the formulas. The macro engages Microsoft Excel Solver for that task; the variables for Solver are cells with binary constraints. If a cell’s value is 1, the package in the same row as the cell will be assigned to the currently analyzed PHA; if 0, the package will not be assigned. Solver’s target is a cell with a formula calculating the sum of products of the variables and the numbers of packages in each group. Solver’s task is to change the variables in such a way that the target value equals the value calculated for the currently analyzed PHA by the formulas in the previous step. The tool also uses one column as its operating memory: information concerning packages that were already assigned is stored there in order to avoid multiple assignments of the same package to several PHA. The structure of the main macro is presented in Table 1.
Table 1. Structure of the main PESBAT macro
- set the variables that will be used to navigate through spreadsheets
- main loop 1: further operations repeat until all types of packages are assigned
  ● choose the type of package to be assigned
  ● reset the operating memory
  ● main loop 2: further operations repeat for each of the photoanalysts
    ● select an analyst
    ● gather information on the packages that may be assigned
    ● reset Solver, initiate Solver and set its parameters
    ● assign packages
    ● check if the assignment was successful, propose to change the target value if necessary
    ● check which packages were assigned, copy the information to the operating memory
    ● copy the data that allows identifying client’s workers to the dedicated spreadsheet
    ● copy the data on the size of the assigned packages to the technical spreadsheet
    ● go to the beginning of main loop 2
  ● adjust the variables used for navigation
  ● reset the operating memory
- give information on how long package assignment took
Source: own work.
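To make the Solver step in main loop 2 concrete, the invocation could look roughly like the VBA sketch below. All cell addresses, range sizes and the macro name are hypothetical; Relation:=5 declares the variables binary and Engine:=2 selects the Simplex LP method, as stated in the article.
```vba
' Sketch of a single Solver invocation for one analyst (hypothetical layout):
' column B holds the binary selection variables, column C the group sizes,
' D1 contains =SUMPRODUCT(B2:B61,C2:C61) and E1 the analyst's target quantity.
Sub AssignPackagesToAnalyst()
    SolverReset
    ' the target cell D1 must reach the value stored in E1 ("Value Of" goal)
    SolverOk SetCell:="$D$1", MaxMinVal:=3, ValueOf:=Range("$E$1").Value, _
        ByChange:="$B$2:$B$61", Engine:=2        ' Engine 2 = Simplex LP
    SolverAdd CellRef:="$B$2:$B$61", Relation:=5 ' Relation 5 = binary variables
    SolverSolve UserFinish:=True                 ' solve without showing the dialog
End Sub
```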
After the macro stops, the spreadsheet allows the user to check whether the assignment was successful: an additional table shows whether the sum of packages for assignment equals the sum of the assigned packages. It also shows whether all the partial sums (sums of packages of each type, sums of packages that each photoanalyst was assigned) are correct.
The last macro changes the formatting of the final spreadsheet, which contains the output of PESBAT: a table with columns for each of the analysts and types of packages, with listed personal data of client’s workers whose packages were assigned to a given analyst.
Control panel and spreadsheet “Output” are shown in Fig. 11.
The tool is automatic: its macros have procedures that clean the data remaining after a previous package assignment, and the control buttons also stay in the same place after package assignment. PESBAT has to be updated only if some new types of packages or new photoanalysts are added to the project. The tool can theoretically assign packages from 200 client’s workers (that is the number of variables that Microsoft Excel Solver can handle); at present, there are around 60 client’s workers that create packages of photographs.
Manual package assignment takes 43 minutes on average. PESBAT helps to reduce this time to 17 minutes on average, i.e. by almost 60%. The average running time of PESBAT is between 2 and 3 minutes; this time generally depends on the hardware that is used to run the tool.
PESBAT assigns packages in a way that is similar to how packages were assigned by the manager: larger groups of packages are assigned to photoanalysts that can analyze more packages daily. It helps to avoid situations in which some groups of packages remain unused.
Monthly tests of the tool showed that the levels of diversity in the groups of photoanalysts who analyzed packages coming from client’s workers were similar for manual and PESBAT-supported task assignment: the average number of analysts per client’s worker was around 9 (out of 10) PHA in both cases. However, this was mainly achieved due to randomness in the packages’ import: the number of packages of each type from each client’s worker that are available for assignment is different each day.
6. Conclusions
The realization of the described business process, the PESBAT tool development process and the performed tests indicate that:
− companies in business fields similar to QMK’s have a lot of areas for optimization (e.g. task assignment, creation of documentation, identifying risks and constraints that have to be taken into account to provide good quality services),
− sometimes managers see problems in an organization’s functioning but do not know what methods and tools may be used to solve them; this may be caused by a lack of knowledge and experience in the spheres of process management or information technology,
− a lot of business problems that may be presented as a mathematical model can be solved using the programming languages and solvers built into office suites, like VBA and Excel Solver. Such tools give limited possibilities (e.g. Solver’s constraint of using at most 200 variables), but they may still be successfully used to solve small and medium optimization problems,
− it is not always easy to convince managers of the need for developing a tool for optimizing some process. The main reason is that the implementation of such a tool generally creates additional costs; another is a general reluctance to change, inherent in every person. It may be useful to calculate potential costs and benefits, perform several tests and create a plan of development and implementation of such a tool to convince managers,
− the problem mentioned above may not occur if the tools are developed with the use of inexpensive, well-known tools available in office suites; using them, even small companies are able to develop such a tool at minimal cost.
7. Future research
Development of the PESBAT tool is not finished. At present, the tool only assigns three out of five types of packages, generally because one type of package is always assigned to the manager, and packages of the other type are assigned to analysts based on information concerning previous assignments.
After improving these areas, further automation of task assignment may be considered. The time that task assignment takes may be reduced even further by adding new functionalities for data acquisition and for exporting final results to the B2B platform, in other words by tighter integration of PESBAT and the B2B platform.
After these updates, more comparative tests of manual and semi-automatic task assignment will be performed to correctly assess the positive and negative effects of using the PESBAT tool.
Another interesting issue is solving the vehicle routing problem for QMK’s clients. Future PESBAT development will also aim to help plan the number of packages that will be available for assignment each day. Such control may help to achieve even greater diversity in the groups of photoanalysts who analyze packages coming from client’s workers.
Task Assignment Optimization with the Use of PESBAT Linear Programming Tool
The article presents task assignment problem and the role that modern linear programming tools may play in its solving. Task assignment problem is a case of the assignment problem, which is one of the fundamental combinatorial optimization problems. Its specific formulations can be found in logistics (e.g. driver assignment problem), computer science (e.g. memory management) and other fields of science and business. Various methods and algorithms have been created or adapted to solve the assignment problem, and modern linear programming and optimization tools like Microsoft Excel Solver, which contain implementations of these algorithms, provide a possibility to solve diverse cases of the assignment problem with minimum effort and time.
The article addresses the widely known task assignment problem in business, its sources and ways of solving it or reducing its negative impact on the effectiveness of business processes. The paper also contains a general overview of modern linear programming tools that can be used for task assignment and describes the Microsoft Excel Solver Add-in as a tool for business process optimization.
The main part of the article is a presentation of a VBA-based optimization tool called PESBAT, developed by the authors, and the case of task assignment process optimization in an enterprise that offers photoanalytical services. Pilot use of the PESBAT tool showed that it allows reducing the time needed for task assignment and gives a more balanced workload for employees, but it also indicated optimization possibilities in other areas of the described company, like route optimization and report preparation.
|
{"Source-Url": "https://www.journals.umcs.pl/h/article/download/7188/5383", "len_cl100k_base": 6069, "olmocr-version": "0.1.53", "pdf-total-pages": 14, "total-fallback-pages": 0, "total-input-tokens": 28629, "total-output-tokens": 6866, "length": "2e12", "weborganizer": {"__label__adult": 0.0006504058837890625, "__label__art_design": 0.0011663436889648438, "__label__crime_law": 0.0012502670288085938, "__label__education_jobs": 0.06219482421875, "__label__entertainment": 0.00021696090698242188, "__label__fashion_beauty": 0.0003786087036132813, "__label__finance_business": 0.0189666748046875, "__label__food_dining": 0.0009407997131347656, "__label__games": 0.001617431640625, "__label__hardware": 0.0019359588623046875, "__label__health": 0.0011472702026367188, "__label__history": 0.0008368492126464844, "__label__home_hobbies": 0.0006413459777832031, "__label__industrial": 0.006938934326171875, "__label__literature": 0.0008077621459960938, "__label__politics": 0.00054168701171875, "__label__religion": 0.0007281303405761719, "__label__science_tech": 0.2498779296875, "__label__social_life": 0.0005145072937011719, "__label__software": 0.2239990234375, "__label__software_dev": 0.420166015625, "__label__sports_fitness": 0.0005288124084472656, "__label__transportation": 0.0035572052001953125, "__label__travel": 0.0005574226379394531}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 29022, 0.01083]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 29022, 0.66658]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 29022, 0.87317]], "google_gemma-3-12b-it_contains_pii": [[0, 1447, false], [1447, 3567, null], [3567, 6403, null], [6403, 7776, null], [7776, 9364, null], [9364, 11091, null], [11091, 12866, null], [12866, 15594, null], [15594, 16265, null], [16265, 18227, null], [18227, 20951, null], [20951, 22677, null], [22677, 25439, null], [25439, 29022, null]], "google_gemma-3-12b-it_is_public_document": [[0, 1447, true], [1447, 3567, null], [3567, 6403, null], [6403, 7776, null], [7776, 9364, null], [9364, 11091, null], [11091, 12866, null], [12866, 15594, null], [15594, 16265, null], [16265, 18227, null], [18227, 20951, null], [20951, 22677, null], [22677, 25439, null], [25439, 29022, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 29022, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 29022, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 29022, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 29022, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 29022, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 29022, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 29022, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 29022, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 29022, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 29022, null]], "pdf_page_numbers": [[0, 1447, 1], [1447, 3567, 2], [3567, 6403, 3], [6403, 7776, 4], [7776, 9364, 5], [9364, 11091, 6], [11091, 12866, 7], [12866, 15594, 8], [15594, 16265, 9], [16265, 18227, 10], [18227, 20951, 11], [20951, 22677, 12], [22677, 25439, 13], [25439, 29022, 14]], 
"pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 29022, 0.22449]]}
|
olmocr_science_pdfs
|
2024-12-06
|
2024-12-06
|
34dfcde52db8ba10434bd06bb782a8e03ea41171
|
Efficient Dual-ISA Support in a Retargetable, Asynchronous Dynamic Binary Translator
Peer-reviewed version. Published in: Embedded Computer Systems: Architectures, Modeling, and Simulation (SAMOS), 2015 International Conference on. DOI: 10.1109/SAMOS.2015.7363665
Tom Spink, Harry Wagstaff, Björn Franke and Nigel Topham
Institute for Computing Systems Architecture
School of Informatics, University of Edinburgh
t.spink@sms.ed.ac.uk, h.wagstaff@sms.ed.ac.uk, bfranke@inf.ed.ac.uk, npt@inf.ed.ac.uk
Abstract—Dynamic Binary Translation (DBT) allows software compiled for one Instruction Set Architecture (ISA) to be executed on a processor supporting a different ISA. Some modern DBT systems decouple their main execution loop from the built-in Just-In-Time (JIT) compiler, i.e. the JIT compiler can operate asynchronously in a different thread without blocking program execution. However, this creates a problem for target architectures with dual-ISA support such as ARM/THUMB, where the ISA of the currently executed instruction stream may be different to the one processed by the JIT compiler due to their decoupled operation and dynamic mode changes. In this paper we present a new approach for dual-ISA support in such an asynchronous DBT system, which integrates ISA mode tracking and hot-swapping of software instruction decoders. We demonstrate how this can be achieved in a retargetable DBT system, where the target ISA is not hard-coded, but a processor-specific module is generated from a high-level architecture description. We have implemented ARM V5T support in our DBT and demonstrate execution rates of up to 1148 MIPS for the SPEC CPU 2006 benchmarks compiled for ARM/THUMB, achieving on average 192%, and up to 323%, of the speed of QEMU, which has been subject to intensive manual performance tuning and requires significant low-level effort for retargeting.
I. INTRODUCTION
The provision of a compact 16-bit instruction set architecture (ISA) alongside a standard, full-width 32-bit RISC ISA is a popular architectural approach to code size reduction. For example, some ARM processors (e.g. ARM7TDMI) implement the compact THUMB instruction set whereas MIPS has a similar offering called MIPS16E. Common to these compact 16-bit ISAs is that the processor either operates in 16-bit or 32-bit mode and switching between modes of operation is done explicitly through mode change operations, or implicitly through PC load instructions.
For instruction set simulators (ISS), especially those using dynamic binary translation (DBT) technology rather than instruction-by-instruction interpretation only, dynamic changes of the ISA present a challenge. Their integrated instruction decoder, part of the just-in-time (JIT) compiler translating from the target to the host system’s ISA, needs to support two different instruction encodings and keep track of the current mode of operation. This is a particularly difficult problem if the JIT compiler is decoupled from the main execution loop and, for performance reasons, operates asynchronously in a different thread as in e.g. [1] or [2]. For such asynchronously multi-threaded DBT systems, the ISA of the currently executed fragment of code may be different to the one currently processed by the JIT compiler. In fact, in the presence of a JIT compilation task farm [2], each JIT compilation worker may independently change its target ISA based on the encoding of the code fragment it is operating on. Most DBT systems [3], [4], [5], [6], [7], [8], [9], [1], [10], [11], [12], [13], [14], [15] avoid dealing with this added complexity and do not provide support for dual-ISAs at all. A notable exception is the ARM port of QEMU [16], which supports both ARM and THUMB instructions, but tightly couples its JIT compiler and main execution loop and, thus, misses the opportunity to offload the JIT compiler from the critical path to a separate thread.
The added complexity and possible performance implications of handling dual ISAs in DBT systems motivate us to investigate high-level retargetability, where low-level implementation and code generation details are hidden from the user. In our system ISA modes, instruction formats and behaviours are specified using a C-based architecture description language (ADL), which is processed by a generator tool that creates a dynamically loadable processor module. This processor module encapsulates the necessary ISA tracking logic, instruction decoder trees and target instruction implementations. Users of our system can entirely focus on the task of transcribing instruction definitions from the processor manual and are relieved of the burden of writing or modifying DBT-internal code concerning ISA mode switches.
In this paper we introduce a set of novel techniques enabling dual-ISA support in asynchronous DBT systems, involving ISA mode tracking and hot-swapping of software instruction decoders. The key ideas can be summarised as follows: First, for ISA mode tracking we annotate regions of code discovered during initial interpretive execution with their target ISA. This information cannot be determined purely statically. Second, we maintain separate instruction decoder trees for both ISAs and dynamically switch between software instruction decoders in the JIT compiler according to the annotation on the code region under translation. Maintaining two instruction decoder trees, one for each ISA, contributes to efficiency. The alternative solution, a combined decoder tree, would require repeated mode checks to be performed as opcodes and fields of both ISAs may overlap. Finally, we demonstrate how dual-ISA support can be integrated in a retargetable DBT system, where both the interpreter and JIT compiler, including their instruction decoders and code generators, are generated from a high-level architecture description. We have implemented full ARM V5T support, including complete coverage of THUMB instructions, in our retargetable, asynchronous DBT system and evaluated it against the SPEC CPU 2006 benchmark suite. Using the ARM port of the GCC compiler we have compiled the
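To illustrate the second idea, here is a minimal C++ sketch of per-region ISA annotation and decoder hot-swapping (all type and member names are hypothetical illustrations, not taken from the paper's implementation):
```cpp
#include <cstdint>
#include <map>
#include <memory>
#include <vector>

enum class IsaMode { ARM, THUMB };

struct Region {
    uint32_t base_pc;              // start address of the hot region
    IsaMode mode;                  // ISA annotation recorded by the interpreter
    std::vector<uint8_t> bytes;    // target instructions to translate
};

struct Decoder {                   // one decoder tree per ISA
    virtual ~Decoder() = default;
    virtual void decode(const Region& region) = 0;
};
struct ArmDecoder   : Decoder { void decode(const Region&) override { /* 32-bit tree */ } };
struct ThumbDecoder : Decoder { void decode(const Region&) override { /* 16-bit tree */ } };

// Each asynchronous JIT worker owns both decoders and hot-swaps between them
// based on the dequeued region's annotation, independently of the
// interpreter's current execution mode.
class JitWorker {
    std::map<IsaMode, std::unique_ptr<Decoder>> decoders_;
public:
    JitWorker() {
        decoders_[IsaMode::ARM]   = std::make_unique<ArmDecoder>();
        decoders_[IsaMode::THUMB] = std::make_unique<ThumbDecoder>();
    }
    void translate(const Region& region) {
        decoders_.at(region.mode)->decode(region);  // select decoder per region
    }
};
```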
A. Translation/Execution Models for DBT Systems
Before describing our contributions we review existing translation/execution models for DBT systems with respect to their ability to support target dual instruction sets.
1) Single-mode translation/execution model
a) Interpreter only. In this mode the entire target program is executed on an instruction-by-instruction basis. Strictly, this is not DBT as no translation takes place. It is straightforward to keep track of the current ISA as mode changes take immediate effect and the interpreter can handle the next instruction appropriately based on its current state (see Figure 1(a)). ISS using interpretative execution such as SIMPLESCALAR [9] or ARMISS [15] have low implementation complexity, but suffer from poor performance.
b) JIT only. Interpreter-less DBT systems exclusively rely on JIT compilation to translate every target instruction to native code before executing this code. As a consequence, execution in this model will pause as soon as previously unseen code has been discovered and only resume after JIT compilation has completed. ISA mode changes take immediate effect (see Figure 1(b)) and are again simple to implement as native code execution and JIT compilation stages are tightly coupled and mutually exclusive. JIT-only DBT systems are of low complexity and provide better performance than purely interpreted ones, but rely on very fast JIT compilers, which in turn will often perform very little code optimisation. This and the fact that the JIT compiler is on the critical path of the main execution loop within a single thread limits the achievable performance. QEMU [16], STRATA [6], [7], [8], SHADE [4], SPIRE [17], and PIN [18] are based on this model.
2) Mixed-mode translation/execution model
a) Synchronous (single-threaded). This model combines both an interpreter and a JIT compiler in a single DBT (see Figure 1(c)). Initially, the interpreter is used for code execution and profiling. Once a region of hot code has been discovered, the JIT compiler is employed to translate this region to faster native code. The advantage of a mixed-mode translation/execution model is that only profitable program regions are JIT translated, whereas infrequently executed code can be handled in the interpreter without incurring JIT compilation overheads [19]. Due to its synchronous nature ISA tracking is simple in this model: the current machine state is available in the interpreter and can be used to select the appropriate instruction decoder in the JIT compiler. As before, the JIT compiler operates in the same thread as the main execution loop and program execution pauses whilst code is translated. This limits overall performance, especially during prolonged code translation phases. A popular representative of this model is DYNAMO [20].
b) Asynchronous (multi-threaded). This model is characterised by its multi-threaded operation of the main execution loop and JIT compiler. Similar to the synchronous mixed-mode case, an interpreter is used for initial code execution and discovery of hotspots. However, in this model the interpreter enqueues hot code regions to be translated by the JIT compiler and continues operation without blocking (see Figure 1(d)). As soon as the JIT compiler installs the native code the execution mode switches over from interpreted to native code execution. Only in this model is it possible to leverage concurrent JIT compilation on multi-core host machines, hiding the latency of JIT compilation and, ultimately, contributing to higher performance of the DBT system [1], [2]. Unfortunately, this model presents a challenge to implementing dual-ISA support: the current machine state represented in the interpreter may have advanced due to its concurrent operation and cannot be used to appropriately decode target instructions in the JIT compiler.
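To make the challenge concrete, consider the following minimal Python sketch (our own illustration, not code from any of the cited systems) of the snapshot-based hand-off an asynchronous DBT needs: the interpreter records the ISA mode alongside each enqueued region, because the live machine state may have moved on by the time a JIT worker dequeues the unit.

```python
import queue
import threading

work_queue = queue.Queue()

def compile_region(pc, isa_mode):
    # Placeholder for the JIT compiler; it must decode using the
    # snapshot mode, never the interpreter's live state.
    print(f'compiling region at {pc:#x} as {isa_mode}')

def jit_worker():
    while True:
        unit = work_queue.get()
        if unit is None:  # sentinel: shut the worker down
            break
        compile_region(unit['pc'], unit['isa_mode'])

worker = threading.Thread(target=jit_worker)
worker.start()

# Interpreter side: snapshot the ISA mode at region-discovery time.
work_queue.put({'pc': 0x8000, 'isa_mode': 'THUMB'})
work_queue.put(None)
worker.join()
```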
In summary, decoupling the JIT compiler from the main execution loop and offloading it to a separate thread has been demonstrated to increase the performance of multi-threaded DBT systems. However, it remains an unsolved problem how to efficiently handle dynamic changes of the target ISA without tightly coupling the JIT compiler and, thus, losing the benefits of its asynchronous operation.
B. Motivating Example
The nature of the ARM and THUMB instruction sets is such that it is not possible to statically determine from the binary encoding alone which ISA an instruction belongs to. This is compounded by the fact that ARM instructions are 32 bits in length, whereas THUMB instructions are 16 bits. For example, consider the 32-bit word e2810006. An ARM instruction decoder would decode the instruction as:
```
add r0, r1, #6
```
whereas a THUMB instruction decoder would treat the above 32-bit word as two 16-bit halfwords and decode them as the following two THUMB instructions:
```
mov r6, r0
b.n +4
```
An ARM processor resolves this ambiguity by being in one of two dynamic modes: ARM or THUMB.

A disassembler, given a sequence of instructions, has no information about which ISA the instructions belong to. It therefore cannot distinguish between ARM and THUMB instructions in a raw instruction stream and must use debugging information provided with the binary to perform disassembly. If the debugging information is not available (e.g. it has been "stripped" from the binary), the disassembler must be instructed how to decode the instructions (assuming the programmer knows), and if the instructions are mixed-mode it will not be able to decode them effectively at all. This problem for disassemblers translates directly to the same problem in any DBT with multi-ISA support. A DBT necessarily works on a raw instruction stream – without debugging information – and therefore must use its own mechanisms to correctly decode instructions. In the example of an ARM/THUMB DBT, it may choose to simulate a THUMB status bit as part of the CPSR register present in the ARM architecture (see Section II), and use the information within that register to determine how the current instruction should be decoded. But as mentioned in Section I-A, this approach does not work in the context of an asynchronous JIT compiler, as the state of the CPSR within the interpreter would be out of sync with the compiler during code translation.
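The ambiguity can be illustrated in a few lines of Python (a sketch of our own, not the paper's implementation); the same 32-bit word yields entirely different instruction streams depending on the assumed mode:

```python
# The 32-bit word from the example above, fetched under both ISA modes.
WORD = 0xE2810006

def fetch_units(word, isa_mode):
    # ARM mode fetches one 32-bit instruction; THUMB mode sees the same
    # word as two 16-bit halfwords (little-endian halfword order assumed).
    if isa_mode == 'ARM':
        return [word]
    return [word & 0xFFFF, word >> 16]

print([hex(u) for u in fetch_units(WORD, 'ARM')])    # ['0xe2810006'] -> add r0, r1, #6
print([hex(u) for u in fetch_units(WORD, 'THUMB')])  # ['0x6', '0xe281'] -> mov r6, r0; b.n
```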
C. Overview
The remainder of this paper is structured as follows. We review the dual ARM/THUMB ISA as far as relevant for this paper in Section II. We then introduce our new methodology for dual-ISA DBT support in Section III. This is followed by the presentation of our experimental evaluation in Section IV and a discussion of related work in Section V. Finally, in Section VI we summarise our findings and conclude.
II. BACKGROUND: ARM/THUMB
THUMB is a compact 16-bit instruction set supported by many ARM cores in addition to their standard 32-bit ARM ISA. Internally, narrow THUMB instructions are decoded to standard ARM instructions, i.e. each THUMB instruction has a 32-bit counterpart, but the inverse is not true. In THUMB mode only 8 of the 16 32-bit general-purpose ARM registers are accessible, whereas in ARM mode no such restrictions apply. The narrower 16-bit instructions offer increased code density and higher performance on systems with slow memory.

The Current Program Status Register (CPSR) holds the processor mode (user or exception flag), interrupt mask bits, condition codes, and the THUMB status bit. The THUMB status bit (T) indicates the processor's current state: 0 for ARM state (the default) or 1 for THUMB. A saved copy of the CPSR, called the Saved Program Status Register (SPSR), exists in exception modes only.

The usual method to enter or leave the THUMB state is via the Branch and Exchange (BX) or Branch, Link, and Exchange (BLX) instructions, but nearly every instruction that is permitted to update the PC may make a mode transition. During the branch, the CPU examines the least significant bit (LSB) of the destination address to determine the new state. Since all ARM instructions are aligned on either a 32- or 16-bit boundary, the LSB of the address is not used in the branch directly. However, if the LSB is 1 when branching from ARM state, the processor switches to THUMB state before it begins executing from the new address; if it is 0 when branching from THUMB state, the processor changes back to ARM state. The LSB is also set (or cleared) in the LR to support returning from functions that were called from a different mode. When an exception occurs, the processor automatically begins executing in ARM state at the address of the exception vector, even if the CPU was running in THUMB state when the exception occurred. When returning from the processor's exception mode, the saved value of T in the SPSR register is used to restore the state. This bit can be used, for example, by an operating system to manually restart a task in the THUMB state – if that is how it was running previously.
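The interworking rule lends itself to a compact illustration (a sketch under the LSB semantics just described, not production code):

```python
def branch_exchange(target_address):
    # The LSB of the branch target selects the new state (1 -> THUMB,
    # 0 -> ARM); the LSB itself is not part of the fetch address.
    new_mode = 'THUMB' if target_address & 1 else 'ARM'
    pc = target_address & ~1
    return new_mode, pc

print(branch_exchange(0x8001))  # ('THUMB', 0x8000)
print(branch_exchange(0x8000))  # ('ARM', 0x8000)
```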
III. METHODOLOGY: DUAL-ISA DBT SUPPORT
The DBT consists of an execution engine and a compilation engine. The execution engine will either execute native code (which has been generated from instructions by the compilation engine) or execute instructions in an interpreter loop. The execution engine's interpreter also generates profiling data to pass to the compilation engine (see Figure 2). The execution engine maintains a machine state structure, within which the current ISA mode is tracked as a state variable.

A. ISA Mode Tracking

When an instruction needs to be decoded (e.g. on a miss from the decoder cache), the current mode is looked up from the state variable and sent to the decoder service, which then decodes the instruction using the correct ISA decode tree. If an instruction causes a CPU ISA mode change to occur (for example, in the case of the ARM architecture, a BX instruction), then the CPU state is updated accordingly. Since the decoder service is a detached component, and may be called by a thread other than the main execution loop, it cannot (and should not) access the CPU state, and therefore must be instructed by the calling routine which ISA mode to use. Additionally, since a JIT compiler thread does not operate in sync with the execution thread, it also cannot access the CPU state and must call the decoder service with the ISA mode information supplied in the metadata of the basic block it is currently compiling. A basic block can only contain instructions of one ISA mode. This metadata is populated by the profiling element of the interpreter (see Figure 2). In order to remain retargetable (and therefore target-hardware agnostic), the ISA mode is a first-class citizen in the DBT framework (see Figure 3), and is not tied to a specific architecture's method of handling multiple ISAs. For example, the ARM architecture tracks the current ISA mode by means of the T bit in the CPSR register.
B. Hotswapping Software Instruction Decoders
The instruction decoder is implemented as a separate component, or service, within the DBT and as such is called by any routine that requires an instruction to be decoded. Such routines are the interpreter, when a decoder cache miss occurs, and a JIT compilation thread, when an instruction is being translated. Upon such a request being made, the decoder must be provided with the PC from which to fetch the instruction and the ISA with which the instruction should be decoded. Given this information, as part of a decoding request, the decoder service can then make a correctly sized fetch from the guest system's memory and select the correct decoder tree with which to decode the instruction.
The interpreter will perform the decode request using the current machine state, available as part of the execution engine, and a JIT compilation thread will perform the decode request using the snapshot of the machine state provided as part of the compilation work unit (see Figure 4).
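The essence of this interface can be sketched as follows (illustrative Python with hypothetical decode callbacks; the actual decoder trees are generated offline, as Section III-C describes):

```python
# Each ISA carries its own fetch size and decoder tree. The caller
# supplies the ISA mode, so the service never consults the live CPU state.
DECODERS = {
    'ARM':   {'fetch_size': 4, 'decode': lambda w: f'arm_insn<{w:08x}>'},
    'THUMB': {'fetch_size': 2, 'decode': lambda w: f'thumb_insn<{w:04x}>'},
}

def decode_request(memory, pc, isa_mode):
    entry = DECODERS[isa_mode]
    word = int.from_bytes(memory[pc:pc + entry['fetch_size']], 'little')
    return entry['decode'](word)

mem = bytes.fromhex('060081e2')  # the word 0xe2810006 in little-endian order
print(decode_request(mem, 0, 'ARM'))    # one 32-bit decode
print(decode_request(mem, 0, 'THUMB'))  # one 16-bit decode
```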
C. High-Level Retargetability
We use a variant of the ARCHC [21] architecture description language (ADL) for the specification of the target architecture, i.e. architecturally visible registers, instruction formats and behaviours. A simplified example of our ARM V5T model is shown in Listing 1. Note the declaration of the two supported ISAs in lines 18–19, where the system is made aware of the presence of the two target ISAs and the ARM ISA is set as the default. Within the constructor in lines 25–26 we include the detailed specifications for both supported ISAs.
After the top-level model (describing register banks, registers, flags and other architectural components) has been defined, details of both supported ISAs need to be specified. Simplified examples of the ARM and THUMB ISA models are shown in Listings 2 and 3 in Figure 5. For each ISA we need to provide its name (line 4) and fetch size (line 5), of which instruction words are multiples. This is followed by a specification of the instruction formats present in the ARM and THUMB ISAs (lines 7–11), before each instruction is assigned exactly one of the previously defined instruction formats (lines 13–17). The main sections of the instruction definitions (starting in lines 21 and 20, respectively) describe the instruction patterns for decoder generation (lines 24 and 23), their assembly patterns for disassembly (lines 25 and 24) and the names of the functions that implement the actual instruction semantics, also called behaviours (lines 27 and 25).
In an offline stage, we generate a target-specific processor module (see Figure 3) from this processor and ISA description. In particular, the individual decoder trees (see Figure 4) for both the ARM and THUMB ISAs are generated from an ARCHC-like specification using an approach based on [22], [23]. Note that we use ARCHC as a description language only, and do not use or implement any of the existing ARCHC tools.
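To give a flavour of what such a generated decoder amounts to, here is a much simplified sketch of our own (the pattern below is an abbreviated form of the ARM data-processing encoding, and real generated decoders are trees rather than linear scans):

```python
# A decoder entry pairs an instruction name with a (mask, value) bit
# pattern; one table is generated per ISA.
ARM_PATTERNS = [
    ('add_imm', 0x0FE00000, 0x02800000),  # simplified ADD-immediate pattern
]

def build_decoder(patterns):
    def decoder(word):
        for name, mask, value in patterns:
            if word & mask == value:
                return name
        return 'undefined'
    return decoder

arm_decode = build_decoder(ARM_PATTERNS)
print(arm_decode(0xE2810006))  # 'add_imm' -> add r0, r1, #6
```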
The benefit of choosing to use ARCHC as the description language is that it is well-known in the architecture design field, and descriptions exist for a variety of real architectures.
Fig. 6: Comparison: Semantic action of an ARM V5 adc instruction expressed using a high-level ARCHC-like specification (top) and a low-level QEMU implementation (bottom).
Specialisation on static instruction fields eliminates unnecessary runtime decoding checks (such as flag setting). The generated processor module is dynamically loaded by our DBT system on startup and contains both a threaded interpreter and an LLVM-based JIT compiler. At runtime the JIT compiler translates regions of target instructions [2] to native code of the host machine using the offline-generated generator functions, which employ additional dynamic optimisations such as partial evaluation [24] to improve both the quality and the code size of the generated code.
As the high-level implementations of the instructions are written in a strict subset of C, the behaviours for each instruction are used directly by the interpreter to execute each instruction as it is encountered. As the interpreter executes, it builds profiling information about the basic blocks it has encountered, and after a certain configurable threshold is met, the profiling information (which includes a control-flow graph) is passed to the compilation engine.
Compared to ARM code, THUMB code incurs two main overheads:

1. Data processing instructions can only operate on the first eight registers (r0 to r7) – data must be explicitly moved from the high registers to the low registers.

2. No THUMB instructions (except for the conditional branch instruction) are predicated, and therefore local branches around conditional code must be made, in contrast to ARM, where blocks of instructions can simply be marked up with the appropriate predicate to exclude them from execution.
The optimisation strategies employed in our DBT system remove much of this overhead: local branches (i.e. branches within a region) are heavily optimised using standard LLVM optimisation passes, and high-register operations are negated through the use of redundant-load and dead-store elimination.
C. Dynamic ISA Switching
Our results on dynamic ISA switching are summarised in Table III. For each benchmark we list the total number of ARM instructions, ARM/THUMB instructions, ISA switches and the average dynamic instruction count between ISA switches. All benchmarks make use of both the ARM and THUMB ISAs. On average 8.76% of the total number of instructions are ARM instructions and the rest are THUMB instructions, but this figure varies significantly between benchmarks. 401.bzip2 and 429.mcf have similar ratios of THUMB instructions (both approximately 99%) but quite different relative performance characteristics: 429.mcf executes 3% slower in dual-ISA mode, whereas 401.bzip2 executes 16% faster. This kind of variance indicates that our dual-ISA DBT does not inherently introduce overhead; the observed differences are simply a function of the behaviour of the binary being translated.
D. Comparison to State-of-the-Art
Figure 8 shows the absolute performance in target MIPS of our DBT compared with the state-of-the-art QEMU. The performance of our DBT system is consistently higher than that of QEMU; on average our DBT is 192% faster for dual-ISA implementations. Since the target instruction count is exactly the same between DBTs (per benchmark), this also indicates an improvement in DBT running time. We can attribute this to the ability of our JIT compiler to produce highly optimised native code, using aggressive LLVM optimisations that simply do not (and cannot, given the trace-based architecture) exist in QEMU. We employ a region-based compilation strategy,
enabling control-flow within a region to be subject to a series of loop optimisations. Our ability to hide compilation latency by means of offloading JIT compilation to multiple threads also provides a performance gain, as we are continuously executing target instructions, in contrast to QEMU which stalls as it discovers and compiles new code. The high-level code used to describe instruction implementations enables easy debugging, testing and verification, and we have internal tools that can automatically generate and run tests against reference hardware. In contrast, QEMU has a single large file that contains the decoder and the code generator, with limited documentation and no explanation of how instructions are decoded – or how their execution is modelled. Using our system, once the high-level code has been written, any improvements in the underlying framework (or even the processor module generator, see figure 3) are immediately available to all architecture descriptions, and if errors are detected in the decoder or instruction behaviours, it only requires correcting once in high-level code to fix in both the JIT and interpretive component.
E. Comparison to Native Execution
Figure 9 shows the absolute performance in target MIPS of our DBT compared with execution on a native ARM platform (QUALCOMM DRAGONBoard featuring four SNAPDRAGON 800 cores). On average, we are 31% slower than native execution for dual-ISA implementation, but there are some cases where our simulation is actually faster than the native execution on a 1.7GHz out-of-order ARM core. For example, 429.mcf is 3.1x faster in our DBT, compared to executing natively. This may be attributed to 429.mcf warming up quite quickly in our JIT, and spending the remaining time executing host-optimised native code. Conversely, 403.gcc is 2.2x slower than native in our DBT, which may be attributed to 403.gcc’s inherently phased behaviour, and therefore invoking multiple JIT compilation sessions throughout the lifetime of the benchmark.
V. RELATED WORK
DAISY [3] is an early software dynamic translator, which uses POWERPC as the input instruction set and a proprietary VLIW architecture as the target instruction set. It does not provide for dual-mode ISA support. SHADE [4] and EMBRA [5] are DBT systems targeting the SPARC V8/V9 and MIPS I ISAs, but neither system provides support for a dual-mode ISA. STRATA [6], [7] is a retargetable software dynamic translation infrastructure designed to support experimentation with novel applications of DBT. STRATA has been used for a variety of applications including system call monitoring, profiling, and code compression. The STRATA-ARM port [8] has introduced a number of ARM-specific optimisations, for example, involving reads of and writes to the exposed PC. STRATA-ARM targets the ARM V5T ISA, but provides no support for THUMB instructions. The popular SIMPLESCALAR simulator [9] has been ported to support the ARM V4 ISA, but this port lacks support for THUMB. The SIMIT-ARM simulator can asynchronously perform dynamic binary translation (using GCC, as opposed to an in-built translator), accomplishing this by dispatching work to other processor cores, or across the network using sockets [1]. It does not, however, support the THUMB instruction set – nor does it intend to in the near future. XTREM [10] and XEEMU [11] are power and performance simulators for the INTEL XSCALE core. Whilst this core implements the ARM V5TE ISA, THUMB instructions are supported by neither XTREM nor XEEMU. FACSIM [12] is an instruction set simulator targeting the ARM9E-S family of cores, which implement the ARM V5TE architecture. FACSIM employs DBT technology for instruction-accurate simulation and interpretive simulation in its cycle-accurate mode. Unfortunately, it does not support THUMB instructions in either mode. SYNTESIM [13] is a portable functional simulator generated from a high-level architectural description. It supports the ALPHA ISA,
but provides no support for mixed-mode instruction sets. SIMICS/ARM [14] has a fairly complete implementation of the core ARM V5 instruction set; the THUMB and enhanced DSP extensions are not implemented, though. ARMISS [15] is an interpretive simulator of the ARM920T architecture, which uses instruction caching but provides no THUMB support. Similarly, the ARM port of the popular PIN tool does not support the THUMB extensions [26]. As outlined above, none of the ARM DBTs mentioned support the THUMB instruction set, and the other DBTs do not offer any form of multiple-ISA support for their respective target platforms. This could indicate that the problem of supporting multiple instruction sets has been deemed too complex to be worth implementing, or has not yet been considered.
QEMU [16] is a well-known retargetable emulator that supports ARM V5T platforms, including THUMB instructions. QEMU translates ARM/THUMB instructions to native x86 code using its tiny code generator (TCG). QEMU is interpreter-less, i.e. all executed code is translated. In particular, this means that TCG is not decoupled from the execution loop: execution stops whilst code is JIT-compiled and only resumes afterwards. This design decision avoids the challenges outlined in this paper, but it places the JIT compiler on the critical path for code execution and misses the opportunity to offload the JIT compiler to another core of the host machine [27], [2], [28]. Another mixed-ISA simulator is presented in [29]; however, this is based entirely on interpretive execution with instruction caching and is about two orders of magnitude slower than either QEMU or our DBT system. ARM provides the ARMULATOR [30] and FAST MODELS [31] ISS. ARMULATOR is an interpretive ISS and has been replaced by the JIT compilation-based FAST MODELS, which supports THUMB and operates at speeds comparable to QEMU-ARM, but no internal details are available due to its proprietary nature.

LISA is a Hardware Description Language aimed at describing "programmable architectures, their peripherals and interfaces". The project also produces a series of tools that accept a LISA definition and produce a toolchain consisting of compilers, assemblers, linkers and an instruction set simulator. The simulator produced is termed a JIT-CCS (just-in-time cache-compiled simulator) [32] and is a synchronous JIT-only simulator, which compiles and executes on an instruction-by-instruction basis, caching the results of the compilation for fast re-use. However, each instruction encountered is not in fact compiled as such, but rather linked to existing pre-compiled instruction behaviours as it is encountered. These links are placed in a cache, indexed by instruction address, and are tagged with the instruction data. This arrangement supports self-modifying code and arbitrary ISA mode switches: when a cache lookup occurs, the tag is checked to determine whether the cached instruction is for the correct mode, and that it is equal to the one that is about to be executed. In contrast to our asynchronous approach, the simulator knows which ISA mode the emulated processor is currently in at instruction execution time, and if a cache miss occurs, it can use the appropriate instruction decoder at that point to select the pre-compiled instruction implementation. As our decode and compilation phase is decoupled from the execution engine, we cannot use this method to select which decoder to use.

The main drawback of this approach is that it is not strictly JIT compilation, but rather JIT selection of instruction implementations, and hence no run-time optimisation is performed, especially since the simulation engine executes one instruction at a time. This is in contrast to our approach, which compiles an entire region of discovered guest instructions at a time and executes within the compiled region of code. Furthermore, the instructions are only linked to behaviours, and so specialisation of the behaviours depending on static instruction fields cannot occur, resulting in greater overhead when executing an instruction. Our partial evaluation approach to instruction compilation removes this source of overhead entirely.
A commercialisation of the LISA tools is available from Synopsys as their Processor Designer offering, but limited information about the implementation of the simulators produced is available for this proprietary tool, other than an indication that it employs the same strategy as described above.
VI. SUMMARY AND CONCLUSIONS
Asynchronous mixed-mode DBT systems provide an effective means to increase JIT throughput and, at the same time, hide compilation latency, enabling the use of potentially slower, yet highly optimising code generators. In this paper we have developed a novel methodology for integrating dual-ISA support into a retargetable, asynchronous DBT system; no prior asynchronous DBT system is known to provide any support for mixed-mode ISAs. We introduce ISA mode tracking and hot-swapping of software instruction decoders as key enablers of efficient ARM/THUMB emulation. We have evaluated our approach against the SPEC CPU2006 integer benchmark suite and demonstrated that our approach to dual-ISA support does not introduce any overhead. For an ARM V5T model generated from a high-level description, our retargetable DBT system operates at 780 MIPS on average. This is equivalent to about 192% of the performance of the state-of-the-art QEMU-ARM, which has seen years of manual tuning to achieve its performance and is one of the very few DBT systems that provides both ARM and THUMB support.
fv3gfs-wrapper: a Python wrapper of the FV3GFS atmospheric model
Jeremy McGibbon^1, Noah D. Brenowitz^1, Mark Cheeseman^1, Spencer K. Clark^1,2, Johann Dahm^1, Eddie Davis^1, Oliver D. Elbert^1,2, Rhea C. George^1, Lucas M. Harris^2, Brian Henn^1, Anna Kwa^1, W. Andre Perkins^1, Oliver Watt-Meyer^1, Tobias Wicky^1, Christopher S. Bretherton^1,3, and Oliver Fuhrer^1
^1 Vulcan Inc., Seattle, WA
^2 Geophysical Fluid Dynamics Laboratory, NOAA, Princeton, NJ
^3 Department of Atmospheric Sciences, University of Washington, Seattle, WA
Correspondence: Jeremy McGibbon (mcgibbon@uw.edu)
Abstract. Simulation software in geophysics is traditionally written in Fortran or C++ due to the stringent performance requirements these codes have to satisfy. As a result, researchers who use high-productivity languages for exploratory work often find these codes hard to understand, hard to modify, and hard to integrate with their analysis tools. `fv3gfs-wrapper` is an open-source Python-wrapped version of the NOAA (National Oceanic and Atmospheric Administration) FV3GFS (Finite-Volume Cubed Sphere Global Forecasting System) global atmospheric model, which is coded in Fortran. The wrapper provides simple interfaces to progress the Fortran main loop and get or set variables used by the Fortran model. These interfaces enable a wide range of use cases such as modifying the behavior of the model, introducing online analysis code, or saving model variables and reading forcings directly to and from cloud storage. Model performance is identical to the fully-compiled Fortran model, unless routines to copy state in and out of the model are used. This copy overhead is well within an acceptable range of performance, and could be avoided with modifications to the Fortran source code. The wrapping approach is outlined and can be applied similarly in other Fortran models to enable more productive scientific workflows.
1 Introduction
FV3GFS (Finite-Volume Cubed Sphere Global Forecasting System) (Zhou et al., 2019) is a prototype of the operational Global Forecast System of the National Centers for Environmental Prediction. In this document when we say FV3GFS we are referring specifically to the atmospheric component of the U. S. National Oceanic and Atmospheric Administration (NOAA) Unified Forecast System (UFS, https://ufscommunity.org/) for operational numerical weather prediction. We forked this code from the v1 branch of the UFS model in December 2019. It uses the Geophysical Fluid Dynamics Laboratory (GFDL) Finite-Volume Cubed-Sphere Dynamical Core (FV3). FV3 solves the non-hydrostatic equations of atmospheric motion discretized on a cubed sphere using a finite volume scheme on a terrain-following grid with D-grid wind staggering (Putman and Lin, 2007; Harris and Lin, 2013). The model is written in Fortran (Global Engineering Documents, 1991) and parallelized using a hybrid OpenMP (Open Multi-Processing, OpenMP Architecture Review Board (2020)) / MPI (Message Passing Interface, Message Passing Interface Forum (2015)) approach, which allows for performant execution through compilation.
Figure 1. Schematic of Fortran-centric workflow using the filesystem to transfer data to Python user code. Arrowheads indicate the direction of the model main loop, as well as data transfer out of the Fortran model and into the Python user script.
However, development of an atmospheric model using a low-level, strongly typed programming language with a small user base has trade-offs. Libraries for interacting with cloud storage, performing physical or statistical analysis, and using machine learning are not as readily available or widely used in languages like Fortran as they are in high-level languages such as Python. A Python interface to the compiled Fortran code can enable a much larger user base to interact with this code, and allow a large ecosystem of Python tools to be interfaced with model routines.
Python is often integrated into Fortran modeling workflows as a post-processing tool, as shown in Figure 1. In this workflow, Python is used to perform computations on data saved to the filesystem by the Fortran model. This approach has several shortcomings. It is rarely feasible to store the full-resolution model state at each model time step, so often statistics over time are stored instead. Unless sufficiently frequent snapshots are stored, computing new statistics directly from full-resolution instantaneous fields requires writing Fortran code to run in the model. This can be an issue if developer documentation is not available or the user is not familiar with Fortran. This approach requires writing to disk before data can be used in Python, which may be unnecessary if the written data is not a necessary end product. Such filesystem operations can be a significant bottleneck in computation time. This approach also does not provide a way to use Python libraries when modifying the behavior of the Fortran model, as any logic after the data is read from disk must be written in Fortran. Instead, machine learning practitioners port machine learning routines to Fortran using models that have been trained and saved using Python (Ott et al., 2020; Curcic, 2019).
In this work, we present a Python wrapper for the FV3GFS global atmospheric model. As shown in Figure 2, the FV3GFS model is compiled as a shared library with wrapper routines that provide an API to control and interact with the model. At the core of any weather or climate model is the main integration loop, which integrates the model state forward by a period of time. The wrapper splits the model main loop into a sequence of subroutines that can be called from Python. This allows the main loop to be written in Python, through calls to each section of the Fortran main loop (step_dynamics, step_physics). Furthermore, it allows copying variables into (set_state) or out of (get_state) the Fortran runtime environment, so that Python functions can read and modify the state and thereby affect the integration of the Fortran model. Data retrieved with get_state includes units information, for ease of debugging and for reference on data written to disk.
Figure 2. Schematic of Python-centric workflow using fv3gfs-wrapper, showing how it can interface with Python libraries during model execution. Arrowheads indicate data transfer between user Python code and Fortran model.
As the wrapper currently stands, configuration is deferred entirely to the Fortran model code. The only change in initialization is that MPI is initialized by mpi4py, after which the MPI communicator is passed as a Fortran handle to the model initialization routines. This allows us to maintain feature completeness with the existing Fortran model, without re-writing configuration logic.
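mpi4py exposes the conversion used in this hand-off directly; a minimal sketch follows (the wrapped initialization call named in the comment is hypothetical):

```python
from mpi4py import MPI

# mpi4py initializes MPI on import; the communicator is then converted
# to an integer Fortran handle that Fortran MPI routines understand.
fortran_comm = MPI.COMM_WORLD.py2f()
# e.g. wrapped_model_initialize(comm=fortran_comm)  # hypothetical call
```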
This Python-centric workflow enables a fundamentally different way to integrate tools available in the Python ecosystem into a Fortran modeling workflow. A user can add online diagnostic code after a physics or dynamics step, and perform input or output (I/O) to or from cloud resources. The model state can be reset to a previous one, allowing sensitivity studies to be run online. Custom logic can be added to the main loop after a physics or dynamics step, such as a machine learning corrector parameterization or nudging to a cloud-based forcing dataset. The use of a Python main loop makes it significantly easier to integrate custom I/O, diagnostic routines, and physical parameterizations into the model.
This ease of integration is an important tool when developing parameterizations using machine learning. When developing such schemes, offline performance does not guarantee performance when run online within a model. However it can be difficult to rapidly train a model in a language with machine learning libraries such as Python and then convert it for use in Fortran. Solutions have so far been based on Fortran executables, either by calling Python from Fortran (Brenowitz and Bretherton, 2019) or by re-implementing neural network codes in Fortran (Ott et al., 2020; Curcic, 2019). Because of the strong tooling available for machine learning in Python, it is advantageous to be able to include Python machine learning code within the atmospheric model. Presently Python code can only be integrated outside of the dynamics and physics routines, and not within the physics suite. Adding flexibility to introduce Python code between individual physics schemes remains a subject for future work.
This is not the first time Python and high-performance compiled programs have been combined. qtcm (Lin, 2009) applies a similar wrapping approach to a quasi-equilibrium tropical circulation model using f2py (Peterson, 2009), an automated Fortran to Python interface generator. PyCLES (Pressel et al., 2015) is a full large-eddy simulation written in Cython, a variant of Python that compiles to C code and can interoperate with Python codes. CliMT (Monteiro et al., 2018) wraps Fortran model components into Python objects that can be composed to define a model main loop in Python. In astronomy, Python
computational codes such as nbodykit (Hand and Feng, 2017) run using numpy (Harris et al., 2020) and MPI for Python (Dalcín et al., 2008), and are shown to scale to thousands of ranks. These previous works provide confidence that a model using Python to call compiled code can provide the level of scaling and performance required for atmospheric and climate science research.
A consideration in designing new atmospheric models is the large amount of legacy Fortran code already available. As a consequence, new model components are often written in Fortran so they can interface with such legacy code. Efforts to re-write existing Fortran models (for example, to run on Graphics Processing Unit (GPU) architectures), can benefit from the ability to progressively replace existing components with refactored or re-written codes in other languages.
To motivate the design choices made in this work, we present our main priorities: retain existing functionality of the Fortran model, minimal sacrifice of performance, a main time stepping loop which is easy to understand and modify, and minimal changes to Fortran code.
Most of these priorities clearly come from our focus on improving model accessibility for researchers interested in modifying the behavior of the Fortran code. They would benefit from retaining the existing functionality they would like to modify, and they should be able to easily understand how the code can be modified. They may require efficient model performance on high-performance computers for research problems using higher-resolution simulations. By minimizing the needed changes to the Fortran code, we can reduce the effort required to switch to a new Fortran model version.
While this wrapper has many applications, we will focus on illustrative scenarios relevant to our own FV3GFS model development work. In addition to reproducing the existing model behavior, we will show how to: augment the Fortran model with a machine learning parameterization, include custom MPI communication as part of online diagnostic code, and perform online analysis in a Jupyter notebook.
We will begin by showing in Section 2 how fv3gfs-wrapper can be used to reproduce, bit-for-bit, the results of the existing Fortran model. We will then show in Section 3 how fv3gfs-wrapper enables each of these use cases while achieving our priorities of performance, ease of understanding, and ease of modification. Having presented the features of fv3gfs-wrapper by example, we will delve more deeply into their implementation in Section 4. Finally, we will discuss some of the challenges encountered in designing and implementing fv3gfs-wrapper in Section 4.5 before drawing our conclusions in Section 5.
2 Validation
For completeness and testing, fv3gfs-wrapper should be able to reproduce, bit-for-bit, the results of the Fortran model. This allows us to test the logic wrapping the Fortran code. Because the wrapper executes Fortran code identical to the original Fortran model, bit-for-bit regression on one parameter configuration or forcing dataset gives us confidence the code can be used for any parameter configuration or forcing dataset. The implementation of this use case is as follows:
```python
import fv3gfs.wrapper

if __name__ == '__main__':
    fv3gfs.wrapper.initialize()
    for i in range(fv3gfs.wrapper.get_step_count()):
        fv3gfs.wrapper.step_dynamics()
        fv3gfs.wrapper.step_physics()
        fv3gfs.wrapper.save_intermediate_restart_if_enabled()
    fv3gfs.wrapper.cleanup()
```
Table 1. Run times of examples and compiled Fortran model. Baseline refers to reproducing existing Fortran behavior. Examples were run for 6 hours of simulation time at C48 resolution on 6 processors on a 2019 Macbook Pro. Each example was run three times, and the shortest time is reported.
| Example | Run time (s) |
| --- | --- |
| Fortran baseline | 110 |
| Wrapper baseline | 110 |
| Random Forest | 116 |
| Minimum Surface Pressure | 110 |
The existing main routine in `coupler_main.f90` separates relatively cleanly into five routines: one each to initialize and finalize the model, one for dynamics (resolved fluid flow), one for physics (subgrid-scale processes), and one that will write intermediate restart data if intermediate restart files are enabled for the run and if we should write a restart on the current timestep. Each of these Python routines calls that section of the Fortran code, and then returns to a Python context.
The overhead of the Python time step loop and the wrapper functions is negligible in comparison to the computation done within a process (Table 1), meeting our performance goal. The conciseness of the main loop makes it easy to understand what the code is doing at a high level. This example is easy to modify, as shown in the use cases in the next section.
This code and the command-line examples below are available in the examples/gmd_timings directory of the git repository for `fv3gfs-wrapper` as referenced in the Code and data availability statement, using a 6-hour C48 run directory available as McGibbon et al. (2021d). The timings for each of these examples are included in Table 1. We can see the wrapper does not add significant overhead to the Fortran baseline timing.
3 Use cases in action
All examples discussed in this section are included in the public repository for `fv3gfs-wrapper` linked in the Code and data availability statement. We encourage the reader to download and run these examples on their own computer, using the example run directory available as McGibbon et al. (2021d).
3.1 Augmenting the model with machine learning
An important use case motivating this work is to be able to modify the operation of the model main loop, for example by adding a machine learning model that applies tendencies at the end of each timestep. This serves as an example for how the main loop...
can be modified more generically, such as by adding I/O functionality or online diagnostics, using `fv3gfs.wrapper.get_state` and `fv3gfs.wrapper.set_state` to interface with the Fortran model.
```python
import fv3gfs.wrapper
import fv3gfs.wrapper.examples
from datetime import timedelta
import f90nml

if __name__ == '__main__':
    # load timestep from the namelist
    namelist = f90nml.read('input.nml')
    timestep = timedelta(seconds=namelist['coupler_nml']['dt_atmos'])
    # initialize the machine learning model
    rf_model = fv3gfs.wrapper.examples.get_random_forest()
    fv3gfs.wrapper.initialize()
    for i in range(fv3gfs.wrapper.get_step_count()):
        fv3gfs.wrapper.step_dynamics()
        fv3gfs.wrapper.step_physics()
        # apply an update from the machine learning model
        state = fv3gfs.wrapper.get_state(rf_model.inputs)
        rf_model.update(state, timestep=timestep)
        fv3gfs.wrapper.set_state(state)
        fv3gfs.wrapper.save_intermediate_restart_if_enabled()
    fv3gfs.wrapper.cleanup()
```
The example includes a compact random forest we have trained on nudging tendencies towards reanalysis data. The separation of physics and dynamics steps in the code makes it clear that the machine learning update is applied at the end of a physics step, and is included in any intermediate restart data. The random forest model used in this example is trained according to the approach in Watt-Meyer et al. (2021), with a small number of trees and layers chosen to decrease model size. As a proof of concept, the example model has not been tuned for stability, and may crash if run for longer than 6 hours or using a run directory other than the example provided. Model stability can be increased by enforcing the model specific humidity to be non-negative after applying the random forest update.
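As an illustration, such a fix could be expressed with the same get_state/set_state interface (a sketch of our own; the variable name 'specific_humidity' is assumed here for illustration), called in the main loop right after the random forest update:

```python
import numpy as np
import fv3gfs.wrapper

def clamp_specific_humidity():
    # Retrieve, clip, and write back the humidity field; assumes the
    # wrapper exposes a variable named 'specific_humidity'.
    state = fv3gfs.wrapper.get_state(['specific_humidity'])
    state['specific_humidity'].view[:] = np.clip(
        state['specific_humidity'].view[:], 0.0, None
    )
    fv3gfs.wrapper.set_state(state)
```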
This example showcases how the wrapper makes it easy to modify the operation of the Fortran model. In our own efforts to re-write the FV3 dynamical core in a Python-based domain-specific language (DSL), we have directly replaced a call to the Fortran dynamics step with Python-based code. We have also added nudging routines that directly access Zarr (Miles et al., 2020) reference datasets stored in the cloud, and I/O routines to save model snapshots to Zarr files in cloud storage as the model executes. With Python’s threading support, this data transfer can happen as the Fortran code is running. These tasks would be difficult to implement in Fortran, due to more complex threading interfaces, no existing bindings for Zarr, and a lack of support from cloud storage providers.
3.2 MPI communication
When writing parallel models, inter-process communication is an important functionality. MPI4py (Dalcín et al., 2008) provides Python bindings for MPI routines, and supports the use of numpy arrays. Using MPI4py, we have been able to implement halo updates, gather, and scatter operations. The syntax for MPI4py is similar to the syntax used in Fortran. In our implementation, the same MPI communicator is used by the Fortran code as is used by MPI4py.
Here we show a simple example of computing the minimum global surface temperature and printing it from the root process. This showcases how you can use MPI4py within the model to compute diagnostics using inter-rank communication.
```python
import fv3gfs.wrapper
import numpy as np
from mpi4py import MPI

ROOT = 0

if __name__ == '__main__':
    fv3gfs.wrapper.initialize()
    # MPI4py requires a receive "buffer" array to store incoming data;
    # a float dtype matches the temperature field being reduced
    min_surface_temperature = np.array(0.0)
    for i in range(fv3gfs.wrapper.get_step_count()):
        fv3gfs.wrapper.step_dynamics()
        fv3gfs.wrapper.step_physics()
        # Retrieve model minimum surface temperature; the scalar minimum
        # is wrapped in an array so mpi4py can treat it as a send buffer
        state = fv3gfs.wrapper.get_state(['surface_temperature'])
        MPI.COMM_WORLD.Reduce(
            np.array(state['surface_temperature'].view[:].min()),
            min_surface_temperature,
            root=ROOT,
            op=MPI.MIN,
        )
        if MPI.COMM_WORLD.Get_rank() == ROOT:
            units = state['surface_temperature'].units
            print(f'Minimum surface temperature: {min_surface_temperature} {units}')
        fv3gfs.wrapper.save_intermediate_restart_if_enabled()
    fv3gfs.wrapper.cleanup()
```
3.3 Interactive use in a Jupyter notebook
While we typically run the model using batch submission or from the command line, all of the examples above can be executed from within a Jupyter notebook using ipyparallel. This allows retrieving, computing, and plotting variables from the Fortran model while it is paused at a point of interest. It can also serve as explicit documentation of modelled phenomena, whether to communicate to other model developers or for use in an educational setting.

Figure 3. Screenshot of Jupyter notebook example using MPI communication to gather a field on 6 processes and plot it on the first process. Note that FV3GFS uses a "cubed sphere" grid, so that for 6 processes each one is responsible for one face of the cube.
We have prepared an example that interacts with the machine learning example model, using MPI communication from Python to gather and plot variables on a single rank (Figure 3). It can be accessed in the examples/jupyter directory of the Github repository, and makes use of Docker to ensure portability. While the example is written to run on 6 processes, ipyparallel allows notebooks to be run at larger scales on high-performance computing (HPC) clusters if the configuration is modified appropriately.
4 Implementation
4.1 Information transfer
To augment the Fortran model, we must read from and write to its state. This information transfer can be done in two ways, either by providing an interface to copy data between Fortran and Python arrays (effectively C arrays), or using the same memory in both codes. Re-using memory requires that the Fortran code use a pointer to a C array, allowing the same pointer to be used by the numpy array on the Python side. In the Fortran code for FV3GFS, arrays used for physical variables are defined as non-target allocatable arrays, which precludes sharing them with Python. It would require significant changes to the
Fortran code to instead use pointers to C arrays, which conflicts with our priority of making minimal changes to the Fortran code. Instead, the getters and setters (the routines which transfer variable values between Python and Fortran) perform a data copy between numpy-allocated C arrays and Fortran arrays within the wrapper layer.
4.2 Metaprogramming to pass arrays
Copying data from Python into a Fortran array requires a significant amount of code. Unless a structure of the Fortran data can be assumed, each Fortran variable to be accessed needs at least its own line of Fortran code, and in practice its own pair of subroutines, containing an assignment between a Python buffer and the correct Fortran variable. In our approach, each variable has two Fortran wrapper subroutines for getting and setting that variable, logic within a C wrapper layer for calling those Fortran wrapper subroutines, and header declarations for those subroutines.
Writing each of these manually would take significant time and effort. Instead we use Jinja templates (The Pallets Projects, 2019) to generate these wrappers using JSON files declaring the necessary information, such as the Fortran variable name, standard name, units, and dimensionality. For example, the Fortran variable name "zorl" has standard name "surface_roughness", units "cm", and dimensionality [y, x]. This greatly reduces the number of lines required to write the code. For physics variables, the template file and data file are 89 and 459 lines, respectively, while the generated Fortran file is 1894 lines. Physics variables are also responsible for most of the lines in the 1680-line generated Cython file. Adding a new physics variable requires adding an entry to a JSON file with its standard name, Fortran name, Fortran container struct name, dimensions, and units. This JSON file is also used to automatically enable unit tests for the getters and setters of each physics variable.
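To give a flavour of this metaprogramming step, here is a minimal self-contained sketch (the template text, JSON schema, and generated Fortran below are illustrative assumptions, not the actual fv3gfs-wrapper templates):

```python
import json
import jinja2

# One getter subroutine is rendered per JSON entry; the real templates
# also generate setters, Cython glue, and header declarations.
TEMPLATE = jinja2.Template("""\
{% for var in variables %}
subroutine get_{{ var.standard_name }}(buffer) bind(c)
    ! copies {{ var.container }}%{{ var.fortran_name }} (units: {{ var.units }})
    real(c_double), intent(out) :: buffer({{ var.dims | join(', ') }})
end subroutine
{% endfor %}
""")

properties = json.loads("""
[{"fortran_name": "zorl", "standard_name": "surface_roughness",
  "units": "cm", "container": "Sfcprop", "dims": ["ny", "nx"]}]
""")

print(TEMPLATE.render(variables=properties))
```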
4.3 Portability and testing using Docker
One choice made in developing fv3gfs-wrapper was to use Docker containers for testing and our own research use of the wrapped model. Using a Docker image ensures that across systems, we can consistently install the dependencies of FV3GFS, Flexible Modeling System (FMS), and Earth System Modeling Framework (ESMF) on a host system. Users can make use of the Docker container identically on cloud computing resources, continuous integration systems, and our host machines without the need for separate configuration of the compilation process for each system. This removes the possibility for error from incorrect build instructions or execution of those instructions, or unexpected interactions with the host environment. Furthermore, it documents the process required to build the environment for the model and fv3gfs-wrapper, which should help in setting it up directly (i.e. without use of containers) on a machine. Finally, it facilitates distribution of the model to others who may not have access to HPC resources and may want to reproduce our results on personal computers or cloud resources. The docker image at time of publication can be retrieved as gcr.io/vcm-ml/fv3gfs-wrapper:v0.6.0, or from McGibbon et al. (2021b).
4.4 Extending this approach to other models
While we have applied this wrapping approach to the FV3GFS model specifically, nothing about it is particular to this model. Our methodology should generalize to other atmospheric and climate models. This wrapper is an example of how one can wrap Fortran models in general to be accessible through Python. While using Cython and Fortran wrapper layers (as we have done here) involves writing more code than using automated wrapping tools such as f90wrap (Kermode, 2020), it provides the flexibility necessary to wrap the existing Fortran code with minimal changes. We found much of the repetitive boilerplate needed for this wrapping could be handled through Jinja templating. With this approach, a Python wrapper can be produced for very complex build systems with only minimal modifications (such as ensuring the necessary variables and routines are externally accessible) to the existing model code.
The use of getters and setters introduces a copy overhead cost when modifying the base model behavior. However, it avoids refactoring necessary for a shared memory implementation, which would require modifying the Fortran code to use C-accessible arrays that can be shared with Python. Writing a Fortran wrapper layer for the getters and setters ensures that any variable modifiable in Fortran can also be modified in Python.
In wrapping the FV3GFS, we have split the FV3GFS model main loop into a sequence of subroutines, which are then wrapped to call from Python. This task is likely to be different in other Fortran models, particularly models with abstract main loops or complex coupling infrastructures. So long as Fortran subroutines can be defined to execute each part of the model main loop, these can be wrapped to call from Python for model integration.
4.5 Challenges and limitations
Python reads many files on initialization when it imports packages. This can cause significant slow-down on HPC systems using shared filesystems. Approaches using parallel filesystems, such as Sarus on HPC or Docker-based cloud solutions, can avoid this issue. When a shared filesystem must be used, solutions exist such as python-mpi-bcast by Yu Feng (Feng, 2021), or modifying the CPython binary as reported by Enkovaara et al. (2011).
The wrapper currently treats the dynamics and physics routines each as a single subroutine. This does not allow inserting Python code within the physics suite, between schemes. This limitation may be removed in the future by adding a wrapper for physics schemes in the Common Community Physics Package (CCPP, Heinzeller et al. (2020)). Through CCPP, it should be possible to separate the physics driver into multiple calls, allowing Python code to be called between any chosen physics schemes.
It is also important to remember when trying to modify the behavior of FV3GFS that, with or without a wrapper, it is still fundamentally a complex parallel model. Parallel code is difficult to test, since the order of code execution is non-deterministic (Bianchi et al., 2018).
It may also be necessary to understand the physical relationships between different model variables in the Fortran code. For example, the dry air mass of a model layer in FV3GFS is a diagnostic function of the layer pressure thickness and tracer mixing ratios. Increasing the model specific humidity will remove dry air mass, unless the layer pressure thickness is also increased.
To account for this, we have included a routine `fv3gfs.set_state_mass_conserving` that modifies layer pressure thickness according to any changes in water tracer amounts.
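As an illustration, the sketch below perturbs specific humidity and then restores dry-air-mass consistency using the routine just mentioned. The `get_state` call follows the wrapper API described in this paper, but the exact layout of the returned state is an assumption of this example.

```python
# Hedged sketch of a mass-conserving moisture perturbation; the get_state /
# set_state_mass_conserving calls follow the API described in the text, but
# the exact state layout is an assumption of this example.
import fv3gfs

state = fv3gfs.get_state(names=["specific_humidity"])
state["specific_humidity"].view[:] *= 1.01  # moisten every layer by 1%
# Adjust layer pressure thickness so dry air mass is unchanged:
fv3gfs.set_state_mass_conserving(state)
```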
5 Conclusions
We have presented `fv3gfs-wrapper`, a Python-wrapped version of the FV3GFS atmospheric model. The wrapper allows users to control and interact with an atmospheric model written in Fortran. The simple and intuitive interface allows for a Python-centric workflow and enables a wide range of use cases, such as machine learning parameterization development, online analysis, and interactive model execution. We do not see a decrease in model performance relative to the fully-compiled model unless routines to copy the model state in and out of the Fortran model are used; even then, the copy overhead is well within an acceptable range, and could be avoided with modifications to the Fortran source code.
We showed examples of how Python and Docker can be used to reproduce and modify the existing Fortran model, and how the Fortran code can be called in an interactive Jupyter environment. In addition to accelerating research and development workflows, these examples show how a full-fledged weather and climate model can be made available for reproducible science and teaching.
The wrapping approach outlined here can be applied similarly to other Fortran models. The Python-wrapped FV3GFS atmospheric model shows the way for a new generation of weather and climate models, where the top-level control flow of the model is implemented in a high-level language such as Python while the performance-critical sections are implemented in a low-level, performant language. This is a powerful approach that has already been used in popular Python packages such as NumPy and TensorFlow. We hope to see this approach extended to other models, enabling more widespread access to Python tools in developing traditional Fortran models, and reducing the barrier to entry for researchers and students interested in introducing online analysis code into these models.
Code and data availability. Code for this project is available on GitHub at https://github.com/VulcanClimateModeling/fv3gfs-wrapper tag v0.6.0 (McGibbon et al., 2021a), https://github.com/VulcanClimateModeling/fv3gfs-fortran tag gmd_submission (Heinzeller et al., 2021), and https://github.com/VulcanClimateModeling/fv3gfs-util tag v0.6.0 (McGibbon et al., 2021c). It is also available as a Docker image at gcr.io/vcm-ml/fv3gfs-wrapper:v0.6.0 (McGibbon et al., 2021b). The model forcing directory used to time the examples is available as McGibbon et al. (2021d).
Author contributions. Jeremy McGibbon contributed the initial version of the wrapper and has led its development. Significant code contributions have been made by Noah Brenowitz, Oliver Watt-Meyer, Spencer Clark, Mark Cheeseman, Brian Henn, Tobias Wicky, Oliver Fuhrer, and Anna Kwa. All authors were involved in design discussions and provided feedback on the code. Jeremy McGibbon prepared the manuscript with contributions from co-authors.
Competing interests. The authors declare that they have no conflict of interest.
Acknowledgements. We thank Vulcan, Inc. for supporting this work. We acknowledge NOAA-EMC, NOAA-GFDL and the UFS Community for publicly hosting source code for the FV3GFS model (https://github.com/ufs-community/ufs-weather-model) and NOAA-EMC for providing the necessary forcing data to run FV3GFS. The FV3GFS model code used was forked from the UFS public release branch in December 2019. Computations supporting this work were also enabled by a grant from the Swiss National Supercomputing Centre (CSCS) under project ID s1053.
Empirical Study of Ad Hoc Collaborative Activities in Software Engineering
Sébastien Cherry¹, Pierre N. Robillard¹
¹ Software Engineering Research Laboratory, Computer Engineering Department, École Polytechnique de Montréal, C.P. 6079, succ. Centre-Ville, Montréal, Canada
Abstract. This paper presents empirical research on ad hoc collaborative activities found in an industrial software engineering setting. We believe that a better understanding of these activities and their content will help us to propose software development process enhancements and also provide some insight into the tools needed to support communications in a distributed software development environment. Further details of our motivations are included, followed by a discussion on our research methodology, and, finally, some results of a preliminary analysis confirming the significance of our data and the importance of the observed phenomenon.
1 Introduction
It is well supported in the literature that some problems encountered in software development are not attributable to technical factors, but rather to the human aspects of software engineering [2], [4], [5], [7], [11], [13], [14], [15]. While some aspects, such as “communication” [7], [15], “coordination” [4], [5] and “collaboration” [2], [13], are gaining recognition in the research community, some methodological challenges emerge. Human factors, for example, have been overlooked in the past for many reasons, but principally because of the difficulty in measuring these facets quantitatively [11]. Nevertheless, empirical research in software engineering is growing in popularity and beginning to be adapted to studying this new topic of interest, namely people, and methods and techniques are being borrowed which were formerly used in the human sciences such as psychology and sociology. Like many researchers, we think that this domain will offer research opportunities for years to come.
This paper presents in-progress empirical research in the context of a case study in the industry, and explores collaborative work in software engineering; specifically, the ad hoc collaborative activities that take place during the software development process. By ad hoc collaborative activities, we mean activities which are not formally prescribed, and which occur between two or more developers working on a specific project task and which happen informally and spontaneously. They can take many forms, such as in peer-to-peer conversations, electronic mail exchanges, and so on.
Details of our motivations are to be found in the next section of this paper, followed by a discussion of the research methodology used, including the data collection methods, with particular emphasis on the analysis techniques that will be used to explore the large quantity of amassed data. Finally, some results of a preliminary analysis are revealed, which confirm both the relevance of the collected data and the importance of the observed phenomenon.
2 Motivations
2.1 Why collaborative work?
As previously mentioned, a growing number of researchers support the view that many of the problems that arise during software development can be imputed to human factors associated with the software engineering process. Perry, Staudenmayer and Votta (1994) [11], among others, believe that too much attention is given to the technological aspects of software engineering. One of the reasons frequently mentioned is the difficulty of measuring these human factors quantitatively. The same observation is made by Seaman (1999) [16].
Many approaches have been envisaged to study the human aspects of software engineering. Some researchers have examined the communication occurring during software development [7], [15], while others have studied the coordination aspect [4], [5] and still others were interested in the collaborative work [2], [13].
With regard to collaborative work, Robillard and Robillard (2000) [13] have empirically identified four types of collaborative activities performed during the software engineering process. They defined “ad hoc” collaborative activities, for example, as the work carried out simultaneously by teammates on a particular task of the project and which is not prescribed by a formal process. One conclusion of this research was that ad hoc collaborative activities can play a major role in team communication dynamics, accounting for 41% of those dynamics in the case study. Furthermore, these activities constitute the longest of the working sessions and seem to have an important impact on individual activities, since they often precede long individual working sessions.
Also, Perry, Staudenmayer and Votta (1994) [11] found during another case study that informal communications take up an average of 75 minutes per day per software developer. Seaman (1996) [15] also supports the view that this type of communication is a non-negligible element to be taken into account during a development process, and is essential if developers are to carry out their tasks adequately.
Because they monopolize quite a considerable part of a software project and constitute an important element of it, as established above, exploratory research is essential to understanding the content of these ad hoc collaborative activities and the communication that ensues, and to measuring the impact, both positive and negative, on the rest of the development process. We believe that such research will help us to subsequently propose software engineering process enhancements which will be better adapted to the human and empirical realities of software development.
By contrast, collaborative software development, also known as “distributed software development”, is an increasingly fashionable domain of research these days. Both expressions refer to software development distributed over time and over relatively long distances, which has become quite common business practice nowadays.
However, according to recent research in this domain [5], the distances between the members of virtual teams tend to obstruct informal communications, resulting in problems of coordination. This is another important reason for undertaking research in this field. It will also provide some insight into the tools needed to support communications in a distributed software development environment.
2.2 Why an empirical study?
Empirical research based on the experimental method has been conducted for a long time now in many of the human sciences, such as psychology and sociology. It is, moreover, very often considered to be the only valid scientific method accepted in these domains. Although empirical research has been conducted in software engineering for many years, it has been on a much smaller scale, and only quite recently has it seen an increase in popularity. One reason for this is the growing interest in the human aspects of software engineering [16].
Further arguments supporting this new tendency, and strengthening the evidence for it, have been expressed by Tichy (1998) [17] in his paper, “Should Computer Scientists Experiment More?” However, those who uphold this practice in software engineering believe that, since the quantity of empirical research is on the increase, its quality should increase as well [10], [18].
3 Research Protocol
3.1 Problem Statement
Research Objectives. As discussed previously, the importance and the necessity for ad hoc collaborative work and the communications that ensue in software development are widely supported by many authors [2], [4], [5], [6], [7], [11], [13], [15]. Although some research has quantified the importance of the phenomenon, there has been no known attempt to determine and describe the content of that work. These considerations led us to define the following research objectives:
- To observe the collaborative work taking place in an industrial case study in order to design a conceptual model and distinguish patterns of exchanges.
- To characterize the ad hoc collaborative activities found and the communications that ensue, and to identify and describe their content.
- To generate a series of hypotheses emerging from the results of this research, which could later be validated by confirmatory research.
Theoretical Relevance of the Research. The fact that there has been little or no empirical study of ad hoc collaborative activities gives this research theoretical relevance. It will make it possible to establish a model of these activities and to gain a sense of the cognitive aspects involved, from which we will be able to generate a series of hypotheses forming a theoretical base in this domain.
Practical Relevance of the Research. Good collaboration is an indispensable condition for a software development team to work effectively and deliver a quality product that meets the needs of the user, in the time required and at the expected cost. This research is therefore relevant in practice because it will potentially pave the way to proposed improvements to software engineering processes. Also, as previously mentioned, it will allow us to better understand the informal collaboration and communication aspects of software engineering, and provide some insight into the tools needed to support communications in a distributed software development environment.
3.2 Research Methodology
General Approach. The research is carried out by means of participant observation within the framework of a case study in an industrial environment. This type of approach is suitable in our case because this is exploratory research. Also, as Jorgensen (1989) [8] and Babbie (2001) [1] have stressed, fieldwork combined with participant observation is appropriate when it is not a question of empirically verifying hypotheses formulated in advance, but rather of inductively generating theories from observations and from the empirical data collected.
Target-setting. The setting in which the chosen software development team works is a large enterprise which produces software for commercial purposes. It is a well-established, mature concern which has been in operation for several years, and where there exists a clearly defined development process. Nevertheless, even though the observations are made in a large company, the company also shares some attributes of smaller organizations, since the development of software components is divided among small teams.
Also, based on a common-sense judgment (face validity) [1], we can say that the chosen team of eight individuals is representative of the majority of development teams, with a wide range of ages, amounts of schooling, years of experience in software development and length of service in the company.
Data Collection. The following data collection methods were identified from a preliminary ethnographic period of several months within the chosen team.
An initial data collection phase, which took place in the autumn of 2003 and was spread out over 8 weeks, is now complete. The results presented in this paper were produced from these initial data. The purpose of this collection was to gather the maximum amount of information from the beginning to the end of the development of an update (patch) of a given version of the software produced.
The data collected during this first phase includes:
- 185 hours of audio-video recordings of working sessions over 37 workdays
- The capture of a total of 2496 e-mails exchanged by the 8 teammates
- A daily backup of the source code and other documents and artifacts found
E-mails were captured automatically, by means of triggers defined in the messaging software used in the company. This capture included both e-mails received and those sent by teammates, in order to permit cross-validation.
The daily backup of the source code, and the various documents and artifacts, was available for potential use for subsequent content analysis.
Data Analysis. One of the techniques that will be used for the analysis is Exploratory Sequential Data Analysis (ESDA) [3]. This technique is suited to exploratory research, where the objective is to find answers to research questions or to find patterns among the empirical data and to describe them using, for example, simple statistical representations.
ESDA allows researchers to define, from these descriptions, hypotheses which are subsequently verified by means of confirmatory research using statistical inference methods. However, the important feature of ESDA is that it applies more specifically to research where the sequential integrity of the data must be preserved.
Of the eight operations proposed by ESDA, encoding is certainly the most important. This involves labeling each sequence of data by means of a code formed using a particular syntax and contained in an exhaustive, exclusive and relatively restricted category list, and doing so to decrease the variability of the data, as well as to facilitate its subsequent manipulation. This encoding makes it possible to transform qualitative data into quantitative data, on which it is then possible to perform statistical analysis [3], [16].
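As an illustration of what such encoding enables, the sketch below labels a sequence of interaction records with codes from a fixed category list (the categories defined later in Section 4) and derives simple quantitative summaries; the records themselves are invented for the example.

```python
# Illustrative ESDA-style encoding: each interaction in a sequence is
# labeled with a code from a fixed, exclusive category list, and the
# encoded sequence is then summarized quantitatively. The records below
# are invented for the example.
from collections import Counter

CATEGORIES = {
    "CS": "cognitive synchronization",
    "PR": "problem resolution",
    "MG": "management",
}

# Encoded sequence, order preserved: (subject, category code, minutes)
sequence = [
    ("MS2", "CS", 4.0),
    ("MS3", "PR", 12.5),
    ("MS2", "MG", 1.5),
    ("MS1", "CS", 7.0),
]

occurrences = Counter(code for _, code, _ in sequence)
minutes = Counter()
for _, code, duration in sequence:
    minutes[code] += duration

total = sum(minutes.values())
for code, spent in minutes.most_common():
    print(f"{CATEGORIES[code]}: {occurrences[code]} occurrence(s), "
          f"{100 * spent / total:.1f}% of collaborative time")
```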
The ESDA process, such as proposed by Fisher and Sanderson (1996) [3], is an iterative one, involving the definition of a series of concepts stemming from research questions of interest. The process will subsequently drive what should be observed among the collected raw data and what manipulations should be made to obtain derived data on which it is possible to generate theories or define hypotheses. It is iterative because it is often necessary to revisit certain steps; for example, to add, remove or redefine concepts or categories that are sometimes found intuitively and validated by their statistical representations.
Research Validity. To satisfy the validity criterion for the research, the empirical measure must faithfully translate the empirical reality of the measured phenomenon [1]. To enhance the validity of our research, particular attention is directed to the definition of the concepts or categories chosen to encode the data. These concept definitions, which must arise from the ESDA traditions that concern us [3], as well as from the context of the research field, ensure a degree of representativeness of the phenomenon under study by common-sense validity (face validity) [1], [8].
The concepts or categories under which the data will be encoded, as well as the number of categories chosen, will also be very important as far as content validity is concerned [1]. This aspect of validity refers rather to the coverage of the meanings encompassed by the concepts. Furthermore, the validity of the connections or relations (construct validity) [1] should be assured among the concepts forming the theoretical model emerging from the data. This can be done by means of certain correlation measures or statistical associations.
Finally, a data triangulation will be made between qualitative and quantitative data, as well as between data resulting from various sources [16], [18]. Concerning this last point, further phases of data collection are anticipated.
4 Preliminary Results
The results presented in this section are the product of a preliminary analysis concerning four of the eight developers in the team who were observed over a period of 8 hours. The choice of these individuals was not made by means of a sampling method, but from direct observations in the field: they had been identified as being likely to work more collaboratively than the others. This choice is justified because the objective of this research is not to find a magic number indicating the time spent on ad hoc collaborative work, but rather to investigate the content of these collaborative activities. It should also be noted that the results below do not take into account e-mail interactions, but only the peer-to-peer conversations and telephone exchanges.
As can be seen from Figure 1, 51% of the time is spent on ad hoc collaborative work, as against 49% for the other types of activities. This result tends to corroborate the observations made in the field that suggested the importance of the phenomenon.
Fig. 1. Distribution of time between ad hoc collaborative work (51%) and other activities (49%).
Figure 2 indicates the percentage of time spent on ad hoc collaborative activities by the subjects observed. As was noted in the field, subjects MS2 and MS3 seem to have spent a large amount of their time collaborating and communicating in a spontaneous way with their colleagues. This can be explained by the nature of the work performed by these subjects. MS2 occupies the position of project manager in the team, and one of his functions is to circulate relevant information needed by the developers on his team. When we examine more closely the interactions in which MS2 is involved, we note that, for 78.13% of the time, his colleagues initiate the interactions. We can suggest hypothetically that MS2 constitutes a source of the information his colleagues require. However, it was noted in the field that a great deal of the information passed on by MS2 to his teammates is in the form of e-mails. It would be interesting to investigate this method of communication. MS3 is, for his part, responsible for the infrastructure of the software built, and often the individual consulted to resolve problems. He manages several tasks at the same time, which brings him into communication with some of his colleagues more often.
Fig. 2. Percentage of ad hoc collaborative activities per subject.
Overall, the average duration of the interactions analyzed for the four developers is 6:31 minutes, and these interactions involve, on average, 2.3 stakeholders. We should remember that an interaction is defined as a communicative unit which presents an evident internal continuity, while it breaks with what precedes it and what follows it [11]. Moreover, these results were based on a total of 82 observed interactions.
Figures 3 and 4 give an initial percentage breakdown of the number of occurrences of, and the time spent on, the various categories of ad hoc collaborative activities identified. “Cognitive synchronization” exists when two or more developers exchange information to ensure that they share the same knowledge or the same representation of the object in question. “Problem resolution” occurs when two or more developers are aware of the existence of a problem and attempt by different means to solve or mitigate it. “Development” occurs when two or more developers contribute to the development of a new feature or component of the software. “Management” is the result of two or more developers coordinating and planning activities such as meetings, common working sessions or schedules. “Conflict resolution” is the process of two or more developers taking part in discussions to resolve a difference of opinion. Finally, the “not relevant” category groups together all interactions which do not concern the project or the software built.
As Figure 3 suggests, 52.63% of the ad hoc collaborative activities that arose are forms of cognitive synchronization. This agrees with the direct observations made in the field. The figure is not surprising when we consider that the exchange of information and knowledge constitutes an essential element of software development, whose purpose is to crystallize [16] all the required information into quality software that meets the needs of the user. As shown in Figure 4, cognitive synchronization occupies 56.57% of the time spent on ad hoc collaborative work, which also supports the previous finding.
The other significant category, problem resolution, does not seem important in Figure 3 in terms of number of occurrences. Figure 4, however, suggests that this activity monopolizes almost a quarter of the time spent on ad hoc collaborative work by the four subjects observed. This demonstrates that, while problem resolution activities are relatively few in number, when they do arise they monopolize a rather considerable amount of time. An analysis of the mean time spent per sequence as a function of ad hoc collaborative activity tends to confirm this, showing that problem resolution takes 9:48 minutes on average when it occurs, as opposed to the overall interaction average of 6:31 minutes.
The results relative to management activities also reveal an interesting finding. Unlike problem resolution activities, they are relatively numerous considering the fairly short time that they occupy. This may tend to confirm the theories of certain authors [7], who maintain that informal communications are necessary in order that the members of a team can coordinate their activities effectively.
5 Conclusion
It is clear, and widely supported, that good collaboration and communication are an essential condition of the successful delivery of a quality product by a software development team, one that meets the user’s needs in a timely fashion and at the expected cost.
It was revealed by previous research that ad hoc collaborative activities and informal communications occupy a considerable portion of the time that a developer spends on a software project. However, no research has attempted to describe the content of these activities, which leaves a vast field open for exploration.
The empirical research described in the present paper suggests the importance of investigating this field, because the authors believe that understanding how people collaborate will make it possible to propose practices to enhance collaboration and communication within a development team, as well as improve software development processes.
This article has briefly presented the methodology used to meet our research objectives. It was influenced by previous empirical research which was also aimed at investigating the human aspects of software engineering, but which was, however, adapted to the context of the field on which this study focuses.
The embryonic results partially presented in this paper, which arise from a tiny portion of the considerable quantity of data collected, are very interesting. Although more analysis is needed, a data model and patterns already seem to be emerging which will allow us to subsequently form hypotheses that can be validated by other, confirmatory research, thereby forging a knowledge base in this as yet unknown domain of software engineering.
6 Acknowledgments
This research would not have been possible without the agreement of the company in which it was conducted, and without the generous participation and patience of the software development team members from which the data was collected. To all these people, we extend our grateful thanks.
All in one Graphical Tool for the management of DIET, a GridRPC Middleware
Eddy Caron, Frédéric Desprez, David Loureiro
July 1, 2008
Research Report N° 2008-24
Abstract
Grid Middleware is the link between large scale (and distributed) platforms and applications. Managing such a software system and the Grid environment itself can be a hard task when no dedicated (and integrated) tools exist. Some can be used through nice graphical interfaces, but they are usually dedicated to one or a few specific tasks. They do not fulfill all the needs of a Grid end-user who wants to deploy Grid applications easily and rapidly.
The aim of this paper is to present the case study of an all-in-one software system designed for the management of a Grid Middleware, gathering user-friendly graphical interfaces that answer the various needs of end-users. Moreover, the software system eases the use of the Grid by hiding the scripting layer behind a GUI, enabling faster and more efficient use of the Grid environment. By this means we demonstrate how the DIET Dashboard fulfills all the needs of a unified tool for Grid management. This paper gives a comparison with existing and well-known tools dedicated to specific tasks such as Grid resources management, Grid monitoring, or Middleware management.
Keywords: Grid Middleware, Grid management, Grid monitoring, Deployment, Workflow management
1 Introduction
Large problems ranging from huge numerical simulations to large scale data processing can now be solved through the Internet using Grid Middleware software systems. Several approaches exist for porting applications to Grid platforms. Examples include classical message-passing, batch processing, web portals, and GridRPC systems. This last approach implements a Grid version of the classical Remote Procedure Call (RPC) model. A more sophisticated extension of this includes high level scheduling mechanisms and data management. Thus clients spread over the Internet submit computation requests to a scheduler that locates one or more servers available on the Grid using some performance measure.
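This call pattern is easiest to see in code. The sketch below is a hypothetical Python rendering of the GridRPC model; DIET's real client APIs are C, C++ and Java, so the `gridrpc` module and its functions here are illustrative stand-ins, not an actual library.

```python
# Hypothetical Python rendering of the GridRPC call pattern; DIET's real
# client APIs are C, C++ and Java, so the `gridrpc` module and its
# functions are illustrative stand-ins, not an actual library.
import gridrpc

A = [[1.0, 2.0], [3.0, 4.0]]
B = [[5.0, 6.0], [7.0, 8.0]]

gridrpc.initialize("client.cfg")            # join the platform
handle = gridrpc.function_handle("matsum")  # scheduler picks a suitable server
C = handle.call(A, B)                       # remote procedure call over the Grid
gridrpc.finalize()
```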
The aim of the DIET 1 (Distributed Interactive Engineering Toolbox) project is to develop a set of tools to build, deploy, and execute computational server daemons. It focuses on the development of scalable Middleware with initial efforts concentrated on distributing the scheduling problem across multiple agents. DIET consists of a set of elements that can be used together to build applications using the GridRPC paradigm. This Middleware is able to find an appropriate server according to the information given in the client’s request (e.g. problem to be solved, size of the data involved), the performance of the target platform (e.g. server load, available memory, communication performance) and the local availability of data stored during previous computations. The scheduler is distributed using several collaborating hierarchies connected either statically or dynamically (in a peer-to-peer fashion). Data management is provided to allow persistent data to stay within the system for future re-use. This feature avoids unnecessary communications when dependencies exist between different requests.
In a Grid environment, we need several complex tools for the management of resources, Grid Middleware, and client/server applications. Most Grid software systems use command-line interfaces without any Graphical User Interface (GUI). For the creation of a tool dedicated to the management of Grid Middleware and Grid environments, different functions are mandatory. We can consider three main graphical interfaces for such a framework: one for resource management, one for Grid monitoring, and one for the management of the Grid Middleware. DIET Dashboard 2 answers the need for a unified set of tools, providing the user with a complete, modular, portable, and powerful way to manage Grid resources and the applications that run on them.
The goal of this paper is to show the various aspects to be taken into account in the design of a graphical tool for Grid Middleware management, and how such a tool can ease interaction with a Grid by avoiding the scripting layer. We thus designed a tool to make the Grid as user-friendly as possible. Many GUI tools dedicated to Grid management exist, but they each target only one or two tasks. The aim of the DIET Dashboard is to provide an all-in-one and flexible software system that gathers these tools in an efficient manner. We give a comparison with existing tools dedicated to specific tasks such as Grid resources management, Grid monitoring, or Middleware management. In this way we demonstrate how the DIET Dashboard fulfills all the needs of a unified tool, making it easy to manage a Grid Middleware on Grid platforms.
The rest of the paper is organized as follows. In Section 2, we briefly review existing work on graphical tools for the Grid. Sections 3 and 4 describe the architectures of DIET and DIET Dashboard. Section 4.1 presents the features of DIET Dashboard related to Grid resources management. Section 4.2 presents the features related to Grid monitoring. Section 4.3 describes how it can manage the DIET Grid Middleware. To illustrate the use of the DIET Dashboard, we present an experiment in Section 5. Finally, Section 6 concludes the paper.
2 Related Work
In this paper we focus on graphical tools designed for Grid environments. Here we will give a description of the three main families of tools dedicated to Grid Middleware software systems and Grid environments.
1 http://graal.ens-lyon.fr/DIET
2 http://graal.ens-lyon.fr/DIET/dietdashboard.html
The first family concerns graphical tools for cluster resource management. They provide a Graphical User Interface (GUI) to check all information from batch schedulers. For example, QMON [16], the GUI designed for N1 Grid Engine from SUN, can examine the properties of any queue on the Grid (running, disabled, suspended, etc.). A second graphical menu provides a job submission interface with all the options available. A third interface monitors the jobs status (running, suspended, deleted, pending, etc.).
To illustrate the second family, we can consider Ganglia [12], a graphical tool designed for Grid monitoring. Based on an announce/listen protocol, this tool monitors a cluster or a set of clusters, using XML, XDR and RRDtool to represent, retrieve and display the data. For each node, Ganglia provides instantaneous information and history about the load, memory, I/O, etc. through a web interface.
The third family concerns tools designed for Grid Middleware software systems. Many tools exist for the visual specification and execution of scientific workflows, such as Kepler [1], Taverna [14], SGSDesigner [10], ScyFlow [13], or GridNexus [4]. For example, GridNexus is a graphical system for the creation and execution of scientific workflows in a Grid environment. The user can assemble complex processes involving data retrieval, analysis and visualization by building a directed acyclic graph in a visual environment. Future work mentions the use of GridNexus to help create and deploy new Grid services in addition to scripting existing services, and the project plans to develop a generic module to provide interactive feedback while executing a workflow.
Graphical tools mentioned here are all designed with a specific aim. DIET Dashboard combines workflow management, resources reservation, resources mapping, automatic configuration, visualization, and deployment tools in one integrated graphical application.
3 DIET Architecture
The DIET component architecture is structured hierarchically for improved scalability. Such an architecture is flexible and can be adapted to diverse environments, including arbitrary heterogeneous computing platforms. The DIET toolkit [7] is implemented in CORBA and thus benefits from the many standardized, stable services provided by freely-available and high performance CORBA implementations. CORBA systems provide a remote method invocation facility with a high level of transparency. This transparency should not affect the performance substantially, as the communication layers in most CORBA implementations are highly optimized [8]. These factors motivated the decision to use CORBA as the communication and remote invocation fabric in DIET.
The DIET framework comprises several components. A Client is an application that uses the DIET infrastructure to solve problems using an RPC approach. Clients access DIET through various interfaces: web portals or programs using C, C++, or Java APIs. A SeD, or server daemon, acts as the service provider, exporting a functionality through a standardized computational service interface. A single SeD can offer any number of computational services (depending on the capacity of the machine). A SeD can also serve as the interface and execution mechanism for either a stand-alone interactive machine or a parallel supercomputer (or cluster) using an interface with a batch scheduler. The third component of the DIET architecture, agents, facilitate the service location and invocation interactions between clients and SeDs. Collectively, a hierarchy of agents provides higher-level services such as scheduling and data management. These services are made scalable by distributing them across a hierarchy of agents composed of a single Master Agent (MA) and several Local Agents (LA). Figure 1 shows an example of a DIET hierarchy.
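The toy sketch below illustrates how a request can descend such a hierarchy, with each agent keeping the best estimate returned by its children; selecting by raw load is a deliberate simplification of DIET's actual performance measures.

```python
# Toy sketch of request forwarding down a DIET-style hierarchy: each agent
# queries its children and keeps the best estimate. Selecting by raw load
# is a deliberate simplification of DIET's actual performance measures.
class SeD:
    def __init__(self, name, services, load):
        self.name, self.services, self.load = name, services, load

    def estimate(self, service):
        if service in self.services:
            return (self.load, self.name)
        return None  # this server does not offer the service

class Agent:
    def __init__(self, children):
        self.children = children  # LAs, or SeDs at the leaves

    def estimate(self, service):
        estimates = [e for child in self.children
                     for e in [child.estimate(service)] if e is not None]
        return min(estimates, default=None)  # lowest load wins

ma = Agent([Agent([SeD("sed-a", {"matsum"}, load=0.7),
                   SeD("sed-b", {"matsum", "dgemm"}, load=0.2)]),
            Agent([SeD("sed-c", {"dgemm"}, load=0.1)])])
print(ma.estimate("matsum"))  # -> (0.2, 'sed-b')
```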
4 DIET Dashboard
When the goal is to monitor a Grid, or deploy a Grid Middleware on it, several tasks are involved.
• Managing the resources of a Grid: allocating resources, deploying nodes with several operating systems, etc.
• Monitoring the Grid: getting the status of the clusters (number of available nodes in each state, number and main properties of each job, Gantt chart of the jobs history), the status of the jobs (number, status, owner, walltime, scheduled start, Ganglia information of the nodes) running on the platform, etc.
• Managing the Grid Middleware software system within a Grid environment: designing hierarchies (manually or automatically by matching resources on patterns), deploying them directly or through workflows of applications, etc.
The DIET Dashboard provides tools that address these needs, with an environment dedicated to the DIET GridRPC Middleware. It consists of a set of graphical tools that can be used separately or together. These tools can be divided into three categories:
1. Workflow tools: including workflow designer and workflow log service.
2. DIET tools: including tools to design and deploy DIET applications.
3. Grid tools (aka GRUDU \(^3\)): these tools are used to manage, monitor and access user Grid resources.
4.1 Grid Resources Management
When deploying an application over a Grid, a user should be able to allocate resources for computation tasks by specifying the number of nodes needed, the duration of the jobs (also called walltime), the date when each job will start, their priority, etc. Users should also have the possibility of choosing between the default environment of the node and a user-defined one, for example if the parallel implementation or the default operating system does not fit the application's needs. This management should be easy to perform in order to improve Grid usage.
The following sections present how Grid resources management was designed in the DIET Dashboard, and describe an existing tool dedicated to Sun Grid Engine, called QMON.
3 http://graal.ens-lyon.fr/GRUDU
4.1.1 DIET Dashboard functionalities
Grid resources management is realized inside GRUDU, the Grid resources module of DIET Dashboard. GRUDU can be easily configured to use different batch schedulers or different Grids. GRUDU can be used inside DIET Dashboard, but also in a standalone mode for users who just want to monitor, manage, or make reservations on the Grid.
The Grid’5000 project aims at building a highly reconfigurable, controllable and monitorable experimental Grid platform gathering 9 sites geographically distributed in France and featuring a total of 5000 processors. The main purpose of this platform is to serve as an experimental testbed for research in Grid Computing.
To allocate resources on Grid’5000, the resource tool offers a user-friendly interface allowing the selection of the number of nodes needed at each site, and the definition of the date, walltime of reservation and the queue where the job will be started. The user can select a job type (for example, deploy if you plan to change the operating system) for the reservation itself and launch a script on the reserved resources (see Figure 2). Concerning the clusters, the OAR batch scheduler uses properties for the reservations (for example, to select nodes with Myrinet interfaces) and the allocation tool provides an interface for the definition of these properties.
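Conceptually, a reservation in this interface bundles the parameters listed above; the sketch below collects them in a hypothetical container whose field names are illustrative, not GRUDU's actual data model.

```python
# Hypothetical container for the reservation parameters discussed above;
# field names are illustrative, not GRUDU's actual data model.
from dataclasses import dataclass
from typing import Dict, Optional

@dataclass
class ReservationRequest:
    nodes_per_site: Dict[str, int]       # e.g. {"lyon": 8, "rennes": 4}
    walltime: str = "02:00:00"           # duration of the jobs
    start: str = "2008-07-01 09:00"      # scheduled start date
    queue: str = "default"
    job_type: Optional[str] = None       # e.g. "deploy" to change the OS
    properties: str = ""                 # batch-scheduler properties
    script: Optional[str] = None         # script launched on the nodes

request = ReservationRequest({"lyon": 8, "rennes": 4}, job_type="deploy")
```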
Figure 2: The resource allocation interface.
To manage resources, the user can deploy images on nodes with the operating system needed for the computations. The resources tool also provides a GUI for the deployment of images over Grid'5000 clusters through Kadeploy. (Deployment through Kadeploy allows users to have their own operating system, which they can tune and configure as they wish.) The user selects the nodes and the images needed for the experiment (one image per cluster if deploying on different clusters) (see Figure 3).
4.1.2 Comparison with QMON
QMON is the GUI to the N1 Sun Grid Engine (SGE). It provides an interface for the job submission and the resources management of a Grid and the SGE batch scheduler.
4 https://www.grid5000.fr
5 http://oar.imag.fr/
6 http://kadeploy.imag.fr/
QMON allows the user to submit either simple or parallel jobs on queues\(^7\), which are run in a passive and non-interactive mode. Users can then monitor the jobs and the Grid status. However, QMON does not provide access to the computation nodes for interactive work, and a specific system cannot be deployed to give the user a custom environment for the duration of the reservation. Moreover, to use different queues, the user must submit a parallel job with a defined parallel environment such as MPI or PVM, whereas with OAR and the DIET Dashboard different nodes can be used on different clusters without requiring any parallel environment.
4.2 Grid Monitoring
Grid monitoring matters to a user both before and after reserving resources. Before submitting any job to a Grid, the user should be aware of the available nodes and their states (free/already used/dead). Whenever there are not enough resources, the user should be able to know when they will become available for computation. After having successfully submitted some jobs, the user should have an interface giving information about his own jobs but also about the other jobs running on the Grid. Even if more detailed information can sometimes interest expert users, information at too low a level is unusable for the default user, who only wants to perform computations on some resources for a given period of time.
The following sections present how Grid monitoring is implemented within the DIET Dashboard, and describe an existing monitoring tool called Ganglia.
4.2.1 Functionalities of DIET Dashboard
Thanks to the resource tool we can monitor the state of the platform with charts presenting the load of the different clusters, the state of all clusters and all the users’ jobs on the Grid (see Figure 4).
We are also able to monitor the status of a particular cluster with charts summarizing the nodes states and a table composed of the jobs (running or waiting) on that cluster. A Gantt chart is also available helping the user to define when he can reserve some resources.
7 A QMON queue corresponds to a cluster in the DIET Dashboard for the batch scheduler OAR.
The resource tool also provides the user with all the necessary information about every job present on a cluster, including, among other things, the job name, state, and hosts.
Finally a plugin generates instantaneous data and history concerning the main metrics (the CPU load, the disk/memory/swap used, the in/out bytes, etc.) of the user reserved nodes with information taken from the Ganglia data.
4.2.2 Comparison with Ganglia
Ganglia is a scalable distributed monitoring system for high-performance computing systems such as clusters and Grids. Ganglia provides resource usage metrics (memory, CPU, jobs, ...) for individual sites or whole Grids. These metrics are low level and can be used to monitor the hardware of individual sites or of whole Grids.
But Ganglia does not provide higher-level information such as node states, the available resources of clusters, or information about the jobs existing on the clusters. From the point of view of a user who needs to reserve resources and perform computations on those nodes, the job and cluster information in the DIET Dashboard can be sufficient, whereas the information from Ganglia can be of too low a level for standard use. Such information is best considered a complement to the monitoring part of the DIET Dashboard (and this is precisely the purpose of the plugin described in Section 4.2.1).
### 4.3 Grid Middleware Management
When using a tool managing Grids and a Grid Middleware such as DIET, a user expects features such as the design of a hierarchy of Middleware elements, the remote deployment of locally created hierarchies, or the discovery of existing online services usable in workflows.
Other functionalities can also be offered, like a log service, real-time monitoring of the execution of running workflows, or resource-dependent generation of hierarchies according to predefined models. The following sections present how Grid Middleware management is implemented in the DIET Dashboard, as well as an existing software with workflow management features called GridNexus.
#### 4.3.1 Workflow tools
**Workflow designer** A large number of scientific applications are represented by graphs of tasks connected according to their control and data dependencies. The workflow paradigm on Grids is well adapted to representing such applications, and the development of several workflow engines \[2, 11, 15\] illustrates significant and growing interest in workflow management within the Grid community. The success of this paradigm for complex scientific applications can be explained by the ability to describe such applications at a high level of abstraction, in a way that makes them easy to understand, change, and execute.
Several techniques have been established in the Grid community to define workflows. The most commonly used model is the graph, and especially the Directed Acyclic Graph (DAG). Since there is no standard language to describe scientific workflows, the description language is environment dependent and usually XML based, though some environments use scripts. In order to support workflow applications in the DIET environment, we have developed and integrated a workflow engine. Our approach provides a simple, high-level API and the ability to use different advanced scheduling algorithms, and it allows the management of multiple workflows sent concurrently to the DIET platform.
In this context, a workflow designer was developed to help users design workflow applications and execute them. Figure 5(a) shows an overview of this tool, in which users can see a description of the available services (discovered in online mode) and design a workflow with a drag-and-drop mechanism. The user needs neither to know the details of the requested services nor to define them. Once the workflow is designed, one can either save it to an XML format supported by the DIET workflow engine or execute it directly. In the second case, the workflow input must be defined.
The XML representation of designed workflows describes required tasks and data dependencies. A task is a DIET service and a data dependency is a link between two parameters. The workflow designer checks and guarantees data type compatibility between source and target ports of each created link.
The workflow description level used here is known as “abstract description”. This level of description does not include any runtime information but is sufficient for the workflow execution. The DIET hierarchy and the workflow engine automatically and transparently manage the scheduling and execution of user tasks.


Figure 5: Workflow tools.
**Workflow log service** To improve workflow monitoring, we propose a tool dedicated to workflow monitoring that displays the real-time execution of the different workflows. This graphical tool has two major roles: first, it is a central event service that receives and handles the events related to task execution progress; second, it provides a graphical representation of the workflow state. This tool, shown in Figure 5(b), displays the different workflows once they start their execution. Each node of a workflow can be in one of the following states: “waiting”, “running”, or “done”.
#### 4.3.2 DIET tools
A DIET platform can be represented by a hierarchy of agents and servers. Designing and deploying such a hierarchy of distributed and heterogeneous elements can be a hard task for the end user. In our previous work [6], we defined an XML format to describe DIET platforms. This format describes a DIET hierarchy as well as information about the resources and environments used.
To deploy DIET hierarchies on a Grid environment the DIET Dashboard provides two methods:
In two steps: First, the user creates his DIET hierarchy by hand with the DIET designer. Instead of manipulating complex XML files, the user simply adds Local Agents or Server Daemons to the Master Agent or to already added Local Agents. For the Server Daemons, the user can define the binary to launch, the input parameters, etc. This level describes only the application level, and the obtained application description can be extended with runtime information. The main frame of the DIET designer is presented in Figure 6.
To extend this application level hierarchy the user should use the DIET mapping tool (see Figure 7). This tool allows the user to map the allocated Grid’5000 resources to a DIET application. For each Grid’5000 site, the nodes (or hosts) are used in a homogeneous manner but the user can select a particular host if needed.
In one step: The XMLGoDIETGenerator builds a GoDIET XML file, usable with the DIET deployment tool, from a compact description and a reservation directory. For large experiments, writing the GoDIET file by hand is time consuming, and if the user has to redo the experiment with a different set of machines, the GoDIET file is simply regenerated according to the available resources.
The way hierarchies are described (through a framework from which they are created according to the available resources) also has to be as flexible as possible, to let the user express all possible hierarchies. One should notice that the XMLGoDIETGenerator is “resources driven”: the final hierarchy directly depends on the available resources provided, whereas hierarchies created with the DIET designer and mapping tools do not change when more or fewer resources are available.
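As a purely illustrative sketch (the actual input format of the XMLGoDIETGenerator is not reproduced in this paper, so all element and attribute names below are hypothetical), such a compact, resources-driven description could look like:

```xml
<!-- Hypothetical compact description (illustrative only): one Master
     Agent, one Local Agent per reserved cluster, and SeDs pattern-matched
     over whatever nodes the reservation actually provided. -->
<diet_hierarchy>
  <master_agent>
    <local_agent match="one_per_cluster">
      <sed binary="/path/to/server" match="all_remaining_nodes"/>
    </local_agent>
  </master_agent>
</diet_hierarchy>
```

Expanded against a given reservation directory, such a pattern would yield one concrete GoDIET entry per matched node, which is what makes the generated hierarchy resources driven.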

Once the DIET hierarchies are generated, the user can deploy them on the Grid thanks to the DIET deploy tool (see Figure 8). This tool is a graphical interface to GoDIET. It provides the basic GoDIET operations: open, launch, stop, and also a monitoring mechanism that checks whether the DIET application elements are still alive (the states are the same as for the workflow log service). Like the workflow log service, the DIET deploy tool can be used in a local or a remote mode.
#### 4.3.3 Comparison with GridNexus
GridNexus provides a GUI for workflow construction and execution. This interface is a “drag and drop” environment that can be used to build workflows from generic Grid and web services. The output is XML based and easy to modify or use from specialized tools around GridNexus. The user designs the workflow by linking elements, as with the workflow designer of the DIET Dashboard. Once designed, the workflow can be run, and the user can see its results or get the corresponding script. Workflows can be abstracted to simplify workflow design: these “composites” can then be used as elements of other workflows. GridNexus comes with a library of predefined elements that can be used from the GUI, but workflows can also be generated from the URL of a WSDL defining services.
However, GridNexus does not show the evolution of the workflow execution, and it does not provide logging functions to help deal with service failures or other problems. Moreover, GridNexus does not discover online services: the user must provide the services himself, which can be complicated for an end user who may not know where those services are located. Finally, GridNexus only manages workflows of tasks; it does not allow the user to design and execute his/her own hierarchies of elements in order to later execute clients (other than workflow executions) on computation nodes.
## 5 Experiments
An experiment was carried out to test the capabilities of DIET and the DIET Dashboard on a large number of machines. This experiment was performed on Grid’5000, and the chosen application was cosmological computations. For this experiment, the entire Grid’5000 platform was reserved, which gave us 12 clusters on 7 sites for a duration of 48 hours. Finally, 979 machines\(^8\) were used with a user-defined environment containing all the software needed for the experiment. Figure 9 gives a bar chart, taken from the resources tool of the DIET Dashboard, representing the occupation of the clusters by the jobs of the experiment.
The aim of the experiment was also to make the largest machine reservation over the Grid and to deploy the largest DIET hierarchy, in order to execute the maximum number of cosmological application jobs. The MPI code executed by the DIET servers, called RAMSES\(^9\) \[17\], was developed in Saclay (DAPNIA/CEA) to study large scale structures and galaxy formation. This code is a grid-based hydro solver with adaptive mesh refinement.
Thanks to GRUDU, reservations were done at the Grid level, and not on each cluster separately, in 20 seconds. To get a user-defined environment on each machine, GRUDU was able to deploy the environment on every machine of the 12 clusters involved, at the same time, in roughly 25 minutes. Finally, the DIET hierarchy was created through the XMLGoDIETGenerator in 5 seconds and deployed through the DIET deploy tool and GoDIET in 23 seconds.
If these tasks had been done without GRUDU:
- the reservation would have been made with oargridsub (a non-graphical utility dedicated to OAR) by hand, reserving the nodes of each cluster one cluster at a time.
Here is a dummy example of an oargridsub command:
```
oargridsub
cluster1:rdef="nodes=2",cluster2:rdef="nodes=1",cluster3:rdef="nodes=1",
cluster4:rdef="nodes=2",cluster5:rdef="nodes=1",cluster6:rdef="nodes=1",
cluster7:rdef="nodes=2",cluster8:rdef="nodes=1",cluster9:rdef="nodes=1",
cluster10:rdef="nodes=2",cluster11:rdef="nodes=1",cluster12:rdef="nodes=1",
-s '2007-09-07 16:00:00'
-w '0:10:00'
-p ~/runhpl/runhpl
```

Figure 9: Chart representing the occupation of the different clusters and the node repartition between the different job states (Free/Job/Dead/Absent).

---

\(^8\) Among the uncrashed nodes.

\(^9\) http://irfu.cea.fr/Projets/COAST/ramses.htm
- The use of a user-defined environment would have been impossible without KaDeploy; the deployment would have taken the same amount of time per cluster, instead of for all of them at once, and its configuration would have been more difficult because of several conditional choices.
- The DIET hierarchy description file would have been written by hand, and it would not have been easily readable because of its dependency on the resources, a dependency avoided by the pattern matching performed by the XMLGoDIETGenerator.
The deployed DIET platform was composed of one Master Agent, 12 Local Agents, and 29 Server Daemons. One job can be executed on each SeD at a given time. 816 nodes were used for the application jobs. Since the different clusters do not provide the same compilation environment, a specially created environment image was deployed on every reserved node.
During the experiments, the main difficulties came from hardware limitations (typically disk space too small to back up data, or badly defined permissions on the /tmp directories of some clusters), and not from DIET or the DIET Dashboard, which allowed a good dispatching of the Middleware requests and a fast and efficient handling of these hardware problems.
## 6 Conclusion
With the development of Grid technologies and the availability of large scale platforms, it becomes mandatory to manage Grid applications efficiently and easily. In this paper, we have presented the DIET Dashboard environment, which is a complete, modular, portable, and powerful set of tools dedicated to a Grid Middleware. With this tool, a non-expert user can manage Grid resources, monitor the Grid itself, and manage the Grid Middleware by designing Grid applications or using workflows, and then deploying these Grid applications over the Grid platform.
The DIET Dashboard offers a large number of modules, created to answer the different needs of tools appearing in a Grid context. The software architecture of the DIET Dashboard makes it extensible (modules can easily be added to the core of the application).
The performance of the DIET Dashboard and of GRUDU (the tool dedicated to Grid management) was tested through the experiment performed on Grid’5000. This experiment showed that the resources tool is able to monitor the entire Grid and to reserve resources on a large number of sites and clusters.
GRUDU is one answer to the need for an efficient tool for managing both the hardware and software parts of the Grid. GRUDU abstracts away the scripting part of Grid management, in order to provide the user with an easy-to-use GUI where all the necessary operations are available. Users no longer need to write obscure and complex command lines to manage their resources, which is often one of the main barriers to the use of Grid environments.
All these elements show that the DIET Dashboard is a stable and efficient tool that unifies different tools into one single modular graphical application.
## 7 Acknowledgments
DIET was developed with financial support from the French Ministry of Research (RNTL GASP and ACI ASP) and the ANR (Agence Nationale de la Recherche) through the LEGO project referenced ANR-05-CIGC-11 and Gwendia project (ANR-06-MDCA-009). All experiments were done over the Grid’5000 platform.
We would like to thank the developers of the DIET Middleware and in particular Abdelkader Amar for his work around DIET Dashboard.
Abstract—Bulk Synchronous Parallel ML, or BSML, is a high-level language for programming parallel algorithms. Built upon the Objective Caml language, it provides a safe setting for implementing Bulk Synchronous Parallel (BSP) algorithms, avoiding concurrency-related problems: deadlocks and non-determinism. BSML is based on a very small core of parallel primitives that extend sequential functional programming to functional BSP programming with a parallel data structure and operations to manipulate it. However, in practice the primitives for writing the non-communicating parallel parts of a program are not so easy to use. We therefore designed a new syntax that makes programs easier to write and read. Revised BSML is presented, and its expressiveness and performance are illustrated through an application example.
I. INTRODUCTION
In the context of “Think Parallel or Perish”, parallel code is becoming the norm. But many programmers are not able to manipulate low-level routines without introducing bugs such as deadlocks and non-determinism. Furthermore, low-level programming forbids optimisations that could be done with a more structured parallelism. Collective operations and skeletons offer a global view of the application and exhibit a more structured parallelism. However, such high-level programming is still rare. One of the reasons is that these approaches often do not provide a sufficiently wide set of patterns for practical and efficient programming. That makes the design of new, robust and general parallel programming languages an important area of research. Creating such a language involves a tradeoff between expressiveness, offering the programmer the freedom to write all the parallel details of algorithms, and the structure necessary to build correct programs, with predictable performance, more easily.
Bulk Synchronous Parallel ML, or BSML, is an extension of Objective Caml for writing Bulk Synchronous Parallel (BSP) algorithms. It combines the high degree of abstraction of ML with the scalable and predictable performance of BSP. In the BSP model, programs are written as a sequence of steps, called super-steps, each alternating a phase of computation and a phase of communication, and finishing with a global barrier. Communications are bulk and collective. This simplifies parallel programs, because the programmer is not responsible for managing low-level communication details (how data are packaged, routed and received by other processors). Within a super-step, the work is done in parallel, but the global structure of the algorithm is sequential. This simple structure has proven its worth in practice for many parallel applications.
BSML is based on a structured model of parallelism, but is universal for this model: any BSP algorithm can be written using BSML. Other structured parallelism approaches, such as algorithmic skeletons, first define a set of parallel patterns, the skeletons, and the model of parallelism is then derived from this set. When the set does not fit a particular algorithm, it may be extended: this of course requires a lot of work from the library implementor, and it does not help the user of the library, who then has to choose among a bigger set of skeletons.
On the contrary, BSML offers a very small set of parallel primitives over a parallel data structure, called parallel vector: four functions to create and manipulate this structure as well as four constants to access the BSP parameters of the underlying architecture. By comparison, the standard BSPlib library for BSP programming in C offers about fifteen primitives, and MPI more than a hundred.
However, one of the primitives used to describe communications in BSML is not very simple to use together with the current non-communicating primitives. Moreover, the non-nesting requirement (explained below) on parallel vectors introduces additional constraints that make a lot of construction/deconstruction of parallel vectors necessary. Novice BSML programmers often find these two aspects difficult to deal with.
We have thus made the choice to design a revised syntax for BSML. Even if this syntax is just syntax and does not modify the principles of BSML programming, it makes BSML programs simpler to read and write. In practice, we believe simple syntax may be as important as simple semantics.
This paper describes our new syntax and illustrates it with application examples. First, we describe the classic and revised BSML syntax and informal semantics in section II. Section III is devoted to the implementation of bigger examples together with some timings. Section IV presents related work. Future work and conclusion are discussed in section V.
Some basic knowledge of any ML programming language is assumed. We refer to [?, Manual, chapter 1] for an Objective Caml tutorial.
II. REVISED BULK SYNCHRONOUS PARALLEL ML
BSML is currently implemented as a library for the Objective Caml language [2]. The version used for this paper can be downloaded [3]. Before presenting the classic and revised syntax and informal semantics of BSML, let us present the bulk synchronous parallel model on which BSML is based.
The bulk synchronous parallel model [2], [7] offers an abstract model of parallel architecture and a model of parallel program execution, together with a performance model. A BSP computer is a homogeneous distributed-memory machine with a global synchronisation unit. Any general purpose parallel architecture can be seen as a BSP computer. A BSP program is executed as a sequence of super-steps. A super-step is composed of three successive and logically disjoint phases.
In the local computation phase, each processor uses its local data to perform sequential computations. Each processor may also request data transfers to and/or from other processors. In the communication phase, the network delivers the requested data transfers. A synchronisation barrier involving all the processors of the BSP computer is the third phase and ends the super-step. It is only at the end of this third phase that the transferred data become available for the local computation phase of the next super-step.
The performance of a BSP computer is characterised by three parameters. The parameter $p$ is the number of processor-memory pairs. The communication and synchronisation performances are characterised by the parameter $L$, the time required for a global synchronisation, and the parameter $g$, the time for collectively delivering a 1-relation (a communication phase where every processor receives and/or sends at most one word). The network can deliver an $h$-relation in time $g \times h$ for any natural $h$. In practice, the BSP parameters can be determined using benchmarks (usually a fourth parameter $r$, the computing power of the processors, is determined first).
For a super-step, if at processor $i$, $w_i$ is the local sequential work performed during the computation phase, $h_i^+$ is the size of data sent from $i$ to other processors, and $h_i^-$ the size of the received data by processor $i$ from other processors, then the execution time (or cost) of the super-step is:
$$\max_{0 \leq i < bsp_p} w_i + \max_{0 \leq i < bsp_p} \max(h_i^+, h_i^-) \times g + L$$
The cost of a BSP program is the sum of the costs of its super-steps.
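As a worked illustration of this cost model, the following function (our helper, not part of BSML) computes the cost of one super-step from per-processor work and communication volumes:

```ocaml
(* Cost of one super-step: ws are the local works w_i (in flops),
   hs_out and hs_in the sent/received volumes h_i+ and h_i- (in words);
   g and l are the BSP parameters. *)
let superstep_cost ~g ~l ws hs_out hs_in =
  let max_list = List.fold_left max 0. in
  let h = List.map2 (fun hp hm -> float_of_int (max hp hm)) hs_out hs_in in
  max_list ws +. max_list h *. g +. l
```

For instance, with g = 20.3 and L = 4571. (the values measured later in this section), `superstep_cost ~g:20.3 ~l:4571. [1e6; 1.2e6] [5; 3] [4; 6]` adds the maximal local work, the cost of a 6-relation, and one barrier.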
BSML is based on a data type called parallel vector which, among all Objective Caml types, enables parallelism. A parallel vector has type `'a par` and embeds $p$ values of any type `'a`, one at each of the $p$ different processors of the parallel machine. The nesting of parallel vectors is not allowed. As BSML is currently implemented as a library, nested evaluation of an expression of type `(t par) par` for a given type `t` would lead to an unspecified behaviour. A type system could ensure that no such expression exists in the program [2]. However, the current implementation of BSML as a library does not offer this specific type system; only an implementation of BSML as a full language could offer such a feature. It is thus currently the responsibility of the programmer to avoid nesting of parallel vectors.
The number $p$ of processors is fixed throughout the execution of the program. It can be accessed in BSML using the integer constant $bsp_p$. The other BSP parameters are also accessible as float values through constants $bsp_g$, $bsp_l$ and $bsp_r$.
BSML comes, as Objective Caml, with three modes of compilation/evaluation:
- a byte-code compiler (a set of scripts calling the Objective Caml byte-code compiler with the appropriate BSML modules, in the same way that mpicc is not a full compiler; there are several scripts, as there are several available implementations of BSML depending on the underlying low-level communication library used: TCP, MPI or sequential),
- a native code compiler (also a set of scripts),
- an interactive loop also called top-level.
This interactive loop is a sequential simulator for BSML. Thanks to BSML semantical properties it is ensured that the parallel and the sequential implementations behave the same [2].
On starting, the BSML top-level is as follows:
```
BSPML version 0.5
The BSP Machine has 4 processors
• BSP parameters g = 20.3 flops/word
• BSP parameters L = 4571. flops
• BSP parameters r = 498952227. flops/s
```
The values of the BSP parameters shown are values measured on a quad-core i7 machine. The `#` symbol is the prompt that invites the user to enter an expression to be evaluated. The top-level then gives an answer of the form: name of the defined value (possibly none, written “-”), type, and pretty-printing of the value. In case the value cannot be pretty-printed (for example functions), an abstract representation is given (for example `<fun>`).
The $p$ processors are labelled with integers from 0 to $p - 1$, which we call the `pid` (Processor Identifier) of the processors. We distinguish this structure from a usual sequential vector or array of size $p$ because the different values, which will be called `local`, are blind to each other: it is only possible to access the local value $x_i$ in two cases: locally, on processor $i$ (by the use of a specific primitive), or after some communications.
These restrictions are inherent to distributed memory parallelism. This makes parallelism explicit and programs more readable. Since a BSML program deals with a whole parallel machine and individual processors at the same time, a distinction between the levels of execution that take place will be needed:
- **Replicated** execution is the default. Code that does not involve BSML primitives (nor, as a consequence, parallel vectors) is run by the parallel machine as it would be by a single processor. Replicated code is executed at the same time by every processor, and leads to the same result everywhere.
- **Local** execution is what happens inside parallel vectors, on each of their components: the processor uses its local data to do computation that may differ from the others’.
- **Global** execution concerns the set of all processors together, but as a whole and not as a single processor. A typical example is the use of communication primitives.
The implementation of the classic BSML library is based on the primitives of Figure ??, where \( \langle x_0, x_1, \ldots, x_{p-1} \rangle : \texttt{'a par} \) denotes a parallel vector. This vector holds the value \( x_i \) at processor \( i \).
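These primitives can be summarised by the following signature (a sketch based on the descriptions in this paper; the exact module layout of the BSML distribution may differ):

```ocaml
(* Sketch of the classic BSML interface; 'a par is the abstract
   type of parallel vectors. *)
module type BSML = sig
  type 'a par
  val bsp_p : int                                (* number of processors *)
  val bsp_g : float                              (* 1-relation delivery time *)
  val bsp_l : float                              (* synchronisation barrier time *)
  val bsp_r : float                              (* processor computing power *)
  val mkpar : (int -> 'a) -> 'a par              (* build a vector from a pid function *)
  val apply : ('a -> 'b) par -> 'a par -> 'b par (* pointwise application *)
  val put   : (int -> 'a) par -> (int -> 'a) par (* bulk data exchange *)
  val proj  : 'a par -> (int -> 'a)              (* projection out of a vector *)
end
```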
**mkpar** builds a parallel vector: the components of the obtained parallel vector are the results of the application of its argument function to the pid of every processor. Examples evaluated in the top-level follow (still using the quad-core machine), together with the classic derived functions replicate and parfun and the list procs of all pids:

```ocaml
# let this = mkpar (fun pid -> pid);;
val this : int Bsml.par = <0, 1, 2, 3>
# let replicate x = mkpar (fun _ -> x);;
val replicate : 'a -> 'a Bsml.par = <fun>
# let parfun f v = apply (replicate f) v;;
val parfun : ('a -> 'b) -> 'a Bsml.par -> 'b Bsml.par = <fun>
# let procs = List.init bsp_p (fun i -> i);;
val procs : int list = [0; 1; 2; 3]
# let total_exchange vec =
    parfun
      (fun f -> List.map f procs)
      (put (apply (replicate (fun v dst -> v)) vec));;
val total_exchange : 'a Bsml.par -> 'a list Bsml.par = <fun>
# total_exchange this;;
- : int list Bsml.par = <[0; 1; 2; 3], [0; 1; 2; 3], [0; 1; 2; 3], [0; 1; 2; 3]>
```

A simpler version of this function is given below in the new syntax and commented.
Having a very small core of parallel operations is a great strength for the formalisation of the language. It makes the definitions clear and short. However, the program, even if high-level, still has to deal with replicated values and parallel vectors, and the use of the primitives can sometimes become awkward. Indeed, every operation inside of parallel vectors has to call a primitive and define an "ad hoc" function. This gets worse when working with multiple vectors, with nested calls to **apply**. Simply transforming a pair of vectors into a vector of pairs is written:
```ocaml
let combine_vectors (v, w) = apply (parfun (fun x y -> (x, y)) v) w
```
This could be made simpler with the definition of
```ocaml
let parfun2 f x y = apply (parfun f x) y
```
We get then:
```ocaml
let combine_vectors (v, w) = parfun2 (fun x y -> (x, y)) v w
```
which is easier to read, but still unsatisfactory because we have to define, each time, a specific function. This implies creating named parameters although our function will only be applied to our vectors, and can be confusing:
```ocaml
let combine_vectors (v, w) = parfun2 (fun v w -> (v, w)) v w
```
which is exactly the same as above but can lead the programmer to errors.
Instead of a point of view based on primitives, we can consider the execution levels, so that one can write code that will be executed globally, as in standard Objective Caml, and code that will be executed locally, inside a parallel vector. Then, to access local data in a local section, we no longer need to define additional functions, because vectors can now be opened locally. A local section, written `≪ e ≫`, builds a parallel vector. Replicated information is available inside the vector, as with the **mkpar** example above. To access local information, we add the syntax `$x$` to open the vector \( x \) and get the local value it contains; `$x$` can obviously be used only within local sections. It is now possible to write **combine_vectors** as follows:
```ocaml
let combine_vectors (v, w) = ≪ ($v$, $w$) ≫
```

which is shorter, clearer and thus less error-prone. Additionally, the local pid can be accessed with $this$, replacing calls to `mkpar`. Synchronous primitives (proj and put) do not need a special syntax, but their use is already made simpler.
The total_exchange example could be rewritten:
```ocaml
let total_exchange vec =
  let msg = put ≪ fun dst -> $vec$ ≫ in
  ≪ List.map $msg$ procs ≫
```
It is now much clearer that, at each processor, the function given as argument to put returns the local value of the parallel vector vec for every destination processor, meaning that this local value will be sent to all processors. Using the received values is also clearer: at each processor, the local function encoding the received messages is applied to every processor identifier, yielding the list of all received messages.
Figure ?? gives a summary of the revised BSML syntax.
III. APPLICATIONS AND EXPERIMENTS
A. Heat Equation
We now illustrate how BSML could be used to implement a scientific application. The heat equation describes the variation in temperature over time in a given material. It can be used to simulate the evolution of the distribution of heat in a material. We will consider here the one dimensional heat equation:
$$\frac{\partial u}{\partial t} - \gamma \frac{\partial^2 u}{\partial x^2} = 0$$
where $u(x,t)$ gives the temperature in position $x$ at time $t$ and $\gamma$ is the thermal diffusivity of the material.
Using a discretization method on time and space using steps $dx$ and $dt$ we obtain the following equation:
$$u(x,t+dt) = \frac{\gamma dt}{dx^2} [u(x+dx,t)+u(x-dx,t)-2u(x,t)] + u(x,t)$$
There exist parallel implementations of 1D heat equation using algorithmic skeletons, for example using the SkeTo library for C++ [2].
In sequential OCaml, using only the functions List.map and List.map2, together with the following helper functions:

```ocaml
# let rec remove_last l = match l with
    | [x] -> []
    | x::xs -> x::(remove_last xs)
    | [] -> invalid_arg "remove_last";;
val remove_last : 'a list -> 'a list = <fun>
# let shift_right l_bound u = l_bound::(remove_last u);;
val shift_right : 'a -> 'a list -> 'a list = <fun>
# let shift_left r_bound u = (List.tl u)@[r_bound];;
val shift_left : 'a -> 'a list -> 'a list = <fun>
```

the heat function can be written:

```ocaml
# let heat gamma dx dt l_bound r_bound u =
    let u_plus_dx = shift_right l_bound u
    and u_minus_dx = shift_left r_bound u in
    let u1 = List.map2 (+.) u_plus_dx u_minus_dx in
    let u2 = List.map2 (fun v1 v2 -> v1 -. 2. *. v2) u1 u in
    let u3 = List.map (fun v -> gamma *. dt /. (dx *. dx) *. v) u2 in
    List.map2 (+.) u3 u;;
val heat : float -> float -> float -> float -> float ->
  float list -> float list = <fun>
```

l_bound and r_bound are the boundary conditions, i.e. the temperature at both ends of the material (outside the material). These are constant float values. This program is quite similar to the SkeTo heat equation example. Skeleton libraries cannot easily be extended by their users to add new skeletons. On the contrary, it is easy in Objective Caml to define the map3 function and to rewrite the heat function, which becomes both easier to read and a bit more efficient:
```ocaml
# let rec map3 f l1 l2 l3 = match (l1, l2, l3) with
    | ([], [], []) -> []
    | (x1::xs1, x2::xs2, x3::xs3) ->
        (f x1 x2 x3)::(map3 f xs1 xs2 xs3)
    | _ -> invalid_arg "map3";;
val map3 : ('a -> 'b -> 'c -> 'd) ->
  'a list -> 'b list -> 'c list -> 'd list = <fun>
# let heat2 gamma dx dt l_bound r_bound u =
    let u_plus_dx = shift_right l_bound u
    and u_minus_dx = shift_left r_bound u in
    map3 (fun updx umdx v ->
        gamma *. dt /. (dx *. dx) *. (updx +. umdx -. 2. *. v) +. v)
      u_plus_dx u_minus_dx u;;
val heat2 : float -> float -> float -> float -> float ->
  float list -> float list = <fun>
```
B. Parallel Heat Equation
Now if we want to develop a parallel version of this code, we can distribute $u$ which would have type float list par. In
this case, at each processor and at each step of the simulation we would apply the sequential version of the heat function on the local part of \( u \). Of course to be able to do so for the first and last elements of the local list \( u \), one should have the values of the last element of the local part of \( u \) held by the left neighbour processor and the first element of the local part of \( u \) held by the right neighbour processor. With a last function defined as:
```ocaml
# let last l = List.hd (List.rev l);;
val last : 'a list -> 'a = <fun>
```
we can implement the function that gets, for each processor, the last value held by its left neighbour as:
```ocaml
let get_l_bounds l_bound u =
  let msg = put ≪ fun dst ->
    if dst = $this$ + 1 && $this$ <> bsp_p - 1
    then [last $u$] else [] ≫ in
  ≪ if $this$ = 0
    then l_bound
    else List.hd ($msg$ ($this$ - 1)) ≫;;
val get_l_bounds : 'a -> 'a list Bsml.par -> 'a Bsml.par = <fun>
```
For the vector of functions passed to the put primitive, the case of the last processor is specific: it has no right neighbour to send to. Symmetrically, when we retrieve the sent values, no left processor sent anything to processor 0, which therefore uses the l_bound boundary value. An example of use follows, where Tools.from_to n1 n2 builds the integer list from n1 to n2:
```ocaml
# let vec = ≪ Tools.from_to (2 * $this$) (2 * $this$ + 1) ≫;;
val vec : int list Bsml.par = <[0; 1], [2; 3], [4; 5], [6; 7]>
# get_l_bounds 10 vec;;
- : int Bsml.par = <10, 1, 3, 5>
```
The get_r_bounds function can be written in a similar way (and has the same type), using a first function instead of last; in the following we also assume a get_bounds function that combines both and returns the pair of boundary vectors. If we use the following array functions:
```ocaml
# let shift_right l_bound u = let len = Array.length u in
    Array.init len (fun i -> if i = 0 then l_bound else u.(i-1));;
val shift_right : 'a -> 'a array -> 'a array = <fun>
# let shift_left r_bound u = let len = Array.length u in
    Array.init len (fun i -> if i = len-1 then r_bound else u.(i+1));;
val shift_left : 'a -> 'a array -> 'a array = <fun>
# let map3 f a1 a2 a3 = let len = Array.length a1 in
    Array.init len (fun i -> f a1.(i) a2.(i) a3.(i));;
val map3 : ('a -> 'b -> 'c -> 'd) ->
  'a array -> 'b array -> 'c array -> 'd array = <fun>
```
instead of the previous ones, the code of heat2, renamed heat3, operates on arrays instead of lists. With the following definitions of first and last:
```ocaml
# let first a = a.(0);;
val first : 'a array -> 'a = <fun>
# let last a = a.(Array.length a - 1);;
val last : 'a array -> 'a = <fun>
```
the code of \( get \_\text{bounds} \) would operate on a parallel vector of arrays instead of a parallel vector of lists. Then the following function operates on a parallel vector of arrays:
```ocaml
# let par_heat3 gamma dx dt l_bound r_bound u =
    let (l_bounds, r_bounds) = get_bounds l_bound r_bound u in
    ≪ heat3 gamma dx dt $l_bounds$ $r_bounds$ $u$ ≫;;
val par_heat3 : float -> float -> float -> float ->
  float -> float array Bsml.par -> float array Bsml.par = <fun>
```
From the definition of map3 we see that we can write a more efficient version of heat3 that does not require the creation of the intermediate arrays \( u\_plus\_dx \) and \( u\_minus\_dx \):
```ocaml
# let heat4 gamma dx dt l_bound r_bound u =
    let len = Array.length u in
    Array.init len (fun i ->
      let updx = if i = len-1 then r_bound else u.(i+1)
      and umdx = if i = 0 then l_bound else u.(i-1) in
      gamma *. dt /. (dx *. dx) *. (updx +. umdx -. 2. *. u.(i)) +. u.(i));;
val heat4 : float -> float -> float -> float ->
  float -> float array -> float array = <fun>
```
Using this new heat4 leads to a new \text{par_heat4}. It is even possible to change the pure functional style of arrays to an imperative style. For this we need two arrays:
```ocaml
# let heat5 gamma dx dt l_bound r_bound u u' =
    let len = Array.length u in
    for i = 0 to len-1 do
      u'.(i) <-
        (let updx = if i = len-1 then r_bound else u.(i+1)
         and umdx = if i = 0 then l_bound else u.(i-1) in
         gamma *. dt /. (dx *. dx) *. (updx +. umdx -. 2. *. u.(i)) +. u.(i))
    done;;
val heat5 : float -> float -> float -> float ->
  float -> float array -> float array -> unit = <fun>
```
and the parallel version becomes:
```ocaml
# let par_heat5 gamma dx dt l_bound r_bound u u' =
    let (l_bounds, r_bounds) = get_bounds l_bound r_bound u in
    ≪ heat5 gamma dx dt $l_bounds$ $r_bounds$ $u$ $u'$ ≫;;
val par_heat5 : float -> float -> float -> float ->
  float -> float array Bsml.par -> float array Bsml.par ->
  unit Bsml.par = <fun>
```
When we iterate, we switch the two arrays at each step.
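A minimal driver illustrating this swap (the iterate function below is our sketch, not code from the paper):

```ocaml
(* Perform n steps of par_heat5, exchanging the roles of u and u' at
   each step; after the call, the latest state is in u if n is even,
   and in u' otherwise. *)
let rec iterate n gamma dx dt lb rb u u' =
  if n > 0 then begin
    ignore (par_heat5 gamma dx dt lb rb u u');
    iterate (n - 1) gamma dx dt lb rb u' u
  end
```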
Experiments were performed on a cluster of 8 nodes connected by a gigabit Ethernet network. Each node contains two processors (Quad-Core AMD Opteron 2376, 2.29 GHz, 16 GB of RAM) and runs Ubuntu Linux with kernel
2.6.24-24. The BSML programs were compiled in native code with the MPI version of the communication module (OpenMPI 1.4.2 with gcc 4.3.2) and Objective Caml 3.11.1.
Array versions are much more efficient. The par_heat2 version with lists, on 16 processors, for a list of size $10^6$ and 10 iterations, is about 17 times slower than the par_heat3 version. Figure ?? compares the versions with arrays on a global array of size $10^7$ with 100 iterations.
IV. RELATED WORK
As some readers may point out, the notation $\ll e \gg$ introduced in this paper, which builds a parallel vector, is close to constructions like “e1 par e2” in parallel Haskell [?] (if it is used $p-1$ times) or to the creation of processes in Eden [?]. However, the meanings of these constructions differ. The fine-grained parallelism introduced by GPH's par takes two arguments that are to be evaluated in parallel. The expression “e1 par e2” has the same value as “e2”. Its dynamic behaviour is to indicate that “e1” could be evaluated by a new parallel thread, with the parent thread continuing the evaluation of “e2”. Threads are then distributed over the processors at run-time. Communications are implicit, through shared variables.
In our case, parallelism is explicit (as well as the communications and the distribution of data) and, in particular, nesting of parallelism is prohibited, to optimise the performance of the implementation [?]. BSML is clearly a lower-level programming language compared to algorithmic skeletons, for example, but it comes with a realistic cost model and is well adapted to writing coarse-grain algorithms. Moreover, it can be used to implement higher-order parallel functions that can serve as algorithmic skeletons.
V. CONCLUSION AND FUTURE WORK
Parallel architectures are taking the lead in computer hardware. There is much research on advanced programming paradigms to find the best ways to accommodate parallelism. We present in this paper a new syntax for our high-level BSP language, BSML.
This new syntax reduces the size of the code. Code is also simpler to read, which eases debugging and reasoning about performance. Our new syntax can be simulated by the classic BSML primitives, which keeps old code compatible and allows the reuse of existing proof developments about BSML in Coq [?].
Future work includes the development of applications with BSML and the integration of an exception handling mechanism (which already exists [?]) into the public release. In the longer term, we plan to work on an implementation of BSML as a full language rather than a library. We are also working on proving the correctness of the implementation of the revised BSML syntax.
Improving and modeling the performance of a Publish-Subscribe message broker
Rafael Rocha, Cláudio Maia, Luis Lino Ferreira
CISTER Research Center, ISEP Polytechnic Institute of Porto
Porto, Portugal
{rtdrh, crr, llf}@isep.ipp.pt
Pal Varga
Dept. of Telecomm. and Media Informatics, Budapest University of Technology and Economics
Budapest, Hungary
pvarga@tmit.bme.hu
Abstract—The Event Handler – a publish-subscribe broker implemented over REST/HTTP(S) – is an auxiliary system of the Arrowhead framework for IoT applications. During this work we found that the existing implementation of the Event Handler suffers from serious performance issues. This paper describes the reengineering effort that ultimately enabled it to reach much more acceptable levels of performance, by using appropriate software configurations and design patterns. Additionally, we also illustrate how this enhanced version of the Event Handler can be modeled using Petri nets, to depict the performance impact of different thread pool configurations and CPU core availability. The main objective of this modeling process is to enable the estimation of the system’s performance to guarantee the required quality of service.
Keywords—Performance, Publish-Subscribe, HTTP, REST, SOA, Java, Petri Nets, Real-Time
I. INTRODUCTION
The Arrowhead Framework [1] aims at using a service-oriented approach (SOA) for IoT applications, by providing a set of services [1] that support the interaction between applications, such as services capable of providing sensor readings. One of the available Arrowhead systems, the Event Handler (EH), is used to propagate updates from a producer service to one or more consumer applications. In this sense, the EH serves as a REST/HTTP(S) implementation of a publish-subscribe message broker, handling the distribution of messages (i.e. events) from publishers to subscribers (as portrayed in Fig. 1). For an Arrowhead publisher service to continuously notify its subscribers within its performance requirements, the EH's performance is of extreme importance. There are two important performance parameters to take into account in a publish-subscribe setting: i) the end-to-end delay for a message to go from a producer to a consumer; and ii) the message throughput, i.e., the number of messages which can be sent per time unit and processed by the EH. These two performance parameters are evaluated in this work and then modeled using Petri nets, in order to characterize and estimate the EH's performance on different hardware and network scenarios.
However, the existing implementation of the EH suffers from severe end-to-end message latency (up to a maximum of 4.9 seconds to deliver a message), mostly due to the wasteful creation of threads and HTTP connections, which also leads to unnecessarily high CPU and memory usage, particularly affecting resource-constrained host machines. While each of these issues has its own documented solutions, we were unable to find properly documented solutions for Java-based message brokers. Therefore, this paper attempts to provide a novel solution for this problem, by presenting the engineering effort that was necessary to significantly improve the EH's end-to-end latency.
II. THE EVENT HANDLER
A. The Arrowhead Framework
The Arrowhead Framework is the result of a set of European projects in which SOA principles have been applied to IoT and industrial applications. As the main result of the Arrowhead project, the framework continued its development independently and is now being used in multiple industrial installations and further developed in other projects. It aims at enabling all systems to work in a common and unified approach – leading towards high levels of interoperability. The software framework includes a set of Core Services [1] (service discovery, orchestration and authentication) that support the interaction between Application Services.
The Arrowhead Framework builds upon the local cloud concept, where local automation tasks should be encapsulated and protected from outside interference. Application services are not able to communicate with services outside the local cloud (intra-cloud orchestration), except with other Arrowhead compliant local clouds (inter-cloud orchestration). Each local cloud must contain, at least, the three mandatory core systems: Service Registry, Authorization and Orchestration, thus enabling the communication between Arrowhead application services. These core systems are then accompanied by automation supporting services that further improve the core capabilities of a local cloud, from measuring quality of service to enabling message propagation between multiple systems. The Event Handler (EH) is one of these supporting systems.
B. The Event Handler (original version)
The Arrowhead EH uses a REST-based architecture implemented on top of Grizzly [2] and Jersey [3]. Grizzly comprises: i) a core framework that facilitates the development of scalable event-driven applications using the Java
Non-blocking I/O (NIO) API, and ii) both client-side and server-side HTTP services. Jersey is a framework that facilitates the development of RESTful Web Services and their clients, by providing an implementation of the standard JAX-RS API (the standard specification for developing REST services in Java) and some extensions. The standard use of Jersey (which uses servlets as its underlying mechanism) leads to the creation of a new thread for each request, the thread being destroyed after its work is completed. Thus, RESTful services using standard Jersey will slow down when there are thousands of requests sent at the same time or at a very fast pace (explored later in section II-C3). In order to solve this problem, several implementations of web containers provide a thread pool, which reuses previously created threads to execute current tasks and offers a solution to the overhead and resource consumption of thread creation. This in turn pushes the thread creation responsibility down a layer, below Jersey and into the web container [4]. Grizzly is a popular implementation of these web containers.
However, the Grizzly HTTP server module in the EH does not currently have a configured thread pool. Thus, it will most likely not be able to handle multiple requests efficiently. Moreover, for the client applications that are meant to use the EH, i.e. the publishers and subscribers, the Arrowhead Consortium provides client skeletons to be extended with the developers' own application code [5]. These client skeletons use the same Jersey/Grizzly setup and server configuration as the Arrowhead systems.
1) The testing environment
In order to evaluate the EH's performance, we conducted a test on the system, with one Publisher sending 2000 events (sequentially, with no delay) to the EH, which connects to one Subscriber. Each request is 71 bytes long, on a 100 Mb/s Switched Ethernet LAN. While wireless connectivity is emerging in industrial scenarios, it was important for us to test the EH in a wired environment, so that network latency would be minimal. To measure the latency between Publisher, EH, and Subscriber, each time one of these components sends or receives an HTTP request, it outputs a message describing the action and the current timestamp. We deployed the EH and the Subscriber on Raspberry Pis. There are two main reasons to use this platform: i) when testing software on a resource-constrained platform, bottlenecks become more obvious and easier to identify; ii) Raspberry Pi hardware is heavily documented and its usage is widespread in industrial and IoT applications. The testing environment is displayed in Fig. 2; it basically consists of a publisher, a subscriber and the EH, with all clocks synchronized using a local NTP server, which provides accuracies in the range of 0.1 ms [6].
2) Performance evaluation
After sending 2000 events to the original EH, 41.9% of these events had an end-to-end latency greater than 100 ms, and 20.3% had a latency greater than 1 s, with an average of approximately 666.3 ms. Moreover, the maximum latency reached 4.9 s. This type of performance is a symptom of a bottleneck in the system. Consequently, the official implementation of the EH was revised.
C. Improving the Event Handler
A manual code review was performed on the Publisher, the EH, and the Subscriber. Two major problems were detected. The first was that none of the three components reused connections. This has a major performance impact on communications, since establishing a connection from one system to another is rather complex and consists of multiple packet exchanges between the two endpoints (connection handshaking), which can cause major overhead, especially for small HTTP messages [7]. In fact, a much higher data throughput is achievable if open connections are re-used to execute multiple requests. This problem required a different solution on each side: a) the Publisher had to use a connection pool so that it could reuse its connections to the EH (see section II-C1); b) the EH had to use Jersey's own Server-Sent Events mechanism to establish a persistent connection to each of its Subscribers (see section II-C2). The second problem was that the EH created a new thread for every incoming request, which greatly impacted the machine's available RAM and response times. Thus, the EH required a thread pool to manage incoming requests in a less wasteful manner, as threads can be reused among different requests (see section II-C3).
1) Reuse open connections between the Publisher and the Event Handler
In order to reuse open connections between the Publisher and the EH, the best choice was to implement a connection pool on the Publisher, via the Apache HTTP Client on Jersey's transport layer. With the Apache HTTP Client [7], a configurable maximum number of connections is maintained on a per-endpoint basis, so a request for an endpoint for which the client already has a persistent connection available in the pool is handled by reusing that connection rather than creating a brand-new one.
In our setup, only one connection per route was configured in order to maintain message order, since using multiple parallel connections might lead to messages being processed out of order.
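For illustration, a minimal sketch of how such a per-route pool might be wired into a Jersey 2.x client via the Apache connector (the classes below are from the jersey-apache-connector and Apache HttpClient libraries; the class name, port, and endpoint URL are hypothetical):

```java
import javax.ws.rs.client.Client;
import javax.ws.rs.client.ClientBuilder;
import javax.ws.rs.client.Entity;

import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.glassfish.jersey.apache.connector.ApacheClientProperties;
import org.glassfish.jersey.apache.connector.ApacheConnectorProvider;
import org.glassfish.jersey.client.ClientConfig;

public class PublisherClient {
    public static Client pooledClient() {
        // One persistent connection per route preserves event ordering.
        PoolingHttpClientConnectionManager pool = new PoolingHttpClientConnectionManager();
        pool.setDefaultMaxPerRoute(1);
        pool.setMaxTotal(1);

        ClientConfig config = new ClientConfig();
        config.connectorProvider(new ApacheConnectorProvider());
        config.property(ApacheClientProperties.CONNECTION_MANAGER, pool);
        return ClientBuilder.newClient(config);
    }

    public static void main(String[] args) {
        Client client = pooledClient();
        // Hypothetical EH publish endpoint; repeated requests reuse the pooled connection.
        client.target("http://eventhandler.local:8454/publish")
              .request()
              .post(Entity.json("{\"event\":\"example\"}"));
    }
}
```

Setting both the per-route and total limits to 1 mirrors the single-connection-per-route choice described above.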
2) Establish a persistent connection between the Event Handler and each Subscriber
The EH also did not reuse previously created connections to its subscribers, consequently adding a large overhead to each message's end-to-end delay due to connection establishment. Thus, to avoid creating a new connection to each subscriber on every request, we used Jersey's Server-Sent Events (SSE) [8] mechanism in the new implementation of the EH.
The SSE mechanism can be used to handle a one-way publish-subscribe model. When the Subscriber sends a request to the EH, the EH holds a connection between itself and the Subscriber until a new event is published. When an event is published, the EH sends the event to the Subscriber, while keeping the connection open so that it can be reused for the next events. The Subscriber processes the events sent from the EH individually and asynchronously without closing the connection. Therefore, the EH can reuse one connection per Subscriber.
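A minimal sketch of this pattern with Jersey 2.x SSE follows (SseBroadcaster, EventOutput, and OutboundEvent come from the jersey-media-sse module; the resource path and payload handling are hypothetical):

```java
import javax.ws.rs.Consumes;
import javax.ws.rs.GET;
import javax.ws.rs.POST;
import javax.ws.rs.Path;
import javax.ws.rs.Produces;
import javax.ws.rs.core.MediaType;

import org.glassfish.jersey.media.sse.EventOutput;
import org.glassfish.jersey.media.sse.OutboundEvent;
import org.glassfish.jersey.media.sse.SseBroadcaster;
import org.glassfish.jersey.media.sse.SseFeature;

@Path("events")
public class EventResource {
    private static final SseBroadcaster BROADCASTER = new SseBroadcaster();

    // A Subscriber calls this once; the returned EventOutput keeps the
    // connection open so it can be reused for every subsequent event.
    @GET
    @Produces(SseFeature.SERVER_SENT_EVENTS)
    public EventOutput subscribe() {
        EventOutput channel = new EventOutput();
        BROADCASTER.add(channel);
        return channel;
    }

    // The Publisher posts an event; it is pushed over all held connections.
    @POST
    @Consumes(MediaType.APPLICATION_JSON)
    public void publish(String event) {
        BROADCASTER.broadcast(new OutboundEvent.Builder()
                .mediaType(MediaType.APPLICATION_JSON_TYPE)
                .data(String.class, event)
                .build());
    }
}
```

The broadcaster keeps every registered EventOutput open, so each Subscriber's connection is established once and then reused for all subsequent events.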
3) Reuse previously created threads in the Event Handler
As explained in section II-B, if the Grizzly HTTP server's thread pool is not configured, Grizzly follows Jersey's default model of generating a new thread for each request. In other words, with every wave of two thousand requests sent to the EH, Jersey will allocate 2000 server threads almost simultaneously and close them soon afterwards [9]. Naturally, this leads to a great amount of overhead (thread creation and teardown, and context switching between thousands of threads) and a large consumption of system memory (the host OS must dedicate a memory block for each thread stack; with default settings, just four threads consume 1 Mb of memory [10]), which is largely inefficient.
The solution is to configure a thread pool on the Grizzly HTTP server module, which will reuse threads instead of destroying them. The key question is: what is the optimal thread pool size for this scenario? While there is no clear-cut answer, it is usually suggested that if the HTTP request is CPU bound (as in this case), the number of threads should be at most equal to the number of CPU cores in the host machine [11]; if the request is more I/O bound, then more threads can successfully run in parallel. Therefore, the empirical process of identifying the optimal pool size consisted in starting with the same number of threads as CPU cores and increasing it until there was no discernible improvement in throughput. Through this process, an average latency of roughly 10 ms was achieved with a thread pool of 64 threads.
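A minimal sketch of configuring such a worker pool on a Jersey-on-Grizzly server is shown below (ThreadPoolConfig and setWorkerThreadPoolConfig are Grizzly APIs; the resource package and port are hypothetical):

```java
import java.io.IOException;
import java.net.URI;

import org.glassfish.grizzly.http.server.HttpServer;
import org.glassfish.grizzly.http.server.NetworkListener;
import org.glassfish.grizzly.threadpool.ThreadPoolConfig;
import org.glassfish.jersey.grizzly2.httpserver.GrizzlyHttpServerFactory;
import org.glassfish.jersey.server.ResourceConfig;

public class EventHandlerServer {
    public static void main(String[] args) throws IOException {
        // Hypothetical resource package and port.
        ResourceConfig resources = new ResourceConfig().packages("eu.example.eventhandler");
        HttpServer server = GrizzlyHttpServerFactory.createHttpServer(
                URI.create("http://0.0.0.0:8454/"), resources, false);

        // Fixed-size worker pool: threads are reused instead of created per request.
        ThreadPoolConfig workers = ThreadPoolConfig.defaultConfig()
                .setCorePoolSize(64)
                .setMaxPoolSize(64);
        for (NetworkListener listener : server.getListeners()) {
            listener.getTransport().setWorkerThreadPoolConfig(workers);
        }
        server.start();
    }
}
```

The pool size of 64 matches the value found empirically above; on different hardware the same start-at-core-count-and-grow procedure would apply.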
III. PERFORMANCE EVALUATION
After the major refactoring of the original EH, the enhanced version was put to the test in an environment and under a workload similar to the original's. Repeating the same testing process, the results were far better than the previous version's (see Fig. 3), with an average end-to-end latency of approximately 8.95 ms and a maximum latency of 32.00 ms. This variation is mostly due to the Java implementation and its garbage collection mechanisms.
After confirming that the enhanced version was superior to the original one in the same test scenario, the performance of the new version was evaluated in two additional test scenarios: 1) the Publisher sends 9000 events instead of 2000, in order to detect potential bottlenecks; 2) the same as scenario 1, but with six Subscribers instead of one. Test results showed a similar performance increase. For scenario 1, the average end-to-end latency was 8.98 ms, with a maximum latency of 52.00 ms. For scenario 2, the average end-to-end latency was 10.68 ms, with a maximum latency of 45.67 ms, measured across all six Subscribers. A histogram with the end-to-end latency distribution for these two scenarios is displayed in Fig. 4.
IV. MODELING THE EVENT HANDLER’S PERFORMANCE
In order to estimate the performance of different applications supported by the EH system, it is necessary to model it, taking into account specific thread pool configurations, the number of CPU cores, and communication latencies. Such a model was developed using Petri nets, which easily allow modeling systems that deal with concurrent activities [12, 13], such as communication networks, multiprocessor systems, and manufacturing systems.
To develop this Petri net model, we adapted Lu & Gokhale’s methodology [14] which has been previously used to model the performance of a Web server with a thread pool architecture. The resulting Petri net is displayed in Fig. 5. For the stochastic analysis of the model, we decided to use Oris...
The Request_Queue place holds unprocessed requests, while the assign_request_to_thread transition enforces the EH thread pool limit – only assigning a request to a thread if the total number of active threads (represented by the token sum in the Active_Threads, Executing_Thread_CoreX, and Ready_to_Send places) has not exceeded the specified limit. In the Petri net, this condition is expressed through an enabling function (i.e. a boolean expression) on the transition, hence the letter "e" next to the assign_request_to_thread transition. Once a request is assigned to a thread, the thread is executed by one of the CPU cores. The Executing_Thread_CoreX place (X should be replaced by the corresponding core) represents the thread's execution, while the executing_CoreX transition represents the amount of time it takes to execute. An inhibitor arc (which mandates that a transition may only fire when the place has no tokens) is used from Executing_Thread_CoreX to the respective IX transition to avoid the firing of transition IX when Executing_Thread_CoreX already has a token, therefore guaranteeing that only one request is being executed on a specific CPU core. Once the executing_CoreX transition finishes, it sends a token to Ready_to_Send, where the event is ready to be sent to its subscribers.
Several real experiments were performed in order to fine-tune the model with data extracted from several test runs, from which we derived the values for each request type (i.e., requests sent from the Publisher to the EH, and requests sent from the EH to each Subscriber) and the CPU execution time for each request, and determined the most appropriate probability distribution function to be applied in the Petri net model. We determined that the requests sent from the Publisher to the EH followed a Gamma distribution with shape \(k\) = 13.235 and rate \(\lambda\) = 2.088. However, Oris only provides transitions with an Erlang distribution, which is the particular case of the Gamma distribution where \(k\) is an integer, so \(k\) was rounded to 13. Similarly, the requests sent from the EH to its Subscribers also followed a Gamma distribution, with \(k\) = 6.235 and \(\lambda\) = 2.683, where \(k\) was rounded to 6 to likewise satisfy the Erlang distribution requirements. Finally, the CPU execution times in the EH (i.e. executing_CoreX) were represented as transitions with a uniform distribution, with an early finish time of 0 ms and a late finish time of 1 ms.
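As a quick sanity check on these fits (our own arithmetic, not reported in the paper): the mean of a Gamma (or Erlang) distribution is \(k/\lambda\), so

\[
\frac{13.235}{2.088} \approx 6.34\,\text{ms} \quad (k{=}13:\ \approx 6.23\,\text{ms}),
\qquad
\frac{6.235}{2.683} \approx 2.32\,\text{ms} \quad (k{=}6:\ \approx 2.24\,\text{ms}).
\]

Adding the 0.5 ms mean of the uniform CPU stage yields an expected end-to-end mean of roughly 9.2 ms, consistent with the 8.95 ms average measured in section III; note also that rounding \(k\) for the Erlang transitions shifts each stage mean by only about 0.1 ms.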
B. Stochastic analysis of the Petri net model
Oris provides a tool for transient analysis, which consists in analyzing the probability of the process being in each place at a specific instant in time. The analysis produces a chart – with time on the X-axis and the place probability ("possible arrival state") on the Y-axis – where each time instant represents a probability distribution, meaning that the sum of all values at each time instant must equal 1. This chart is displayed in Fig. 6.
1) Interpreting the analysis results
First, the only places present in the chart are Publisher, Ready_to_Send, Executing_Thread_CoreX and Subscriber_#. The reason is that the other places (aside from Request_Queue) only depend on immediate transitions, so a token will not spend any time in them, meaning that they do not have an impact on the overall processing time. Although Request_Queue is linked to an immediate transition (i.e. assign_request_to_thread), this transition is restricted by the EH's thread pool size, which (as explained previously) is represented by the token sum in the Active_Threads place, the Executing_Thread_CoreX places, and the Ready_to_Send place. Since only one token is sent in this particular analysis, Request_Queue will not store any tokens, so it is not present in the chart.
Until time 2.1 ms, the probability of a token being in Publisher is approximately 1, whereas the other places are still at 0, because the Publisher takes at least 2 ms to send an event to the EH. Between 2.1 and 13.6 ms, the probability of the token still being in Publisher decreases nonlinearly to 0, while the exact opposite happens at the Subscriber, i.e., the probability that Subscriber_# has received the token rises nonlinearly to 1. In fact, at time 7.6 ms the two curves cross, which means that beyond this point an event is more likely to have reached the respective Subscriber than to still be in the Publisher. Furthermore, from 2.1 to 13.5 ms, the probability of the token being in Executing_Thread_CoreX has an almost Gaussian shape, reflecting that once the message is sent from the Publisher, it is processed by the EH for at most 1 ms. After this processing, the message is ready to be sent. Indeed, from 2.5 to 17 ms, similar to Executing_Thread_CoreX, the probability of the token being in Ready_to_Send also has a Gaussian shape, meaning that once the EH is ready to send the published event, the Publisher has already sent the message and the Subscriber is about to receive it – hence the probability decrease in Publisher and the increase in Subscriber_# right after the probability peak in Ready_to_Send.
According to the analysis's time estimates, the maximum time it takes to send an event from the Publisher to the EH (i.e., with a 99% chance; when the probability for the Publisher place reaches approximately 0) is around 13.6 ms, while the estimated latest time for a Subscriber to receive an event (i.e., when the probability for the Subscriber_# places reaches approximately 1) is around 17.1 ms. Nevertheless, there is a 99% chance that Subscribers will have received the published event by around 14.3 ms. Furthermore, the probability of the Ready_to_Send place holding a token peaks (47%) at 7.6 ms, which means that 47% of the time the EH is ready to send the published event to its subscribers at this instant.
2) Comparing the model with the actual experiments
Overall, the values from the model match the results obtained in the experiments with the enhanced EH. In the Petri net model, the probability for Subscriber_# to have received the published message only exceeds those of Publisher, Executing_Thread_CoreX, and Ready_to_Send after the 8.5 ms instant. This value matches the experimental results for one Subscriber reported in Fig. 4, where around 60% of the events were delivered within an 8 ms latency. For the six-Subscriber tests, where the average end-to-end latency is approximately 10.68 ms, the corresponding cumulative probability is 80.4%.
C. Validating the Petri net model
In addition to the initial stochastic analysis with one token, another stochastic analysis was performed with four tokens to examine how the model scales when processing multiple messages; in other words, each token represents a message. The same transient analysis matrix was calculated, and the distribution of the estimated end-to-end latencies for four messages is depicted in Fig. 7, juxtaposed with the real test results from Fig. 4. Unfortunately, due to processing limitations of the Oris tool, we were unable to assess the performance for more than four tokens. Nevertheless, this stochastic analysis with four tokens captures the latency interval of most messages, i.e. from 8 to 16 ms, which is mostly in line with the event latency distributions of the test results. However, these latency estimates must still be improved, both to fine-tune the probability of each latency and to capture a wider range of latencies, since the more extreme latencies (i.e. below 8 ms and above 17 ms) are not represented. This could be done by: i) changing the probability distributions and the parameters chosen for each transition; or ii) changing the Petri net model itself.
V. CONCLUSIONS AND FUTURE WORK
By changing how the original EH and its clients handle HTTP requests and thread creation, the enhanced version of the EH achieves much higher levels of performance, evolving from an average latency of 666.3 ms to 8.95 ms. Considering the average latency of both versions in the same test scenario, the EH had an overall performance boost of over 98%. The system's performance might still improve further by optimizing the EH's thread pool size and the Publisher's connection pool, although the gains would most likely be marginal. Moreover, we propose a Petri net model of the EH in order to estimate the end-to-end latency probability for each component (Publisher, EH, and Subscribers). Results show that the model provides a good estimate of the measured behavior, but it could still be improved, either by changing the probability distributions and parameters chosen for each transition or by editing the Petri net model itself; these questions are expected to be the focus of future research work. The results and the model produced by this work can now be used by the Arrowhead QoS manager to calculate/estimate the delays of Arrowhead services in different configurations.
ACKNOWLEDGMENTS
This work was partially supported by National Funds through FCT/MEC (Portuguese Foundation for Science and Technology) within the CISTER Research Unit (CEC/04234); also, by EU ECSEL JU under the H2020 Framework Programme, JU grant nr. 737459 (Productive4.0 project) and by the Portuguese National Innovation Agency (ANI) under the European Regional Development Fund (FEDER), through the “Portugal 2020” (PT2020) partnership, within the framework of the System of Incentives to Research and Technological Development (SII&DT) and the Operational Program for Competitiveness and Internationalization (POCI), within project FLEXIGY, nº 34067 (AAC nº 03/SII/2017).
REFERENCES
Information Extraction (IE)
Fadi Biadsy
CS4705
Oct 30, 2008
Information Extraction (IE) -- Task
• Idea: ‘extract’ or tag particular types of information from arbitrary text or transcribed speech
Named Entity Tagger
• Identify types and boundaries of named entities
• For example:
– Alexander Mackenzie, (January 28, 1822 - April 17, 1892), a building contractor and writer, was the second Prime Minister of Canada from ....
➔ <PERSON>Alexander Mackenzie</PERSON>, (<TIMEX>January 28, 1822</TIMEX> - <TIMEX>April 17, 1892</TIMEX>), a building contractor and writer, was the second Prime Minister of <GPE>Canada</GPE> from ....
IE for Template Filling / Relation Detection
• Given a set of documents and a domain of interest, fill a table of required fields.
• For example:
– Number of car accidents per vehicle type and number of casualties in the accidents.
<table>
<thead>
<tr>
<th>Vehicle Type</th>
<th># accidents</th>
<th># casualties</th>
<th>Weather</th>
</tr>
</thead>
<tbody>
<tr>
<td>SUV</td>
<td>1200</td>
<td>190</td>
<td>Rainy</td>
</tr>
<tr>
<td>Trucks</td>
<td>200</td>
<td>20</td>
<td>Sunny</td>
</tr>
</tbody>
</table>
IE for Question Answering
• Q: When was Gandhi born?
A: October 2, 1869
• Q: Where was Bill Clinton educated?
A: Georgetown University in Washington, D.C.
• Q: What was the education of Yassir Arafat?
A: Civil Engineering
• Q: What is the religion of Noam Chomsky?
A: Jewish
Approaches
1. Statistical Sequence Labeling
2. Supervised
3. Semi-Supervised and Bootstrapping
Approach for NER
• **<PERSON>Alexander Mackenzie</PERSON>**, (<TIMEX>January 28, 1822</TIMEX> - <TIMEX>April 17, 1892</TIMEX>), a building contractor and writer, was the second Prime Minister of **<GPE>Canada</GPE>** from ....
• **Statistical sequence-labeling** techniques can be used – similar to POS tagging.
– Word-by-word sequence labeling
– Example of Features:
• POS tags
• Syntactic constituents
• Shape features
• Presence in a named entity list
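For concreteness, a small sketch (hypothetical class and helper names) of how features like these might be computed for word-by-word labeling:

```java
import java.util.ArrayList;
import java.util.List;
import java.util.Set;

// Illustrative only: shape and gazetteer features for word-by-word NE tagging,
// in the spirit of the feature list above.
public class NerFeatures {
    public static List<String> featuresFor(String[] tokens, String[] posTags,
                                           Set<String> gazetteer, int i) {
        List<String> feats = new ArrayList<>();
        String w = tokens[i];
        feats.add("word=" + w.toLowerCase());
        feats.add("pos=" + posTags[i]);
        feats.add("shape=" + shape(w));                    // e.g. "Xxxx", "dd"
        feats.add("inGazetteer=" + gazetteer.contains(w)); // named-entity list lookup
        if (i > 0) feats.add("prevWord=" + tokens[i - 1].toLowerCase());
        if (i + 1 < tokens.length) feats.add("nextWord=" + tokens[i + 1].toLowerCase());
        return feats;
    }

    private static String shape(String w) {
        // Map characters to classes: uppercase -> X, lowercase -> x, digit -> d
        return w.replaceAll("[A-Z]", "X").replaceAll("[a-z]", "x").replaceAll("[0-9]", "d");
    }
}
```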
Supervised Approach for relation detection
• Given a corpus of annotated relations between entities, train two classifiers:
1. A binary classifier:
• Given a span of text and two entities
• Decide if there is a relationship between these two entities.
2. A classifier is trained to determine the types of relations that exist between the entities
• Features:
– Types of two named entities
– Bag-of-words
– ...
• Example:
– A rented SUV went out of control on Sunday, causing the death of seven people in Brooklyn
– Relation: Type = Accident, Vehicle Type = SUV, casualties = 7, weather = ?
• Pros and Cons?
Pattern Matching for Relation Detection
• **Patterns:**
- “[CAR_TYPE] went out of control on [TIMEX], causing the death of [NUM] people”
- “[PERSON] was born in [GPE]”
- “[PERSON] was graduated from [FAC]”
- “[PERSON] was killed by <X>”
• **Matching Techniques**
- **Exact matching**
• Pros and Cons?
- **Flexible matching** (e.g., [X] was .* killed .* by [Y])
• Pros and Cons?
Pattern Matching
• How can we come up with these patterns?
• Manually?
– Task and domain specific -- tedious, time consuming, and not scalable.
Semi-Supervised Approach
AutoSlog-TS (Riloff, 1996)
- **MUC-4 task**: extract information about terrorist events in Latin America.
- Two corpora:
1) Domain-dependent corpus that contains relevant information
2) A set of irrelevant documents
- Algorithm:
1. Using some heuristic rules, all patterns are extracted from both corpora. For example:
**Rule**: `<Subj>` passive-verb
- `<Subj>` was murdered
- `<Subj>` was called
2. Pattern Ranking: the output patterns are then ranked by the ratio of their occurrence frequency in corpus 1 (relevant) to their frequency in corpus 2 (irrelevant).
3. Filter out the patterns by hand
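A minimal sketch of the ranking step (hypothetical class name; the counting maps are assumed to come from the extraction pass), scoring each pattern by its relevant-to-irrelevant frequency ratio:

```java
import java.util.HashMap;
import java.util.Map;

// Illustrative sketch: score each candidate pattern by how much more often
// it fires in the relevant corpus than in the irrelevant one.
public class PatternRanker {
    public static Map<String, Double> rank(Map<String, Integer> relevantCounts,
                                           Map<String, Integer> irrelevantCounts) {
        Map<String, Double> scores = new HashMap<>();
        for (Map.Entry<String, Integer> e : relevantCounts.entrySet()) {
            int rel = e.getValue();
            int irr = irrelevantCounts.getOrDefault(e.getKey(), 0);
            scores.put(e.getKey(), rel / (double) (irr + 1)); // +1 avoids division by zero
        }
        return scores; // sort descending, then filter by hand (step 3)
    }
}
```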
Bootstrapping
[Figure: the bootstrapping loop. Seed tuples (e.g., <George W. Bush, Connecticut>) drive a tuple search over text ("George W. Bush was born in Connecticut"; "Born in Connecticut on July 8, 1946, George..."); pattern extraction generalizes the matches into patterns ("X was born in Y"; "Born in Y on Z, X..."); and tuple extraction applies the pattern set to harvest new tuples, which flow back into the tuple set.]
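A minimal sketch of that loop (hypothetical stubs stand in for the corpus search and pattern matching):

```java
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Illustrative only: the bootstrapping loop from the diagram above.
public class Bootstrapper {
    public static Set<String> bootstrap(Set<String[]> seedTuples, int rounds) {
        Set<String> patterns = new HashSet<>();
        Set<String[]> tuples = new HashSet<>(seedTuples);
        for (int round = 0; round < rounds; round++) {
            for (String[] tuple : tuples) {
                // Tuple search: sentences mentioning both arguments of the tuple.
                for (String sentence : findSentencesContaining(tuple)) {
                    // Pattern extraction, e.g.
                    // "George W. Bush was born in Connecticut" -> "X was born in Y"
                    patterns.add(generalize(sentence, tuple));
                }
            }
            // Tuple extraction: apply the grown pattern set to harvest new tuples.
            tuples.addAll(matchPatterns(patterns));
        }
        return patterns;
    }

    static List<String> findSentencesContaining(String[] tuple) { return Collections.emptyList(); }
    static String generalize(String sentence, String[] tuple) { return sentence; }
    static Set<String[]> matchPatterns(Set<String> patterns) { return Collections.emptySet(); }
}
```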
TASK 12: (DARPA – GALE year 2)
PRODUCE A BIOGRAPHY OF [PERSON].
1. Name(s), aliases:
2. *Date of Birth or Current Age:
3. *Date of Death:
4. *Place of Birth:
5. *Place of Death:
6. Cause of Death:
7. Religion (Affiliations):
8. Known locations and dates:
9. Last known address:
10. Previous domiciles:
11. Ethnic or tribal affiliations:
12. Immediate family members
13. Native Language spoken:
14. Secondary Languages spoken:
15. Physical Characteristics
16. Passport number and country of issue:
17. Professional positions:
18. Education
19. Party or other organization affiliations:
20. Publications (titles and dates):
Biography – two approaches
• To obtain high precision, we handle each slot independently using bootstrapping to learn IE patterns.
• To improve the recall, we utilize a biographical-sentence classifier.
Biography patterns from Wikipedia
Martin Luther King, Jr., (**January 15, 1929** – April 4, 1968) was the most famous leader of the American civil rights movement, a political activist, a Baptist minister, and one of America's greatest orators. In 1964, King became the youngest man to be awarded the Nobel Peace Prize (for his work as a peacemaker, promoting nonviolence and equal treatment for different races). On April 4, 1968, King was assassinated in Memphis, Tennessee.
In 1977, he was posthumously awarded the Presidential Medal of Freedom by Jimmy Carter. In 1986, Martin Luther King Day was established as a United States holiday. In 2004, King was posthumously awarded the Congressional Gold Medal.[1] King often called for personal responsibility in fostering world peace.[2] King's most influential and well-known public address is the "I Have A Dream" speech, delivered on the steps of the Lincoln Memorial in Washington, D.C. in 1963.
### Early life
Martin Luther King, Jr., was born on **January 15, 1929**, in Atlanta, Georgia. He was the second child of the Reverend Martin Luther King, Sr. and Alberta Williams King. Between his sister, Willie Christine (September 11, 1927) and younger brother, Albert Daniel (nicknamed A.D., July 30, 1930 – July 21, 1969) According to his father, the attending physician mistakenly entered "Michael" on Martin Jr.'s birth certificate.[3] King sang with his church choir at the 1939 Atlanta premiere of the movie **Go West**.
- Martin Luther King, Jr., (January 15, 1929 – April 4, 1968) was the most ...
- Martin Luther King, Jr., was born on January 15, 1929, in Atlanta, Georgia.
Run NER on these sentences
• <Person> Martin Luther King, Jr. </Person>, (<Date>January 15, 1929</Date> – <Date> April 4, 1968</Date>) was the most...
• <Person> Martin Luther King, Jr. </Person>, was born on <Date> January 15, 1929 </Date>, in <GPE> Atlanta, Georgia </GPE>.
• Take the token sequence that includes the tags of interest + some context (2 tokens before and 2 tokens after)
Convert to Patterns:
- <Target_Person> ( <Target_Date> – <Date> ) was the
- <Target_Person>, was born on <Target_Date>, in
- Remove more specific patterns – if one pattern contains another, take the smallest pattern longer than k tokens.
- ⇒ <Target_Person>, was born on <Target_Date>
- ⇒ <Target_Person> ( <Target_Date> – <Date> )
- Finally, verify the patterns manually to remove irrelevant patterns.
Examples of Patterns:
• 502 distinct place-of-birth patterns:
– 600 <Target_Person> was born in <Target_GPE>
– 169 <Target_Person> ( born <Date> in <Target_GPE> )
– 44 Born in <Target_GPE>, <Target_Person>
– 10 <Target_Person> was a native <Target_GPE>
– 10 <Target_Person> 's hometown of <Target_GPE>
– 1 <Target_Person> was baptized in <Target_GPE>
– ...
• 291 distinct date-of-death patterns:
– 770 <Target_Person> ( <Date> - <Target_Date> )
– 92 <Target_Person> died on <Target_Date>
– 19 <Target_Person> <Date> - <Target_Date>
– 16 <Target_Person> died in <GPE> on <Target_Date>
– 3 <Target_Person> passed away on <Target_Date>
– 1 <Target_Person> committed suicide on <Target_Date>
– ...
Biography as an IE task
• This approach is good for the consistently annotated fields in Wikipedia: place of birth, date of birth, place of death, date of death
• Not all fields of interest are annotated, so a different approach is needed to cover the rest of the slots
Bouncing between Wikipedia and Google
- Use **one** seed tuple **only**:
- <Target Person> and <Target field>
- Google: “Arafat” “civil engineering”, we get:
Yasser Arafat
By 1956, Arafat graduated with a bachelor's degree in civil engineering and served as a second lieutenant in the Egyptian Army during the Suez Crisis.
www.jewishvirtuallibrary.org/jsource/biography/arafat.html
Yasser Arafat: Biography and Much More from Answers.com
In the 1950s, Arafat studied at Fu'ad I University in Cairo (now Cairo University), majoring in civil engineering. He was reportedly a member of the Muslim...
www.answers.com/topic/yasser-arafat
Engology.com, Engineer Yasser Arafat, Nobel Peace Prize Winner ...
After the war, Arafat studied civil engineering at the University of Cairo. He headed the Palestinian Students League and, by the time he graduated, ...
www.engology.com/engpolsevasserarafat.htm
Yasser Arafat and the Palestine Liberation Organization
It was there that Yasser Arafat, a Civil Engineering student, and his coterie, including Salah Khalaf (Abu Iyad), later to become Arafat's second in command ...
www.palestinefacts.org/pf_1948to1967_pic_arafat.php
A Life in Retrospect: Yasser Arafat | TIME
Here's one thing we know for sure: Yasser Arafat was a grand ... at King Fuad I University (now Cairo University), where he studied civil engineering,...
www.time.com/time/world/article0,8599,781586-1,00.html
Yassir Arafat's Biography
Yasser Arafat was born in 1929 in Jerusalem. His full name is: Mohammed Abad Arouf Arafat. He studied civil engineering at Cairo University,...
www.aretzisrael.org/~jkatz/arafatbio.html
Biographical and other information on Yasser Arafat who is in bad ...
In 1951, at the age of 21, Arafat got military training with the Egyptian army. — In 1956, Arafat earned a degree in civil engineering at the University of...
www.freemuslims.org/news/article.php?article=198
Bouncing between Wikipedia and Google
• Use one seed tuple only:
• Google: “Arafat” “civil engineering”, we get:
⇒ Arafat *graduated with a bachelor’s degree* in civil engineering
⇒ Arafat *studied civil engineering*
⇒ Arafat, a civil engineering *student*
⇒ ...
• Using these snippets, corresponding patterns are created, then filtered out manually
• Due to time limitations, the automatic filter was not completed.
– To get more seed tuples, go to Wikipedia biography pages only and search for:
– "graduated with a bachelor's degree in"
– We get:
Burnie Thompson - Wikipedia, the free encyclopedia
In 2000, he graduated with a bachelor's degree in political science from California State University, Fullerton. Two years later he graduated from The ...
en.wikipedia.org/wiki/Burnie_Thompson
Roscoe Lee Browne - Wikipedia, the free encyclopedia
Born in Woodbury, New Jersey, Browne first attended historically black Lincoln University in Pennsylvania, and graduated with a bachelor's degree in 1946. ...
en.wikipedia.org/wiki/Roscoe_Lee_Browne
Henry Luke Orombi - Wikipedia, the free encyclopedia
Robert has graduated with a Bachelor's Degree in Environment Studies from Makerere University and Daniel, a gifted musician like his father, is working on ...
en.wikipedia.org/wiki/Henry_Luke_Orombi
Gustave Eiffel - Wikipedia, the free encyclopedia
Eiffel's study habits improved and he graduated with a bachelor's degree in both science and humanities. Eiffel went on to attend college at Sainte Barbe ...
en.wikipedia.org/wiki/Gustave_Eiffel
Erin Crocker - Wikipedia, the free encyclopedia
... New York, where she graduated with a bachelor's degree in industrial and management engineering in 2003. In 2002, Crocker signed with Woodring Racing to ...
en.wikipedia.org/wiki/Erin_Crocker
Jim Boeheim - Wikipedia, the free encyclopedia
Boeheim enrolled in Syracuse University as a student in 1963 and graduated with a bachelor's degree in social science in 1969 (SU Athletics). ...
en.wikipedia.org/wiki/Jim_Boeheim
Denise Bode - Wikipedia, the free encyclopedia
She graduated with a bachelor's degree in political science from the University of Oklahoma where she chaired the University of Oklahoma Student Congress. ...
Bouncing between Wikipedia and Google
• **New seed tuples:**
– “Burnie Thompson” “political science”
– “Erin Crocker” “industrial and management engineering”
– “Denise Bode” “political science”
– …
• Go back to Google and repeat the process to get more seed patterns!
Bouncing between Wikipedia and Google
• This approach worked well for a few fields, such as: education, publications, immediate family members, and party or other organization affiliations
• It did not provide good patterns for some fields (such as religion, ethnic or tribal affiliations, and previous domiciles) – we got a lot of noise
• Why is the bouncing idea better than using only one corpus?
• None of the patterns match? Back-off strategy...
Biographical-Sentence Classifier
(Biadsy, et al., 2008)
• Train a binary classifier to identify biographical sentences
• Manually annotating a large corpus of biographical and non-biographical information (e.g., Zhou et al., 2004) is labor intensive
• Our approach: collect biographical and non-biographical corpora automatically
Training Data – Biographical Corpus from Wikipedia
- Utilize Wikipedia biographies
- Extract 17K biographies from the XML version of Wikipedia
- Apply simple text processing techniques to clean up the text
Constructing the Biographical Corpus
1. Identify the subject of each biography
2. Run NYU's ACE system to tag NEs and do coreference resolution (Grishman et al., 2005)
3. Replace each **NE** by its tag type and subtype:
In *September 1951*, King began his doctoral studies in theology at Boston University.
→ In [TIMEX], [PER_Individual] began [TARGET_HIS] doctoral studies in theology at [ORG_Educational].
4. Non-pronominal referring expressions that are coreferential with the target person are replaced by [TARGET_PER]:
In September 1951, King began his doctoral studies in theology at Boston University.
→ In [TIMEX], [TARGET_PER] began [TARGET_HIS] doctoral studies in theology at [ORG_Educational].
5. Every pronoun $P$ that refers to the target person is replaced by [TARGET_$P$], where $P$ is the pronoun replaced
6. Sentences containing no reference to the target person are removed
Constructing the Non-Biographical Corpus
- English newswire articles in TDT4 are used to represent non-biographical sentences
1. Run NYU’s ACE system on each article
2. Select a PERSON NE mention at random from all NEs in article to represent the target person
3. Exclude sentences with no reference to this target
4. Replace referring expressions and NEs as in biography corpus
Biographical-Sentence Classifier
• Train a classifier on the biographical and non-biographical corpora
– Biographical corpus:
• 30,002 sentences from Wikipedia
• 2,108 sentences held out for testing
– Non-Biographical corpus:
• 23,424 sentences from TDT4
• 2,108 sentences held out for testing
Biographical-Sentence Classifier
• Features:
– Frequency of class-based/lexical 1-, 2-, and 3-grams, e.g.:
• [TARGET_PER] was born
• [TARGET_HER] husband was
• [TARGET_PER] said
– Frequency of POS 1- and 2-grams
• Chi-square for feature selection
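A minimal sketch of the chi-square statistic used for feature selection, for a single n-gram feature against the two classes (hypothetical class name; the counts come from the training corpora):

```java
// Illustrative chi-square score of one n-gram feature against the binary
// biographical/non-biographical label, from a 2x2 contingency table.
public class ChiSquare {
    // a: bio sentences containing the n-gram, b: non-bio sentences containing it,
    // c: bio sentences without it,            d: non-bio sentences without it.
    public static double score(long a, long b, long c, long d) {
        double n = (double) a + b + c + d;
        double diff = (double) a * d - (double) b * c;
        double denom = (double) (a + b) * (c + d) * (a + c) * (b + d);
        return denom == 0 ? 0.0 : n * diff * diff / denom;
    }
}
```

Features with the highest scores are the most strongly associated with one of the two classes and are kept for training.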
Classification Results
- Experimented with three types of classifiers:
<table>
<thead>
<tr>
<th>Classifier</th>
<th>Accuracy</th>
<th>F-Measure</th>
</tr>
</thead>
<tbody>
<tr>
<td>SVM</td>
<td>87.6%</td>
<td>0.87</td>
</tr>
<tr>
<td>M. Naïve Bayes (MNB)</td>
<td>84.1%</td>
<td>0.84</td>
</tr>
<tr>
<td>C4.5</td>
<td>81.8%</td>
<td>0.82</td>
</tr>
</tbody>
</table>
- Note: Classifiers provide a confidence score for each classified sample
Thank you
Abstract
The FIDO family of protocols introduces a new security concept, Application Facets, to describe the scope of user credentials and how a trusted computing base that supports application isolation may make access control decisions about which keys can be used by which applications and web origins.
This document describes the motivations for and requirements for implementing the Application Facet concept and how it applies to the FIDO protocols.
Table of Contents
1. Notation
1.1 Key Words
2. Overview
2.1 Motivation
2.2 Avoiding App-Phishing
2.3 Comparison to OAuth and OAuth2
2.4 Non-Goals
3. The AppID and FacetID Assertions
3.1 Processing Rules for AppID and FacetID Assertions
3.1.1 Determining the FacetID of a Calling Application
3.1.2 Determining if a Caller's FacetID is Authorized for an AppID
3.1.3 TrustedFacet List and Structure
3.1.3.1 Dictionary TrustedFacetList Members
3.1.3.2 Dictionary TrustedFacets Members
3.1.4 AppID Example 1
1. Notation
Type names, attribute names and element names are written as code.
String literals are enclosed in "", e.g. "UAF-TLV".
In formulas we use "|" to denote byte-wise concatenation operations.
This document applies to both the U2F protocol and the UAF protocol. UAF specific terminology used in this document is defined in [FIDOGlossary].
All diagrams, examples, notes in this specification are non-normative.
1.1 Key Words
The key words "must", "must not", "required", "shall", "shall not", "should", "should not", "recommended", "may", and "optional" in this document are to be interpreted as described in [RFC2119].
2. Overview
This section is non-normative.
Modern networked applications typically present several ways that a user can interact with them. This document introduces the concept of an Application Facet to describe the identities of a single logical application across various platforms. For example, the application MyBank may have an Android app, an iOS app, and a Web app accessible from a browser. These are all facets of the MyBank application.
The FIDO architecture provides for simpler and stronger authentication than traditional username and password approaches while avoiding many of the shortfalls of alternative authentication schemes. The core of the FIDO protocols are challenge and response operations performed with a public/private keypair that serves as a user's credential.
To minimize frequently-encountered issues around privacy, entanglements with concepts of "identity", and the necessity for trusted third parties, keys in FIDO are tightly scoped, dynamically provisioned between the user and each Relying Party, and only optionally associated with a server-assigned username. This approach contrasts with, for example, traditional PKIX client certificates as used in TLS, which introduce a trusted third party, mix identity assertions with holder-of-key cryptographic proofs in their implementation details, lack audience restrictions, and may even be sent in the cleartext portion of a protocol handshake without the user's notification or consent.
While the FIDO approach is preferable for many reasons, it introduces several challenges.
- What set of Web origins and native applications (facets) make up a single logical application and how can they be reliably identified?
- How can we avoid making the user register a new key for each web browser or application on their device that accesses services controlled by the same target entity?
- How can access to registered keys be shared without violating the security guarantees around application isolation and protection from malicious code that users expect on their devices?
- How can a user roam credentials between multiple devices, each with a user-friendly Trusted Computing Base for FIDO?
This document describes how FIDO addresses these goals (where adequate platform mechanisms exist for enforcement) by allowing an application to declare a credential scope that crosses all the various facets it presents to the user.
2.1 Motivation
FIDO conceptually sets a scope for registered keys to the tuple of (Username, Authenticator, Relying Party). But what constitutes a Relying Party? It is quite common for a user to access the same set of services from a Relying Party, on the same device, in one or more web browsers as well as one or more dedicated apps. As the Relying Party may require the user to perform a costly ceremony in order to prove her identity and register a new FIDO key, it is undesirable that the user should have to repeat this ceremony multiple times on the same device, once for each browser or app.
2.2 Avoiding App-Phishing
FIDO provides for user-friendly verification ceremonies to allow access to registered keys, such as entering a simple PIN code and touching a device, or scanning a finger. It does not matter for security purposes if the user re-uses the same verification inputs across Relying Parties, and in the case of a biometric, she may have no choice.
Modern operating systems that use an "app store" distribution model often make a promise to the user that it is "safe to try" any app. They do this by providing strong isolation between applications, so that they may not read each other's data or mutually interfere, and by requiring explicit user permission to access shared system resources.
If a user were to download a maliciously constructed game that instructs her to activate her FIDO authenticator in order to "save your progress" but actually unlocks her banking credential and takes over her account, FIDO has failed, because the risk of phishing has only been moved from the password to an app download. FIDO must not violate a platform's promise that any app is "safe to try" by keeping good custody of the high-value shared state that a registered key represents.
2.3 Comparison to OAuth and OAuth2
The OAuth and OAuth2 protocols were designed for a server-to-server security model with the assumption that each application instance can be issued, and keep, an "application secret". This approach is ill-suited to the "app store" security model. Although it is common for services to provision an OAuth-style application secret into their apps in an attempt to allow only authorized/official apps to connect, any such "secret" is in fact shared among everyone with access to the app store and can be trivially recovered through basic reverse engineering.
In contrast, FIDO's facet concept is designed for the "app store" model from the start. It relies on client-side platform isolation features to make sure that a key registered by a user with a member of a well-behaved "trusted club" stays within that trusted club, even if the user later installs a malicious app, and it does not require any secrets hard-coded into a shared package to do so. The user must, however, still make good decisions about which apps and browsers they are willing to perform a registration ceremony with. App store policing can assist here by removing applications which solicit users to register FIDO keys for Relying Parties in order to make illegitimate or fraudulent use of them.
2.4 Non-Goals
The Application Facet concept does not attempt to strongly identify the calling application to a service across a network. Remote attestation of an application identity is an explicit non-goal.
If an unauthorized app can convince a user to provide it with all the information required to register a new FIDO key, the Relying Party cannot use FIDO protocols or the Facet concept to recognize such an application as unauthorized, or to deny it from performing FIDO operations, and an application that a user has chosen to trust in such a manner can also share access to a key outside of the mechanisms described in this document.
The facet mechanism provides a way for registered keys to maintain their proper scope when created and accessed from a Trusted Computing Base (TCB) that provides isolation of malicious apps. A user can also roam their credentials between multiple devices with user-friendly TCBs and credentials will retain their proper scope if this mechanism is correctly implemented by each. However, no guarantees can be made in environments where the TCB is user-hostile, such as a device with malicious code operating with "root" level permissions. On environments that do not provide application isolation but run all code with the privileges of the user, (e.g. traditional desktop operating systems) an intact TCB, including web browsers, may successfully enforce the proper scope of credentials for web origins only, but cannot meaningfully enforce application scoping.
3. The AppID and FacetID Assertions
When a user performs a Registration operation [UAFArchOverview] a new private key is created by their authenticator, and the public key is sent to the Relying Party. As part of this process, each key is associated with an AppID. The AppID is a URL carried as part of the protocol message sent by the server and indicates the target for this credential. By default, the audience of the credential is restricted to the Same Origin of the AppID. In some circumstances, a Relying Party may desire to apply a larger scope to a key. If the AppID URL has the https scheme, a FIDO client may be able to dereference and process it as a TrustedFacetList that designates a scope or audience restriction that includes multiple facets, such as other web origins within the same DNS zone of control of the AppID's origin, or URLs indicating the identity of other types of trusted facets such as mobile apps.
NOTE
Users may also register multiple keys on a single authenticator for an AppID, such as for cases where they have multiple accounts. Such registrations may have a Relying Party assigned username or local nicknames associated to allow them to be distinguished by the user, or they may not (e.g. for 2nd-factor use cases, the user account associated with a key may be communicated out-of-band from what is specified by FIDO protocols). All registrations that share an AppID also share these same audience restrictions.
3.1 Processing Rules for AppID and FacetID Assertions
3.1.1 Determining the FacetID of a Calling Application
In the Web case, the FacetID must be the Web Origin [RFC6454] of the web page triggering the FIDO operation, written as a URI with an empty path. Default ports are omitted and any path component is ignored.
An example FacetID is shown below:
https://login.mycorp.com/
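As a non-normative sketch of this rule (class and method names are ours, not from the specification), a FIDO client might derive the Web FacetID with the JDK's URI parser:

```java
import java.net.URI;

// Sketch: derive the Web FacetID (scheme://host, lower-cased, default port
// omitted, path component ignored) from the URL of the page triggering the
// FIDO operation.
public final class WebFacetId {
    static String facetIdOf(String pageUrl) {
        URI u = URI.create(pageUrl);
        String scheme = u.getScheme().toLowerCase();
        int port = u.getPort(); // -1 when the URL carries no explicit port
        boolean isDefault = port == -1
                || ("https".equals(scheme) && port == 443)
                || ("http".equals(scheme) && port == 80);
        return scheme + "://" + u.getHost().toLowerCase()
                + (isDefault ? "" : ":" + port) + "/"; // empty path written as "/"
    }

    public static void main(String[] args) {
        // Prints: https://login.mycorp.com/
        System.out.println(facetIdOf("https://login.mycorp.com:443/login?next=home"));
    }
}
```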
In the Android [ANDROID] case, the FacetID must be a URI derived from the Base64 encoded SHA-256 (or SHA-1) hash of the APK signing certificate [APK-Signing]:
```
android:apk-key-hash-sha256:<base64_encoded_sha256_hash_of_apk-signing-cert>
android:apk-key-hash:<base64_encoded_sha1_hash_of_apk-signing-cert>
```
These hashes can be computed as follows:
**EXAMPLE 1: Computing an APK signing certificate SHA256 hash**
```
# Export the signing certificate in DER format, hash, base64 encode and trim '='
keytool -exportcert \
  -alias <alias-of-entry> \
  -keystore <path-to-apk-signing-keystore> 2>/dev/null | \
  openssl sha256 -binary | \
  openssl base64 | \
  sed 's/=//g'
```
**EXAMPLE 2: Computing an APK signing certificate SHA1 hash**
```
# Export the signing certificate in DER format, hash, base64 encode and trim '='
keytool -exportcert \
  -alias <alias-of-entry> \
  -keystore <path-to-apk-signing-keystore> 2>/dev/null | \
  openssl sha1 -binary | \
  openssl base64 | \
  sed 's/=//g'
```
The Base64 encoding is the "Base 64 Encoding" from Section 4 in [RFC4648], with padding characters removed.
**NOTE**
If compatibility with older versions of FIDO Clients (i.e. the ones not yet supporting SHA-256 for FacetIDs) is required, both entries should be specified.
In the iOS [IOS] case, the FacetID must be a URI based on the BundleID [BundleID] of the application:
```
ios:bundle-id:<ios-bundle-id-of-app>
```
3.1.2 Determining if a Caller's FacetID is Authorized for an AppID
1. If the AppID is not an HTTPS URL, and matches the FacetID of the caller, no additional processing is necessary and the operation may proceed.
2. If the AppID is null or empty, the client must set the AppID to be the FacetID of the caller, and the operation may proceed without additional processing.
3. If the caller's FacetID is an HTTPS Origin sharing the same host as the AppID (e.g. if an application hosted at https://fido.example.com/ sets an AppID of https://fido.example.com/myAppID), no additional processing is necessary and the operation may proceed. This algorithm may be continued asynchronously for purposes of caching the TrustedFacetList, if desired.
4. Begin to fetch the TrustedFacetList using the HTTP GET method. The location must be identified with an HTTPS URL.
5. The URL must be dereferenced with an anonymous fetch. That is, the HTTP GET must include no cookies, authentication, Origin or Referer headers, and present no TLS certificates or other forms of credentials.
6. The response must set a MIME Content-Type of "application/fido.trusted-apps+json".
7. The caching related HTTP header fields in the HTTP response (e.g. "Expires") should be respected when fetching a TrustedFacetList.
8. The server hosting the TrustedFacetList must respond uniformly to all clients. That is, it must not vary the contents of the response body based on any credential material, including ambient authority such as originating IP address, supplied with the request.
9. If the server returns an HTTP redirect (status code 3xx) the server must also send the HTTP header FIDO-AppID-Redirect-Authorized: true and the client must verify the presence of such a header before following the redirect. This protects against abuse of open redirectors within the target domain by unauthorized parties. If this check has passed, restart this algorithm from step 4.
10. A TrustedFacetList may contain an unlimited number of entries, but clients may truncate or decline to process large responses.
11. From among the objects in the trustedFacets array, select the one with the version matching that of the protocol message version. By "matching" we mean the highest version that appears in the TrustedFacetList that is smaller than or equal to the actual protocol version being used.
12. The scheme of URLs in ids must identify either an application identity (e.g. using the apk, ios, or similar scheme) or an https: Web Origin [RFC6454].
13. Entries in ids using the https:// scheme must contain only scheme, host and port components, with an optional trailing /. Any path, query string, username/password, or fragment information must be discarded.
14. All Web Origins listed must have host names under the scope of the same least-specific private label in the DNS, using the following algorithm (a non-normative sketch of this check appears after this list):
1. Obtain the list of public DNS suffixes from https://publicsuffix.org/list/effective_tld_names.dat (the client may cache such data), or equivalent functionality as available on the platform.
2. Extract the host portion of the original AppID URL, before following any redirects.
3. The least-specific private label is the portion of the host portion of the AppID URL that matches a most-specific public suffix plus one additional label to the left (also known as 'effective top-level domain'+1 or eTLD+1).
4. For each Web Origin in the TrustedFacetList, the calculation of the least-specific private label in the DNS must be a case-insensitive match of that of the AppID URL itself. Entries that do not match must be discarded.
15. If the TrustedFacetList cannot be retrieved and successfully parsed according to these rules, the client must abort processing of the requested FIDO operation.
16. After processing the trustedFacets entry of the correct version and removing any invalid entries, if the caller's FacetID matches one listed in ids, the operation is allowed.
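As referenced in step 14, the least-specific private label (eTLD+1) comparison can be sketched as follows. This is non-normative; the use of Guava's InternetDomainName, which embeds the public suffix list, is our choice, and any equivalent implementation works:

```java
import com.google.common.net.InternetDomainName;
import java.net.URI;

// Sketch of step 14: a facet entry is kept only when its eTLD+1 matches
// the eTLD+1 of the original AppID URL's host.
public final class PrivateLabelCheck {
    // Returns the least-specific private label (eTLD+1) of a URL's host.
    static String privateLabelOf(String url) {
        String host = URI.create(url).getHost();
        return InternetDomainName.from(host).topPrivateDomain().toString();
    }

    static boolean sameScope(String appId, String facetId) {
        return privateLabelOf(appId).equalsIgnoreCase(privateLabelOf(facetId));
    }

    public static void main(String[] args) {
        String appId = "https://www.example.com/appID";
        System.out.println(sameScope(appId, "https://register.example.com")); // true
        System.out.println(sameScope(appId, "https://example.org"));          // false
    }
}
```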
3.1.3 TrustedFacet List and Structure
The Trusted Facets JSON resource is a serialized trustedFacetList hosted at the AppID URL. It consists of a dictionary containing a single member, trustedFacets which is an array of TrustedFacets dictionaries.
```webidl
dictionary TrustedFacetList {
TrustedFacets[] trustedFacets;
}
```
3.1.3.1 Dictionary TrustedFacetList Members
trustedFacets of type array of TrustedFacets
An array of TrustedFacets.
```webidl
dictionary TrustedFacets {
Version version;
DOMString[] ids;
}
```
3.1.3.2 Dictionary TrustedFacets Members
version of type Version
The protocol version to which this set of trusted facets applies. See [UAFProtocol] for the definition of the version structure.
ids of type array of DOMString
An array of URLs identifying authorized facets for this AppID.
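As a hedged illustration of how a client might bind this structure, the WebIDL dictionaries map naturally onto plain Java classes; the choice of Gson as the JSON binder here is ours, not something the specification mandates:

```java
import com.google.gson.Gson;
import java.util.List;

// Plain-Java mirror of the WebIDL dictionaries above.
class Version { int major; int minor; }
class TrustedFacets { Version version; List<String> ids; }
class TrustedFacetList { List<TrustedFacets> trustedFacets; }

public class TrustedFacetListDemo {
    public static void main(String[] args) {
        String json = "{\"trustedFacets\":[{\"version\":{\"major\":1,\"minor\":0},"
                    + "\"ids\":[\"https://www.example.com\"]}]}";
        TrustedFacetList list = new Gson().fromJson(json, TrustedFacetList.class);
        System.out.println(list.trustedFacets.get(0).ids); // [https://www.example.com]
    }
}
```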
3.1.4 AppID Example 1
".com" is a public suffix. "https://www.example.com/appID" is provided as an AppID. The body of the resource at this location contains:
**EXAMPLE 3**
```json
{
  "trustedFacets": [{
    "version": { "major": 1, "minor": 0 },
    "ids": [
      "https://www.example.com",
      "https://register.example.com"
    ]
  }]
}
```
For this policy, "https://www.example.com" and "https://register.example.com" would have access to the keys registered for this AppID, and "https://user1.example.com" would not.
3.1.5 AppID Example 2
"hosting.example.com" is a public suffix, operated under "example.com" and used to provide hosted cloud services for many companies. "https://companyA.hosting.example.com/appID" is provided as an AppID. The body of the resource at this location contains:
**EXAMPLE 4**
```json
{
"trustedFacets": [
{
"version": { "major": 1, "minor": 0 },
"ids": ["https://register.example.com", // DISCARD, does not share "companyA.hosting.example.com" label
"https://fido.companyA.hosting.example.com", // VALID, shares "companyA.hosting.example.com" label
"https://xyz.companyA.hosting.example.com", // VALID, shares "companyA.hosting.example.com" label
"https://companyB.hosting.example.com" // DISCARD, "companyB.hosting.example.com" does not match
]
}
]
}
```
For this policy, "https://fido.companyA.hosting.example.com" would have access to the keys registered for this AppID, and "https://register.example.com" and "https://companyB.hosting.example.com" would not, as a public suffix exists between these DNS names and the AppID's.
3.1.6 Obtaining the FacetID of an Android Native App
This section is non-normative.
The following code demonstrates how a FIDO Client can obtain and construct the FacetID of a calling Android native application.
**EXAMPLE 5: Android FacetID, SHA-256**
```java
private String getFacetID(Context aContext, int callingUid) {
    String[] packageNames = aContext.getPackageManager().getPackagesForUid(callingUid);
    if (packageNames == null) {
        return null;
    }
    try {
        PackageInfo info = aContext.getPackageManager().getPackageInfo(packageNames[0], PackageManager.GET_SIGNATURES);
        byte[] cert = info.signatures[0].toByteArray();
        InputStream input = new ByteArrayInputStream(cert);
        CertificateFactory cf = CertificateFactory.getInstance("X.509");
        X509Certificate c = (X509Certificate) cf.generateCertificate(input);
        MessageDigest md = MessageDigest.getInstance("SHA-256");
        return "android:apk-key-hash-sha256:" +
                Base64.encodeToString(md.digest(c.getEncoded()), Base64.DEFAULT | Base64.NO_WRAP | Base64.NO_PADDING);
    } catch (PackageManager.NameNotFoundException e) {
        e.printStackTrace();
    } catch (CertificateException e) { // also covers CertificateEncodingException
        e.printStackTrace();
    } catch (NoSuchAlgorithmException e) {
        e.printStackTrace();
    }
    return null;
}
```
**EXAMPLE 6: Android FacetID, SHA-1 (legacy)**
```java
private String getFacetID(Context aContext, int callingUid) {
    String[] packageNames = aContext.getPackageManager().getPackagesForUid(callingUid);
    if (packageNames == null) {
        return null;
    }
    try {
        PackageInfo info = aContext.getPackageManager().getPackageInfo(packageNames[0], PackageManager.GET_SIGNATURES);
        byte[] cert = info.signatures[0].toByteArray();
        InputStream input = new ByteArrayInputStream(cert);
        CertificateFactory cf = CertificateFactory.getInstance("X.509");
        X509Certificate c = (X509Certificate) cf.generateCertificate(input);
        MessageDigest md = MessageDigest.getInstance("SHA-1");
        return "android:apk-key-hash:" +
                Base64.encodeToString(md.digest(c.getEncoded()), Base64.DEFAULT | Base64.NO_WRAP | Base64.NO_PADDING);
    } catch (PackageManager.NameNotFoundException e) {
        e.printStackTrace();
    } catch (CertificateException e) { // also covers CertificateEncodingException
        e.printStackTrace();
    } catch (NoSuchAlgorithmException e) {
        e.printStackTrace();
    }
    return null;
}
```
3.1.7 Additional Security Considerations
The UAF protocol supports passing FacetID to the FIDO Server and including the FacetID in the computation of the authentication response.
Trusting a web origin facet implicitly trusts all subdomains under the named entity because web user agents do not provide a security barrier between such origins. So, in AppID Example 1, although not explicitly listed, "https://foobar.register.example.com" would still have effective access to credentials registered for the AppID "https://www.example.com/appID" because it can effectively act as "https://register.example.com".
The component implementing the controls described here must reliably identify callers to securely enforce the mechanisms. Platform inter-process communication mechanisms which allow such identification should be used when available.
It is unlikely that the component implementing the controls described here can verify the integrity and intent of the entries on a TrustedFacetList. If a trusted facet can be compromised or enlisted as a confused deputy [FIDOGlossary] by a malicious party, it may be possible to trick a user into completing an authentication ceremony under the control of that malicious party.
3.1.7.1 Wildcards in TrustedFacet identifiers
This section is non-normative.
Wildcards are not supported in TrustedFacet identifiers. This follows the advice of [RFC6125], Section 7.2.
FacetIDs are URLs that uniquely identify specific security principals that are trusted to interact with a given registered credential. Wildcards introduce undesirable ambiguity in the definition of the principal, as there is no consensus syntax for what wildcards mean, how they are expanded and where they can occur across different applications and protocols in common use. For schemes indicating application identities, it is not clear that wildcarding is appropriate in any fashion. For Web Origins, it broadly increases the scope of the credential to potentially include rogue or buggy hosts.
Taken together, these ambiguities might introduce exploitable differences in identity checking behavior among client implementations and would necessitate overly complex and inefficient identity checking algorithms.
A. References
A.1 Normative references
[FIDOGlossary]
FIDO Technical Glossary. FIDO Alliance.
[RFC2119]
Key words for use in RFCs to Indicate Requirement Levels. S. Bradner. IETF, March 1997. URL: https://tools.ietf.org/html/rfc2119
[RFC4648]
The Base16, Base32, and Base64 Data Encodings. S. Josefsson. IETF, October 2006. URL: https://tools.ietf.org/html/rfc4648
[RFC6125]
Representation and Verification of Domain-Based Application Service Identity within Internet Public Key Infrastructure Using X.509 (PKIX) Certificates in the Context of Transport Layer Security (TLS). P. Saint-Andre; J. Hodges. IETF, March 2011. URL: https://tools.ietf.org/html/rfc6125
[RFC6454]
The Web Origin Concept. A. Barth. IETF, December 2011. URL: https://tools.ietf.org/html/rfc6454
[UAFProtocol]
FIDO UAF Protocol Specification. FIDO Alliance.
A.2 Informative references
[ANDROID]
The Android™ Operating System. URL: http://developer.android.com/
[APK-Signing]
Signing Your Applications. Android Developers.
[BundleID]
Bundle IDs for iOS applications. Apple Developer Documentation.
[UAFArchOverview]
FIDO UAF Architectural Overview. FIDO Alliance.
[IOS]
iOS Dev Center. URL: https://developer.apple.com/devcenter/ios/index.action
Comparison of Frequent Pattern Mining Algorithms for Data Streams: A Survey
Raghavendra Naik, Pramilarani K
Department of Computer Science & Engineering, New Horizon College of Engineering Bangalore, India
Abstract: Frequent itemset mining is one of the core problems in data mining applications. Several frequent pattern mining algorithms are available for parallel execution, such as the FP-Growth, Apriori, and RARM algorithms. However, these parallel mining algorithms lack features like automated parallelization, proper load balancing, and distribution of data on large clusters. We have analysed frequently used algorithms for finding frequent patterns, with the intention of finding how different algorithms can be implemented and used to extract frequent patterns from large transactional databases. We present a comparative survey of the following frequent pattern mining algorithms: the Frequent Pattern Growth (FP-Growth) algorithm, the Apriori algorithm, Rapid Association Rule Mining (RARM), Associated Sensor Pattern Mining of Data Streams (ASPMS), and the ECLAT algorithm. This study examines each algorithm's performance, advantages, and disadvantages for large itemsets in database systems.
Index Terms - Frequent Pattern Growth, Apriori, Rapid Association Rule Mining (RARM), ECLAT, Data Mining, Frequent Patterns, MapReduce.
I. INTRODUCTION
The main subject matter in data mining is mining frequent patterns. A great deal of research has been carried out and many efficient algorithms have been designed to search for frequent patterns in large transactional databases. Agrawal et al. (1993) first proposed the concept of pattern mining in the form of market-basket analysis for finding relations between items purchased in a market. This concept used transactional databases and other data repositories to extract causal structures, interesting correlations, or frequent patterns among item sets [1]. Frequent patterns are the items or itemsets which repeatedly occur in database transactions with a user-specified frequency. An itemset whose occurrence frequency is greater than the minimum threshold is considered a frequent pattern. For example, in market-basket analysis, if the minimum threshold is 30% and bread appears with eggs and milk at least three times, then it is a frequent itemset [2].
In the pattern mining stage, different techniques are applied to find candidates for frequent patterns, and then frequent patterns are generated. There are two main problems with frequent pattern mining techniques. The first is that the database is repeatedly scanned for each search; the second is that complex candidate datasets are generated for each scan, and the amount of data to process in each scan is huge. These are the two main problems in frequent pattern mining. Studies demonstrate that a lot of effort has gone into devising better techniques, and approaches worth mentioning are the Apriori, RARM, ECLAT, FP-Growth, and ASPMS algorithms [3].
II. PROBLEM STUDY
Various studies have been acknowledged on frequent itemsets in the field of data mining, as the topic has a broad range of applications in sequential pattern searching, correlations, association rule mining, constraint-based graph pattern mining, and various other data mining tasks. It is crucial to find an efficient mining algorithm for frequent itemsets, since as the frequency of items and the amount of data grow, a huge number of result sets are generated. For this reason, pruning methodologies are used to eliminate unwanted patterns, increasing the efficiency and speed of the mining process. Our main aim is therefore to optimize the process of mining for frequent patterns so that it is scalable and efficient and finds the important patterns that are suitable in different areas and methods.
III. RELATED WORK
Frequent pattern mining techniques have become an obvious need in many real-world applications, e.g. market basket analysis, advertisement, the medical field, monitoring of patients' routines, etc. [9]. To compare these algorithms, we use the same transactional database for all of them; this database is based on data from a smart home where sensors are installed on daily-use objects, and patients touch these objects while performing their daily tasks, with the sensor items maintained in the database. The study of frequent pattern mining is acknowledged in the data mining field because of its applicability to mining sequential patterns, structural patterns, association rules, constraint-based frequent patterns, correlations, and many other data mining tasks [10]. Efficient algorithms for mining frequent itemsets are crucial for mining association rules as well as for some other information mining assignments [13]. The problem of mining frequent itemsets first arose as a sub-problem of mining association rules [5].
A database consists of transactions, and a transaction is denoted by T. Let I = {I1, I2, …, In} be an itemset consisting of n items. A transaction T contains a subset of items from the itemset I [4]. An association rule is an inference of the form "if x then y" (x → y), where x and y are both subsets of items in the itemset I. As the transactional database is large and we are interested in those items that are used frequently, there is an important parameter, "support", that helps in identifying the items of interest. Support for an association rule (x → y) is defined as the number (or percentage) of transactions which contain x ∪ y over the total number of transactions in the database. The lower bound on support for an association rule is set by the user; this support value is the minimum threshold, and an itemset whose number of transactions is above the threshold is considered a frequent itemset. If this threshold is large, more valuable knowledge is obtained; if it is small, a large number of itemsets are generated. Irrelevant information should therefore be pruned, which is the main goal of frequent pattern mining. In order to analyze different frequent pattern mining algorithms, the coming paragraphs present a comparative analysis with the purpose of investigating their strengths and weaknesses, so that their effectiveness can be utilized in the respective fields.
Frequent itemset mining is used in a wide range of application areas such as decision support, web usage mining, bioinformatics, etc. There are a variety of algorithms proposed by different researchers for frequent itemset mining, each with its own advantages and disadvantages. The following is a review of some of the research papers from various conferences and publications.
The research work in [1] presents the problem of extracting frequent items from huge datasets. It gives the rules that have minimum confidence and least transactional support. The proposed algorithm calculates the itemsets in the very first pass and automatically adjusts the number of passes over itemsets and data. The calculation uses a shortening technique to avoid certain itemsets, so it extracts properly related itemsets from considerably larger databases. An advantage of this algorithm is its buffer management technique: items which cannot fit in memory in the current pass are shifted to the next pass. It also improves execution through an enhanced parallelization technique, but it lacks automatic parallelization. The Parallel Frequent Pattern Growth algorithm using balanced partitioning (BPFP) [8] is another notable system proposed for pattern mining. It works in two stages: in the first stage of BPFP, load is computed based on the conditional pattern; in the second stage, the load is divided into many groups. For this purpose, the MapReduce operation is applied to the frequent itemset mining algorithm.
IV. ASSOCIATION RULE MINING ALGORITHM
4.1 Problem Statement of ARM
Association rule mining (ARM) is one of the important data mining algorithms used in many applications.
4.1.1 Generation of Frequent Itemsets
Frequent itemsets from different data sources are collected into a global database. As a lot of data is combined, moving the frequent data to the global database leads to an increase in the number of messages that need to be processed to find frequent n-itemsets. The major drawback of frequent itemset mining methods is the explosion in the number of results, which makes it difficult to find the most suitable frequent itemsets. Finding frequent itemsets in a database is therefore a major challenge.
4.1.2 Mining Association Rules
From the formal statement of association rule mining, a rule can be defined as follows. Let I = {i1, i2, …, in} be a set of n binary attributes called items. Let D = {t1, t2, …, tm} be a set of transactions called the database. Each transaction in D has a unique transaction ID and contains a subset of the items in I. An association rule is defined as an implication of the form X → Y, where X, Y ⊆ I and X ∩ Y = ∅. The itemsets X and Y are called the antecedent and the consequent of the rule, respectively. An association rule expresses that the presence of one itemset is likely to imply the presence of another. The problem is to generate all association rules that have support and confidence greater than the user-specified minimum support and minimum confidence. In the first pass, the support of each individual item is counted, and the large ones are determined. In each subsequent pass, the large itemsets determined in the previous pass are used to generate new itemsets called candidate itemsets. The support of each candidate itemset is counted, and the large ones are determined. This process continues until no new large itemsets are found.
In short, ARM is a two-step process: (i) generation of frequent itemsets whose support is greater than or equal to the minimum support threshold set by the user from database D, and (ii) generation of strong association rules, with confidence greater than or equal to the user's minimum confidence threshold, from these frequent itemsets [2], [3].
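To make the two thresholds concrete, the following self-contained sketch (toy data and all names are ours) computes support and confidence for the rule {bread} → {milk}:

```java
import java.util.HashSet;
import java.util.List;
import java.util.Set;

// Toy computation of support and confidence for the rule {bread} -> {milk}.
public class RuleMeasures {
    public static void main(String[] args) {
        List<Set<String>> db = List.of(
                Set.of("bread", "milk"),
                Set.of("bread", "eggs", "milk"),
                Set.of("eggs"),
                Set.of("bread", "milk", "butter"));

        Set<String> x = Set.of("bread");
        Set<String> xy = new HashSet<>(x);
        xy.addAll(Set.of("milk"));                    // X ∪ Y

        long nX  = db.stream().filter(t -> t.containsAll(x)).count();
        long nXY = db.stream().filter(t -> t.containsAll(xy)).count();

        double support    = (double) nXY / db.size(); // fraction of transactions with X ∪ Y
        double confidence = (double) nXY / nX;        // support(X ∪ Y) / support(X)
        System.out.printf("support = %.2f, confidence = %.2f%n", support, confidence);
    }
}
```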
4.2 Parallel Apriori Algorithm
Apriori is the basic and most popular algorithm, proposed by R. Agrawal and R. Srikant [3], which generates candidate itemsets and from them the frequent itemsets. Candidate itemsets are itemsets that include all potentially frequent itemsets. The algorithm is based on the Apriori property, which states that all non-empty subsets of a frequent itemset must themselves be frequent [2]. The core step of the algorithm is the generation of candidate k-itemsets Ck from the frequent (k-1)-itemsets Lk-1, and it consists of join and prune actions. In the join step, the join of Lk-1 with itself (Lk-1 ⋈ Lk-1) produces the candidate set Ck; in the prune step, the size of Ck is reduced using the Apriori property [2]. The disadvantages of the serial algorithm are the continuous, repeated scanning of the database, large memory consumption, and a high cost of I/O operations. To improve the performance of the Apriori algorithm, many parallel and sequential variants have been introduced. Among the popular sequential approaches are partitioning, transaction reduction, hash-based techniques, dynamic itemset counting (DIC), and sampling [15], [16], [17], [18], [19]. According to R. Agrawal and J. Shafer's proposal [7], there are three parallel versions of the Apriori algorithm: Data Distribution (DD), Candidate Distribution, and Count Distribution (CD). The Count Distribution and Data Distribution algorithms fall under task parallelism and data parallelism respectively, whereas the Candidate Distribution algorithm is a combination of task and data parallelism [11].
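The join and prune actions can be sketched as follows; we assume each (k-1)-itemset is stored as a lexicographically sorted list, and all identifiers are ours rather than from [3]:

```java
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

public class AprioriGen {
    // Generate candidate k-itemsets Ck from frequent (k-1)-itemsets L(k-1).
    static Set<List<String>> aprioriGen(Set<List<String>> lPrev, int k) {
        Set<List<String>> ck = new HashSet<>();
        for (List<String> p : lPrev)
            for (List<String> q : lPrev)
                // Join: same first k-2 items, and p's last item precedes q's.
                if (p.subList(0, k - 2).equals(q.subList(0, k - 2))
                        && p.get(k - 2).compareTo(q.get(k - 2)) < 0) {
                    List<String> c = new ArrayList<>(p);
                    c.add(q.get(k - 2));
                    if (allSubsetsFrequent(c, lPrev)) ck.add(c); // prune step
                }
        return ck;
    }

    // Apriori property: every (k-1)-subset of a candidate must be frequent.
    static boolean allSubsetsFrequent(List<String> c, Set<List<String>> lPrev) {
        for (int i = 0; i < c.size(); i++) {
            List<String> sub = new ArrayList<>(c);
            sub.remove(i);
            if (!lPrev.contains(sub)) return false;
        }
        return true;
    }

    public static void main(String[] args) {
        Set<List<String>> l2 = Set.of(
                List.of("bread", "eggs"), List.of("bread", "milk"), List.of("eggs", "milk"));
        System.out.println(aprioriGen(l2, 3)); // [[bread, eggs, milk]]
    }
}
```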
V. APACHE HADOOP MAPREDUCE FRAMEWORK
Hadoop is a large-scale distributed batch processing infrastructure for the parallel processing of big data on large clusters of commodity computers [13]. Hadoop is an open source project of Apache [13]. It implements Google's file system [14] as the Hadoop Distributed File System (HDFS) and Google's MapReduce [17] as Hadoop MapReduce programming.
5.1 Hadoop Distributed File System
The Hadoop Distributed File System (HDFS) is a distributed file system that holds a large quantity of data, at terabyte or petabyte scale, and provides fast, scalable access to it [13]. It stores files in a replicated manner across different machines, which also provides fault tolerance and high availability during the execution of parallel applications [13].
HDFS is block-structured: it breaks a single file into fixed-size blocks (the default block size is 64 MB) stored across several machines. Hadoop makes use of two types of machines working in a master-worker fashion: (i) a NameNode as the master machine and (ii) a number of DataNodes as worker machines. The work of the NameNode is to assign block IDs to the blocks of a file and store the metadata (file name, permissions, replicas, location of each block) of the file system in its main memory, which provides quick access to the stored information. The DataNodes, individual machines in the cluster, store and retrieve the replicated blocks of multiple files [13].
5.2 Hadoop MapReduce
MapReduce is a programming model that can be used for the parallel processing of large volumes of data on a large number of machines by breaking the work into independent tasks. List processing languages, e.g. LISP, are the inspiration for MapReduce. MapReduce makes use of two list processing phases: map and reduce. Based on these phases, a MapReduce program consists of two functions, the Mapper and the Reducer, which run on all machines in a Hadoop cluster. The input and output of the Mapper/Reducer functions must be in the form of (key, value) pairs [13].
The input (k1, v1) for the mapper function is taken from HDFS, and the mapper produces a list of intermediate (k2, v2) pairs. To reduce the communication cost of relocating the intermediate outputs of mappers to reducers, an optional Combiner function can be applied. The mappers' output pairs are locally sorted and grouped on the same key and fed to the combiner to compute local partial counts. The combiner's intermediate output pairs are shuffled and exchanged between the machines to group all pairs with the same key at a single reducer. This is the single communication step that takes place, and it is handled by the Hadoop MapReduce platform; no other communication takes place between the mappers and the reducers. New pairs (k3, v3) are produced when the reducer takes (k2, list(v2)) as input and aggregates the values in list(v2) [13], [15].
All parallelization, inter-machine communication, and fault tolerance are handled by the run-time system. Hence MapReduce is recognized as a simplified programming model [17].
VI. APRIORI ALGORITHM ON HADOOP MAPREDUCE
There are two main tasks when implementing an algorithm on the MapReduce framework. First, we need to design two independent map and reduce functions for the algorithm, and we need to convert the dataset into (key, value) pairs. All the mappers and reducers on different machines execute in parallel in MapReduce programming, but the final result is obtained only after the completion of the reducers. If the algorithm is iterative, we need to execute multiple rounds of map-reduce to get the final result [20].
6.1 Traditional Apriori to MapReduce Based Apriori
The Apriori algorithm is an iterative process with two main components: candidate itemset generation and frequent itemset generation. In each scan of the database, the mapper creates local candidates and the reducer calculates the local counts, producing frequent itemsets. On Hadoop, the count distribution parallel version of Apriori is best suited; to implement the data distribution algorithm we would need to control the distribution of data, which is controlled automatically by Hadoop [6].
The first step of the algorithm is to generate the frequent 1-itemsets L1. The transactional database is broken into blocks by HDFS and distributed to all mappers running on the machines. Each transaction is transformed into a (key, value) pair, where the key is the TID and the value is the list of items, i.e. the transaction. The mapper reads one transaction at a time and outputs (key', value') pairs, where key' stands for each item in the transaction and value' is 1. The combiner's job is to combine the pairs with the same key' and compute the local sum of the values for each key'. The combiner's output pairs are shuffled and exchanged to create a list of values associated with the same key, as (key', list(value')) pairs. The reducers take these pairs and sum the values for their respective keys. The reducers output (key', value'') pairs where key' is an item and value'' is its support count, provided it is >= the minimum support [17], [14], [13]. The final frequent 1-itemsets L1 are obtained by merging the output of all reducers.
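A minimal sketch of this first phase is shown below, assuming one comma-separated transaction per input line and an illustrative MIN_SUPPORT constant of our own choosing; the job driver and combiner wiring are omitted:

```java
import java.io.IOException;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

// Sketch of the frequent 1-itemset phase described above.
public class OneItemsets {
    static final int MIN_SUPPORT = 3; // illustrative threshold

    public static class ItemMapper
            extends Mapper<LongWritable, Text, Text, IntWritable> {
        private static final IntWritable ONE = new IntWritable(1);
        private final Text item = new Text();

        @Override
        protected void map(LongWritable tid, Text transaction, Context ctx)
                throws IOException, InterruptedException {
            // One input line = one transaction: comma-separated items (assumed format).
            for (String it : transaction.toString().split(",")) {
                item.set(it.trim());
                ctx.write(item, ONE);           // emit (item, 1)
            }
        }
    }

    public static class SupportReducer
            extends Reducer<Text, IntWritable, Text, IntWritable> {
        @Override
        protected void reduce(Text item, Iterable<IntWritable> counts, Context ctx)
                throws IOException, InterruptedException {
            int sum = 0;
            for (IntWritable c : counts) sum += c.get();
            if (sum >= MIN_SUPPORT)             // keep only frequent items
                ctx.write(item, new IntWritable(sum));
        }
    }
}
```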
6.2 Analysis of Various Proposed Implementations of Apriori on MapReduce
Various implementations of Apriori on the MapReduce framework have been proposed since MapReduce was introduced by Google. These algorithms are classified into two categories: 1-phase and k-phase MapReduce [19]. Also, some algorithms make use of all three functions (mapper, reducer, and combiner) while others use only the mapper and reducer functions.
6.3 Using Functionality of Combiner inside Mapper
In traditional algorithms, the mapper outputs an (itemset, 1) pair each time the itemset is found in a transaction assigned to that mapper. F. Kovacs and J. Illes [13] proposed a different way to achieve this. In their algorithm, the mapper outputs an (itemset, itemset.counter) pair only once for each itemset, where itemset.counter is the local support count of the itemset. Inside the mapper, itemset.counter is incremented in each iteration of the execution loop whenever the itemset is found in a transaction assigned to the mapper. In this way, the mapper produces output (itemset, local support), which is passed to the reducer as (itemset, list(local support)).
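A hedged sketch of this idea follows (our naming; the paper's code is not reproduced here): local counts accumulate in an in-memory map and are emitted once per item from cleanup():

```java
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;

// Emits (itemset, local support) once per itemset instead of (itemset, 1) per hit.
public class CombiningMapper
        extends Mapper<LongWritable, Text, Text, IntWritable> {
    private final Map<String, Integer> local = new HashMap<>();

    @Override
    protected void map(LongWritable tid, Text transaction, Context ctx) {
        for (String item : transaction.toString().split(","))
            local.merge(item.trim(), 1, Integer::sum); // itemset.counter++
    }

    @Override
    protected void cleanup(Context ctx) throws IOException, InterruptedException {
        // One output pair per itemset, carrying its local support count.
        for (Map.Entry<String, Integer> e : local.entrySet())
            ctx.write(new Text(e.getKey()), new IntWritable(e.getValue()));
    }
}
```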
Comparative Analysis with Other Pattern Algorithms

<table>
<thead>
<tr>
<th>Sl. No.</th>
<th>Techniques</th>
<th>Abstract</th>
<th>Pros</th>
<th>Cons</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>Apriori</td>
<td>Apriori property for pruning and a breadth-first search approach. The database is scanned each time a candidate itemset is generated. Execution time is considerable, as time is consumed scanning the database for each candidate itemset generation. Data Format: <em>Horizontal</em>. Storage Structure: <em>Array</em>.</td>
<td>It uses the large itemset property. It is easy to implement.</td>
<td>Many candidate itemsets. Too many passes over the database; requires large memory space.</td>
</tr>
<tr>
<td>2</td>
<td>RARM</td>
<td>Depth-first search on SOTrieIT to generate 1-itemsets & 2-itemsets. The database is scanned only a few times to construct the SOTrieIT tree structure. Takes less execution time compared to the Apriori and FP-Growth algorithms. Data Format: <em>Horizontal</em>. Storage Structure: <em>Tree</em>.</td>
<td>No candidate itemsets are generated. Speeds up the process of generating candidate 1-itemsets & 2-itemsets.</td>
<td>Difficult in interactive system mining. Difficult to use in incremental mining.</td>
</tr>
<tr>
<td>3</td>
<td>ECLAT</td>
<td>Intersection of transaction IDs to generate candidate itemsets; a depth-first search approach. The database is scanned a few times (best case = 2). Execution time is less compared to the Apriori algorithm. Data Format: <em>Vertical</em>. Storage Structure: <em>Array</em>.</td>
<td>Does not need to scan the whole database each time a candidate itemset is generated, as the support count information is obtained from the previous itemset.</td>
<td>Requires virtual memory to perform the transformation.</td>
</tr>
<tr>
<td>4</td>
<td>FP-Growth</td>
<td>Divide-and-conquer method. The database is scanned only two times. Takes less time compared to the Apriori algorithm. Data Format: <em>Horizontal</em>. Storage Structure: <em>Tree (FP-Tree)</em>.</td>
<td>The database is scanned only two times. No candidate generation.</td>
<td>The FP-Tree is expensive to build and consumes more memory.</td>
</tr>
<tr>
<td>5</td>
<td>ASPMS</td>
<td>Uses a BSM (Branch Sort Method) based on merge sort. The database is scanned only one time; takes less execution time compared to the FP-Growth algorithm. Data Format: <em>Horizontal</em>. Storage Structure: <em>Tree (ASP-Tree)</em>.</td>
<td>Highly suitable for interactive mining and requires less memory because of the compression feature of the ASP tree.</td>
<td></td>
</tr>
</tbody>
</table>
VII. ADVANTAGES AND LIMITATIONS OF MAPREDUCE
The MapReduce operation is a flexible, efficient, and simple model used to compute different problems over large datasets. Like other techniques, MapReduce has both advantages and limitations, and its suitability depends on the type of problem it is used to solve.
7.1 Advantages
Some of the major advantages of MapReduce follow. They apply not only to data mining or pattern matching problems but to most types of data processing problems in general.
7.1.1 Automatic Parallelization, Fault Tolerance, Data Distribution, and Workload Balance
The MapReduce runtime system executes the mapper and reducer functions on a number of machines in parallel. It splits the dataset into a specific number of fixed-size units and replicates them with some replication factor to provide high availability and no data loss. It shares tasks among busy nodes, or moves them from slower nodes to idle nodes, so that workload balancing is achieved and throughput is increased. MapReduce provides high fault tolerance by re-executing a failed task without re-executing the other tasks over the dataset; it re-assigns tasks from failed nodes to active or idle nodes [15]. The developer or programmer can therefore concentrate on the algorithm without worrying about such operations.
7.1.2 Reduced Network Bandwidth Consumption
Hadoop replicates datasets across multiple nodes, allowing data to be read from local disks; staged intermediate data is written once to local disks, saving network bandwidth [17].
7.1.3 Combining Computing Power and Distributed Storage
Hadoop provides a combined platform for a distributed storage system and high computing power.
7.1.4 Extremely Scalable
MapReduce enables applications to run in parallel on a large Hadoop cluster with thousands of nodes and to process data at very large scale, on the order of petabytes [12].
7.2 Limitations
Even though MapReduce has many advantages, it also has some limitations which cannot be eliminated. We list some of the major limitations, which also apply to specific algorithms like Apriori.
7.2.1 It Works Only on (key, value) Pairs
The MapReduce model only works on data structures of the type (key, value) pairs. All input datasets must be converted into this form.
7.2.2 Blocking Operation
The result of a map-reduce phase is not complete until the reducer operation has finished. In an n-phase map-reduce operation, the transition from one phase to the next cannot happen until all reducer operations have finished. Therefore, it cannot exploit pipeline parallelism [15].
7.2.3 Implicit Data Distribution
The data distribution version of Apriori cannot be implemented on Hadoop because the distribution of data is automatically controlled by Hadoop [20]. As the count distribution version of Apriori does not exchange data and only exchanges counts between nodes, only this version is suitable for Hadoop [21].
VIII. CONCLUSION
There are different frequent itemset extraction algorithms available, each with its own advantages and limitations. Apriori and FP-Growth are the algorithms most often used for mining patterns, but each has its own drawbacks. In the Apriori algorithm, the database needs to be scanned frequently, which generates many candidate keys and in turn increases I/O and synchronization problems. These problems are overcome by the FP-Growth algorithm, but that algorithm in turn has the drawback of constructing its trees in memory.
The drawbacks of both Apriori and FP-Growth are brought under control by the FIUT algorithm. FIUT scans the database only two times and reduces the search space; with FIUT, we do not need to traverse the entire tree and check leaves to find frequent items or patterns. The Hadoop MapReduce paradigm uses a hash-based algorithm that introduces some enhancements to the Apriori algorithm: it trims the itemsets and datasets by pruning rare itemsets, so efficiency is achieved. The applications of frequent itemset mining range from sentiment analysis, web mining, medical data extraction, and knowledge-driven business decisions to finding accident patterns, web link analysis, market basket analysis, etc.
Compared to other algorithms and methodologies for mining frequent itemsets at large scale, the Hadoop MapReduce paradigm has more advantages and is most suitable for mining, along with newer approaches like the FIUT algorithm.
IX. REFERENCES
[5] Jiawei Han, Hong Cheng, Dong Xin, Xifeng Yan, "Frequent pattern mining: current status and future directions," Data Mining and Knowledge Discovery, vol. 15, no. 1, 2007.
[7] "Data Mining Algorithms In R/Frequent Pattern Mining/The FP-Growth Algorithm" Wikibooks, open books for an open world.
When it comes to providing reliable, flexible, and efficient object persistence for software systems, today's designers and architects are faced with many choices. From the technological perspective, the choice is usually between pure Object-Oriented, Object-Relational hybrids, pure Relational, and custom solutions based on open or proprietary file formats (for example, XML and OLE structured storage). From the vendor aspect Oracle, IBM, Microsoft, POET, and others offer similar but often-incompatible solutions.
This article is about only one of those choices, that is the layering of an object-oriented class model on top of a purely relational database. This is not to imply this is the only, best, or simplest solution, but pragmatically it is one of the most common, and one that has the potential for the most misuse.
We will begin with a quick tour of the two design domains we are trying to bridge: firstly the object-oriented class model as represented in the UML, and secondly the relational database model.
For each domain we look only at the main features that will affect our task. We will then look at the techniques and issues involved in mapping from the class model to the database model, including object persistence, object behavior, relationships between objects, and object identity. We will conclude with a review of the UML Data Profile (as proposed by Rational Software). Some familiarity with object-oriented design, UML, and relational database modeling is assumed.
The Class Model in the UML is the main artifact produced to represent the logical structure of a software system. It captures both the data requirements and the behavior of objects within the model domain. The techniques for discovering and elaborating that model are outside the scope of this article, so we will assume the existence of a well designed class model that requires mapping onto a relational database.
**The Class Model**
The class is the basic logical entity in the UML. It defines both the data and the behavior of a structural unit. A class is a template or model from which instances or objects are created at runtime. When we develop a logical model such as a structural hierarchy in UML we explicitly deal with classes. When we work with dynamic diagrams, such as sequence diagrams and collaborations, we work with objects or instances of classes and their interactions at run-time. The principle of data hiding or encapsulation is based on localization of effect. A class has internal data elements that it is responsible for. Access to these data elements should be through the class' exposed behavior or interface. Adherence to this principle results in more maintainable code.
Behavior
Behavior is captured in the class model using the operations that are defined for the class. Operations may be externally visible (public), visible to children (protected), or hidden (private). By combining hidden data with a publicly accessible interface and hidden or protected data manipulation, a class designer can create highly maintainable structural units that support rather than hinder change.
Relationships and Identity
Association is a relationship between two classes indicating that at least one side of the relationship knows about and somehow uses or manipulates the other side. This relationship may be functional (do something for me) or structural (be something for me). For this article it is the structural relationship that is most interesting: for example, an Address class may be associated with a Person class. The mapping of this relationship into the relational data space requires some care.
Aggregation is a form of association that implies the collection of one class of objects within another. Composition is a stronger form of aggregation that implies one object is actually composed of others. Like the association relationship, this implies a complex class attribute that requires careful consideration in the process of mapping to the relational domain. While a class represents the template or model from which many object instances may be created, an object at run-time requires some means of identifying itself so that associated objects may act upon the correct object instance. In a programming language like C++, object pointers may be passed around and held to give objects access to a unique object instance. Often, though, an object will be destroyed and later require that it be re-created as it was during its last active instance. Such objects require a storage mechanism into which to save their internal state and associations, and from which to retrieve that state as required.
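As an illustrative sketch (all names are ours, not the article's), OID-based identity can be factored into a common base class so that equality depends only on the identifier, never on business attributes:

```java
// Sketch: persistent identity via an OID, independent of business data.
abstract class PersistentObject {
    private final long oid; // unique, immutable object identifier

    protected PersistentObject(long oid) { this.oid = oid; }

    long getOid() { return oid; }

    @Override public boolean equals(Object o) {
        if (!(o instanceof PersistentObject)) return false;
        return ((PersistentObject) o).oid == oid; // identity by OID alone
    }

    @Override public int hashCode() { return Long.hashCode(oid); }
}

class Address extends PersistentObject {
    String street;
    Address(long oid, String street) { super(oid); this.street = street; }
}

class Person extends PersistentObject {
    String name;
    Address address; // structural association, later mapped to a foreign key

    Person(long oid, String name, Address address) {
        super(oid);
        this.name = name;
        this.address = address;
    }
}
```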
Inheritance provides the class model with a means of factoring out common behavior into generalized classes that then act as the ancestors of many variations on a common theme. Inheritance is a means of managing both re-use and complexity. As we will see, the relational model
has no direct counterpart of inheritance, which creates a dilemma for the data modeler mapping an object model onto a relational framework. Navigation from one object at run time to another is based on absolute references. One object has some form of link (a pointer or unique object ID) with which to locate or re-create the required object.
The Relational Model
The relational data model has been around for many years and has a proven track record of providing performance and flexibility. It is essentially set-based and has as its fundamental unit the 'table', which is composed of a set of one or more 'columns', each of which contains a data element.
Tables and Columns
A relational table is a collection of one or more columns, each of which has a unique name within the table construct. Each column is defined to be of a certain basic data type, such as a number, text, or binary data. A table definition is a template from which table rows are created, each row being one possible instance conforming to that definition. The relational model offers only a public data access model: all data is equally exposed and open to any process to update, query, or manipulate it. Information hiding is unknown.
Behavior
The behavior associated with a table is usually based on the business or logical rules applied to that entity. Constraints may be applied to columns in the form of uniqueness requirements, relational integrity constraints to other tables/rows, allowable values, and data types.
Triggers provide some additional behavior that can be associated with an entity. Typically this is used to enforce data integrity before or after updates, inserts, and deletes. Database stored procedures provide a means of extending database functionality through proprietary language extensions used to construct functional units (scripts). These functional procedures do not map directly to entities, nor have a logical relationship to them. Navigation through relational data sets is based on row traversal and table joins. SQL is the primary language used to select rows and locate instances from a table set.
**Relationships and Identity**
The primary key of a table provides the unique identifying value for a particular row. There are two kinds of primary keys that we are interested in: firstly the meaningful key, made up of data columns which have a meaning within the business domain, and secondly the abstract unique identifier, such as a counter value, which has no business meaning but uniquely identifies a row. We will discuss this and the implications of meaningful keys later. A table may contain columns that map to the primary key of another table. This relationship between tables defines a foreign key and implies a structural relationship or association between the two tables.
**Summary**
From the above overview we can see that the object model is based on discrete entities having both state (attributes/data) and behavior, with access to the encapsulated data generally through the class public interface only. The relational model exposes all data equally, with limited support for associating behavior with data elements through triggers, indexes, and constraints. You navigate to distinct information in the object model by moving from object to object using unique object identifiers and established object relationships (similar to a network data model). In the relational model you find rows by joining and filtering result sets with SQL, using generalized search criteria. Identity in the object model is either a run-time reference or a persistent unique ID (termed an OID). In the relational world, primary keys define the uniqueness of a data set in the overall data space.
In the object model we have a rich set of relationships: inheritance, aggregation, association, composition, dependency, and others. In the relational model we can really only specify a relationship using foreign keys. Having looked at the two domains of interest and compared some of the important features of each, we will digress briefly to look at the notation proposed to represent relational data models in the UML.
The UML Data Model Profile
The Data Model Profile is a UML extension that supports the modeling of relational databases in UML. It includes custom extensions for such things as tables, data base schema, table keys, triggers, and constraints. While this is not a ratified extension, it still illustrates one possible technique for modeling a relational database in the UML.
**Tables and Columns**
A table in the UML Data Profile is a class with the «Table» stereotype, displayed as above with a table icon in the top right corner. Database columns are modeled as attributes of the «Table» class.
For example, the figure above shows some attributes associated with the Customer table. In the example, an object id has been defined as the primary key, as well as two other columns, Name and Address. Note that the example above defines the column type in terms of the native DBMS data types.
**Behavior**
So far we have only defined the logical (static) structure of the table; in addition we should describe the behavior associated with columns, including indexes, keys, triggers, procedures, and so on. Behavior is represented as stereotyped operations.
The figure below shows our table above with a primary key constraint and index, both defined as stereotyped operations:
Note that the PK flag on the column 'OID' defines the logical primary key, while the stereotyped operation "«PK» idx_customer00" defines the constraints and behavior associated with the primary key implementation (that is, the behavior of the primary key).
Adding to our example, we may now define additional behavior such as triggers, constraints and stored procedures as in the example below:
The example illustrates the following possible behavior:
1. A primary key constraint (PK)
2. A Foreign key constraint (FK)
3. An index constraint (Index)
4. A trigger (Trigger)
5. A uniqueness constraint (Unique)
6. A stored procedure (Proc), not formally part of the data profile but an example of a possible modeling technique
7. Validity check (Check).
Using the notation provided above, it is possible to model complex data structures and behavior at the DBMS level. In addition to this, the UML provides the notation to express relationships between logical entities.
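As a rough illustration, several of the behaviors listed above might be realized in SQL as follows; the names are invented, and trigger and stored procedure syntax in particular varies between vendors.

```sql
-- The index behind the primary key (the «PK»/«Index» operations above).
CREATE UNIQUE INDEX idx_customer00 ON Customer (OID);

-- A uniqueness constraint («Unique») and a validity check («Check»).
ALTER TABLE Customer ADD CONSTRAINT uq_customer_name UNIQUE (Name);
ALTER TABLE Customer ADD CONSTRAINT ck_customer_name CHECK (Name <> '');

-- Triggers («Trigger») and stored procedures («Proc») would be written
-- in the vendor's own dialect (PL/SQL, T-SQL, and so on).
```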
**Relationships**
The UML data modeling profile defines a relationship as a dependency of any kind between two tables. It is represented as a stereotyped association and includes a set of primary and foreign keys. The data profile goes on to require that a relationship always involves a parent and child, the parent defining a primary key and the child implementing a foreign key based on all or part of the parent primary key. The relationship is termed 'identifying' if the child foreign key includes all the elements of the parent primary key and 'non-identifying' if only some elements of the primary key are included. The relationship may include cardinality constraints and be modeled with the relevant PK-FK pair named as association roles. The illustration below shows this kind of relationship modeling in UML.
[Diagram of a relationship between child and parent tables]
An identifying relationship between child and parent, with role names based on primary to foreign key relationship.
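In SQL terms, such an identifying relationship might be sketched as below; the table and constraint names are hypothetical. Because the child's primary key includes every column of the parent's primary key, the relationship is identifying.

```sql
CREATE TABLE Parent (
    ParentID INTEGER NOT NULL,
    CONSTRAINT pk_parent PRIMARY KEY (ParentID)
);

CREATE TABLE Child (
    ChildID  INTEGER NOT NULL,
    ParentID INTEGER NOT NULL,   -- role: foreign key to Parent
    CONSTRAINT pk_child PRIMARY KEY (ChildID, ParentID),
    CONSTRAINT fk_child_parent FOREIGN KEY (ParentID)
        REFERENCES Parent (ParentID)
);
```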
The Physical Model
UML also provides some mechanisms for representing the overall physical structure of the database, its contents, and deployed location. To represent a physical database in UML, use a stereotyped component.
A component represents a discrete and deployable entity within the model. In the physical model, a component may be mapped onto a physical piece of hardware (a ‘node’ in UML). To represent a schema within the database, use the «schema» stereotype on a package. A table may be placed in a «schema» to establish its scope and location within a database.
Mapping from the Class Model to the Relational Model
Having described the two domains of interest and the notation to be used, we can now turn our attention to how to map or translate from one domain to the other. The strategy and sequence presented below are meant to be suggestive rather than prescriptive—adapt the steps and procedures to your own requirements and environment.
1. **Model Classes**
Firstly we will assume we are engineering a new relational database schema from a class model we have created. This is obviously the easiest direction, as the models remain under our control and we can optimize the relational data model to the class model. In the real world it may be that you need to layer a class model on top of a legacy data model—a more difficult situation and one that presents its own challenges. The current discussion will focus on the first situation. At a minimum, your class model should capture associations, inheritance, and aggregation between elements.
2. **Identify persistent objects**
Having built our class model we need to separate it into those elements that require persistence and those that do not. For example, if we have designed our application using the Model-View-Controller design pattern, then only classes in the model section would require persistent state.
3. **Assume each persistent class maps to one relational table**
A fairly big assumption, but one that works in most cases (leaving the inheritance issue aside for the moment). In the simplest model a class from the logical model maps to a relational table, either in whole or in part. The logical extension of this is that a single object (or instance of a class) maps to a single row in a table.
4. **Select an inheritance strategy**
Inheritance is perhaps the most problematic relationship and logical construct from the object-oriented model that requires translating into the relational model. The relational space is essentially flat, every entity being complete in itself, while the object model is often quite deep with a well-developed class hierarchy. The deep class model may have many layers of inherited attributes and behavior, resulting in a final, fully featured object at run-time. There are three basic ways to handle the translation of inheritance to a relational model:
- Each class hierarchy has a single corresponding table that contains all the inherited attributes for all elements—this table is therefore the union of every class in the hierarchy. For example, Person, Parent, Child, and Grandchild may all form a single class hierarchy, and elements from each will appear in the same relational table;
- Each class in the hierarchy has a corresponding table of only the attributes accessible by that class (including inherited attributes). For example, if Child is inherited from Person only, then the table will contain elements of Person and Child only;
- Each generation in the class hierarchy has a table containing only that generation's actual attributes. For example, Child will map to a single table with Child attributes only.
There are cases to be made for each approach, but I would suggest that the simplest, easiest to maintain and least error-prone is the third option. The first option provides the best performance at run-time, and the second is a compromise between the first and last. The first option flattens the hierarchy and locates all attributes in one table—convenient for updates and retrievals of any class in the hierarchy, but difficult to authenticate and maintain. Business rules associated with a row are hard to implement, as each row may be instantiated as any object in the hierarchy. The dependencies between columns can become quite complicated. In addition, an update to any class in the hierarchy will potentially impact every other class in the hierarchy, as columns are added, deleted or modified in the table.
The second option is a compromise that provides better encapsulation and eliminates empty columns. However, a change to a parent class may need to be replicated in many child tables. Even worse, the parental data in two or more child classes may be redundantly stored in many tables; if a parent's attributes are modified, there is considerable effort in locating dependent children and updating the affected rows.
The third option more accurately reflects the object model, with each class in the hierarchy mapped to its own independent table. Updates to parents or children are localized in the correct space. Maintenance is also relatively easy, as any modification of an entity is restricted to a single relational table. The downside is the need to re-construct the hierarchy at run-time to accurately re-create a child class's state. A Child object may require a Person member variable to represent its model parentage. As both require loading, two database calls are required to initialize one object. As the hierarchy deepens, with more generations, the number of database calls required to initialize or update a single object increases.
It is important to understand the issues that arise when you map inheritance onto a relational model, so you can decide which solution is right for you.
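To make the trade-off concrete, here is a minimal SQL sketch of the third option for a two-level Person/Child hierarchy; all names and types are hypothetical.

```sql
-- One table per generation: each table stores only that class's own attributes.
CREATE TABLE Person (
    PersonOID INTEGER NOT NULL PRIMARY KEY,
    Name      VARCHAR(50)
);

CREATE TABLE Child (
    ChildOID  INTEGER NOT NULL PRIMARY KEY,
    PersonOID INTEGER NOT NULL REFERENCES Person (PersonOID), -- inherited part
    School    VARCHAR(50)                                     -- Child's own attribute
);

-- Re-creating one Child's full state means reading both generations:
-- SELECT * FROM Child c JOIN Person p ON p.PersonOID = c.PersonOID
-- WHERE c.ChildOID = ?;
```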
5. **For each class add a unique object identifier**
In both the relational and the object world, there is the need to uniquely identify an object or entity. In the object model, non-persistent objects at run-time are typically identified by direct reference or by a pointer to the object. Once an object is created, we can refer to it by its run-time identity. However, if we write out an object to storage, the problem is how to retrieve the exact same instance on demand. The most convenient method is to define an OID (object identifier) that is guaranteed to be unique in the namespace of interest. This may be at the class, package or system level, depending on actual requirements.
An example of a system level OID might be a GUID (globally unique identifier) created with Microsoft's 'guidgen' tool; for example, {A1A68E8E-CD92-420b-BDA7-118F847B71EB}. A class level OID might be implemented using a simple numeric value (for example, a 32-bit counter). If an object holds references to other objects, it may do so using their OIDs. A complete run-time scenario can then be loaded from storage reasonably efficiently. An important point about the OID values above is that they have no inherent meaning beyond simple identity. They are only logical pointers and nothing more. In the relational model, the situation is often quite different.
Identity in the relational model is normally implemented with a primary key. A primary key is a set of columns in a table that together uniquely identify a row. For example, name and address may uniquely identify a 'Customer'. Where other entities, such as a 'Salesperson', reference the 'Customer', they implement a foreign key based on the 'Customer' primary key. The problem with this approach for our purposes is the impact of having business information (such as customer name and address) embedded in the identifier. Imagine three or four tables all have foreign keys based on the customer primary key, and a system change requires the customer primary key to change (for example to include 'customer type'). The work required to modify both the 'customer' table and the entities related by foreign key is quite large.
On the other hand, if an OID was implemented as the primary key and formed the foreign key for other tables, the scope of the change is limited to the primary table, and the impact of the change is therefore much less. Also, in practice, a primary key based on business data may be subject to change. For example, a customer may change address or name. In this case the changes must be propagated correctly to all other related entities, not to mention the difficulty of changing information that is part of the primary key.
An OID always refers to the same entity no matter what other information changes. In the above example, a customer may change name or address and the related tables require no change. When mapping object models into relational tables, it is often more convenient to implement absolute identity using OIDs rather than business-related primary keys. The OID as primary and foreign key approach will usually give better load and update times for objects and minimize maintenance effort. In practice, a business-related primary key might be replaced with:
1. A uniqueness constraint or index on the columns concerned
2. Business rules embedded in the class behavior
3. A combination of 1 and 2.
Again, the decision to use meaningful keys or OIDs will depend on the exact requirements of the system being developed.
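A sketch of the OID-as-primary-key approach, with the former business key demoted to a uniqueness constraint (option 1 above); the identity-column syntax is SQL:2003-style and varies by vendor.

```sql
CREATE TABLE Customer (
    OID     INTEGER GENERATED ALWAYS AS IDENTITY,  -- meaningless surrogate key
    Name    VARCHAR(50)  NOT NULL,
    Address VARCHAR(100) NOT NULL,
    CONSTRAINT pk_customer PRIMARY KEY (OID),
    CONSTRAINT uq_customer UNIQUE (Name, Address)  -- the old business key
);
-- Related tables reference OID, so a change to Name or Address
-- never ripples into their foreign keys.
```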
6. **Map attributes to columns**
In general we will map the simple data attributes of a class to columns in the relational table. For example, a text field and a number field may represent a person's name and age respectively. This sort of direct mapping should pose no problem: simply select the appropriate data type in the vendor's relational model to host your class attribute.
For complex attributes (in other words, attributes that are other objects) use the approach detailed below for handling associations and aggregation.
7. **Map associations to foreign keys**
More complex class attributes (in other words, those which represent other classes) are usually modeled as associations. An association is a structural relationship between objects. For example, a Person may live at an Address. While this could be modeled by giving Person the attributes City, Street and Zip, in both the object and the relational world we are inclined to structure this information as a separate entity, an Address. In the object domain an address represents a unique physical object, possibly with a unique OID. In the relational domain, an address may be a row in an Address table, with other entities having a foreign key to the Address primary key.
In both models then, there is the tendency to move the address information into a separate entity. This helps to avoid redundant data and improves maintainability. So for each association in the class model, consider creating a foreign key from the child to the parent table.
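For the Person/Address example, the association might map to a foreign key as in the following sketch (hypothetical names and types):

```sql
CREATE TABLE Address (
    AddressOID INTEGER NOT NULL PRIMARY KEY,
    City   VARCHAR(50),
    Street VARCHAR(80),
    Zip    VARCHAR(10)
);

CREATE TABLE Person (
    PersonOID  INTEGER NOT NULL PRIMARY KEY,
    Name       VARCHAR(50),
    AddressOID INTEGER REFERENCES Address (AddressOID)  -- the association end
);
```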
8. **Map Aggregation and Composition**
Aggregation and composition relationships are similar to the association relationship and map to tables related by primary-foreign key pairs. There are, however, some points to bear in mind. Ordinary aggregation (the weak form) models relationships such as a Person residing at one or more Addresses. In this instance, more than one person could live at the same address, and if the Person ceased to exist, the Addresses associated with them would still exist. This example parallels the many-to-many relationship in relational terminology, and is usually implemented as a separate table containing a mapping of primary keys from one table to the primary keys of another; a sketch follows below.
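Assuming Person and Address tables like those sketched above, the many-to-many case might be implemented as:

```sql
-- Intermediate table holding pairs of primary keys from each side.
CREATE TABLE PersonAddress (
    PersonOID  INTEGER NOT NULL REFERENCES Person (PersonOID),
    AddressOID INTEGER NOT NULL REFERENCES Address (AddressOID),
    PRIMARY KEY (PersonOID, AddressOID)
);
```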
A second example of the weak form of aggregation is where an entity has use or exclusive ownership of another. For example, a Person entity aggregates a set of Shares. This implies a Person may be associated with zero or more Shares from a Share table, but each Share may be associated with zero or one Person. If the Person ceases to exist, the Shares become un-owned or are passed to another Person. In the relational world, this could be implemented as each Share having an 'owner' column that stores a Person ID (or OID).
The strong form of aggregation, however, has important integrity constraints associated with it. Composition, implies that an entity is composed of parts, and those parts have a dependent relationship to the whole. For example, a Person may have identifying documents such as a Passport, Birth Certificate, Driver's License, and so on. A Person entity may be composed of the set of such identifying documents. If the Person is deleted from the system, then the identifying documents must be deleted also, as they are mapped to a unique individual.
If we ignore the OID issue for the moment, a weak aggregation could be implemented using either an intermediate table (for the many-to-many case) or a foreign key in the aggregated class/table (the one-to-many case). In the case of the many-to-many relationship, if the parent is deleted, the entries in the intermediate table for that entity must be deleted also. In the case of the one-to-many relationship, if the parent is deleted, the foreign key entry (in other words, 'owner') must be cleared.
In the case of composition, the use of a foreign key is mandatory, with the added constraint that on deletion of the parent the part must be deleted also. Logically, there is also the implication with composition that the primary key of the part forms part of the primary key of the whole; for example, a Person's primary key may be composed of their identifying documents' IDs. In practice this would be cumbersome, but the logical relationship holds true.
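In SQL, the dependent lifetime of the part is commonly enforced with a cascading delete, sketched below with hypothetical names:

```sql
CREATE TABLE IdentityDocument (
    DocumentOID INTEGER NOT NULL PRIMARY KEY,
    PersonOID   INTEGER NOT NULL,
    DocType     VARCHAR(30),
    CONSTRAINT fk_document_person FOREIGN KEY (PersonOID)
        REFERENCES Person (PersonOID)
        ON DELETE CASCADE   -- deleting the whole deletes its parts
);
```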
9. **Define relationship roles**
For each association type relationship, each end of the relationship may be further specified with role information. Typically, you will include the primary key constraint name and the foreign key constraint name. Figure 6 illustrates this concept. This logically defines the relationship between the two classes. In addition, you may specify additional constraints (for example, {Not NULL}) on the role, and cardinality constraints (for example, 0..n).
10. **Model behavior**
We now come to another difficult issue: whether to map some or all class behavior to the functional capabilities provided by database vendors in the form of triggers, stored procedures, uniqueness and data constraints, and relational integrity. A non-persistent object model would typically implement all the behavior required in one or more programming languages (for example, Java or C++). Each class will be given its required behavior and responsibilities in the form of public, protected and private methods.
Relational databases from different vendors typically include some form of programmable, SQL-based scripting language for implementing data manipulation. The two common examples are triggers and stored procedures. When we mix the object and relational models, the decision is usually whether to implement all the business logic in the class model, or to move some of it to the often more efficient triggers and stored procedures implemented in the relational DBMS. From a purely object-oriented point of view, the answer is obviously to avoid triggers and stored procedures and place all behavior in the classes. This localizes behavior, provides for a cleaner design, simplifies maintenance and provides good portability between DBMS vendors.
In the real world, the bottom line may be scaling to hundreds or thousands of transactions per second, something stored procedures and triggers are purpose-designed for. If purity of design, portability, maintenance and flexibility are the main drivers, localize all behavior in the object methods.
If performance is an overriding concern, consider delegating some behavior to the more efficient DBMS scripting languages. Be aware, though, that the extra time taken to integrate the object model with the stored procedures in a safe way, including issues with remote effects and debugging, may cost more in development time than simply deploying to more capable hardware.
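For comparison, behavior delegated to the DBMS might look like the following SQL/PSM-style stored procedure; real dialects (PL/SQL, T-SQL, and others) differ considerably, so treat this as a sketch only.

```sql
-- Moves one piece of Customer behavior into the database tier.
CREATE PROCEDURE sp_change_address (
    IN p_oid     INTEGER,
    IN p_address VARCHAR(100)
)
BEGIN
    UPDATE Customer
    SET    Address = p_address
    WHERE  OID = p_oid;
END
```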
As mentioned earlier, the UML Data Profile provides the following extensions (stereotyped operations) with which you can model DBMS behavior:
- Primary key constraint (PK)
- Foreign key constraint (FK)
- Index constraint (Index)
- Trigger (Trigger)
- Uniqueness constraint (Unique)
- Validity check (Check).
11. **Produce a physical model**
In UML, the physical model describes how something will be deployed into the real world—the hardware platform, network connectivity, software, operating system, DLLs and other components. You produce a physical model to complete the cycle—from an initial use case or domain model, through the class model and data models, and finally the deployment model. Typically for this model you will create one or more nodes that will host the database(s) and place DBMS software components on them. If the database is split over more than one DBMS instance, you can assign packages («schema») of tables to a single DBMS component to indicate where the data will reside.
Conclusion
This concludes this short article on database modeling using the UML. As you can see, there are quite a few issues to consider when mapping from the object world to the relational. The UML provides support for bridging the gap between both domains, and together with extensions such as the UML Data Profile is a good language for successfully integrating both worlds.
For more information about UML, visit the Sparx Systems Web site or email gsparks@sparxsystems.com.au.
Are Your Lights Off? Using Problem Frames to Diagnose System Failures
Thein Than Tun
Michael Jackson
Robin Laney
Bashar Nuseibeh
Yijun Yu
5th June 2009
Department of Computing
Faculty of Mathematics and Computing
The Open University
Walton Hall,
Milton Keynes
MK7 6AA
United Kingdom
http://computing.open.ac.uk
Abstract
This paper reports on our experience of investigating the role of software systems in the power blackout that affected parts of the United States and Canada on 14 August 2003. Based on a detailed study of the official report on the blackout, our investigation aimed to bring out requirements engineering lessons that can inform development practices for dependable software systems. Based on the assumption that the causes of failures are rooted in the complex structures of software systems and their world contexts, we decided to deploy and evaluate a framework that looks beyond the scope of software and into its physical context, and directs attention to places in the system structures where failures are likely to occur. We report that (i) Problem Frames were effective in diagnosing the causes of failures and documenting the causes in a schematic and accessible way, and (ii) errors in addressing the concerns of biddable domains, model building problems, and monitoring problems had contributed to the blackout.
1 Introduction
In mature branches of engineering, failures and “the role played by reaction to and anticipation of failure” are regarded as essential for achieving design success [13]. Identification of the causes of past system failures, their organisation and documentation in a way accessible by engineers within an engineering community, and the application of such knowledge of failures when designing future systems, all play a central role in establishing “normal design” practices [17]. Although there have been several excellent reports on high-profile system failures involving software systems [6, 8, 11, 10], development practices for dependable systems have not exploited input from incident or accident investigations in a systematic way [2, 14]. This work is a small step in the direction of mending the development practices.
Requirements Engineering (RE) is concerned with defining the behaviour of required systems, and any error introduced or prevented early in the development contributes to the system dependability. In this respect, RE has a valuable role to play in systematising and documenting causes of past failures, and utilising this systematised knowledge in the development of future systems. In the same way that system failures can be attributed to programming, design, and human/operational errors, it is possible to attribute certain failures to RE errors. RE errors may be due to missing requirements, incorrect assumptions about the problem context, weak formulation of requirements and unexpected interactions between requirements.
Although the broader context—such as the organisational settings, regulatory regimes and market forces—often plays an important role in failures, we deliberately focus on the role of the software system in its physical context in order to bring out clear lessons for requirements engineers. Therefore, a framework is needed for investigating failures, which looks beyond the scope of software and into its physical context, and directs attention to places in the system structures where failures are likely to occur.
In this paper, we report on our experience of using Problem Frames [5] to identify, organise and document knowledge about the causes of past system failures. In the Problem Frames framework, potential causes of failures—known as “concerns”—are named and associated with a specific pattern of software problem, a style of problem composition, a type of problem world domain, the requirement and the specification. An instantiation of a pattern, for instance, will immediately raise the need to address certain concerns in the system structures. This is, in a sense, similar to generating “verification conditions” for a program in order to prove its correctness with respect to the specification [1]. In this case, concerns raised will have to be discharged by requirements engineers, perhaps in collaboration with other stakeholders.
The rest of the paper is organised as follows. Section 2 gives an overview of the power blackout case study, the methodology used in the investigation, and some of the key principles of Problem Frames. The role of the software systems in the blackout is described and analysed in Section 3. Related work is discussed in Section 4. Section 5 summarises the findings.
2 Preliminaries
This section provides an overview of our case study, the research methodology used to investigate the failures, the conceptual framework of Problem Frames, and the expected outcome of our study.
2.1 2003 US-Canada Electricity Blackout
The electricity blackout that occurred on 14 August, 2003 in large parts of the Midwest and Northeast United States and Ontario, Canada, affected around 50 million people, according to the official report by the U.S.–Canada Power System Outage Task Force [16]. The outage began around 16:00 EDT (Eastern Daylight Time), and power was not fully restored for several days in some parts of the United States. The effect of the outage could be seen in satellite images of North America, whilst financial losses reportedly ran into billions of US dollars. The official report concluded that “this blackout could have been prevented”, and software failures leading to the operator’s reliance on outdated information was identified as one of the two “most important causes” [16, p. 46] (the other cause being the loss of key power transmission lines due to contact with trees).
2.2 Methodology
Investigating real-life system failures is difficult not least because of the size and technical complexity of these systems and limited availability of verifiable information about the failures and the systems involved [6]. Even when it is possible to master these difficulties, it is still a challenge to locate exactly when in the development an error was introduced [11]. The official report on the blackout makes clear that factors such as the sagging of power lines, overgrown trees, poor communication, and lack of personnel training all contributed to the blackout.
Since our interest was to learn requirements engineering lessons, rather than, say, “to sketch arguments for and against deregulation as a cause of the blackout” [7], our methodology for investigating failures examined the chain of events leading up to the failure, and isolated the role of software systems in the failure. We ascertained what the components of the system did, what they should have done, and how it would have been possible to identify the causes at the requirements engineering stage. Therefore, a framework was needed that allowed us to structure the potential causes of failures in a schematic way.
2.3 Problem Frames
The Problem Frames framework [5] is based on certain principles, some of which are relevant to the discussion.
First, the framework encourages a systematic separation of descriptions into requirements, problem world context and specifications. For example, Figure 1 shows a high-level description of a type of software problem known as Commanded Behaviour Frame. In this problem, a software system, Control Machine, is required to apply control on a domain in the physical world, the Controlled Domain, according to the commands of a human agent, the Operator. Exactly how the Controlled Domain should behave, or what property it must have, when the Operator issues commands is described by the Commanded Behaviour Requirement. Therefore the requirement states the relationship between the operator command OCommand at the interface a_O, and the behaviour and property of the controlled domain CDBehaviour and CDProperty at the interface a_CD.
Description of the operator behaviour is concerned with the relationship between OInput at the interface b_O and OCommand at the interface a_O, namely what input the operator produces when a command is issued. Similarly, description of the Controlled Domain is concerned with the relationship between CMAction at the interface a_CM and CDBehaviour and CDProperty at the interface a_CD, namely what behaviour or property the controlled domain produces when machine actions are performed. The Operator and the Controlled Domain constitute the problem world context of the Control Machine.
Figure 1. The Commanded Behaviour Frame
The specification, the description of the Control Machine, is concerned with the relationship between OInput at the interface b_O and CMAction at the interface a_CM, namely what actions the machine must perform when operator input is observed.
The operator may be a lift user and the controlled domain, a lift. The requirement will state how the lift should behave when the lift user issues commands. The specification will state what operations the lift controller will perform when commands are received.
Second, this framework emphasises the need to understand the physical structure of the problem world context, and the behaviour of the domains involved. Third, the framework is based on recurring patterns of software problems, called frames. Each frame captures “concerns” of a certain type of software problems. For instance, the main concern of the “Commanded Behaviour” frame is to ensure that the system obeys the operator commands in imposing control on the behaviour of the system. An instantiation of a frame implies generation of certain conditions that need to be discharged. Fourth, the framework provides a rich scheme for categorising and recording causes of failures. For instance, there are concerns specific to problem world domain such as reliability, identity and breakage; there are frame concerns such as that of the required behaviour frame; and there are composition concerns such as conflict, consistency and synchronisation.
Therefore, we hypothesised that these principles of the Problem Frames framework provide an appropriate foundation for diagnosing failures involving software systems.
2.4 Expected Outcomes
There were two expected outcomes of this study. First, to establish whether Problem Frames are appropriate for investigating systems failures in terms of (i) locating causes of failure in the system structures, and (ii) recording them in a schematic way accessible by engineers within a community. Second, to identify causes of the blackout and either confirm them as known concerns or expand the repertoire of known concerns.
3 The Case Study
We now discuss two software-related failures that contributed significantly to the blackout. We briefly recount the chain of events leading to the blackout described in the official report, and then discuss how Problem Frames were applied to diagnose the causes of failures and record the causes of failures.
3.1 Problem #1: State Estimator and Real Time Contingency Analysis
The infrastructure of electric systems is large and complex, comprising many power generation stations, transformers, transmission lines, and individual and industrial customers. Providing reliable electricity through “real-time assessment, control and coordination of electricity production at thousands of generators, moving electricity across an interconnected network of transmission lines, and ultimately delivering the electricity to millions of customers by means of a distribution network” is a major technical challenge [16].
Reliability coordinators and control operators use complex monitoring systems to collect data about the status of the power network. In addition they use a system called State Estimator (SE) to improve the accuracy of the collected data against the mathematical model of the power production and usage. When the divergence between the actual and predicted model of power production and usage is large, State Estimator will “produce a solution with a high mismatch”. Information from the improved model is then used by various software tools, including Real Time Contingency Analysis (RTCA), to evaluate the reliability of the power system, and alert operators when necessary, for instance when the power production is critically low. This evaluation can be done periodically or on demand of the operator.
“On August 14 at about 12:15 EDT, MISO’s [Midwest Independent System Operator] state estimator produced a solution with a high mismatch (outside the bounds of acceptable error). This was traced to an outage of Cinergy’s Bloomington-Denais Creek 230-kV line. […] However, to troubleshoot this problem the analyst had turned off the automatic trigger that runs the state estimator every five minutes. After fixing the problem he forgot to re-enable it, so although he had successfully run the SE and RTCA manually to reach a set of correct system analyses, the tools were not returned to normal automatic operation. Thinking the system had been successfully restored, the analyst went to lunch.
The fact that the state estimator was not running automatically on its regular 5-minute schedule was discovered about 14:40 EDT.”
When the automatic trigger was subsequently re-enabled, the state estimator produced a solution with a high mismatch due to further developments on various lines.
In summary, the MISO state estimator and real time contingency analysis tools were effectively out of service between 12:15 EDT and 16:04 EDT. This prevented MISO from promptly performing precontingency “early warning” assessments of power system reliability over the afternoon of August 14.
3.1.1 Problem Analysis
Based on this information, we constructed several problem diagrams to analyse relationships between domains mentioned in the description. Figure 2 shows a composite of two problem diagrams. The problem of State Estimator is to produce RevisedData for the Enhanced Electrical System Model of the grid, based on Status-Data, and Estimates produced by the Mathematical Model. In Problem Frames, this type of problem is known as a “model building problem”. The problem of RTCA System is to examine RevisedData and raise appropriate alerts on the Display Screen used by the Operator. This type of problem is known as an “information display problem”.
3.1.2 A Requirements Engineering Error?
On August 14, when the SE could not produce a consistent model, the operator turned off the automatic trigger of the SE in order to carry out maintenance work. Figure 3 shows the problem diagram, where the Maintenance Engineer uses the machine SE Trigger to turn the State Estimator on or off. This problem fits the Commanded Behaviour Frame shown in Figure 1. Part of the requirement here is to ensure that when the engineer issues the command OffNow, the SE should cease running in due course: OffNow → ¬Running.
When the maintenance work was done, the engineer forgot to re-enable the SE, leaving the electrical system model which the operators rely on, outdated. The resulting reliance by the operator on the outdated information was a significant contributing factor to the blackout.
Clearly, the maintenance engineer should not have forgotten to re-engage the monitoring systems, in which case the problem would not have arisen. However, there is more to the problem than this being a “human error”. Perhaps the fallibility of human operators should have been better recognised in the system’s model of the world context.
3.1.3 Naming and Categorising Concerns
A key part of the problem is the requirement that says that the operator commands always have precedence over the system actions (OffNow → ¬Running). This requirement relies on the world assumption that the biddable domain—i.e., a human agent such as the maintenance engineer—always gives the correct commands. In this case, the requirement should be formulated as EngineerReliable ∧ OffNow → ¬Running. The Commanded Behaviour frame recognises that the operator is a biddable domain, whose behaviour is non-causal and may not be reliable. Therefore, the operator always giving the correct command may be too strong a condition to discharge. This gives rise to two concerns: one related to the biddable domain and the other, related to the Commanded Behaviour frame.
We will call the concern related to the biddable domain the reminder concern, which raises the following conditions to discharge: (i) Whenever the operator overrides the system operations, which system agent(s) should be reminded about the override? (ii) How long should the override last? (iii) What happens when the length of time expires? In the case of the blackout, this may be translated into a requirement that says (i)
whenever the SE has stopped, the system should remind the operator of the SE status and how long it has had that status, and (ii) at the end of a maintenance procedure, the system should remind the engineer of the SE status. Such a reminder could make the engineer’s behaviour more reliable and perhaps could have helped prevent the failure.
A concern related to the Commanded Behaviour frame is whether the system should ignore the operator commands and take control of the system under certain circumstances. We will call this the system precedence concern. This may mean that the system should monitor the actions by the biddable domain, and intervene when the domain does not seem to be reliable (¬EngSeemsReliable). In that case, the requirement should be formulated as: EngSeemsReliable ∧ OffNow → ¬Running and ¬EngSeemsReliable ∧ OffNow → false. EngSeemsReliable is false, for instance, when the maintenance procedure takes longer than 2 hours.
Another key part of the problem is related to the issue of fault-tolerance in information display: what happens when the input the system receives from the analogous model is unexpected. This may be due to an incorrect data type or an untimely input from the analogous model. We will call this the outdated information concern. Pertinent questions in the case of the blackout are: 1) Can RTCA know that the Improved Electrical System Model is outdated? 2) What should it do about it? Had requirements engineers asked such questions, it could have led to requirements such as “The Improved Electrical System Model must have a timestamp of when it was last updated successfully” and “If the Improved Electrical System Model is older than 30 minutes, the RTCA system should alert the operator that the electrical system model is now outdated”. This would at least warn the operator not to rely on the outdated information.
3.2 Problem #2: Alarm and Event Processing Routine (AEPR) System
According to the official report, another significant cause of the blackout was due, in part, to the Alarm and Event Processing Routine (AEPR) system, “a key software program that gives grid operators visual and audible indications of events occurring on their portion of the grid” [16].
“Alarms are a critical function of an EMS [Energy Management System], and EMS-generated alarms are the fundamental means by which system operators identify events on the power system that need their attention. Without alarms, events indicating one or more significant system changes can occur but remain undetected by the operator. If an EMS’s alarms are absent, but operators are aware of the situation and the remainder of the EMS’s functions are intact, the operators can potentially continue to use the EMS to monitor and exercise control of their power system.
In the same way that an alarm system can inform operators about the failure of key grid facilities, it can also be set up to alarm them if the alarm system itself fails to perform properly. FE’s EMS did not have such a notification system.”
The problem of alerting the Grid Operator of the grid status, ascertained from the Grid & Sensors is shown in Figure 4. This problem fits a type of problem known as the Information Display Frame. The requirement is to raise an alarm to the operator (GOAlertedGrid) if and only if there are events on the grid that threaten the system reliability (GridOK): ¬GridOK ↔ GOAlertedGrid. The specification of AEPR could be to raise an alert (RaiseAlert) if and only if danger is detected on the grid (DangerDetected): DangerDetected ↔ RaiseAlert. In the case study, the AEPR system failed silently, leading the operators to continue to rely on outdated information, and was one of “the most important causes” of the blackout.
3.2.1 A Requirements Engineering Error?
The official report is very clear about the fact that there was a missing requirement “to monitor the status of EMS and report it to the system operators.” In fact, this lesson could have been learnt from a requirement in the British Standard 5839 on the fire detection and fire alarm systems for buildings [15].
In some dwellings, electricity supplies may be disconnected because the occupiers are unable to pay for supplies. Disconnection may be at […]
According to this standard, monitoring systems may themselves fail, in this case due to power failure; therefore a secondary independent source of power is required. In addition, when the source of power is switched from mains to battery, the system usually raises an alarm.
3.2.2 Naming and Categorising Concerns
The cause of this failure can be called a silent failure of alarm systems. Addressing this concern could raise questions such as: What happens if AEPR fails silently? Is it possible to detect such failures? What should be done when such failures are detected? This could have led the designers to the requirement that the system should monitor the behaviour of AEPR and raise an alarm when AEPR is thought to have failed. Figure 5 shows a problem diagram in which a wrapper intercepts the input to and output from the AEPR and, when AEPR fails to respond as expected, a separate alarm is raised (GOAlertedAEPR). The wrapper AEPR Monitor can pass on danger detection from the grid to AEPR (DangerDetected@b_GS ↔ DangerDetected@b'_AM) and pass on the alert trigger from AEPR to the grid operator (RaiseAlert@a_A ↔ RaiseAlert@a'_AM). Then the requirement to alert silent failure of AEPR is ¬GridOK ∧ ¬GOAlertedGrid ↔ GOAlertedAEPR. The specification for AEPR Monitor is DetectDanger@b_GS ∧ ¬RaiseAlert@a'_AM ↔ RaiseSecondaryAlert@a'_AM. An implementation of a specification such as this could have prevented the failure.
Figure 5. Alert AEPR Status
4 Related Work
There have been several studies of software-related failures. Leveson, for instance, carried out several studies of software-related accidents, including those involving the medical electron accelerator Therac-25 [8]. Similarly, Johnson has contributed an extensive literature on system accidents and incidents [6]. However, those studies of system failure of which we are aware have not been based on a clear conceptual structure for identifying, classifying, and recording the lessons learned at the level of detail appropriate for use by software engineers. For instance, three software engineering lessons Leveson and Turner [8] draw from the Therac-25 accidents include: “Documentation should not be an afterthought”, “Software quality assurance practices and standards should be established”, and “Designs should be kept simple”. Johnson investigated this power blackout in order to “sketch arguments for and against deregulation as a cause of the black-out” [7]. In this paper, we applied a systematic approach to learning software engineering lessons, structured and described in ways that software engineers can relate to specifically.
Several variants of the Failure Modes and Effect Analysis (FMEA) method have been developed and applied in the development of dependable systems. Lutz and Woodhouse [9], for instance, applied a FMEA-based method to identify critical errors in requirements documents of two spacecraft systems. Similar application of FMEA-like methods to software systems have been reported in [3, 12]. Our work is complementary to such methods, in the sense that we are concerned with identifying, structuring and documenting past software failures, which can then be used to narrow the search space for failure analysis.
5 Summary
Our experience of using Problem Frames to investigate system failures involving software systems showed that the framework of Problem Frames was appropriate for identifying causes of system failures and documenting the causes in a schematic and accessible way. The suggestion by the framework that requirements engineers should “look out” into the physical world, rather than “look into” the software was useful in directing and focusing the attention, because many of the causes of failures originated in the physical world context.
The separation of descriptions into requirements, problem world context and the specification enabled us to locate sources of failures in specific descriptions. Some failures were related to the requirements (such as missing requirements) and others to the problem world context (such as mismatch between the assumed
and actual behaviour of the problem world domains). Furthermore, associating concerns to the requirement, problem world context, frame, domain type, style of composition, and the specifications provides a good basis for recording concerns in a schematic way.
In summary, specific lessons learnt from the blackout case study are: (i) a further specialisation of the reliability of the biddable domain, called the reminder concern, (ii) a further specialisation of the concern of the Commanded Behaviour frame where the system may have to take precedence over the operator action, called the system precedence concern, (iii) a further specialisation of the Information Display frame called the outdated information concern, and (iv) the silent failure concern related to the monitoring systems.
References
Implementation of a Bayesian Engine for Uncertainty Analysis
Leng Vang
Curtis Smith
Steven Prescott
August 2014
The INL is a U.S. Department of Energy National Laboratory operated by Battelle Energy Alliance
DISCLAIMER
This information was prepared as an account of work sponsored by an agency of the U.S. Government. Neither the U.S. Government nor any agency thereof, nor any of their employees, makes any warranty, expressed or implied, or assumes any legal liability or responsibility for the accuracy, completeness, or usefulness, of any information, apparatus, product, or process disclosed, or represents that its use would not infringe privately owned rights. References herein to any specific commercial product, process, or service by trade name, trade mark, manufacturer, or otherwise, do not necessarily constitute or imply its endorsement, recommendation, or favoring by the U.S. Government or any agency thereof. The views and opinions of authors expressed herein do not necessarily state or reflect those of the U.S. Government or any agency thereof.
Idaho National Laboratory
Idaho Falls, Idaho 83415
Prepared for the U.S. Department of Energy, Office of Nuclear Energy, under DOE Idaho Operations Office Contract DE-AC07-05ID14517
SUMMARY
In probabilistic risk assessment, it is important to have an environment where analysts have access to a shared and secured high performance computing and a statistical analysis tool package. As part of the advanced small modular reactor probabilistic risk analysis framework implementation, we have identified the need for advanced Bayesian computations. However, in order to make this technology available to non-specialists, there is also a need of a simplified tool that allows users to author models and evaluate them within this framework. As a proof-of-concept, we have implemented an advanced open source Bayesian inference tool, OpenBUGS, within the browser-based cloud risk analysis framework that is under development at the Idaho National Laboratory.
This development, the “OpenBUGS Scripter” has been implemented as a client side, visual web-based and integrated development environment for creating OpenBUGS language scripts. It depends on the shared server environment to execute the generated scripts and to transmit results back to the user. The visual models are in the form of linked diagrams, from which we automatically create the applicable OpenBUGS script that matches the diagram. These diagrams can be saved locally or stored on the server environment to be shared with other users.
FIGURES
Figure 1: Visual OpenBUGS approach
Figure 2: Client-side application for creating the Bayesian analysis models
Figure 3: Menu Navigation
Figure 4: Tools Bar navigator
Figure 5: Palette of script shapes
Figure 6: Script Code panel
Figure 7: Selecting a shape on the Shapes palette
Figure 8: Create shape prompting for a name
Figure 9: Selecting a shape to show the connection icon
Figure 10: Connecting shapes together
Figure 11: Completing the connection
Figure 12: Example of nested and code block script diagram
ACRONYMS
<table>
<thead>
<tr>
<th>Acronym</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>API</td>
<td>Application Programming Interface</td>
</tr>
<tr>
<td>BUGS</td>
<td>Bayesian inference Using Gibbs Sampling</td>
</tr>
<tr>
<td>CSS3</td>
<td>Cascading Style Sheets version 3</td>
</tr>
<tr>
<td>DAL</td>
<td>Data Access Layer</td>
</tr>
<tr>
<td>ECMAScript</td>
<td>European Computer Manufacturers Association Script</td>
</tr>
<tr>
<td>HTML5</td>
<td>Hypertext Markup Language version 5</td>
</tr>
<tr>
<td>HTTPS</td>
<td>Hypertext Transfer Protocol Secure</td>
</tr>
<tr>
<td>IDE</td>
<td>Integrated Development Environment</td>
</tr>
<tr>
<td>INL</td>
<td>Idaho National Laboratory</td>
</tr>
<tr>
<td>MCMC</td>
<td>Markov chain Monte Carlo</td>
</tr>
<tr>
<td>PC</td>
<td>Personal Computer</td>
</tr>
<tr>
<td>VOBS</td>
<td>Visual OpenBUGS Scripter</td>
</tr>
</tbody>
</table>
Implementation of a Bayesian Engine for Uncertainty Analysis
1. BACKGROUND
As part of modern risk analysis, there is a need for statistical analysis tools installed in a centralized high performance computing environment that can be shared among collaborating analysts. Often, collaborators do not have their own high performance environment readily available and may want to make use of shared computational resources. However, they may not have, or may not be allowed to have, direct access to INL internal network servers to reach the necessary resource tools. Consequently, implementing these shared resources in a cloud-based environment can help to improve collaboration.
2. VISUAL OPENBUGS SCRIPTER
2.0 What is OpenBUGS?
OpenBUGS is an open-source software package for performing Bayesian inference Using Gibbs Sampling (Ref 1). The scripting language used in OpenBUGS is called ‘BUGS’, a dialect of the language of the open source statistical package ‘R’. OpenBUGS has a desktop application with limited scripting capability; it also has an accessible batch command line mode, and this latter capability is what we used for this project.
In the context of PRA, where we use probability distributions to represent our state of knowledge regarding parameter values in the models, Bayes’ Theorem gives a posterior (or updated) distribution for the parameter (which may be a vector) of interest, in terms of the prior distribution and the observed data, which in the general continuous form is written as:
$$\pi_1(\theta | x) = \frac{f(x | \theta) \pi(\theta)}{\int f(x | \theta) \pi(\theta) d\theta}.$$
In this equation, \(\pi_1(\theta | x)\) is the posterior distribution for the parameter of interest, denoted by \(\theta\). The posterior distribution is the basis for all inferential statements about \(\theta\), and will form the basis for model validation approaches. The observed data enters via the likelihood function, \(f(x | \theta)\), and \(\pi(\theta)\) is the prior distribution of \(\theta\).
The denominator of Bayes’ Theorem is sometimes denoted \(f(x)\), and is called the marginal or unconditional distribution of \(x\). Note that it is a weighted average distribution, with the prior distribution for \(\theta\) acting as the weighting function. The term \(f(x)\) is also referred to as the predictive distribution for \(X\).
The likelihood, \( f(x|\theta) \), is most often (in many different PRA applications) binomial, Poisson, or exponential.\(^a\) Below, we describe an OpenBUGS application that is frequently found in PRA applications.
OpenBUGS is able to evaluate very complex multidimensional and hierarchical problems, where closed-form solutions are not possible. OpenBUGS uses a Markov chain Monte Carlo (MCMC) approach to sample directly from the joint posterior distribution. Example problems that are solvable with OpenBUGS include uncertain data problems, analyses involving non-conjugate prior distributions, and hierarchical models with many parameters.
OpenBUGS provides a “programming-style” scripting language to specify the model to be evaluated. The script is then compiled in order to process the model and associated data. Rather than require analysts to write the BUGS script, the diagramming tool described in this document provides an abstraction of a complicated model – this diagram is then translated into BUGS script and run on the server via the cloud-based framework.
As an example of the types of problems, and associated scripts that are required, we will evaluate a simple case of two pumps and assume the following operational (run) data:
<table>
<thead>
<tr>
<th>Failed Test</th>
<th>Pump A</th>
<th>Pump B</th>
</tr>
</thead>
<tbody>
<tr><td>1</td><td>0</td><td>1</td></tr>
<tr><td>2</td><td>0</td><td>1</td></tr>
<tr><td>3</td><td>1</td><td>0</td></tr>
<tr><td>4</td><td>1</td><td>1</td></tr>
<tr><td>Total Failures</td><td>2</td><td>3</td></tr>
<tr><td>Operational time (hours)</td><td>920</td><td>920</td></tr>
</tbody>
</table>
We will need to use this data to estimate a failure rate (this failure rate would be used in a PRA to predict failures of pumps). Since this is Poisson data, we could assume a Jeffreys noninformative prior on the failure rate to obtain the posterior distribution directly. However, to help illustrate the use of OpenBUGS, we will evaluate two cases, one with the Jeffreys noninformative prior and one with a non-conjugate prior.
A general BUGS script usually has three parts: 1) comments, which are preceded by “#”, 2) the “model” section, and 3) a “data” section. For our first example, we will need to use the Poisson model and a gamma (Jeffreys) distribution.
\(^a\) When dealing with repair times and recovery times, the likelihood function may be lognormal, Weibull, or gamma. If the observed variable is an unavailability, the likelihood may be a beta distribution.

The BUGS script for the first case (Jeffreys prior) appears as:
```r
# Pump Fail To Run - Five failures out of 1840 hours of operation.
model {
mu <- lambda * OperatingTime
pump.ftr ~ dpois(mu) # Poisson model
lambda ~ dgamma(0.5, 0.00001) # Approximate Jeffreys prior for Poisson data
ftr.post ~ dpois(mu) # Predicted number of failures from posterior
}
data
list(pump.ftr = 5, OperatingTime = 1840)
```
The **results** of the analysis are:
<table>
<thead>
<tr>
<th></th>
<th>mean</th>
<th>s.dev.</th>
<th>median</th>
</tr>
</thead>
<tbody>
<tr>
<td>ftr.post</td>
<td>5.492</td>
<td>3.294</td>
<td>5.0</td>
</tr>
<tr>
<td>lambda</td>
<td>0.002986</td>
<td>0.001268</td>
<td>0.002808</td>
</tr>
</tbody>
</table>
These numerical results would be available for other uses in the PRA model, for example to populate basic events representing pump failures in a simulation or fault tree model.
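Because the gamma prior is conjugate to the Poisson likelihood, this first case can also be checked in closed form: the posterior is a gamma distribution with shape 0.5 + 5 and rate 0.00001 + 1840. The following sketch (assuming Python with SciPy, which is not part of the toolchain described in this report) reproduces the MCMC summary above:

```python
# Closed-form conjugate check of the Jeffreys-prior case:
# gamma(a0, b0) prior + Poisson data -> gamma(a0 + failures, b0 + exposure) posterior.
from scipy.stats import gamma

a0, b0 = 0.5, 0.00001       # gamma prior (shape, rate) from the script above
failures, hours = 5, 1840   # observed data from the script above

a, b = a0 + failures, b0 + hours
post = gamma(a, scale=1.0 / b)   # SciPy parameterizes the gamma by scale = 1/rate

print(post.mean())    # ~0.00299, close to the MCMC mean 0.002986
print(post.std())     # ~0.00127, matching the MCMC s.dev. 0.001268
print(post.median())  # ~0.00281, matching the MCMC median 0.002808
```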
Now for the second case, let us assume that we believed that the failure rate (prior to collecting any data) was between 0.001 and 0.01 per hour. We did not have any preference over values in this range – thus we could assume that a uniform prior over this range is applicable (note that this is not a conjugate prior). The OpenBUGS script for this case would be constructed by modifying the prior line:
```r
lambda ~ dunif(0.001, 0.01) # Uniform prior
```
The results of this modification are:
<table>
<thead>
<tr>
<th></th>
<th>mean</th>
<th>s.dev.</th>
<th>median</th>
</tr>
</thead>
<tbody>
<tr>
<td>lambda</td>
<td>0.002794</td>
<td>0.001172</td>
<td>0.002592</td>
</tr>
</tbody>
</table>
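For this non-conjugate case no closed form exists, which is exactly the situation where OpenBUGS's MCMC sampling is needed. The result can still be sanity-checked by brute-force numerical integration of Bayes' Theorem over the prior's support; a minimal sketch, again assuming Python with NumPy rather than the report's toolchain:

```python
# Numerical check of the uniform-prior case: the posterior is proportional to
# the Poisson likelihood on the prior's support [0.001, 0.01] and zero elsewhere.
import numpy as np

lam = np.linspace(0.001, 0.01, 100_001)   # grid over the uniform prior's support
log_like = 5 * np.log(lam) - 1840 * lam   # Poisson(5 | lam * 1840), up to a constant
post = np.exp(log_like - log_like.max())  # unnormalized posterior (flat prior)
post /= np.trapz(post, lam)               # normalize: the denominator of Bayes' Theorem

mean = np.trapz(lam * post, lam)          # ~0.0028, close to the MCMC mean 0.002794
sd = np.sqrt(np.trapz((lam - mean) ** 2 * post, lam))  # ~0.0012
print(mean, sd)
```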
The cloud-based Bayesian engine tool described in this document removes the need for analysts to write BUGS script – instead they would describe a model using the diagramming approach discussed in Section 3. The analysis for the model would then be run automatically using cloud-based resources.
2.1 What is Visual OpenBUGS Scripting?
Visual OpenBUGS Scripting is an Integrated Development Environment (IDE) delivered as a web application that allows authoring BUGS scripts using drag-and-drop visual diagramming tools. The application has a built-in palette of diagramming shapes, each representing a language element such as a keyword, property, group, or relationship that ties shapes together; from these it generates a script code block ready to be submitted to OpenBUGS for execution. The application also has built-in capabilities to manage users' scripts, either storing them on the user's local drive or uploading them for storage on a server.
3. ANALYSIS AND DESIGN
3.1 Bayesian Engine Concept
Visual OpenBUGS Scripter (VOBS) has been designed to be flexible, enabling user access from most major computing platforms, such as Microsoft Windows, Linux, Android, or Apple OS, with a single codebase. This flexibility allows users with limited computing power to offload heavy calculations to a shared high performance computing environment where OpenBUGS is installed and can run much more efficiently.
A viable solution is to develop a web-based application as a web service running on a server computer that has direct communication with OpenBUGS, as well as with a database management system also installed on a high performance computer server. The client application is developed as a single page application using browser technology. The benefit of being a web application is that users do not need to download and install the application in order to use it. The client application also has capabilities that allow the user to build a script visually and to submit it to the server for execution. The status of the analysis is communicated back to the client application so users can monitor progress. Users may maintain their own script code by saving it locally (on the client PC) or may opt to save it to the server with an associated user account. Predefined scripts may be saved as a library for later re-use. These libraries may be saved locally as a private library or uploaded to the server to share with other users, as shown in Figure 1.
Figure 1: Visual OpenBUGS approach.
3.2 Design
To deliver a positive user experience with the interactive nature of a drag-and-drop user interface, while running inside a web browser, an application framework and supporting libraries are required. To leverage users' experience with the existing desktop OpenBUGS application, a single page web application (SPA) approach is used. To minimize development time, we used a commercial library called mxGraph (developed by jGraph Inc.) to support the application framework and the diagramming functionality. The script drawing module of the application is a 100% web-based client-side program with access only to basic data storage. Consequently, it requires a server-side program for more sophisticated data structure storage and to provide direct access to the shared OpenBUGS application (where the analysis is performed). Communications between the client and server applications are handled as web service calls over the secured HTTPS protocol. The client-side program is illustrated in Figure 2.
Figure 2: Client-side application for creating the Bayesian analysis models.
3.2.1 Server Application Program
The server program consists of a database management system for maintaining user accounts and associated scripts, diagrams representing models, and, when necessary, the results of analysis calculations. It requires a server application for managing the database, responsible for retrieving and saving user data. It also serves to communicate all requests to and from the client application. In our prototype, the database storage is designed for Microsoft SQL Server 2012. The server application is web-service based and is developed using Microsoft .NET with C# as the programming language.
3.2.1.1 Data Access Layer
The Data Access Layer (DAL) is the module responsible for handling data query requests against the underlying database and relaying the queried data to the Service Layer. It is also responsible for handling save and update requests from the Service Layer, updating or saving data records in the database.
3.2.1.2 Service Layer
The Service Layer handles requests to and from the client application. It is also responsible for querying the Data Access Layer to retrieve, update, and save user data.
3.2.1.3 Exposed functions
The Service Layer defines and exposes functions that are accessible by the client application. These functions are specific and are called by the client for handling individual requests, for example get_userAccount, update_UserAccount, save_Library, etc.
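As an illustration of how the client consumes these functions, the following hypothetical sketch issues the web service calls over HTTPS. The endpoint URL, payload shape, and JSON response format are all assumptions; the report does not specify them.

```python
# Hypothetical client-side calls to the Service Layer's exposed functions.
import requests

BASE = "https://example.inl.gov/vobs/api"  # placeholder service URL (assumption)

def call(function_name, **params):
    # Each exposed function (get_userAccount, update_UserAccount, save_Library, ...)
    # is assumed to be reachable as an HTTPS web-service call with a JSON payload.
    resp = requests.post(f"{BASE}/{function_name}", json=params, timeout=30)
    resp.raise_for_status()
    return resp.json()

account = call("get_userAccount", user="analyst1")
call("save_Library", user="analyst1", library_name="OpenBUGS Functions", data="...")
```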
3.2.2 Client Application Stack
The client application represents the bulk of the development. It uses the mxGraph library extensively to create the entire user interface. It is written in 100% JavaScript, including the dependent libraries. The user is not required to pre-install any dependent tools in order to use it, other than meeting the minimum requirements mentioned below. However, a user must create an account with the server if he/she wishes to upload libraries to be stored or shared with the community.
The client user interface resembles a desktop application, with multi-document windows, menus, tool bars, and side panels holding palettes of diagramming shapes, properties, and the generated OpenBUGS script code.
The client application is developed with forward-compatibility features that depend on current technologies to function well. The minimum requirement is a browser that supports:
- HTML5
- CSS3
- JavaScript ECMAScript 5.1
In Appendix A, we provide example use case scenarios to demonstrate how the client application is used.
3.2.2.1 Menus Navigator
The menu (see Figure 3) is the main navigator for most functionality within the application. It contains options and sub-navigational options:
1. File – Document functions to create New, Open, Save, Save As, Save as Library, Import, Export, Page Setup and Print diagram.
2. Edit – Allows basic editing of a shape including: Undo, Redo, Cut, Copy, Paste, Delete, Edit Data, Select Vertices, Select Edges, Select All.
3. View – Basic diagram zooming and page fitting.
4. Format – Built-in shape shading and visual decoration functions.
5. Text – Allows modification to fonts, colors, alignments and text displacement.
6. Options – Allows setting of interaction options.
Figure 3: Menu Navigation
3.2.2.2 Tools Bar Navigator
The Tools Bar duplicates navigational functions found in the menu navigation. Its purpose is to list those functions that are frequently used and to provide quick access to them. Figure 4 shows an example of the Tools Bar, containing options found in various locations in the menu: Undo, Redo, Delete (trash bin), Zoom 1:1, Zoom in, Zoom out, Font family, Font size, Font style (bold, italic, underline, alignment, and color), connection line style, connection start node and end node symbols, line color, insert image, flood fill, fill shading, and shape shadowing.
The Tools Bar is dynamically context sensitive, depending on the user's activity in various parts of the screen and on the functionality being performed.
Figure 4: Tools Bar navigator
3.2.2.3 Drawing Surface
The main panel (to the right of the client window) is the Drawing Surface, where the user can place or drop shapes and make connections. The Drawing Surface accepts items dropped from the Shapes palette and the Library list. It also accepts direct user interactions, as discussed in the Use Cases in the Appendix. The Drawing Surface is the large blank area of the screen (see Figure 2).
3.2.2.4 Shapes Palette
The Shapes Palette contains various shapes, each representing a specific role in the BUGS scripting language. Each shape may have multiple distinct rules governing its connections and dependencies.

The following defines the shapes and their rules; a sketch summarizing the connection rules follows the list:
1. Module shape – a container shape for grouping other shapes. Containers accept no incoming connectors and have no outgoing connectors. A Module can be nested within another container, but it does not generate any code.
2. Package shape – another scope/container shape; however, if the <lower bound> and/or <upper bound> properties are provided, it generates loop code. Containers accept no incoming connectors and have no outgoing connectors. A Package may be nested within another Package or a Module.
3. Procedure shape – a function representation shape. It may have zero or more incoming connectors, but those connectors' sources can only be Attribute or Parameter shapes. It may also have one outgoing connector, but the target must be a Result shape.
4. Attribute shape – a shape that declares a variable. An Attribute may have zero or one incoming connector, and the source must be either another Attribute or a Result shape. It may have zero or more outgoing connectors, but the connecting target must be another Attribute shape, a Parameter shape, or a Procedure shape.
5. Parameter shape – a shape representing attributes passed into functions. A Parameter may have zero or exactly one incoming connector, with the source either an Attribute shape or a Result shape, and it may have zero or one outgoing connector, whose target must be a Procedure shape.
6. Result shape – represents the value returned from a function call. It may have zero or one incoming connector, with a Procedure shape as the source. It may also have one or more outgoing connectors, with targets that are either Attribute or Parameter shapes.
7. Initiator shape – a start-state indicator. An Initiator may not have any incoming connector, and it may have one outgoing connector whose target must be a Procedure shape.
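The connection rules above can be summarized as a validation table. The sketch below (in Python, purely illustrative; the actual client enforces these rules inside its mxGraph event handlers) captures which source/target pairs are legal, though not the zero-or-one cardinality limits, which would be checked separately:

```python
# Legal (source shape, target shape) pairs for a connector, per rules 1-7 above.
# Module and Package shapes never appear here: as containers they accept no
# incoming connectors and have no outgoing connectors.
ALLOWED = {
    ("Attribute", "Attribute"), ("Attribute", "Parameter"), ("Attribute", "Procedure"),
    ("Parameter", "Procedure"),
    ("Procedure", "Result"),
    ("Result", "Attribute"), ("Result", "Parameter"),
    ("Initiator", "Procedure"),
}

def check_connection(source_type: str, target_type: str) -> bool:
    """Return True if a connector from source_type to target_type is legal."""
    return (source_type, target_type) in ALLOWED
```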
3.2.2.5 Attribute Properties
The Properties panel on the Side Bar displays the selected shape's type and its associated properties with their currently assigned default values.
3.2.2.6 Script Code
The Code panel (also on the Side Bar) displays the generated BUGS code block for the currently selected shape, or for the entire diagram if no shape is selected. The code for the entire diagram is what will be sent to the server for execution or be saved.
Figure 6: Script Code panel
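To make the diagram-to-script translation concrete, the following hypothetical sketch generates a BUGS-style code block from a toy in-memory diagram. The real application derives the script from the mxGraph diagram model; the data structure here is invented for illustration and mirrors the pump example from Section 2.0.

```python
# Toy diagram -> BUGS script generation.
diagram = {
    "data": {"pump.ftr": "5", "OperatingTime": "1840"},
    "statements": [
        "mu <- lambda * OperatingTime",
        "pump.ftr ~ dpois(mu)",
        "lambda ~ dgamma(0.5, 0.00001)",
    ],
}

def generate_script(d):
    lines = ["model {"]
    lines += [f"  {stmt}" for stmt in d["statements"]]
    lines.append("}")
    pairs = ", ".join(f"{k} = {v}" for k, v in d["data"].items())
    lines += ["data", f"list({pairs})"]
    return "\n".join(lines)

print(generate_script(diagram))  # prints a script equivalent to the one in Section 2.0
```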
4. FUTURE WORK
The application at this stage is a prototype that proves the concept of implementing a Bayesian analysis engine in a cloud-based shared environment. In order to make the application fully functional and of usable quality, there are some features and capabilities, in both the server and the client, which have yet to be implemented.
4.0 Server Application
All of the following features for the server application are necessary to make the application functional in a cloud-based framework.
1. Design database schema for user account management (account, code, libraries, results, etc.)
2. Identify and implement necessary exported functions as interface to the client app.
3. Create a module to interface to OpenBUGS runtime for scripts execution and update monitor purposes.
4. Create a data access layer module to allow the server application to query and store data to the database.
4.1 Client Application
Some features listed here are required; others are optional for making the client app functional. Specifically, features 1 through 6 are required, while the rest are features that would make the application more efficient or simply easier to use.
1. Support the full features of BUGS language needed for risk assessment.
2. Support external “Data” inclusion in order to import data structures into the Bayesian analysis.
3. Add a menu item [Run] and create progress view to monitor updates from server while processing script.
4. Create authentication user interfaces and management.
5. Upload the script library to the server.
6. Save/load script/diagram to and from server respectively.
7. Allow editing custom scripts and verification (i.e., error checking) of these scripts.
8. Support multiple custom library modules (currently it supports a single library module).
10. Abstract diagram layers: view the diagram at different levels of complexity, i.e., zoomed out shows only groups and containers, versus zoomed in to show the detail of each container group.
11. Allow editing the selected shape's attribute values directly in the Properties panel on the Side Bar.
12. Allow editing of the generated code or add custom code on the Code panel.
13. Ability to draw a diagram from an existing script.
5. CONCLUSIONS
Once this application is fully developed, it will provide analysts the capability to focus on models and submit them for execution in a shared high performance computing environment. It will also provide a diagramming approach to Bayesian model creation without the steep learning curve of the underlying scripting language. The infrastructure of the framework supporting this application is designed not only to support the BUGS scripting language, but is also applicable to other scripting languages if needed. For example, with this diagramming tool, other generalized open source statistical packages, such as R, could be supported as well.
6. REFERENCES
APPENDIX A: Use Case Scenarios
Many tasks start with the user activating an element of the application, such as the menu navigator, the Tools Bar, or the Side Bar. Some tasks are a simple click, while others require the user to click, hold, and move the mouse to a certain location, then release (drag-and-drop). All menu items are single-click actions. The Shapes palette supports either single click or drag-and-drop. The Library panel supports drag-and-drop only. Other panels do not respond to user input.
Saving and loading uses the HTML5 File API and affects files on the user's local PC. In the case of script libraries, the local database is governed by the browser user's profile settings. Locally stored libraries are always volatile: the local database is controlled by the browser's history cache, so if a user switches to a different browser, logs onto the PC as a different user, or clears the browser history, the previously saved local database, namely the libraries, will no longer be available. This is a built-in security feature of the browser itself. To work around this volatility, there are built-in functions to import and export the library stored in the local database to a non-volatile location as a library file (*.libx) on the user's local hard drive.
Also, in order to save a file with a custom name, the browser's "always prompt to download" feature must be turned on.
NOTE: a file saved on the user's local PC goes to the browser's default download path, which is configurable through the browser's settings.
A.1.1 Save/Load
To save the current diagram:
Click on menu [File] -> [Save] -> dialog opens: type a name -> click [Save].
Or press Ctrl + S -> if it is a new diagram -> dialog opens: type a name -> click [Save];
if it is an existing diagram -> automatic update.
To open a saved diagram:
Click on menu [File] -> [Open...] -> file dialog opens: locate a file [.xml] -> click [Open].
A.1.2 Save as library
Click on menu [File] -> [Save As Library...] -> dialog opens: type in a name -> click [Save].
The new library appears in the Library panel list on the Side Bar. Note: saving as a library saves the entire diagram as a library, not just the selected shapes.
NOTE: The library is stored on the user's local PC using a local client database called IndexedDB. As mentioned above, this client database is volatile to browser history management.
A.1.3 Import/Export library
To import a saved library from a file (importing a library from a file will replace the currently loaded ones):
Expand the Side Bar/Library panel, then right-click on a library name such as "OpenBUGS Functions" -> [Import Library] -> dialog opens: select a file [.libx] -> [Open].
To export the current libraries to a file:
Expand the Side Bar/Library panel, then right-click on a library name such as "OpenBUGS Functions" -> [Export Library to File] -> dialog opens: type in a name -> click [Save].
A.2 Create diagram
To create a diagram with shapes:
Diagrams are created through the Shapes palette on the Side Bar (Figure 7). Clicking a shape creates it at the upper left corner of the Drawing Surface; drag-and-drop a shape onto a specific location on the Drawing Surface to create it at that location or within another container shape. Once the shape is created (Figure 8), the user is prompted for a name: click [OK] to accept or [Cancel] to abort the shape. Note that once a shape is created in or moved inside a container, such as a Module shape or a Package shape, it cannot be moved outside of the container. This limitation will be removed in the future.
Figure 7: Selecting a shape on the Shapes palette
Figure 8: Create shape prompting for a name
On clicking [OK], the application performs the following checks (a minimal sketch of them follows the list):
1. Ensure that the new name is properly formed; a name cannot start with a numeral or any symbol character except the underscore (_).
2. Ensure there is not already another shape of the same type with the same name in the same scope as the one being created (no duplication).
3. If check 1 or 2 is violated, the user is prompted for a new name and the checks repeat.
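A minimal sketch of these two checks in Python (the character rules beyond the leading character are an assumption; the report only forbids a leading numeral or symbol other than the underscore):

```python
# Name validation on [OK]: check 1 (well-formed name) and check 2 (no duplicate).
import re

NAME_RE = re.compile(r"^[A-Za-z_][A-Za-z0-9_]*$")  # leading letter or underscore

def validate_name(name, shape_type, scope_names):
    """scope_names: set of (shape_type, name) pairs already present in this scope."""
    if not NAME_RE.match(name):
        return "invalid name"    # check 1 violated: re-prompt the user
    if (shape_type, name) in scope_names:
        return "duplicate name"  # check 2 violated: re-prompt the user
    return None                  # accepted
```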
To connect shapes:
Click on a shape to select it. A yellow arrow (Figure 9) is shown to the right while the shape is selected; some shapes do not allow connections, and no edge icon is shown for them. Click and hold on the edge icon and move the mouse to another shape (Figure 10). Release the mouse and the connection is established (Figure 11), but before the connection is made permanent, the application checks the following:
1. Check all shapes involved in the connection for rule violations.
2. If a violation occurred, only the first violated rule's message is shown, and the connection is removed.
3. If no violation occurred, the connection is made permanent.
Figure 9: Selecting a shape to show the connection icon
Figure 10: Connecting shapes together
Figure 11: Completing the connection
Container shapes may be nested: shapes can be contained in a Module as a group, or in a Package shape as a loop block, as shown in Figure 12. These groups and blocks control code generation.
Figure 12: Example of nested and code block script diagram
NOTE: as each shape is added or removed, and as each connection is established or removed, the Code Panel is updated to show the resulting generated script.
A.3 Force script generation
Clicking on a shape or a connection shows the script for that specific shape. Deselecting, by clicking on the Drawing Surface, generates the script for the entire diagram.
A.4 Execution
At any point, if code has been generated successfully, clicking [Run] on the menu or Tools Bar submits the script to the server. It also opens a monitor viewer window showing the progress status.
A Policy-Based Architecture for Container Migration in Software Defined Infrastructures
Publisher: IEEE. DOI: 10.1109/NETSOFT.2019.8806659 (author's accepted manuscript).
©2019 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.
Abstract—Software-Defined Networking (SDN) is a paradigm that enables easier network programmability, based on the separation between the network control plane and data plane. Network Function Virtualization (NFV) is another recent technology that has enabled the design, deployment, and management of softwarized networking services. The vast majority of SDN- and NFV-based architectures, whether they use Virtual Machines (VMs) or Lightweight Virtual Machines (LVMs), are designed to program forwarding, probably the most fundamental of all network mechanisms.
In this paper we demonstrate instead that there are other (equally important) networking mechanisms that need programmability. In particular, we designed, implemented, and extensively tested an architecture that enables policy-programmability of (live) migration of LVMs. Migration is used for maintenance, load balancing, or as a security mechanism in what is called Moving Target Defense (a virtual host migrates to hide from an attacker).
Our architecture is based on Docker and is implemented within a Software-Defined Infrastructure. The migration mechanism can be set easily by means of a configuration file, making a novel policy-based architecture. We evaluated the performance of our system in several scenarios over a local Mininet-based testbed. We analyzed the tradeoff between several Load Balancing policies as well as several Moving Target Defense solutions inspired by network coding.
Index Terms—software defined networking, container migration, moving target defense.
I. INTRODUCTION
The recent surge in popularity of Cloud Computing and the Internet of Things (IoT) has resulted in a number of widely deployed IoT networks. As new technologies show up, today's networks are much harder and more complex to manage and monitor; thus, new networking solutions have emerged. For instance, Software Defined Networking (SDN) is the latest network paradigm for taming the complexity of networking. It provides benefits by detaching the network control layer from the data layer, offering the possibility of using powerful central commands to meet the requirements of the underlying data planes. Network Functions Virtualization (NFV), in turn, is a new method to design, deploy, and manage networking services. Virtual Machines (VMs) are widely used to implement NFV. Compared with VMs, Lightweight Virtual Machines (LVMs), such as Docker containers, are a more efficient solution. The Docker technology allows true independence between application, infrastructure, developers, and IT Ops, enabling a model for better collaboration and innovation.
This work was done in the Computer Science Department at Saint Louis University, USA.
Why is a policy-based programmable migration mechanism needed? Building on these new network solutions, migration is a technique widely used in cloud network infrastructures and data centers. Migration is the movement of a virtual machine from one physical host to another; it happens without the awareness of end users. It can achieve network maintenance, load balancing, and network failure repair, providing an always-available system. Apart from these, it can also be used as a security strategy in Moving Target Defense.
Today's migration solutions mostly focus on VMs [1] and Virtual Routers (VRs) [2]. Besides, they are usually designed in an ad-hoc fashion, for a specific migration policy; for instance, load balancing [3] or energy optimization [4]. There is less concern for container migration. The container is known as the lightweight virtual machine: it virtualizes not only the hardware but also the operating system, and compared with a virtual machine it is much lighter. If there is a strong requirement on migration speed, container migration can be a good solution.
Virtualization is one way to enable network programmability, and software defined networking is a good example. Above the control plane, it is flexible and easy to develop applications such as routing, access control, etc. But this is only good for the forwarding mechanism. In addition, network protocols are usually designed in an ad-hoc fashion. Different versions of TCP or routing exist: some are suitable for bandwidth-sensitive applications, some for delay-sensitive applications, some aim to achieve security, and some aim to provide better performance. There is no one-size-fits-all, so a policy-based programmable migration mechanism is needed.
Our contribution. We designed a policy-programmable container migration architecture based on Docker. The policy-based architecture allows us to change policies with a simple configuration file, so programming the migration mechanism is easy. Second, we tested security and load balancing policies within our SDN-based prototype over Mininet. Third, we designed and evaluated novel Moving Target Defense (MTD) solutions inspired by network coding.
The policy-based migration system can perform software defined measurement based on the network traffic statistics obtained through the SDN controller. We developed algorithms to make the migration decision and applied them to two use cases. The first is Load Balancing, which we feature with three policies: bandwidth-based, shortest path, and random. The second is Moving Target Defense, where novel solutions are inspired by network coding, also featuring three policies: Shamir, Digital Fountain, and a pseudo-random function.
The paper is organized as follows. In Section II, we discuss related migration solutions. Section III describes our migration system architecture. In Section IV we present two use cases: load balancing and moving target defense. Section V illustrates the experimental validation results we obtained. Finally, the work is concluded in Section VI.
II. RELATED WORK
Several network migration solutions exist today, and considerable work has been done concerning live VM migration [5]–[7]. In addition, there is a set of papers in which the authors compare and analyze the possible factors that could affect migration performance. In [8]–[10], the authors examined the major issues of virtual machine live migration using metrics such as downtime and total migration time, also classifying the techniques and comparing the different solutions. However, containers (lightweight virtual machines, LVMs) are emerging as a recent virtualization technique; they virtualize not only the hardware infrastructure but also the operating system. Recently, new attempts to use containers instead of VMs have been proposed [11]. They focus on reducing migration time, with no concern for the network traffic situation. In our work, we concentrate on container migration because, compared to a VM, a container is lighter and its migration can be faster. Our policy-based system performs migration adapted to different application needs by just changing a configuration file. This is, to the best of our knowledge, the first attempt to build an architecture enabling a programmable migration mechanism.
Moving Target Defense (MTD) is a new security paradigm. Instead of defending an unchanging infrastructure by detecting, preventing, monitoring, tracking, and remedying threats, moving target defense makes the attack surface dynamic. Many attempts have been proposed to achieve security through MTD. For instance, U-TRI adopts a randomly changing identifier to replace the original static data link layer address [12], defending traffic privacy by obfuscating identifiers in the network and transport layers. A different approach is used in WebMTD, which randomizes certain attributes of web elements to differentiate the application code from injected code and disallow its execution [13]. A more general solution is Mutated Policies [14], an attribute-based defense strategy for access control that carefully selects the attributes that uniquely identify the entities involved, then randomly mutates the original access policies over time by adding additional policy rules constructed from the newly identified attributes.
In our migration system, we move the container from one host to another to guarantee that the hosted machine's IP address keeps changing. We then improve existing algorithms with information about the network, integrating the polynomial secret-sharing concept with a novel algorithm based on the digital fountain mechanism.
Fig. 1: System Architecture and Components, the green blocks are our contribution.
III. ARCHITECTURE DESIGN
In this section, we focus on the system architecture design and the function of each component. We built a programmable policy-based migration system that provides the flexibility to adapt to different application needs by just changing one parameter in the configuration file. Besides, the system is designed to collect network traffic statistics leveraging an SDN controller, which applies software-defined measurements on the basis of these statistics; thus, a more accurate migration decision is made by adding information about the network. As a consequence, a container can be migrated from a source host to a destination host within a cloud-edge network using a programmable policy-based mechanism. A well-designed migration system should be able to answer three questions: (i) which container should be migrated, (ii) when migration should happen, and (iii) where to migrate. Following these questions, we designed our system architecture.
A. System Architecture Overview
Figure 1 shows the general architecture and key components of the system. There are four main component blocks designed for the migration system: Database & Virtual Information Base (VIB), Software Defined Measurement, Migration Manager, and Migration Daemon.
(1) The Database & VIB is designed to store the network traffic statistics. (2) Software Defined Measurement collects network traffic statistics through an SDN controller (Floodlight) and stores the data in an SQL-based database. We use two data collectors, one for bandwidth and one for packet aggregates: the Bandwidth collector measures the bandwidth consumption per switch port, while the Aggregate collector gets the number of packets per switch. Both measurements are collected at a customizable standard frequency. We use these measurements as input to our controller to detect traffic or switch overloads and start a migration process. We also use the information collected in the database to decide the destination for the migrating LVM, according to a programmable policy. The software defined measurement system enables simplicity and flexibility in collecting network traffic statistics. (3) The Migration Manager monitors the process and makes migration decisions. In a configuration file, we specify a set of threshold parameters and the policy name. In our prototype we implemented two sets of policies for two use cases, Load Balancing and Moving Target Defense; users can, however, easily implement their own policies. This component includes a Migration Brain, which executes the policy specified in the configuration file. (4) The Migration Daemon is the process running on the hosts that handles the migration process. We use the Docker API to create, start, stop, and take memory and storage snapshots of the current container status. A schema of our prototype implementation is shown in Figure 2.
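As a sketch of how the Migration Brain could combine the collected statistics with the configured thresholds, consider the following (the configuration keys and the shape of the statistics rows are assumptions; the paper only states that threshold parameters and a policy name live in a configuration file):

```python
# Hypothetical migration trigger: compare collected switch statistics to thresholds.
config = {"policy": "bandwidth", "max_port_mbps": 8.0, "max_switch_pkts": 50_000}

def should_migrate(stats, cfg):
    """stats: one row produced by the Bandwidth and Aggregate collectors."""
    return (stats["port_mbps"] > cfg["max_port_mbps"]
            or stats["pkts"] > cfg["max_switch_pkts"])

row = {"switch": "s1", "port_mbps": 9.2, "pkts": 61_000}
if should_migrate(row, config):
    print(f"overload on {row['switch']}: start migration with policy", config["policy"])
```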
B. Migration Model and Protocol
The Migration Manager makes the migration decision and communicates the destination host to the source host. When the source receives the command and the migration destination IP address, it starts the migration process. We defined a Migration Protocol to execute such a migration. First, the Migration Manager makes the migration decision and communicates it to the Source Host with a "MIGRATE" command. At this point, the Migration Source Host takes snapshots and stores the image files of the currently running container (docker checkpoint). After that, it transfers the container image files to the Destination Host; during this communication, the source host does not stop providing the service. The communication between Source and Destination Host starts with a "RESTART" command sent by the source, followed by the information about the container image files. Once the Destination has received all the required details, it restarts the container. After the service starts, the Destination sends a "SUCCESS" command to the migration source host. Then the TCP connection is closed between all the parties involved, the source host stops the container providing the service, and, in the end, routing is redirected to the migration destination host.
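The message sequence can be sketched as follows (only the command names MIGRATE, RESTART, and SUCCESS come from the protocol above; the in-memory message log stands in for the TCP transport, and the snapshot string is a placeholder for docker checkpoint):

```python
# Toy walk-through of the migration protocol's message sequence.
log = []

def migration_manager(dest_ip):
    log.append(("manager -> source", "MIGRATE", dest_ip))   # step 1

def source_host(dest_ip):
    snapshot = "container-image-files"                      # docker checkpoint output
    log.append(("source -> dest", "RESTART", snapshot))     # step 2; service stays up

def destination_host():
    # the destination restarts the container from the received image files, then acks
    log.append(("dest -> source", "SUCCESS", None))         # step 3

migration_manager("10.0.0.4")
source_host("10.0.0.4")
destination_host()
# After SUCCESS: connections close, the source stops its container,
# and routing is redirected to the destination.
for msg in log:
    print(msg)
```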
Our programmable migration framework enables choosing the destination host according to different criteria. In such a way, an administrator is able to choose different policies for different use cases.
IV. Migration Policy Tradeoff and Use Cases
In this section, we explain our migration system on two use cases: Load Balancing and Moving Target Defense. The policies used in each use case will be listed and compared.
A. Use Case 1: Load Balancing
This application allows migration by monitoring the network traffic. The destination host is selected according to different criteria; we focused on three policies to select the destination (a sketch of all three follows the list):
- **Random**: destination host is selected at random.
- **Bandwidth-based**: destination host is the host with the maximum available outgoing bandwidth. We define this value as the minimum link capacity of the links in the path.
- **Shortest Path**: leveraging the Floodlight controller, we are able to get the network topology and compute the shortest path for each pair of nodes.
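The three policies can be sketched as follows, with each candidate host mapped to the list of link capacities along its path from the source (the topology values are illustrative; the real system reads them from Floodlight):

```python
# Sketch of the three destination-selection policies.
import random

paths = {"H3": [5, 5], "H4": [10, 10, 10], "H5": [5, 5, 5, 5]}  # Mbps per link (assumed)

def policy_random(paths):
    return random.choice(list(paths))

def policy_bandwidth(paths):
    # available outgoing bandwidth of a path = its minimum (bottleneck) link capacity
    return max(paths, key=lambda h: min(paths[h]))

def policy_shortest_path(paths):
    # with the topology from Floodlight, hop count stands in for path length
    return min(paths, key=lambda h: len(paths[h]))

print(policy_bandwidth(paths), policy_shortest_path(paths))  # H4 H3, as in Scenario 1
```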
B. Use Case 2: Moving Target Defense
Moving Target Defense is a paradigm whose idea is to make the attack surface more dynamic. During the setup phase, a (private) key (x, y) and a lookup table are distributed to each host. The lookup table is encrypted with a master secret to protect the migration destination host. This table is a hash table associating each index with a destination host IP address. At the migration stage, our system provides the source host with a random number R to combine with the key as the input of a hash function:
\[
\mathrm{hash}(x) = \mathrm{Hash}(R + X \cdot Y) \bmod (N + 1),
\]
where \( N \) is the number of hosts, \( R \) is the random number, \( (X, Y) \) is a key represented as a point, and \( \bmod \) is the modulo operation. The value obtained from the hash function is the index into the lookup table. Here, three policies are used to share the secret:
- **Shamir**: This policy is inspired by Shamir’s method [15]: a secret is divided into K parts, and each participant has its own unique part. To get the secret key, a host needs to authenticate with some or all other hosts. The migration source host has to ask K disjoint hosts for K different keys to reconstruct the key and decrypt the lookup table. K is specified in the configuration file.
- **Digital Fountain**: The migration source host needs to ask K hosts for K keys, not necessarily disjoint. In our implementation we pick these K hosts probabilistically, using the following formula:
\[
P(i, k) = \frac{1/\mathrm{latency}(i, k)}{\sum_{j=1,\, j \neq i}^{n} 1/\mathrm{latency}(i, j)},
\]
where \( i \) is the source host, \( k \) is a candidate host, and \( P(i, k) \) is the probability that host \( k \) is selected for asking the key. A host with a smaller latency has a higher probability of being selected, which means that closer hosts may be contacted multiple times for the key.
- **Random**: The destination host is selected by using a pseudo-random function. We use this policy as a benchmark.

Fig. 3: Network topology with heterogeneous link capacity
At the beginning, the Migration Manager distributes to each host a different encrypted lookup table with the information required by the algorithms. The manager also generates a key (x, y) for each host; hence each host has a part of the information needed to decrypt the lookup table. Then, the Migration Manager sends a random number to the source host, which applies hash(x); the result is the migration destination host index i. According to the policy specified in the configuration file, different strategies are used for decrypting the table. In the case of Digital Fountain, the same host can be contacted many times, since the one with the shorter path has the higher probability of being chosen. In Shamir, on the other hand, the host asks k disjoint hosts for their key pairs in order to decrypt the lookup table. After getting the k keys, the migration source host applies the algorithm (Digital Fountain or Shamir) to obtain the master secret \( S \). The source host then decrypts the lookup table using \( S \), gets the migration destination host IP, and starts the migration process.
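A sketch of the two key steps of this protocol, the lookup-table index computation and the Digital Fountain choice of key holders, is given below. SHA-256 stands in for the unspecified hash function, and the latency values are illustrative:

```python
# Sketch of the MTD destination lookup and the Digital Fountain key-holder choice.
import hashlib
import random

def lookup_index(R, x, y, n_hosts):
    # hash(x) = Hash(R + X*Y) mod (N + 1); SHA-256 is an assumed stand-in for Hash
    digest = hashlib.sha256(str(R + x * y).encode()).hexdigest()
    return int(digest, 16) % (n_hosts + 1)

def pick_key_holders(latency, k):
    # P(i, j) proportional to 1/latency(i, j); repeats allowed (non-disjoint hosts)
    hosts = list(latency)
    weights = [1.0 / latency[h] for h in hosts]
    return random.choices(hosts, weights=weights, k=k)

latency = {"H2": 3.0, "H3": 1.0, "H4": 7.0}    # ms from the source host (assumed)
idx = lookup_index(R=42, x=3, y=5, n_hosts=4)  # index into the encrypted lookup table
print(idx, pick_key_holders(latency, k=3))
```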
V. EXPERIMENTAL VALIDATION
In this section, we test our system in a Mininet testbed, evaluating the two use cases and all the policies. The results are obtained on an Ubuntu machine with an Intel i7-6500U @ 2.50 GHz, 8.00 GB RAM, 64-bit.
A. Use Case 1: 3 policies evaluation for Load Balancing
**Scenario 1: Link capacity is heterogeneous.** The topology used as the testbed is shown in Figure 3, where the link capacity varies among the links. H1 executes a docker container running an iperf client, while H2, the source host, executes a docker container running the iperf server. The migration decision differs according to the chosen policy.
1) **Bandwidth-based policy**: H4 is selected as the destination host with a minimum bandwidth on its path of 10 Mbps.
2) **Shortest path policy**: H3 is selected as the destination host because of just 2 switches in between.
**TABLE I**: Migration time of 3 policies in Load Balancing task for Scenario 1 on Mininet.
<table>
<thead>
<tr>
<th></th>
<th>Bandwidth (s)</th>
<th>Shortest Path (s)</th>
<th>Random (s)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Heterogeneous</td>
<td>9.1 ± 0.15</td>
<td>40.1 ± 0.15</td>
<td>23.6 ± 7.12</td>
</tr>
</tbody>
</table>
3) **Random Policy**: the destination host is randomly selected among the free hosts set: (H3, H4, H5).
Figure 4 shows the bandwidth consumption during the migration process. The **bandwidth consumption** value is the sum of sent and received bandwidth for the migration source and destination hosts. In the first period (up to 125 s) the migration process has not started yet, so on the source host (red line) the traffic is related to the docker container running the iperf server. After 125 s the traffic on the switch is detected as too high and the migration process starts. During this period, the source host generates not only the traffic data for the iperf client but also the traffic data for the container migration. As a consequence, the destination host (blue line) starts to receive the migration files, so its bandwidth consumption starts increasing. After the migration process is done (150 s), the source host (red line) no longer runs the iperf server, so there is no more traffic; the destination host (blue line) starts to run the iperf server after the migration.
To evaluate the time necessary for the migration process, we ran the procedure 20 times for the 3 policies, as shown in Table I. The time is the sum of the time to make the decision and the time to perform the migration. Table I shows that the bandwidth policy provides the best trade-off between network load balancing and migration time: it takes advantage of the larger link bandwidth, so the migration time is much smaller than for the shortest path and random policies. The confidence intervals for bandwidth and shortest path are very small because the migration decision made in both cases is deterministic, H4 for the bandwidth policy and H3 for shortest path.
TABLE II: Migration time of 3 policies in Load Balancing task for Scenario 2 on Mininet.
<table>
<thead>
<tr>
<th></th>
<th>Bandwidth (s)</th>
<th>Shortest Path (s)</th>
<th>Random (s)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Homogeneous</td>
<td>18.6 ± 0.91</td>
<td>17.1 ± 0.16</td>
<td>18.7 ± 0.91</td>
</tr>
</tbody>
</table>
TABLE III: Migration time of 3 policies for Moving Target Defense.
<table>
<thead>
<tr>
<th></th>
<th>Digital Fountain (s)</th>
<th>Shamir (s)</th>
<th>Random (s)</th>
</tr>
</thead>
<tbody>
<tr>
<td>Homogeneous</td>
<td>36.6 ± 5.20</td>
<td>40.1 ± 6.60</td>
<td>27.2 ± 3.70</td>
</tr>
</tbody>
</table>
On the other hand, for Random, the migration destination is not deterministic, so each run may choose a different destination.
**Scenario 2: Link capacity is homogeneous.** In addition to the topology with heterogeneous capacity, we tested the same topology with homogeneous link capacity, set to 5 Mbps on all links. In this context, the decisions of the three policies are as follows:
1. **Bandwidth-based policy:** The destination host is randomly selected among the free host set: (H3, H4, H5).
2. **Shortest path policy:** H3 is selected as the destination host because of just 2 switches in between.
3. **Random Policy**: the destination host is randomly selected among the free host set: (H3, H4, H5).
In this case, the bandwidth policy has multiple choices, so the migration destination may vary in every run. Table II highlights how, in this case, shortest path performs better than bandwidth and random.
**B. Use Case 2: Three policies evaluation for Moving Target Defense**
In addition to the Load Balancing use case, we evaluated the cost of securing the system through the application of Moving Target Defense. We tested the migration time for the three policies mentioned above. Looking at Table III, it is possible to observe that the Random policy is the fastest, while Shamir has the highest migration time. This happens because in the Shamir policy the source host asks k disjoint hosts for k different keys, hence far hosts can be selected. In the Digital Fountain policy the source host asks k non-disjoint hosts for k keys; it is likely to ask the hosts with small latency more times, leading to a smaller migration time.
In essence, the random policy is the fastest one, but it does not apply any security mechanism, while Digital Fountain provides the best speed-security trade-off.
**VI. CONCLUSION AND FUTURE PLAN**
In this paper we presented a policy-programmable container migration architecture based on Docker within an SDN prototype. It allows changing the strategy and algorithm with a simple configuration file. Moreover, we tested two use cases, i.e., Load Balancing and Moving Target Defense, and we applied three different policies to each use case. Based on the results obtained we found that different algorithms provide the best performance in different scenarios. Hence, our policy-programmable LVM migration system guarantees the appropriate flexibility, as it can adapt to different application needs by just modifying the configuration.
As a plan for the future, we want to improve the system in several aspects. For software defined measurement, we could integrate the SDN controller with big data and machine learning algorithms, so that the migration destination host can be predicted; this would improve the network management service. In addition, we could scale the testbed further and explore the policy trade-offs in different topologies, such as tree, linear, star, and fully connected.
**ACKNOWLEDGMENTS**
This work has been partially supported by NSF CNS-1647084 and CNS-1836906.
10.1. QuickSort and Treaps with High Probability
You must be asking yourself what treaps are. For the answer, see Section 10.3.
One can think about QuickSort as playing a game in rounds. Every round, QuickSort picks a pivot, splits the problem into two subproblems, and continues playing the game recursively on both subproblems.
If we track a single element in the input, we see a sequence of rounds that involve this element. The game ends when this element finds itself alone in the round (i.e., the subproblem is to sort a single element).
Thus, to show that QuickSort takes $O(n \log n)$ time, it is enough to show, that every element in the input, participates in at most $32 \ln n$ rounds with high enough probability.
Indeed, let $X_i$ be the event that the $i$th element participates in more than $32 \ln n$ rounds.
Let $C_{QS}$ be the number of comparisons performed by QuickSort. A comparison between a pivot and an element will always be charged to the element. As such, the number of comparisons performed overall by QuickSort is bounded by $\sum_i r_i$, where $r_i$ is the number of rounds the $i$th element participated in (the last round, where it was a pivot, is ignored). We have that
$$\alpha = \mathbb{P}[C_{QS} \geq 32n \ln n] \leq \mathbb{P}\left[\bigcup_i X_i\right] \leq \sum_{i=1}^{n} \mathbb{P}[X_i].$$
Here, we used the union bound, that states that for any two events $A$ and $B$, we have that $\mathbb{P}[A \cup B] \leq \mathbb{P}[A] + \mathbb{P}[B]$. Assume, for the time being, that $\mathbb{P}[X_i] \leq 1/n^3$. This implies that
$$\alpha \leq \sum_{i=1}^{n} \mathbb{P}[X_i] \leq \sum_{i=1}^{n} \frac{1}{n^3} = \frac{1}{n^2}.$$
Namely, QuickSort performs at most $32n \ln n$ comparisons with high probability. It follows, that QuickSort runs in $O(n \log n)$ time, with high probability, since the running time of QuickSort is proportional to the number of comparisons it performs.
To this end, we need to prove that $\mathbb{P}[X_i] \leq 1/n^3$.
10.1.1. Proving that an element participates in small number of rounds
Consider a run of QuickSort for an input made out of $n$ numbers. Consider a specific element $x$ in this input, and let $S_1, S_2, \ldots$ be the subsets of the input that are in the recursive calls that include the
---
1 This work is licensed under the Creative Commons Attribution-Noncommercial 3.0 License. To view a copy of this license, visit http://creativecommons.org/licenses/by-nc/3.0/ or send a letter to Creative Commons, 171 Second Street, Suite 300, San Francisco, California, 94105, USA.
2 Also known as Boole’s inequality.
element $x$. Here $S_j$ is the set of numbers in the $j$th round (i.e., this is the recursive call at depth $j$ which includes $x$ among the numbers it needs to sort).
The element $x$ would be considered to be **lucky**, in the $j$th iteration, if the call to the QuickSort, splits the current set $S_j$ into two parts, where both parts contains at most $(3/4)|S_j|$ of the elements.
Let $Y_j$ be an indicator variable which is 1 if and only if $x$ is lucky in $j$th round. Formally, $Y_j = 1$ if and only if $|S_j|/4 \leq |S_{j+1}| \leq 3|S_j|/4$. By definition, we have that
$$\mathbb{P}[Y_j = 1] = \frac{1}{2}.$$
Furthermore, $Y_1, Y_2, \ldots, Y_m$ are all independent variables.
Note, that $x$ can participate in at most
$$\rho = \log_{4/3} n \leq 3.5 \ln n \quad (10.1)$$
rounds, since at each successful round, the number of elements in the subproblem shrinks by at least a factor $3/4$, and $|S_1| = n$. As such, if there are $\rho$ successful rounds in the first $k$ rounds, then $|S_k| \leq (3/4)^\rho n \leq 1$.
Thus, the question of how many rounds $x$ participates in boils down to how many coin flips one needs to perform till one gets $\rho$ heads. Of course, in expectation, we need to do this $2\rho$ times. But what if we want a bound that holds with high probability, how many rounds are needed then?
In the following, we require the following lemma, which we will prove in Section 10.2.
**Lemma 10.1.1.** In a sequence of $M$ coin flips, the probability that the number of ones is smaller than $L \leq M/4$ is at most $\exp(-M/8)$.
To use Lemma 10.1.1, we set
$$M = 32 \ln n \geq 8\rho,$$
see Eq. (10.1). Let $Y_j$ be the variable which is one if $x$ is lucky in the $j$th level of recursion, and zero otherwise. We have that $\mathbb{P}[Y_j = 0] = \mathbb{P}[Y_j = 1] = 1/2$ and that $Y_1, Y_2, \ldots, Y_M$ are independent. By Lemma 10.1.1, we have that the probability that there are only $\rho \leq M/4$ ones in $Y_1, \ldots, Y_M$, is smaller than
$$\exp\left(\frac{-M}{8}\right) \leq \exp(-\rho) \leq \frac{1}{n^3}.$$
We have that the probability that $x$ participates in more than $M$ recursive calls of QuickSort is at most $1/n^3$.
There are $n$ input elements. Thus, the probability that the depth of the recursion in QuickSort exceeds $32 \ln n$ is smaller than $(1/n^3) \cdot n = 1/n^2$. We thus established the following result.
**Theorem 10.1.2.** With high probability (i.e., $1 - 1/n^2$) the depth of the recursion of QuickSort is $\leq 32 \ln n$. Thus, with high probability, the running time of QuickSort is $O(n \log n)$.
More generally, for any constant $c$, there exists a constant $d$, such that the probability that the QuickSort recursion depth for any element exceeds $d \ln n$ is smaller than $1/n^c$.
Specifically, for any $t \geq 1$, we have that probability that the recursion depth for any element exceeds $t \cdot d \ln n$ is smaller than $1/n^{tc}$.
**Proof:** Let us do the last part (but the reader is encouraged to skip this on first reading). Setting $M = 32t \ln n$, the event that an element has depth exceeding $M$ requires that in $M$ coin flips we get at most $h = 4 \ln n$ heads. That is, if $Y$ is the sum of the coin flips, where we get $+1$ for heads and $-1$ for tails, then $Y$ needs to be smaller than $-(M - h) + h = -M + 2h$. By symmetry, this is equal to the probability that $Y \geq \Delta = M - 2h$. By Theorem 10.2.3 below, the probability of that is
$$
P[Y \geq \Delta] \leq \exp\left(-\frac{\Delta^2}{2M}\right) = \exp\left(-\frac{(M - 2h)^2}{2M}\right) = \exp\left(-\frac{(32t - 8)^2\ln^2 n}{64t \ln n}\right)$$
$$= \exp\left(-\frac{(4t - 1)^2\ln n}{t}\right) \leq \exp\left(-\frac{3t^2 \ln n}{t}\right) \leq \frac{1}{n^{3t}}. $$
Of course, the same result holds for the algorithm MatchNutsAndBolts for matching nuts and bolts.
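A quick experiment makes the bound tangible; the following sketch (an illustration added here, not part of the original notes) measures the maximum recursion depth of randomized QuickSort and compares it with $32 \ln n$.

```python
import math
import random

def quicksort_depth(items, depth=0):
    # Return the maximum recursion depth reached while sorting `items`
    # with a uniformly random pivot.
    if len(items) <= 1:
        return depth
    pivot = random.choice(items)
    left = [x for x in items if x < pivot]
    right = [x for x in items if x > pivot]
    return max(quicksort_depth(left, depth + 1),
               quicksort_depth(right, depth + 1))

n = 10_000
observed = max(quicksort_depth(list(range(n))) for _ in range(20))
print(f"max depth over 20 runs: {observed}; 32 ln n = {32 * math.log(n):.0f}")
```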
### 10.1.2. An alternative proof of the high probability of QuickSort
Consider a set $T$ of the $n$ items to be sorted, and consider a specific element $t \in T$. Let $X_i$ be the size of the input in the $i$th level of recursion that contains $t$. We know that $X_0 = n$, and
$$E[X_i | X_{i-1}] \leq \frac{1}{2}\cdot\frac{3}{4}X_{i-1} + \frac{1}{2}X_{i-1} = \frac{7}{8}X_{i-1}. $$
Indeed, with probability 1/2 the pivot is in the middle of the subproblem; that is, its rank is between $X_{i-1}/4$ and $(3/4)X_{i-1}$ (and then the subproblem has size $\leq (3/4)X_{i-1}$), and with probability 1/2 the subproblem might not have shrunk significantly (i.e., we pretend it did not shrink at all).
Now, observe that for any two random variables we have that $E[X] = \mathbb{E}_y[E[X \mid Y = y]]$, see Lemma 10.5.1. As such, we have that
$$E[X_i] = \mathbb{E}_{y} [E[X_i | X_{i-1} = y]] \leq \mathbb{E}_{X_{i-1}=y} \left[\frac{7}{8}y\right] = \frac{7}{8} E[X_{i-1}] \leq \left(\frac{7}{8}\right)^i E[X_0] = \left(\frac{7}{8}\right)^i n.$$
In particular, consider $M = 8 \log_{8/7} n$. We have that
$$\mu = E[X_M] \leq \left(\frac{7}{8}\right)^{M} n \leq \frac{1}{n^8}\, n = \frac{1}{n^7}.$$
Of course, $t$ participates in more than $M$ recursive calls, if and only if $X_M \geq 1$. However, by Markov’s inequality (Theorem 10.2.1), we have that
$$P\left[\text{element } t \text{ participates in more than } M \text{ recursive calls}\right] \leq P[X_M \geq 1] \leq \frac{E[X_M]}{1} \leq \frac{1}{n^7},$$
as desired. That is, we proved that the probability that any element of the input $T$ participates in more than $M$ recursive calls is at most $n(1/n^7) \leq 1/n^6$.
### 10.2. Chernoff inequality
#### 10.2.1. Preliminaries
**Theorem 10.2.1 (Markov’s Inequality).** For a non-negative variable $X$, and $t > 0$, we have:
$$P[X \geq t] \leq \frac{E[X]}{t}.$$
Proof: Assume that this is false, and there exists \( t_0 > 0 \) such that \( \mathbb{P}[X \geq t_0] > \frac{\mathbb{E}[X]}{t_0} \). However,
\[
\mathbb{E}[X] = \sum_{x} x \cdot \mathbb{P}[X = x] = \sum_{x < t_0} x \cdot \mathbb{P}[X = x] + \sum_{x \geq t_0} x \cdot \mathbb{P}[X = x] \\
\geq 0 + t_0 \cdot \mathbb{P}[X \geq t_0] > 0 + t_0 \cdot \frac{\mathbb{E}[X]}{t_0} = \mathbb{E}[X],
\]
a contradiction. \( \blacksquare \)
We remind the reader that two random variables \( X \) and \( Y \) are **independent** if for all \( x, y \) we have that
\[
\mathbb{P}[(X = x) \cap (Y = y)] = \mathbb{P}[X = x] \cdot \mathbb{P}[Y = y].
\]
The following claim is easy to verify, and we omit the easy proof.
**Claim 10.2.2.** If \( X \) and \( Y \) are independent, then \( \mathbb{E}[XY] = \mathbb{E}[X] \mathbb{E}[Y] \).
If \( X \) and \( Y \) are independent then \( Z = e^X \) and \( W = e^Y \) are also independent variables.
**10.2.2. Chernoff inequality**
**Theorem 10.2.3 (Chernoff inequality).** Let \( X_1, \ldots, X_n \) be \( n \) independent random variables, such that \( \mathbb{P}[X_i = 1] = \mathbb{P}[X_i = -1] = \frac{1}{2} \), for \( i = 1, \ldots, n \). Let \( Y = \sum_{i=1}^{n} X_i \). Then, for any \( \Delta > 0 \), we have
\[
\mathbb{P}[Y \geq \Delta] \leq \exp(-\Delta^2/2n).
\]
**Proof:** Clearly, for an arbitrary \( t \), to be specified shortly, we have
\[
\mathbb{P}[Y \geq \Delta] = \mathbb{P}[tY \geq t\Delta] = \mathbb{P}[\exp(tY) \geq \exp(t\Delta)] \leq \frac{\mathbb{E}[\exp(tY)]}{\exp(t\Delta)}, \tag{10.2}
\]
where the first part follows since \( \exp(\cdot) \) preserve ordering, and the second part follows by Markov’s inequality (Theorem 10.2.1).
Observe that, by the definition of \( \mathbb{E}[\cdot] \) and by the Taylor expansion of \( \exp(\cdot) \), we have
\[
\mathbb{E}[\exp(tX_i)] = \frac{1}{2} e^t + \frac{1}{2} e^{-t} = \frac{e^t + e^{-t}}{2} \\
= \frac{1}{2} \left( 1 + \frac{t}{1!} + \frac{t^2}{2!} + \frac{t^3}{3!} + \cdots \right) \\
+ \frac{1}{2} \left( 1 - \frac{t}{1!} + \frac{t^2}{2!} - \frac{t^3}{3!} + \cdots \right) \\
= \left( 1 + \frac{t^2}{2!} + \cdots + \frac{t^{2k}}{(2k)!} + \cdots \right).
\]
Now, \((2k)! = k!(k + 1)(k + 2) \cdots 2k \geq k!2^k\), and thus
\[
\mathbb{E}[\exp(tX_i)] = \sum_{i=0}^{\infty} \frac{t^{2i}}{(2i)!} \leq \sum_{i=0}^{\infty} \frac{t^{2i}}{2^i\, i!} = \sum_{i=0}^{\infty} \frac{1}{i!} \left( \frac{t^2}{2} \right)^i = \exp\left( \frac{t^2}{2} \right),
\]
again, by the Taylor expansion of \( \exp(\cdot) \). Next, by the independence of the \( X_i \)s, we have
\[
\mathbb{E}[\exp(tY)] = \mathbb{E}\left[ \exp\left( \sum_{i} tX_i \right) \right] = \mathbb{E}\left[ \prod_{i} \exp(tX_i) \right] = \prod_{i=1}^{n} \mathbb{E}[\exp(tX_i)] \\
\leq \prod_{i=1}^{n} \exp\left( \frac{t^2}{2} \right) = \exp\left( \frac{nt^2}{2} \right).
\]
We have, by Eq. (10.2), that
\[
\mathbb{P}[Y \geq \Delta] \leq \mathbb{E}[\exp(tY)] \leq \frac{\exp\left( \frac{nt^2}{2} \right)}{\exp(t\Delta)} = \exp\left( \frac{nt^2}{2} - t\Delta \right).
\]
Next, we select the value of \( t \) that minimizes the right term in the above inequality. Easy calculation shows that the right value is \( t = \Delta/n \). We conclude that
\[
\mathbb{P}[Y \geq \Delta] \leq \exp\left( \frac{n}{2}\left(\frac{\Delta}{n}\right)^2 - \frac{\Delta^2}{n} \right) = \exp\left( -\frac{\Delta^2}{2n} \right).
\]
Note, the above theorem states that
\[
\mathbb{P}[Y \geq \Delta] = \sum_{i=\Delta}^{n} \mathbb{P}[Y = i] = \sum_{i=n/2+\Delta/2}^{n} \binom{n}{i}\frac{1}{2^n} \leq \exp\left( -\frac{\Delta^2}{2n} \right),
\]
since \( Y = \Delta \) means that we got \( n/2 + \Delta/2 \) times +1s and \( n/2 - \Delta/2 \) times (-1)s.
By the symmetry of \( Y \), we get the following corollary.
**Corollary 10.2.4.** Let \( X_1, \ldots, X_n \) be \( n \) independent random variables, such that \( \mathbb{P}[X_i = 1] = \mathbb{P}[X_i = -1] = \frac{1}{2} \), for \( i = 1, \ldots, n \). Let \( Y = \sum_{i=1}^{n} X_i \). Then, for any \( \Delta > 0 \), we have
\[
\mathbb{P}[|Y| \geq \Delta] \leq 2 \exp\left( -\frac{\Delta^2}{2n} \right).
\]
By easy manipulation, we get the following result.
**Corollary 10.2.5.** Let \( X_1, \ldots, X_n \) be \( n \) independent coin flips, such that \( \mathbb{P}[X_i = 1] = \mathbb{P}[X_i = 0] = \frac{1}{2} \), for \( i = 1, \ldots, n \). Let \( Y = \sum_{i=1}^{n} X_i \). Then, for any \( \Delta > 0 \), we have
\[
\mathbb{P}\left[ Y - \frac{n}{2} \geq \Delta \right] \leq \exp\left( -\frac{2\Delta^2}{n} \right) \quad \text{and} \quad \mathbb{P}\left[ \frac{n}{2} - Y \geq \Delta \right] \leq \exp\left( -\frac{2\Delta^2}{n} \right).
\]
In particular, we have
\[
\mathbb{P}\left[ \left| Y - \frac{n}{2} \right| \geq \Delta \right] \leq 2 \exp\left( -\frac{2\Delta^2}{n} \right).
\]
**Proof:** Transform \( X_i \) into the random variable \( Z_i = 2X_i - 1 \), and now use Theorem 10.2.3 on the new random variables \( Z_1, \ldots, Z_n \).
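To see how the bound behaves numerically, one can compare the empirical tail with $2\exp(-2\Delta^2/n)$; the sketch below is only an illustration with arbitrarily chosen parameters.

```python
import math
import random

n, delta, trials = 1_000, 60, 10_000

# Empirical probability that |Y - n/2| >= delta, where Y counts heads
# among n fair coin flips.
hits = sum(abs(sum(random.getrandbits(1) for _ in range(n)) - n / 2) >= delta
           for _ in range(trials))
print(f"empirical tail: {hits / trials:.5f}")
print(f"Chernoff bound: {2 * math.exp(-2 * delta ** 2 / n):.5f}")
```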
Lemma 10.1.1 (Restatement.) In a sequence of $M$ coin flips, the probability that the number of ones is smaller than $L \leq M/4$ is at most $\exp(-M/8)$.
Proof: Let $Y = \sum_{i=1}^{M} X_i$ be the sum of the $M$ coin flips. By the above corollary, we have:
$$
\mathbb{P}[Y \leq L] = \mathbb{P}\left[\frac{M}{2} - Y \geq \frac{M}{2} - L\right] = \mathbb{P}\left[\frac{M}{2} - Y \geq \Delta\right],
$$
where $\Delta = M/2 - L \geq M/4$. Using the above Chernoff inequality, we get
$$
\mathbb{P}[Y \leq L] \leq \exp\left(-\frac{2\Delta^2}{M}\right) \leq \exp(-M/8).
$$
10.2.2.1. The Chernoff Bound — General Case
Here we present the Chernoff bound in a more general settings.
Problem 10.2.6. Let $X_1, \ldots, X_n$ be $n$ independent Bernoulli trials, where
$$
\mathbb{P}[X_i = 1] = p_i \quad \text{and} \quad \mathbb{P}[X_i = 0] = 1 - p_i,
$$
and let
$$
Y = \sum_{i} X_i \quad \text{and} \quad \mu = \mathbb{E}[Y].
$$
**Question:** What is the probability that $Y \geq (1 + \delta)\mu$?
**Theorem 10.2.7 (Chernoff inequality).** For any $\delta > 0$,
$$
\mathbb{P}[Y > (1 + \delta)\mu] < \left(\frac{e^\delta}{(1 + \delta)^{1+\delta}}\right)^\mu.
$$
Or in a more simplified form, for any $\delta \leq 2e - 1$,
$$
\mathbb{P}[Y > (1 + \delta)\mu] < \exp(-\mu\delta^2/4), \quad (10.3)
$$
and
$$
\mathbb{P}[Y > (1 + \delta)\mu] < 2^{-\mu(1+\delta)},
$$
for $\delta \geq 2e - 1$.
**Theorem 10.2.8.** Under the same assumptions as the theorem above, we have
$$
\mathbb{P}[Y < (1 - \delta)\mu] \leq \exp\left(-\frac{\mu\delta^2}{2}\right).
$$
The proofs of these more general forms follow the proofs shown above, and are omitted. The interested reader can get the proofs from:
http://www.uiuc.edu/~sariel/teach/2002/a/notes/07_chernoff.ps
10.3. Treaps
Anybody who has ever implemented a balanced binary tree knows that it can be very painful. A natural question is whether we can use randomization to get a simpler data structure with good performance.
10.3.1. Construction
The key observation is that many data structures that offer good performance for balanced binary search trees do so by storing additional information that guides how to balance the tree. The key idea here is that for every element $x$ inserted into the data structure we randomly choose a priority $p(x)$; that is, $p(x)$ is chosen uniformly at random in the range $[0, 1]$.
So, for the set of elements $X = \{x_1, \ldots, x_n\}$, with (random) priorities $p(x_1), \ldots, p(x_n)$, our purpose is to build a binary tree which is “balanced”. So, let us pick the element $x_k$ with the lowest priority in $X$, and make it the root of the tree. Now, we partition $X$ in the natural way:
(A) $L$: set of all the numbers smaller than $x_k$ in $X$, and
(B) $R$: set of all the numbers larger than $x_k$ in $X$.
We can now build the trees for $L$ and $R$ recursively, and denote them by $T_L$ and $T_R$. We build the natural tree by creating a node for $x_k$, having $T_L$ as its left child, and $T_R$ as its right child.
We call the resulting tree a treap. As it is a tree over the elements, and a heap over the priorities; that is, TREAP = TREE + HEAP.
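A direct transcription of this construction, assuming distinct elements, might look like the sketch below (it rebuilds recursively, $O(n^2)$ in the worst case; real implementations insert incrementally, as described in the next section).

```python
import random

class Node:
    def __init__(self, key, priority):
        self.key, self.priority = key, priority
        self.left = self.right = None

def build_treap(keys):
    # Assign a uniform random priority to every element, then recursively
    # make the minimum-priority element the root, as described above.
    items = [(k, random.random()) for k in keys]

    def build(items):
        if not items:
            return None
        key, priority = min(items, key=lambda it: it[1])
        node = Node(key, priority)
        node.left = build([it for it in items if it[0] < key])
        node.right = build([it for it in items if it[0] > key])
        return node

    return build(items)

root = build_treap([5, 2, 8, 1, 9, 3])
```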
Lemma 10.3.1. Given $n$ elements, the expected depth of a treap $T$ defined over those elements is $O(\log(n))$. Furthermore, this holds with high probability; namely, the probability that the depth of the treap would exceed $c \log n$ is smaller than $\delta = n^{-d}$, where $d$ is an arbitrary constant, and $c$ is a constant that depends on $d$.\footnote{That is, if we want to decrease the probability of failure, that is $\delta$, we need to increase $c$.}
Furthermore, the probability that $T$ has depth larger than $ct \log(n)$, for any $t \geq 1$, is smaller than $n^{-dt}$.
Proof: Observe that every element has equal probability to be the root of the treap. Thus, the structure of a treap is identical to the recursive tree of QuickSort. Indeed, imagine that instead of picking the pivot uniformly at random, we pick the pivot to be the element with the lowest (random) priority. Clearly, these two ways of choosing pivots are equivalent. As such, the claim follows immediately from our analysis of the depth of the recursion tree of QuickSort, see Theorem 10.1.2.
10.3.2. Operations
The following innocent observation is going to be the key insight in implementing operations on treaps:
Observation 10.3.2. Given $n$ distinct elements, and their (distinct) priorities, the treap storing them is uniquely defined.
10.3.2.1. Insertion
Given an element $x$ to be inserted into an existing treap $T$, insert it in the usual way into $T$ (i.e., treat $T$ as a regular binary search tree). This takes $O(\text{height}(T))$. Now, $x$ is a leaf in the treap. Set $x$'s priority $p(x)$ to some random number in $[0,1]$. Now, while the new tree is a valid search tree, it is not necessarily still a valid treap, as $x$'s priority might be smaller than its parent's. So, we need to fix the tree around $x$, so that the priority property holds.
We call RotateUp($x$) to do so. Specifically, if $x$'s parent is $y$, and $p(x) < p(y)$, we rotate $x$ up so that it becomes the parent of $y$. We repeat this until $x$ has a larger priority than its parent. The rotation operation takes constant time and, importantly, preserves the binary search tree order. Here is a rotate right operation RotateRight($D$):
![Diagram of RotateRight operation]
RotateLeft is the same tree rewriting operation done in the other direction.
At the end of this process, both the ordering property and the priority property hold. That is, we have a valid treap that includes all the old elements and the new element. By Observation 10.3.2, since the treap is uniquely defined, we have updated the treap correctly. Since every rotation decreases the distance of $x$ from the root by one, it follows that insertion takes $O(\text{height}(T))$.
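Here is a minimal, self-contained sketch of insertion with the rotations made explicit; it illustrates the procedure above and is not the authors' implementation.

```python
import random

class Node:
    def __init__(self, key):
        self.key = key
        self.priority = random.random()  # random priority picked at insertion
        self.left = self.right = None

def rotate_right(d):
    # RotateRight(D): the left child of d becomes the parent of d.
    b = d.left
    d.left, b.right = b.right, d
    return b  # new root of this subtree

def rotate_left(b):
    # The mirror image of rotate_right.
    d = b.right
    b.right, d.left = d.left, b
    return d

def insert(root, key):
    # Usual BST insertion; on the way back up, rotate the new node up
    # while its priority is smaller than its parent's.
    if root is None:
        return Node(key)
    if key < root.key:
        root.left = insert(root.left, key)
        if root.left.priority < root.priority:
            root = rotate_right(root)
    else:
        root.right = insert(root.right, key)
        if root.right.priority < root.priority:
            root = rotate_left(root)
    return root

root = None
for k in [5, 2, 8, 1, 9]:
    root = insert(root, k)
```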
10.3.2.2. Deletion
Deletion is just an insertion done in reverse. Specifically, to delete an element $x$ from a treap $T$, set its priority to $+\infty$, and rotate it down until it becomes a leaf. The only tricky observation is that you should always rotate so that the child with the lower priority becomes the new parent. Once $x$ becomes a leaf, deleting it is trivial: just set the pointer pointing to it in the tree to null.
10.3.2.3. Split
Given an element $x$ stored in a treap $T$, we would like to split $T$ into two treaps: one treap $T_{\leq}$ for all the elements smaller than or equal to $x$, and the other treap $T_{>}$ for all the elements larger than $x$. To this end, we set $x$'s priority to $-\infty$ and fix the priorities by rotating $x$ up so it becomes the root of the treap. The right child of $x$ is the treap $T_{>}$, and we disconnect it from $T$ by setting $x$'s right child pointer to null. Next, we restore $x$ to its real priority, and rotate it down to its natural location. The resulting treap is $T_{\leq}$. This again takes time proportional to the depth of the treap.
10.3.2.4. Meld
Given two treaps $T_L$ and $T_R$ such that all the elements in $T_L$ are smaller than all the elements in $T_R$, we would like to merge them into a single treap. Find the largest element $x$ stored in $T_L$ (this is just the last node on the path going only right from the root of the tree). Set $x$'s priority to $-\infty$, and rotate it up the treap so that it becomes the root. Now, $x$ being the largest element in $T_L$, it has no right child. Attach $T_R$ as the right child of $x$. Finally, restore $x$'s priority to its original value, and rotate it back down so that the priority property holds.
10.3.3. Summary
**Theorem 10.3.3.** Let $T$ be a treap, initialized to an empty treap, and undergoing a sequence of $m = n^c$ insertions, where $c$ is some constant. The probability that the depth of the treap at any point in time exceeds $d \log n$ is $\leq \frac{1}{n^f}$, where $d$ is an arbitrary constant, and $f$ is a constant that depends only on $c$ and $d$.
In particular, a treap can handle insertion/deletion in $O(\log n)$ time with high probability.
**Proof:** The first part of the theorem implies that, with high probability, all these treaps have logarithmic depth, which in turn implies that all operations take logarithmic time, as an operation on a treap takes at most time proportional to the depth of the treap.
As for the first part, let $T_1, \ldots, T_m$ be the sequence of treaps, where $T_i$ is the treap after the $i$th operation. Similarly, let $X_i$ be the set of elements stored in $T_i$. By Lemma 10.3.1, the probability that $T_i$ has large depth is tiny. Specifically, we have that
$$\alpha_i = \mathbb{P}[\text{depth}(T_i) > tc' \log n^c] = \mathbb{P}\left[\text{depth}(T_i) > c't\left(\frac{\log n^c}{\log |T_i|}\right) \cdot \log |T_i|\right] \leq \frac{1}{n^{c+1}},$$
as a tedious and boring but straightforward calculation shows. Picking $t$ to be sufficiently large, we have that the probability that the $i$th treap is too deep is smaller than $1/n^{f+c}$. By the union bound, since there are $n^c$ treaps in this sequence of operations, it follows that the probability of any of these treaps to be too deep is at most $1/n^{f}$, as desired. \hfill \blacksquare
10.4. Bibliographical Notes
Chernoff inequality was a rediscovery of Bernstein inequality, which was published in 1924 by Sergei Bernstein. Treaps were invented by Seidel and Aragon [SA96]. Experimental evidence suggests that treaps perform reasonably well in practice, despite their simplicity; see, for example, the comparison carried out by Cho and Sahni [CS00]. Implementations of treaps are readily available. An old implementation I wrote in C is available here: http://valis.cs.uiuc.edu/blog/?p=6060.
10.5. From previous lectures
**Lemma 10.5.1.** For any two random variables $X$ and $Y$ (not necessarily independent), we have that $\mathbb{E}[X] = \mathbb{E}\left[\mathbb{E}[X \mid Y]\right]$.
Bibliography
RUP DRIVEN DEVELOPMENT
OF SOCIAL LEARNING PLATFORM
Introduction
Nowadays most people understand quite well the need for life-long learning. It holds for students who are at the beginning of their career ladders as well as for professionals who would like to stay highly competitive in their workplaces. However, it requires an ongoing, voluntary, and self-motivated pursuit of knowledge. This process may be significantly improved by ICT (Information and Communication Technologies) based interactive tools used for professional development.
Tischner European University (TEU) in Cracow is developing an innovative education model as a means to meet students' expectations and to adapt the university education policy to socioeconomic changes in such areas as social development, blended learning and open source philosophy. As a part of this model, a comprehensive education offer supported by innovative state-of-the-art ICT solutions, in the form of a social learning platform called WeLearning, has been developed. The users of this platform may become members of a community of active learners that participate in discussions and meetings and can develop educational content (e.g. articles, DL modules, and infographics) on the platform. The goal of TEU is to make the philosophy of life-long learning more popular among Poles.
In the case of most development processes of web based solutions, project activities are usually done informally. This works for small and technically simple projects with clearly stated requirements and centralized small project teams. The characteristics of the TEU WeLearning project, such as decentralization of stakeholders, a not fully explored application domain, the size of the project team, and the innovativeness as well as technical complexity of the software solution, required a more disciplined approach. On the other hand, because of time constraints the development process should not be too formal. As a solution to this problem, the Rational Unified Process (RUP) methodology has been selected for the WeLearning project. RUP has proven its value in the industry by delivering significant returns on investment (ROI) to the companies and organizations that have adopted it [Kruc03]. It provides a disciplined approach to the development process, but at the same time the formalization level can be customized to the project's environment. The paper describes the RUP driven development process of the social learning platform and explains why selecting RUP was a proper choice. What is more, all project activities undertaken have been analyzed and described from two main perspectives: the former regards RUP as a software development approach, and according to the latter RUP is considered as a software engineering process.
System under Development
The system under development is intended to be an internet platform that will help create and share knowledge, as well as develop skills and a sense of community among like-minded individuals. This community is not made available exclusively to students, staff, and sympathizers of TEU; from the very start it is open for everyone interested, regardless of age, level of education, or university affiliation. The only condition is a life-long hunger for knowledge and a willingness to make new acquaintances with like-minded individuals. The interests of the platform's users and their activities will be used as a way to measure the job and education market. The university will then be able to plan its endeavors (for example, the opening of new courses of study or creating new specializations) to ensure the best qualifications for its graduates and to build an interesting additional offer (cultural events, trips, conferences with interesting people). Besides the basic functionalities planned, the platform will be equipped with mechanisms of gamification that will take the solution to a new level of educational quality. According to the definition, gamification is "the use of game design elements in a non-game context" [Dete11]. The concept of applying game-design thinking to non-game applications has gained common acceptance in everyday activity. The huge success of these two strategies (social media and gamification) pointed the way to the use of this type of mechanisms in other fields, including education. The main objective is to encourage students to learn, since some methods of learning that are often labeled as boring are met with resistance.
Why RUP?
There are several reasons why the RUP methodology has been selected for the project. First of all, it provides a disciplined approach to assigning tasks and responsibilities within a development organization. Its goal is to ensure the production of high-quality software that meets the needs of its end users within a predictable schedule and budget [KKB03]. Secondly, RUP is an adaptable and extendable process framework. It can be properly configured to suit the needs of a specific project. The next reason is that one can select the proper formalization level for the development process. In the areas where there are requirements concerning formal documentation, adequate RUP artifacts can be developed; in other places a less formal approach may be taken. Finally, RUP promotes best practices of software development, the so-called Spirit of the RUP, developed by many professionals and organizations all over the world. One of the most important aspects is that RUP is an iterative process, which makes the development more flexible and adaptable to changing requirements of the system's owner and stakeholders.
RUP as a Software Development Approach
During the project both aspects of the RUP methodology have been taken into consideration. Firstly RUP is considered as a software development approach. It includes several essential principles driving the development process. They are the following [Kruc03]:
- attack major risks early and continuously… or they will attack you,
- ensure that you deliver value to your customer,
- stay focused on executable software,
- accommodate change early in the project,
- baseline an executable architecture early on,
- build your system with components,
- work together as one team,
- make quality a way of life, not an afterthought.
All of them have been implemented in the project to some extent. The system under development is an innovative solution that has not been developed so far by other educational institutions. This means that there are no reference models which can be taken into consideration during the process of functionality planning and architecture selection. Therefore there are many possible risks related to the development process of this kind of platform. The risk has been mitigated by conducting extensive research involving all stakeholders. There are several questions that had to be taken into consideration before the project started. Sample questions asked and answered are: What are the possibilities of creating and moderating a community of active learners? What are the possible methods and techniques used for e-learning that will stimulate interest and drive educational content development? What about the motivational system, and what mechanisms will make users tick? What kind of revenue generation model (if any) can be applied to platform content? Should access to the content be free, or is it possible to receive payments for premium platform functionalities and educational content? In order to answer these general questions, in every category more specific ones have been formulated and research planned. For example, for the first category, related to the potential of the social learning platform, the following questions have been identified:
- Do the prospects use social platforms? If yes which platforms? How often do they use this kind of services?
- Do the prospects know any social learning platforms or social platforms that offer educational services and content?
- What do the prospects mean by Distance Learning? What kind of associations do they have with regard to Distance Learning concept?
- Do the prospects have any experience with Distance Learning?
- What are the pros and cons of Distance Learning according to prospects opinion?
- Do the prospects use Wikipedia? How do they rate its content?
- Do the prospects have the skills and motivations for developing educational materials related to subjects they study and to sharing them with classmates?
Questions regarding the other categories have also been elaborated [BaIP12a, BaIP12b]. Based on the questions prepared, research has been planned and conducted, including such groups as students, alumni, university staff, external experts and employers. The research results enabled more informed decisions about the platform's functionality and content as well as the techniques and methods used. It also reduced the risks related to the development and introduction of such a unique and non-standard solution. A more detailed description of the research is presented in the section of the paper describing the Inception phase of the project. The final issue concerning risk management is related to iterative development. All of the project's tasks have been done in an iterative fashion.
As in all other ICT projects, delivering value to the customer is a very important and obvious goal. However, the steps that should be taken in order to achieve this goal are sometimes poorly specified and vague. The situation looks different in the case of RUP. RUP provides recommendations derived from best practices of software development. The recommendations regard three aspects: iterative development, communication with customers, and capturing functional requirements.
As has already been mentioned, the project under consideration has adopted an iterative approach. After every iteration, meetings with stakeholders have been arranged and conducted. During these meetings, presentations of the system functionalities developed so far have been delivered and feedback from stakeholders gathered. All remarks have been taken into account during the following iterations. This helped reduce the gap between the stakeholders' requirements and the functionality of the system under development.
Finally, according to RUP, use case method has been used for capturing requirements. After all use cases have been specified, they drove the whole development process. Since use cases describe how a user will interact with the system, they are easy for a user to relate to. And since they describe the interaction in a time-sequential order, it is easy for both users and analysts to identify any holes in the use case.
Because of the project's time constraints, the principle of staying focused on executable software has been adopted. This means that the progress of the project has been measured with regard to ready-to-use executable software. Therefore, the main focus was not on RUP documentation artifacts but on modules (or functions inside modules) fully implemented, tested and presented to stakeholders. A clear focus on executable software forced the right thinking among the project team: there was less risk of overanalyzing and theorizing, and it was possible to prove which solutions are optimal. As was written in [KrKB03], forcing closure by producing executable software is often the fastest way of mitigating risk.
Continuous communication with stakeholders also enabled accommodating changes early in all stages of the project. This approach reduced the cost of changes and minimized requirements creep. Before the project started, the research results were deeply analyzed, all insights discussed with stakeholders, and the contract describing the system scope approved by them. After the functional and non-functional requirements had been gathered and specified, they were presented to stakeholders and elaborated according to their hints and comments. What is more, decisions on the solution's final architecture were made before the elaboration phase. Changes related to design and implementation have been managed rather easily, as a component-based approach to system modules was taken. All the functionalities have been implemented with the use of off-the-shelf elements customized by the developers, which are well tested and verified in many solutions working on-line.
Common knowledge is that people are the project's most important asset. The project under consideration was organized around a cross-functional team that consisted of a project manager, system analysts, a user experience and SEO expert, as well as developers. During the implementation stage developers coordinated their tasks quite smoothly, as they knew each other very well from many projects done together so far. The same held for the system analysts and the UX expert. Every team member was informed about the state of the project once or twice a week during meetings organized by the project manager. There was also continuous asynchronous and synchronous communication done via e-mail and instant messengers. Unfortunately, daily stand-up meetings have not been organized. The reason was decentralization and the responsibilities of team members related to other projects and classes (part of the team was sourced from university staff). As a solution to this problem, the project documentation, constantly updated by the system analysts, was published on-line in the form of an interactive website, where everybody could analyze the contents, check the current state of the project and drill down the artifacts to the abstraction level she/he was interested in. As experience showed, this solution improved communication significantly and partially compensated for the lack of daily short meetings.
**RUP as a Software Engineering Process**
The RUP is also a software engineering process. All the stages and activities in the life-cycle are well-defined and well-structured, with essential milestones and decision points precisely articulated. RUP clearly defines who is responsible for what, how things are done, and when to do them [Kruc03]. The structure of the RUP is presented in Figure 1. The process has two dimensions: static and dynamic. The static structure describes how process elements are logically grouped into core process disciplines. The basic process elements are: activities, disciplines, artifacts, and roles. The dynamic structure shows how the process, expressed in terms of cycles, phases, iterations, and milestones, unfolds over the lifecycle of a project [KrKB03].
RUP defines four main phases: inception, elaboration, construction and transition. In the **inception** phase a good understanding of what system to build is obtained. It is achieved by getting a high-level understanding of all the requirements and defining the system's scope. In this stage the focus is also on mitigating business risks and producing the business case for building the system. Finally, it is important to get the acceptance of all stakeholders and decide whether to proceed with the project.
During the **elaboration** phase the most technically difficult tasks, such as design, implementation, testing, and baselining an executable architecture (including subsystems, their interfaces, key components, and architectural mechanisms), are undertaken. What is more, major technical risks are addressed by code implementation and validation [Barn07].
Most of the implementation is done during the **construction** phase. Programmers develop the first operational version of the system on the basis of the executable architecture. Then they deploy alpha releases to verify whether the system under development meets stakeholders' needs. At the end of this stage a fully functional beta version is deployed; however, the system still requires improvements and tuning related to overall functional and non-functional requirements as well as quality.
The main aim of the **transition** stage is to collect final feedback and ensure that the release of the system under development addresses the needs of all stakeholders. During this stage testing and minor adjustments are made. Basic activities include fine-tuning of the product, configuration and usability analysis. The focus is also on user training and integration issues.
**The Inception Phase of WeLearning Project**
As has already been mentioned, this stage is mainly about understanding what to build: the system's major objectives and scope. The system's objectives are formulated in the vision statement. However, the process of preparing a vision has to be driven by the needs of the system's stakeholders. The vision statement should also express the strategic potential of the solution under development. Therefore the first step was related to the identification of the project's stakeholders and their needs.
All stakeholders have been divided into two groups: primary and secondary. The first group includes students and university staff members. The second group consists of external experts and employers. Figure 2 presents the primary stakeholders and the roles they can play as platform users.
In order to better understand the stakeholders' needs as well as the strategic potential of the platform under development, research was planned and conducted with the use of two well-known techniques: focus groups and expert interviews. A focus group is a form of qualitative research in which different groups of people are asked about their perceptions, opinions, beliefs, and attitudes towards the product itself (the platform), the services it will provide, and the main idea, which is quite unique and innovative: to provide users with an internet platform where they will learn, share knowledge and insights, build the educational content, as well as influence the quality of educational content created by other virtual community members.
The research was done in a structured form with a scenario prepared in advance. There were 137 participants from two main categories of stakeholders: students and alumni. They came from Tischner European University in Cracow, the University of Information Technology and Management in Rzeszów and the University of Management and Administration in Zamość. The groups included part-time and full-time students from first and second cycle studies (different majors) as well as graduate students and alumni.
Fig. 2. Primary Stakeholders of the system
The research was conducted from the end of October to the beginning of December 2012. Each focus group session lasted 1.5 hours, and the results were recorded, transcribed and coded.
For coding the results, the Atlas.ti software has been used. Focus group members were informed in advance about the scope and goal of the research, and participation was fully voluntary. What is more, all indicators and codes were operationalized in order to properly mark quotations selected from the recordings (for details see [BaIP12a, BaIP12b, BaIP12c]).
All of the stakeholders answered the questions from the following modules: *Diagnosis of opportunities related to creation and moderation of life-long learning virtual communities, Social potential of learning platform, Scope and type of learning methods and techniques, Scope and topics of subject-matter, Motivation system and Diagnosis of opportunities related to introduction of payments for access to platform’s content.*
The second group of stakeholders included employers. The research was conducted in the form of structured *In-Depth Expert Interviews* with the use of a scenario prepared in advance. The *expert interview* is an ideal tool for presenting ideas and content and encourages subject matter experts to share knowledge from the area under consideration. Ten experts from the social media and ICT fields have been selected. This group consisted of entrepreneurs (ISPs and social portal owners, e.g. zadane.pl) and experts (social media investors and advisers, start-up practitioners, and on-line brand creation experts).
The research was conducted in December 2012 and every interview lasted from 0.5 to 1 hour. Every expert was informed in advance about the goal of the research and, as in the previous groups, participation was fully voluntary. In the case of this group, new question categories have been introduced, such as: *Verification of technical capabilities of different social platforms including open source solutions* and *Market demand for this kind of services as well as prospect employees who are using them*.
University staff members constituted the third group whose opinions were analyzed. As in the case of employers, the research regarding university staff members was conducted in the form of structured *In-Depth Expert Interviews*. The group included 18 people: 6 from Tischner European University in Cracow, 6 from the University of Information Technology and Management in Rzeszów and 6 from the University of Management and Administration in Zamość. The group consisted of decision makers such as vice presidents, deans, heads of departments and lecturers. Some of them have been dealing with e-learning issues for some time. Besides the research modules already mentioned, they were asked to answer questions related to their experience with e-learning, key platform features, evaluation and certification processes and the platform's brand building process.
Careful analysis of the research results enabled the team to formulate the system's vision and establish the preliminary scope and key functionalities of the platform under development. All these elements became a part of the *inception deck* developed for the project.

In order to prepare the inception deck, three techniques taken from agile approaches have been used: *elevator pitch*, *designing the product box* and *NOT list*. The elevator pitch is a well known tool that enables communicating the essence of the project in a very short period of time. The elevator pitch for the project was created according to the template published in [Rasm10] and is presented in Figure 3. As a part of the project's documentation, an extended version of the system's vision (included in the *Executive Summary*) was also prepared for strategic stakeholders.
The next technique is called *designing the product box* for a solution. Creating a product box for the project, and asking why someone would buy it, gets the team focused on what is compelling for stakeholders and the underlying benefits of the product [Rasm10]. The product box developed for the project is shown in Figure 4.

After the vision statement had been formulated, the system's scope was established. When setting expectations about the scope of the system, saying what will not be done is just as important as saying what will be [Rasm10]. The technique that was used is the *NOT list*. By creating a NOT list, it is clearly stated what is in and out of scope for the project. Figure 5 presents the *NOT list* for the project.
<table>
<thead>
<tr>
<th>In Scope</th>
<th>Out of Scope</th>
</tr>
</thead>
<tbody>
<tr>
<td><em>News</em></td>
<td><em>Calendar</em></td>
</tr>
<tr>
<td><em>Surveys</em></td>
<td><em>Wiki</em></td>
</tr>
<tr>
<td><em>Groups</em></td>
<td><em>Job e-Marketplace</em></td>
</tr>
<tr>
<td><em>Communication with University Administration</em></td>
<td><em>Contests Management</em></td>
</tr>
<tr>
<td><em>Blogs</em></td>
<td><em>Payments Management</em></td>
</tr>
<tr>
<td><em>Forums</em></td>
<td><em>Educational Resources</em></td>
</tr>
<tr>
<td><em>Galleries</em></td>
<td></td>
</tr>
<tr>
<td><em>Networking</em></td>
<td></td>
</tr>
<tr>
<td><em>Meetings</em></td>
<td></td>
</tr>
<tr>
<td><em>Social Bookmarks</em></td>
<td></td>
</tr>
<tr>
<td><em>Profile Management</em></td>
<td></td>
</tr>
<tr>
<td><em>Talents Management</em></td>
<td></td>
</tr>
<tr>
<td><em>Reporting</em></td>
<td></td>
</tr>
<tr>
<td><em>Search Engine</em></td>
<td></td>
</tr>
<tr>
<td><em>System Administration</em></td>
<td></td>
</tr>
<tr>
<td><em>Backup</em></td>
<td></td>
</tr>
<tr>
<td><em>Access Control</em></td>
<td></td>
</tr>
</tbody>
</table>
Unresolved: Integration with LMS Platform, Integration with Video Conferencing System
Fig. 5. NOT list developed for WeLearning platform
The final decisions made in the inception phase were related to the solution architecture and the software engineering process. The project team decided that the system would be built around a three-tier architecture, which is a common architecture for web-based applications. All system components should comply with the Model-View-Controller pattern. Because of time constraints and the quite high technical complexity of the planned functionality, most of the modules would be implemented with the use of the Joomla® CMS and additional off-the-shelf components tuned to the system's requirements. In accordance with RUP best practices, the development process would be fully iterative, with release and iteration planning done during the elaboration phase.
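The paper contains no source code, so purely as an illustration of the Model-View-Controller split referred to above (written in Python rather than the platform's actual PHP/Joomla stack, and with hypothetical class names), a component can be organized like this:

```python
class NewsModel:
    """Model: owns the data and the rules for accessing it."""
    def __init__(self):
        self._items = []

    def add(self, title: str) -> None:
        self._items.append(title)

    def latest(self, n: int = 5) -> list:
        return self._items[-n:]


class NewsView:
    """View: renders data prepared by the controller."""
    @staticmethod
    def render(items: list) -> str:
        return "\n".join(f"* {title}" for title in items)


class NewsController:
    """Controller: mediates between user requests, the model and the view."""
    def __init__(self, model: NewsModel, view: NewsView):
        self.model, self.view = model, view

    def show_latest(self) -> str:
        return self.view.render(self.model.latest())


# Usage: the controller never touches storage or markup directly.
model = NewsModel()
model.add("WeLearning platform launched")
print(NewsController(model, NewsView()).show_latest())
```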
**The Elaboration Phase of the WeLearning Project**
The elaboration phase started with gathering the details of system requirements. All functional requirements were specified in the use case model, including use case diagrams and use case scenarios. For selected use cases, GUI prototypes and storyboards were created to present the system from the user perspective. Users are usually better able to understand a functional specification when it is presented in terms of the screens they will see during interaction with the system under development. Then the non-functional requirements were described and the project glossary was developed. Because of the large number of use cases, they were divided into modules. The architecture of the WeLearning platform is presented in Appendix 1.
In order to plan the first release of the system and its iterations, priorities were set and labor intensity estimated for all use cases. Use case priorities were determined by the strategic stakeholders (the system owner), while labor intensity estimates were provided by the developers. For every iteration, use cases were selected according to the defined priorities and the time needed for development.
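As an illustration of this selection rule (our sketch with hypothetical backlog data, not the project's actual plan), a greedy, priority-first packing of an iteration could look as follows:

```python
def plan_iteration(use_cases, capacity_days):
    """Select use cases for one iteration, highest priority first,
    limited by the iteration's capacity in person-days.

    use_cases: list of (name, priority, estimate_days) tuples.
    """
    selected, used = [], 0.0
    for name, _priority, estimate in sorted(use_cases, key=lambda uc: -uc[1]):
        if used + estimate <= capacity_days:
            selected.append(name)
            used += estimate
    return selected

backlog = [("News", 3, 2.0), ("Blogs", 2, 3.0), ("Surveys", 1, 2.5)]
print(plan_iteration(backlog, capacity_days=5))  # ['News', 'Blogs']
```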
The first iteration (iteration 0) was responsible for baselining the executable architecture of the solution. Therefore the tasks undertaken concerned auditing the hardware architecture for the platform, server virtualization, installation and configuration of the CMS and frameworks (Gantry 4, Twitter Bootstrap), and selecting Joomla extensions as well as self-contained systems that would make it possible to implement the planned platform functionality.
Table 1 presents the main CMS extensions chosen for development [Wojc12].
<table>
<thead>
<tr>
<th>Extension Name</th>
<th>Related Platform Functionality</th>
</tr>
</thead>
<tbody>
<tr>
<td>SobiPro</td>
<td>Social Bookmarking</td>
</tr>
<tr>
<td>JomSocial</td>
<td>Networking</td>
</tr>
<tr>
<td></td>
<td>Groups</td>
</tr>
<tr>
<td></td>
<td>Galleries</td>
</tr>
<tr>
<td></td>
<td>Calendar</td>
</tr>
<tr>
<td></td>
<td>Communication among community members</td>
</tr>
<tr>
<td></td>
<td>Dashboard</td>
</tr>
<tr>
<td></td>
<td>Meetings</td>
</tr>
<tr>
<td>EasyBlog</td>
<td>Blogs</td>
</tr>
<tr>
<td>JS Jobs</td>
<td>Job e-Marketplace</td>
</tr>
<tr>
<td>Kunena</td>
<td>Forums</td>
</tr>
</tbody>
</table>
Besides Joomla extensions, which can be installed and configured relatively easily, some functionalities required self-contained systems that had to be integrated with the WeLearning platform. For educational resources management (DL modules) the Moodle® LMS was selected. Video conferences would be supported by the BigBlueButton solution, and synchronous communication by CometChat.
After all elements of the executable platform architecture had been settled, the layout and look-and-feel of the main system screens were designed according to User Experience best practices. In order to better suit stakeholders' needs, a session was organized during which prospective users drew sample sketches of the system's screens. Based on the sketches, digital mock-ups were developed and presented to stakeholders. With the feedback gathered, the final look-and-feel of the user interface was designed.
The next iterations planned were responsible for the development of key solution features. In the elaboration phase the WeLearning platform was equipped mainly with features that could be implemented with Joomla extensions. The iterations for the elaboration phase are presented in Figure 6.
*(Figure: six one-week iterations (Iteration 1–6) covering Access Control; Communication with University Administration; Search Engine; Surveys; Social Bookmarking; Forum; Gallery; Networking; Communication Among Community Members; Groups; Blogs; Meetings.)*
Fig. 6. Iterations Planned for Elaboration Phase
**The Construction Phase of the WeLearning Project**
The elaboration phase ended with the internal release of a baselined, executable architecture, which made it possible to address major technical risks by implementing and validating actual code [Ba07]. During the construction phase, the project team focused heavily on the detailed design of modules, module implementation, platform integration with self-contained systems, and testing.
Some of the modules planned in the construction phase could not be implemented with CMS extensions and needed in-house development, e.g. the Reporting and Talents Management modules. There were also modules that were created on the basis of CMS extensions. In such cases the basic functionality of a specific off-the-shelf extension was tuned and programmed according to the system's requirements (e.g. Profile Management).
The next tasks in the construction phase were related to the integration of self-contained systems. In order to enable users to create, upload, use and manage educational resources, the platform was integrated with the Moodle LMS. The integration was done with the use of a Joomla component that acts as a bridge between the Joomla and Moodle platforms.
Because of the high significance of synchronous communication among community members, a chat facility had to be developed. It was built with the use of the CometChat system integrated with the CMS and the JomSocial extension. One of the advantages of this solution is the smart polling technology used for exchanging messages between users and the system, without having to deploy an additional application server for synchronous communication.
*(Figure: six one-week iterations (Iteration 1–6) covering Profile Management; Educational Resources Management; Educational Resources Usage; Reporting; Chat; Talents Management.)*
Fig. 7. Iterations Plan for Construction Phase
The third external solution used is BigBlueButton, an open-source video conferencing system offering many options and APIs for customization as well as integration with learning tools. It enables remote students to have a high-quality learning experience. However, this part of the system is still under development. The iterations plan for the construction phase is presented in Figure 7.
Because of time constraints (the first version had to be available in 3 months), the platform functionality was divided into two releases. All system elements presented in the iteration plans (Figure 6 and Figure 7) concerned the first release. The second release included such modules as: Certification, Job e-Marketplace, Calendar, Wiki, Learning Outcomes, News Management, Shared Repository, Contests Management, Payment Management and Video Conferences. Most of them require in-house development. Some of these modules are finished, but others are still under development.
**Non-functional Requirements**
During development of the first release of the system, non-functional requirements were also taken into consideration [Wojc12]. Implementing a modern user interface and animations without Adobe Flash Player required the use of HTML5, CSS 3.0 and JavaScript (with jQuery libraries). This made it possible to use new features provided by the newest versions of web technologies.
Because it should be possible for users to access the platform with mobile devices, the system was implemented with the Twitter Bootstrap framework, which supports Responsive Web Design. The WeLearning solution was equipped with a flexible and dynamic user interface which automatically recognizes the screen resolution and rescales all GUI elements.
Reliability requirements were met through code optimization for most internet browsers, including IE8+, Firefox 3+, Safari 4+, Chrome 8+ and Opera 10+, as well as for mobile device browsers (Android and iOS). The source code complies with W3C standards.
The responsiveness of the system was improved thanks to an additional server-side module with the DEFLATE algorithm, which performs lossless compression of all data sent to platform users. All CSS and JavaScript files were minified. All CSS and graphics files carry an Expiration header, and all files loaded by the browser obtain Entity Tags. These measures improve the effectiveness of the browser's cache: most modules are stored in the cache, so there is no need to load them again when the user needs them.
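As an illustration of the mechanisms described above (our sketch, not the platform's actual server configuration, which the paper does not reproduce), the following Python fragment shows DEFLATE compression of a response body together with expiration and entity-tag headers:

```python
import zlib

def prepare_response(body: bytes, headers: dict) -> bytes:
    """Compress a response body and attach cache-related headers."""
    compressed = zlib.compress(body)               # lossless DEFLATE (zlib) compression
    headers["Content-Encoding"] = "deflate"
    headers["Cache-Control"] = "max-age=2592000"   # expire after roughly one month
    headers["ETag"] = f'"{zlib.crc32(body):08x}"'  # cheap entity tag derived from content
    return compressed

headers: dict = {}
payload = prepare_response(b"body { color: #333; }", headers)
print(len(payload), headers)
```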
Security requirements were implemented with SSL certificates and with mechanisms protecting against unauthorized access, XSS and SQL injection attacks.
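For illustration only (the platform itself is Joomla/PHP-based and the paper does not list its exact mechanisms), parameterized queries are one standard defense against SQL injection:

```python
import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE users (login TEXT, role TEXT)")
conn.execute("INSERT INTO users VALUES ('alice', 'student')")

hostile = "alice' OR '1'='1"  # classic injection attempt
# The placeholder keeps the input as data, never as executable SQL text:
rows = conn.execute("SELECT role FROM users WHERE login = ?", (hostile,)).fetchall()
print(rows)  # [] -- the injection attempt matches no row
```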
**The Transition Phase of the WeLearning Project**
In the transition phase all major structural issues should already have been worked out, and user feedback should focus mainly on fine-tuning, configuration, installation and usability issues [KrKB03]. All of the WeLearning platform modules were periodically presented to stakeholders and feedback was gathered. Moreover, testers were hired to check whether all system elements passed the acceptance tests. During this phase the final documentation and user manuals were prepared and training was planned. The first training took place after the first release was deployed. The second training covered the presentation of new modules from the second release and improvements made to the first release. The third training course was conducted in the form of a workshop in a computer lab, where the WeLearning team (10 persons) responsible for platform deployment learned how to use all system functionalities in practice and provided feedback. Throughout the deployment period, all developers were constantly available on-line and by phone.
**Conclusions**
It seems that the project is on the right track. So far the project team has met all acceptance criteria with regard to budget, schedule and scope, to a large extent thanks to using RUP in the development process. Of course, it will take some time to fully verify the success of the whole enterprise. For a platform like this, Metcalfe's law will apply: the value of the WeLearning platform will be proportional to the square of the number of active users. An important question here is whether the motivation mechanisms implemented via the virtual currency called talents will work as assumed, but the statistics gathered are quite optimistic. At the moment the WeLearning platform has 640 users (580 active users). There were 222 educational resource items published. Users have taken 83 competence tests (57 completed) and created 43 profiles with individual development plans. The forum includes 30 categories with 110 topics and 442 posts. Further development of the solution will depend on effective moderation of the life-long learning community, on smartly discovering what makes its members tick, and on transforming these insights into innovative features of the WeLearning platform. It will require constant monitoring of e-learning standards, learners' preferences and advances in the ICT field.
Appendix 1. Architecture of WeLearning Platform
References
APPLICATION OF THE RUP METHODOLOGY IN ANALYSIS AND DESIGN ACTIVITIES DURING THE DEVELOPMENT OF A SOCIAL LEARNING PLATFORM
Summary
The goal of the work, whose partial results are presented in this article, was to design and implement the WeLearning social learning platform for higher education, incorporating gamification mechanisms. The analysis and design activities were carried out using the RUP methodology. The article explains why choosing RUP was the right decision for designing the platform, and presents in detail the successive phases of the project life cycle, i.e. the inception, elaboration, construction and transition phases.
ABSTRACT
This paper conducts a systematic literature review of papers published in the proceedings of the International Conference on Performance Engineering (ICPE) and its predecessors. It provides an overview of prevailing topics within the community over time. We look at research and contribution facets that have been used to address these topics. Trends are outlined in terms of the evaluation methods used to validate contributions. The results are complemented with a geographical and organizational dimension; for this purpose, the paper concludes with a look at the top ten contributing countries and organizations.
Categories and Subject Descriptors
A.1 [Introductory and Survey]; C.4 [Performance of Systems]
General Terms
Performance, Theory
Keywords
Systematic Literature Review, Performance Engineering, ICPE, WOSP, SIPEW, Performance Research
1. INTRODUCTION
Many researchers and industry practitioners around the globe have dedicated themselves to performance engineering, due to the complexity of this subject [5]. As a consequence, various workshops and conferences specialized in this field have been established. The principal conference focused on the performance of software systems and related questions is the International Conference on Performance Engineering (ICPE). The ICPE was established as a joint meeting of the ACM Workshop on Software and Performance (WOSP) and the SPEC International Performance Evaluation Workshop (SIPEW). In the scope of this conference, domain experts are invited to present and discuss state-of-the-art research results concerning performance measurement, modeling techniques, benchmark design and run-time performance management [1, 2, 3, 4, 11].
Although research in the field of performance engineering is not in its infancy anymore and numerous papers have already been published, a general overview of prevailing topics and methods within the community does not exist. To the best of our knowledge, there has not been any effort to systematically select, synthesize and review existing literature within the ICPE and its predecessors. Therefore, this gap is addressed in this work.
Performance engineering research at the ICPE and its predecessors is analyzed in a systematic literature review. The first WOSP took place in 1998 followed by six WOSP, one SIPEW and five ICPE events at the time of writing this paper. This work analyzes the proceedings of all these events and captures sixteen years of performance engineering research in total.
2. METHODOLOGY
The systematic literature review in this work is conducted following the guidelines provided by Kitchenham and Charters [8]. According to them, a systematic literature review is a “[...] means of identifying, evaluating and interpreting all available research relevant to a particular research question, or topic area, or phenomenon of interest”.
2.1 Research Questions
The initial task in a systematic literature review according to Kitchenham and Charters [8] is the definition of research questions (RQ). RQs in general are central drivers of this research methodology and consequently influence the research process heavily. As part of our study, the following three RQs will be answered:
- RQ 1: Which topics have been addressed in the papers published at the ICPE (respectively at its predecessors) in the time period from 1998 to 2014?
The goal of this RQ is to get an overview of the different subjects that have been published and discussed at the ICPE, WOSP and SIPEW. Therefore, we investigate which specific topics are addressed more frequently by published papers and how this focus has shifted over the years. The answer to this RQ can be found in Section 3.1.
- **RQ 2**: Which research facets, contribution facets and evaluation methods have been used in papers published at the ICPE and its predecessors?
To answer this research question an overview of research and contribution facets is given in Section 3.2. This helps to get an overview of how researchers try to tackle topics outlined in Section 3.1. Furthermore, evaluation methods are outlined in Section 3.3 that have been used to validate different contribution types. Over the years, different types of evaluation methods have been established in the performance engineering domain. In this paper, methods are analyzed in terms of frequency, applicability to a certain topic and popularity within the performance engineering community.
- **RQ 3**: Who are the top ten countries and organizations in terms of the quantity of articles published at the ICPE and its predecessors?
RQ 3 aims to identify how papers published in the proceedings of the ICPE are distributed among countries and organizations. The resulting analysis covers the research and publication activity of these countries and organizations at the ICPE and its predecessors from 1998 to 2014. First, the geographical perspective is outlined in Section 3.4. Second, the organizational perspective is addressed in Section 3.5.
### 2.2 Data Sources and Paper Selection
Only papers that had been published at the ICPE, WOSP and SIPEW were considered in a first step. The initial set of papers contained 471 publications in total. It was predominantly available online in the ACM Digital Library\(^1\) and SpringerLink\(^2\). Papers with illegible writing due to formatting issues, such as overlapping characters, were replaced. The replacements were taken from other digital libraries and were then checked for correspondence in order to avoid distorting the outcomes.
After having set a solid base for the research by establishing an initial set of papers, the set needed to be filtered to acquire meaningful results. The filter process was divided into two steps, each specified with its own goals and exclusion criteria. The first exclusion criterion (EC1) to be applied was the removal of all invited talks and keynotes. After applying the exclusion criteria, 388 papers ultimately moved on to the data extraction process.
### 2.3 Data Extraction and Synthesis
Data was extracted from a total of 388 included papers. Due to space limitations, we provide an online accessible list of the papers on our website\(^4\) instead of in this paper. We created a data extraction scheme in an Excel spreadsheet with respect to the previously stated RQs. This proved to be a necessary step, as we could easily compute frequencies, filter for relevant information and analyze relationships between the different RQ findings. The scheme is divided into different sections. It contains generic information (paper ID, title, authors, type of report, year, conference) as well as specific information (organization, evaluation method, contribution facet, research type facet, lifecycle phase, domain, system under study).
### 3. RESULTS
In this section results of the systematic literature review are presented. The research questions outlined in Section 2 are answered in a chronological order.
### 3.1 Topics at the ICPE
Obtaining a deep understanding of topics discussed at a conference and the evolution of these topics over several years is a difficult task. New technologies and trends have a constant influence on topics addressed by researchers. Thus, the focus of the conference is shifting from year to year. The N-Gram analysis is employed in this section to provide a solution to this problem and reveal trends within the conference from 1998 to 2014 [12, 6].
An N-Gram analysis is a technique used in the field of natural language processing for identifying the frequency of the occurrence of words or combinations of words [9]. An N-Gram represents a sequence of \( n \) words which is extracted from a body of text. For example, the phrase “software performance management” can be divided into three 1-Grams (“software”, “performance”, “management”), two 2-Grams (“software performance”, “performance management”), and one 3-Gram (“software performance management”).
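The extraction itself is straightforward; the following minimal Python sketch (ours, not the authors' tooling, which is introduced below) reproduces the example above:

```python
def ngrams(text: str, n: int) -> list:
    """Return all sequences of n consecutive words in the text."""
    words = text.split()
    return [" ".join(words[i:i + n]) for i in range(len(words) - n + 1)]

phrase = "software performance management"
print(ngrams(phrase, 1))  # ['software', 'performance', 'management']
print(ngrams(phrase, 2))  # ['software performance', 'performance management']
print(ngrams(phrase, 3))  # ['software performance management']
```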
In order to perform the analysis we follow the approach of Soper and Turel [12] and first establish a corpus of text. The corpus consists of the collection of 388 selected articles. All articles were available as PDF documents. We converted each document to a parsable text file. In order to prevent distortion of results we removed in several post-processing steps any unnecessary data such as author information, word lists, the bibliography, the appendix, page numbers and citation references. The resulting text files were then grouped by the year of the publication to enable an analysis run for each conference edition.
The N-Gram analysis is supported by a variety of tools. We used the freeware tool AntConc\(^3\) because it is easy to use and well documented. For each analysis run, the user can specify a minimum and a maximum length of N-Grams to be considered. When a sequence of words occurs more frequently than a single word, the sequence receives a higher rank within the analysis results. The results provided by the tool consist of the absolute frequency of each N-Gram.
---
\(^1\)http://dl.acm.org/
\(^2\)http://link.springer.com/
\(^3\)http://www.antlab.sci.waseda.ac.jp/software.html
\(^4\)http://pmw.fortiss.org/research/icpe/
Since the relevance of each N-Gram depends on the size of the text corpus, we calculate relative frequencies and thus make the results for each conference edition comparable. Multiple occurrences of the same N-Gram within one article are counted separately. Since word classes such as articles are among the most frequent ones, filtering needs to be performed to select only content-relevant keywords.
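A hedged sketch of this normalization step (our illustration with made-up data; the study worked from AntConc's output rather than custom code):

```python
from collections import Counter

def relative_frequencies(corpus_ngrams: list) -> dict:
    """Turn absolute N-Gram counts into relative frequencies so that
    editions with differently sized text corpora stay comparable."""
    counts = Counter(corpus_ngrams)
    total = sum(counts.values())
    return {gram: count / total for gram, count in counts.items()}

edition = ["energy consumption", "power consumption", "energy consumption"]
print(relative_frequencies(edition))
# {'energy consumption': 0.666..., 'power consumption': 0.333...}
```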
We first performed an analysis for each conference edition, including N-Grams with a minimum length of \( n=1 \), to capture the most frequent keywords, and a maximum length of \( n=4 \), to limit the computational expense. The most frequent N-Grams identified during the analysis have a length of \( n=1 \). However, some of the highest ranked results are of limited value for describing the topics addressed by the research. Therefore, words such as performance, system, software, server, model or data were not considered. The frequency of the occurrence of these keywords remains constant over all conference editions\(^5\). Some of the most relevant topics and their evolution over time are shown in Figure 1.
The terms Power and Cloud are among the most frequent N-Grams in 2014. The frequency of the term Power first peaked in 2000 and then increased significantly after 2008. The term Cloud was first used in 2007, in a context different from cloud computing. Only in 2010 was the term first used in this context, and its frequency has continued to increase every year. During the transition from WOSP/SIPEW to ICPE between 2008 and 2010, only the terms Power and UML display a significant change.
To gain more insight into the conference topics we performed a second N-Gram analysis, including N-Grams with a minimum length of \( n=2 \) and a maximum length of \( n=4 \). The ten most frequent word combinations identified for each year at the WOSP/SIPEW and the ICPE are shown in Tables 1 and 2. The values displayed in the tables represent relative frequencies. Among the most frequent N-Grams in 2014 are Energy Consumption and Power Consumption, with absolute frequencies of 155 and 87 respectively. While used in every edition since 2002, the term Garbage Collection is included in the top ten list for the first time in 2014, with 58 occurrences.
### 3.2 Research and Contribution Facets
This section outlines different kinds of research and contribution facets of papers published in the WOSP/SIPEW and ICPE proceedings. It broadens the understanding of how researchers tried to address topics outlined in Section 3.1. Petersen et al. [10] propose a systematic map to classify and structure studies and their fields in the area of software engineering. For their map and its visualization they categorize studies in the following three different facets:
- Variability context facet - categorization for different topics among studies
- Research facet - classification for the type of research such as evaluations, proposals or experience papers
- Contribution facet - attribution of papers’ outcomes such as tools or models
Since we have already analyzed major topics in Section 3.1, the focus here lies on the research and contribution facets. For the research facet, Petersen et al. [10] differentiate between the following research types:
- Validation research - assessment of new techniques with example experiments
- Solution proposal - suggestion of a solution for an existing issue
- Philosophical paper - taxonomy or framework for existing subjects
- Experience paper - personal experience and guide for techniques in practice
- Evaluation research - assessment of already implemented techniques
- Opinion paper - personal opinion about methods and techniques
These categories are used to classify all papers that are included according to our selection in Section 2.2. The results are illustrated in Figure 2. If multiple research types were covered by a given paper, only the focal aspect was considered for the classification. Similarly to Petersen et al. [10], the results are presented in a bubble chart showing the number of papers for each category with a corresponding bubble size. The research facets are aggregated per year to give an indication of the progress. Evaluation and validation research are the most common types, with totals of 109 and 113 papers respectively, and they appear constantly over the years. Solution proposals have also been very frequent since the second WOSP in 2000.
Petersen et al. [10] also considered the contribution facet of papers. Such contribution facets are methods, metrics, models, processes or tools [10]. All papers are classified according to their contribution facet. Since some papers contain two contribution facets and are thus counted twice, the total number of contribution types does not equal the total number of papers. The results are presented in Figure 3. As before, a bubble chart is used to illustrate the number of occurrences of each contribution in relation to the year.
The results show that almost every contribution facet appears at almost every edition. However, tools as well as processes are the minority groups and in several years occur not at all or only once. Methods represent the majority group and are contributed by 160 papers.
### 3.3 Evaluation Methods
In this section we investigate the evaluation methods used for the research and contribution facets outlined in the previous section. The methods are categorized according to the design science theory of Hevner et al. [7]. This theory categorizes evaluation methods based on the validation of IT artifacts. Each of these categories is described in Table 3. According to Hevner et al. [7], these "IT artifacts can be evaluated in terms of functionality, completeness, consistency, accuracy, performance, reliability, usability, fit with the organization, and other relevant quality attributes". All papers are categorized according to these methods and the results are presented in Figure 4. The categorization is based on the evaluation type mentioned by the authors of a paper. If multiple evaluation methods were applied in a given paper, only the focal method was considered for the classification.
The ICPE submissions can be categorized into nine out of twelve distinct evaluation methods: case studies, field studies, static analysis, architecture analysis, optimizations, controlled experiments, simulations, informed arguments and scenarios. No paper was submitted using one of the three remaining categories: functional testing, structural testing and dynamic analysis. It is important to note that Hevner et al. [7] refer to the performance of the artifact itself when they talk about dynamic analysis (e.g., performance). Thus, they do not refer to the performance of a system that is analyzed using the artifact.
The controlled experiment is the most popular evaluation method: 131 of 388 (34%) categorized papers used it. The case study is almost as popular, with a maximum of 17 publications in 2008 and 90 publications (23%) in total. Interest in contributing research with this evaluation method is decreasing slowly, while controlled experiments have become more popular recently. A similar development can be observed for scenario-based evaluations; both seem to be falling out of favor while controlled experiments are rising. Today, the two most common methods (controlled experiments and case studies) account for over 55% of all papers.
Simulations were popular in the beginning and experienced a small renaissance in 2011. In the years from 2004 to 2010, this evaluation method was not very popular, with only one to three papers per year. In recent years, interest in simulation-based evaluations has again been slowly decreasing.
Informed arguments and static analysis are very exotic for ICPE papers. Field studies are rare, even though this method was used more frequently in 2014. We found only eight papers using architecture analysis as an evaluation method and six papers in total using optimizations. We see that evaluation methods that rely on only one artifact, like case studies and controlled experiments, are more popular. This popularity is related to the contribution facets of papers submitted to the ICPE. As seen in Section 3.2, model and method contributions are very popular, and such contributions call for a case study or an experiment as evaluation. Process or tool contributions are more easily evaluated in, for example, a field study, but these contribution types are rare at the ICPE and its predecessors.
### 3.4 Geographical Perspective
A large number of different countries have contributed to publications over the years; in total, 33 countries have been involved. Table 4 shows the top ten countries ranked by their total number of publications. The publications metric includes exclusive as well as joint publications. If, for instance, a paper was published by three authors from the USA and one from Germany, the publication count is increased by one for both countries, since authors from both countries contributed to the publication. Therefore, the sum of the publications in Table 4 is not equal to the total number of publications across all editions. Furthermore, each country's share of the total number of papers is listed.
Table 4: Top 10 contributing countries
<table>
<thead>
<tr>
<th>Rank</th>
<th>Country</th>
<th>Publications</th>
<th>Share</th>
<th>Cooperation</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>USA</td>
<td>130</td>
<td>33.51%</td>
<td>40</td>
</tr>
<tr>
<td>2</td>
<td>Germany</td>
<td>67</td>
<td>17.27%</td>
<td>23</td>
</tr>
<tr>
<td>3</td>
<td>Canada</td>
<td>61</td>
<td>15.72%</td>
<td>12</td>
</tr>
<tr>
<td>4</td>
<td>UK</td>
<td>41</td>
<td>10.59%</td>
<td>14</td>
</tr>
<tr>
<td>5</td>
<td>Italy</td>
<td>32</td>
<td>8.40%</td>
<td>23</td>
</tr>
<tr>
<td>6</td>
<td>Spain</td>
<td>20</td>
<td>5.15%</td>
<td>13</td>
</tr>
<tr>
<td>7</td>
<td>Australia</td>
<td>9</td>
<td>2.32%</td>
<td>2</td>
</tr>
<tr>
<td>7</td>
<td>Netherlands</td>
<td>9</td>
<td>2.32%</td>
<td>5</td>
</tr>
<tr>
<td>7</td>
<td>India</td>
<td>9</td>
<td>2.32%</td>
<td>2</td>
</tr>
<tr>
<td>7</td>
<td>Switzerland</td>
<td>9</td>
<td>2.32%</td>
<td>5</td>
</tr>
</tbody>
</table>
The first rank is held by the USA with 130 publications, followed at a large distance by Germany and Canada with 67 and 61 publications respectively. An analysis of the number of papers published by countries hosting the ICPE indicates that hosting countries publish more papers than on average. Except for three events, the host countries have published twice as many papers as usual.
As publications include joint publications between countries, the number of papers written in cooperation is listed in Table 4 as well. A remarkable value is presented by Canada, which counts 12 joint publications and thus has the lowest proportion of papers in cooperation relative to its publications, at 20%. In contrast, Spain contributed 13 papers in cooperation and therefore has the biggest proportion, at 65%.
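A small sketch of the counting rule described above (our illustration with hypothetical data, not the study's real dataset):

```python
from collections import Counter

# Each paper is represented by the set of countries of its authors' affiliations.
papers = [
    {"USA", "Germany"},   # a joint publication counts once for each country
    {"USA"},
    {"Canada"},
]

publications: Counter = Counter()
cooperations: Counter = Counter()

for countries in papers:
    for country in countries:
        publications[country] += 1
        if len(countries) > 1:          # at least one foreign co-author
            cooperations[country] += 1

shares = {c: n / len(papers) for c, n in publications.items()}
print(publications, cooperations, shares)
```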
### 3.5 Organizational Perspective
The evaluation of research activity from an organizational perspective is performed in a similar way as in Section 3.4. Table 5 lists the top ten contributing organizations ranked by the number of publications. The listed metrics publications, share and cooperation are defined in the same way as in Table 4, only applied to organizations instead of countries.
Carleton University holds the first rank with 38 publications. Ranks two and three are held by the Karlsruhe Institute of Technology (KIT) and the University of L'Aquila with 24 and 20 publications, followed by Imperial College London and the University of Rome Tor Vergata with 16 and 12 publications on ranks four and five. From rank six onward, the remaining organizations count fewer than ten publications. Although the USA is ranked first by the number of publications, none of the top six organizations belongs to this country.
4. CONCLUSION
Trends found in Section 3.3 show that evaluations based on a single IT artifact are very popular nowadays, while evaluations of multiple artifacts or architectures are rare. As the contribution types method and model are very popular, it would be of great benefit to use the results of such research to create tools and processes as well, and to evaluate results in broader environments.
The data and the analysis conducted in Section 3.4 indicate a positive influence on the number of a country's publications when it hosts the conference. An explanation for this could be an increased number of submissions due to lower travel costs. In our opinion, conference organizers should take this into account in order to increase the involvement of certain countries.
5. REFERENCES
A Timed Graphical Interval Logic
Abstract—We define a graphical language for expressing timed requirements on concurrent systems. This formal language, called Timed Graphical Interval Logic (TGIL), is inspired by real-time extensions of Dillon et al.'s Graphical Interval Logic and can be used as an alternative to timed extensions of temporal logic. We define the semantics of TGIL as a set of timed traces—using a dense time semantics—and illustrate its use in formal verification by describing a method for generating an observer from a TGIL specification.
I. INTRODUCTION
An issue limiting the adoption of model-checking technologies by industry is the difficulty, for non-experts, of expressing their requirements using the low-level languages understood by model-checkers. In this paper, we define a Timed Graphical Interval Logic (TGIL), that is, a formal graphical notation for expressing the timing constraints and behavioral properties of a reactive system.
Engineers frequently use diagrams to explain the behavior of a system or to describe desired scenarios. Nonetheless, such drawings usually suffer from the same drawbacks as requirements described using natural language: they can be ambiguous or misleading; they are not precise enough (they do not cover all the cases); they are not amenable to automated transformation; etc. In this work, we take our inspiration from an existing graphical notation, the Graphical Interval Logic (GIL) of Dillon et al. [4], and extend it with two operators for expressing timing constraints. We show, with a simple example, that TGIL is more expressive than another real-time extension of GIL [6] that has already been proposed.
Together with the definition of a formal semantics for TGIL, the main contribution of this work is to describe a method for generating “an observer” from a TGIL specification. These observers can be used in conjunction with a model-checking tool to check that the specification is valid on a given system.
Our main motivation in the design of TGIL was to define the semantics of a set of real-time specification patterns [2] using a graphical notation. This set of patterns extends the specification language of Dwyer et al. [5] with the ability to express hard real-time constraints commonly found in the analysis of real-time systems. For example, the timed pattern "present $A$ after $B$ within $[d_1, d_2]$" expresses the requirement that, if $B$ ever occurs, event $A$ must occur within $d_1$ to $d_2$ units of time (u.t.) of the first occurrence of $B$. The semantics of the patterns has already been defined using Metric Temporal Logic (MTL), a timed extension of linear temporal logic [7]. The idea is to provide an alternative formal definition based on TGIL.
We believe that this new approach may ease the work of engineers who are not trained in formal verification techniques. Moreover, our experience shows that being able to confront different definitions of the same pattern, based on contrasting approaches, is useful for teaching patterns.
The remainder of the paper is organized as follows. Next, we describe the graphical notation of TGIL and define an equivalent textual syntax. In Section III, we define the semantics of TGIL using a satisfaction relation over timed traces. Before concluding, we study the expressiveness of our logic and describe a method for using TGIL as the property specification language in a model-checking problem.
II. TIMED GRAPHICAL INTERVAL LOGIC
We consider the problem of expressing behavioral properties and timing constraints on the execution of a reactive system. We assume that the execution of the system can be described using a combination of events and time delays. (We use letters $A, B, \ldots$ to denote predicates on events.) Events should be understood as instantaneous actions involved in the evolution of the system: an observable transition of the system; a process changing its state; etc.
TGIL can be viewed as a real-time extension of the Graphical Interval Logic (GIL) of Dillon et al. [4]. An example of a GIL diagram—which is also a TGIL diagram—is given below.
*(Diagram: the running example, a search \(\rightarrow A\), then a search \(\rightarrow B\), with \(\Diamond\phi\) holding in between; its textual form is given later in this section.)*
A TGIL specification, or formula, is a diagram that should be read from top to bottom and from left to right. Our example depicts three main notions used in TGIL. For each notion, we briefly describe its graphical notation and propose an equivalent textual syntax.
1) Execution Context: every formula is expressed with respect to an execution context, displayed as a straight line, that represents a portion of an execution trace—a time interval—where the property is evaluated. The initial, top-most context symbolizes the whole execution trace, that is, the time interval $[0, +\infty)$.
In this short abstract we only consider contexts that are "closed at their beginning and open at their end." This convention, which is also followed in GIL, simplifies the presentation and may help avoid problematic examples, such as Zeno behaviors. Nonetheless, our notation can be extended to handle unrestricted types of contexts.
2) Search: formulas and sub-contexts are built from searches, which define instants matching a given constraint in the current context; searches are displayed as dashed arrows decorated with (a predicate on) events. In our example, the first search starts from the beginning of the initial context (thus at time 0) and defines the first instant in the context where an event \(A\) occurs (say \(t_A\)). The second execution context is defined by the result of this search; it starts at time \(t_A\).
A search can be combined with a context in order to define a sub-context: from a context and a search point, say \(S\), we can define the context located after \(S\) (as we do in our example) or the context located before it (as shown in the diagram below). We use the notation \([\rightarrow A]\) for the first kind of context and the notation \(\langle\phi\rangle_{\rightarrow A}\) for the other kind, where \(\phi\) is a TGIL specification.
*(Diagram: a search \(\rightarrow A\) used to define the sub-context located before the matched instant.)*
There are two kinds of searches in our graphical notation: a weak version and a strong version. With a strong search, \([\rightarrow A]\), the formula is false if we fail to find an event matching \(A\) in the current context (if the search fails). Conversely, with a weak search, denoted \([\dashrightarrow A]\), the formula is true if the search fails. We show how to define the weak search as a derived operator of our logic.
3) Formulas: a TGIL specification associates properties with contexts and search points in the diagram. For instance, the last (bottom) element in our example states that the formula \(\phi\) should be true somewhere/sometime in the context \([t_A, t_B]\), where \(t_A\) and \(t_B\) are the dates associated with the pair of events \((A, B)\) matched in the previous searches. Here \(\phi\) can be a formula—expressed using another TGIL diagram—or simply a predicate on events. (The instant where \(\phi\) is true is materialized by the diamond, like in a search.)
As a consequence, our running example can be interpreted as follows: look for the first occurrence of an event \(A\). If there is one, then find the following occurrence of \(B\). If no such occurrence exists, the property is false. Finally, find an instant "in-between" where \(\phi\) holds. For concision, we omit intermediate contexts when they can be inferred from the diagram. Thus, our example can be drawn equivalently:
*(Diagram: the running example with intermediate contexts omitted: a search \(\rightarrow A\), then a search \(\rightarrow B\), with \(\phi\) holding somewhere in between.)*
The textual equivalent of this requirement could be written \([\rightarrow A]\,\langle \Diamond\phi \rangle_{\rightarrow B}\).
TGIL also provides a construction for expressing "punctual properties"—depicted using a triangle under a search point—that is, a property relevant at a given instant in a context. For example, the first (boxed) part of the TGIL diagram below states that, at the first instant where \(A\) is false, \(B\) is true (since it is a strong search, it also states that such an instant must eventually occur). The textual equivalent of this formula is \([\rightarrow (\neg A)]\, B\).
*(Diagram: a boxed punctual property \([\rightarrow(\neg A)]\,B\), followed by a second search \(\rightarrow(C \land \neg A)\) carrying the punctual property \(C\).)*
As shown in the previous diagram, TGIL formulas can be combined using boolean connectives and grouping—the graphical equivalent of parentheses—drawn as a boxed rectangle.
4) Timing Constraints: finally, TGIL provides two operators for adding timing constraints to formulas: an operator that bounds the delay between two instants, and an operator that restricts a context to a given time interval.
In the following, we use the symbol \(I\) as a shorthand for the time interval \([d_1, d_2]\).
The first operator, called time length, uses a "curly brace" notation, illustrated below. It states that the delay between the two instants materialized by the ends of the brace is in \(I\).
*(Diagram: a time-length constraint, a curly brace spanning the instants matched for \(A\) and \(B\), annotated \(\in I\).)*
We can use this operator to constrain the length (time duration) of a context. In particular, when the two instants are the boundaries of the same search—as is the case in our example—we use the textual notation \([\rightarrow A \in I]\) to state that the length of the search \(\rightarrow A\) is in \(I\). This restricted operator, denoted \(\text{Len}(I)\) in [6], is the only timing operator that was considered in a previous timed extension of GIL.
The second operator, time restriction, limits an execution context to a given time interval \(I\) (relative to the starting point of the context). This operator is drawn using a combination of dotted and solid lines. We give, as an example, the specification of the Present pattern described in Sect. I.
*(Diagram: the Present pattern, a search \(\rightarrow B\) followed by a context restricted to the interval \(I\), in which \(A\) is searched for.)*
This formula can be interpreted as follows: assume that \(I_C\) is the time interval corresponding to the current execution context. If \(t_B\) is the instant of the first occurrence of \(B\) in \(I_C\), then search for the first occurrence of \(A\) in the context \(I_C \cap [t_B + d_1, t_B + d_2]\). If \(I_C\) is bounded, its time restriction may be empty, in which case all posterior searches will fail.
To the best of our knowledge, the time restriction operator is totally new in the context of GIL. This operator is necessary to define the pattern present \(A\) after \(B\) within \(I\) that we described in the introduction. Indeed, equipped with the \(\text{Len}\) operator alone, we can only detect whether the first \(A\) following a \(B\) occurs within \(I\).
III. FORMAL SEMANTICS
The semantics of a TGIL formula, \(\phi\), is defined as the set of timed traces for which \(\phi\) holds. A timed trace \(\sigma\) is a (possibly infinite) sequence of events and durations \(d(\delta)\), with \(\delta \in \mathbb{Q}^+\).
In the following, we use \(\varepsilon\) to denote the empty trace and, given a finite trace \(\sigma\) and a possibly infinite trace \(\sigma'\), we denote by \(\sigma \sigma'\) the concatenation of \(\sigma\) and \(\sigma'\). This operation is associative.
The duration of a finite trace \(\sigma\), denoted \(\Delta(\sigma)\), is the sum of all its time delays. We extend this definition to infinite traces by defining \(\Delta(\sigma)\) as the limit of the \(\Delta(\sigma_i)\), where the \(\sigma_i\) are growing prefixes of \(\sigma\).
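As a small worked example (ours, not from the original text), the duration of a finite trace is simply the sum of its delays:

\[
\sigma = A\; d(1.5)\; B\; d(2)\; C \quad\Longrightarrow\quad \Delta(\sigma) = 1.5 + 2 = 3.5
\]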
A TGIL specification can only be satisfied by execution traces that are either finite or that cannot block the passing of time: we say that a trace \(\sigma\) is well-formed if and only if \(\sigma\) is finite or \(\Delta(\sigma) = \infty\). With this restriction, we avoid problematic behaviors, in which an infinite number of events occurs in finite time, without forbidding time divergence. More generally, we consider execution traces up to "time equivalence", \(\equiv\), that is, the largest congruence such that the traces \(d(\delta_1 + \delta_2)\) and \(d(\delta_1)d(\delta_2)\) are equivalent. This relation preserves durations and guarantees that a well-formed trace is only equivalent to other well-formed traces.
We consider a finite set of propositional variables, \(A, B, \ldots\), that denote "atomic properties" of events \(\omega \in \Omega\). We use the expression \(\omega \in A\) to denote that the proposition \(A\) is true for \(\omega\). (By extension, we shall also use \(A\) to denote a predicate over propositional variables.)
We define the semantics of TGIL using our textual syntax. Besides the propositional fragment, the main operators are the punctual formula \((A)\); the left and right searches; the sometime modality \((\Diamond)\); and the time restriction \((\downarrow_I)\).
\[
\phi ::= \neg \phi \mid \phi_1 \lor \phi_2 \mid A \mid [\downarrow A] \phi \mid \langle \phi \rangle_{\downarrow A} \mid (\diamond \phi) \mid (\downarrow \diamond \phi)
\]
We use the satisfaction relation \(\sigma \models \phi\) to denote that the formula \(\phi\) holds for \(\sigma\). In this definition, we use \(\sigma_I\) to denote the sub-trace of \(\sigma\) restricted to the time interval \(I\), and the notation \(\sigma \equiv A\sigma'\) as a shorthand for the condition \(\sigma \equiv \omega \sigma'\) and \(\omega \in A\).
\[
\begin{align*}
\sigma &\models \neg \phi && \text{iff not } \sigma \models \phi \\
\sigma &\models \phi_1 \lor \phi_2 && \text{iff } (\sigma \models \phi_1) \text{ or } (\sigma \models \phi_2) \\
\sigma &\models (A) && \text{iff } \sigma \equiv A\sigma' \\
\sigma &\models [\rightarrow_I A]\,\phi && \text{iff } \exists \sigma_1, \sigma_2 \cdot \sigma \equiv \sigma_1 \sigma_2 \,\land\, A \notin \sigma_1 \,\land\, \sigma_2 \equiv A\sigma_2' \,\land\, \Delta(\sigma_1) \in I \,\land\, \sigma_2 \models \phi \\
\sigma &\models \Diamond \phi && \text{iff } \exists \sigma_1, \sigma_2 \cdot \sigma \equiv \sigma_1 \sigma_2 \,\land\, \sigma_2 \models \phi \\
\sigma &\models \downarrow_I \phi && \text{iff } \sigma_I \models \phi
\end{align*}
\]
Here \(A \notin \sigma_1\) means that no event of \(\sigma_1\) satisfies \(A\); the left search \(\phi\,[A \leftarrow_I]\) is defined symmetrically, locating the last occurrence of \(A\) from the end of a finite context.
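As a small worked example of these definitions (the trace and interval are chosen purely for illustration), consider the trace \(\sigma = B\,d(2)\,A\,d(1)\) and the Present pattern formula \([\rightarrow B]\,(\downarrow_{[1,3]}\,[\rightarrow A]\,\top)\). The search for \(B\) succeeds immediately, taking \(\sigma_1 = \varepsilon\), and the remaining context is \(B\,d(2)\,A\,d(1)\). Its restriction to \([1,3]\) contains the occurrence of \(A\), which happens at time \(\Delta(B\,d(2)) = 2 \in [1,3]\), so the inner search succeeds and \(\sigma\) satisfies the formula. On the trace \(B\,d(4)\,A\), in contrast, \(A\) occurs at time \(4 \notin [1,3]\), the restricted context contains no occurrence of \(A\), and the (strong) search fails.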
This definition is quite similar to the satisfaction relation for Linear Temporal Logic (LTL). In particular, TGIL is an instance of a linear time logic, in the sense that it cannot be used to reason about “several possible timelines” simultaneously.
IV. DERIVED OPERATORS AND SYNTHESIS OF OBSERVERS
We can define additional logical operators that are useful for defining properties. The “true” formula, \(\top\), can be encoded by any tautology, such as \(A \lor \neg A\). Another example of a derived formula is \(\downarrow_I (\Diamond \phi)\), which defines a property that is satisfied if \(\phi\) holds at some point in \(I\). We use the notation \(\Diamond_I\,\phi\) for this derived operator, to stress the direct relationship with MTL.
It is possible to derive the weak search operator from the strong search version. Indeed, the weak search \([\rightarrow_I A]^{w}\,\phi\) is true if and only if \([\rightarrow_I A]\,\phi\) is true or the search for \(A\) fails in the context \(\downarrow_I\). Put another way, if we can find \(A\) in \(\downarrow_I\) then \([\rightarrow_I A]\,\phi\) should be true: \([\rightarrow_I A]^{w}\,\phi \overset{\text{def}}{=} \Diamond_I\,(A) \Rightarrow [\rightarrow_I A]\,\phi\).
We already showed how to use TGIL for defining the semantics of the Present pattern (see [2] for a complete catalog of timed patterns). We now look at an example of response pattern, used to express a “cause–effect” relationship, such as the fact that a triggering event must be followed by a response within a bounded time. The pattern \(A\) leadsto \(B\) within \(I\) holds for all timed traces where every occurrence of \(A\) is followed by an occurrence of \(B\) within \(I\) (we only consider the first occurrence of \(B\) after \(A\)). Alternatively, we can define the semantics of the leadsto pattern with the diagram:
(Diagram omitted.) That is, with the formula \(\Box\,([\rightarrow A]^{w}\,[\rightarrow_I B]\,\top)\).
Next, we describe a method for using TGIL as the property specification language for model-checking. We follow an observer-based approach, meaning that the relationship between a model and its specification is interpreted as the composition of the model with an observer of its behavior. More precisely, we consider systems defined using Time Transition Systems (TTS), an extension of Time Petri Nets with data variables and priorities. (See e.g. [1] for the semantics of TTS.) TTS models can be checked using selt, an SE-LTL model-checker provided in the Tina toolbox (http://projects.laas.fr/tina/).
The idea is to synthesize a TTS model (an observer) from a TGIL specification; to generate the state space of the system composed with its observer; and to test the satisfaction of a simple reachability property. Due to space limitations, we only show the result of applying our method on a specific example, the Present pattern: \([\rightarrow B]\,(\downarrow_I\,[\rightarrow A]\,\top)\). The model-checking problem for TGIL, in its entirety, is undecidable. Nonetheless, the method that we describe here can be applied to any “positive” formula, that is, a formula without negation.
We define some conventions used when defining the observers for the formula \([\rightarrow B]\,(\downarrow_I\,[\rightarrow A]\,\top)\). The observer is a Time Petri Net without places, see Fig. 1 (this net is composed with transitions in the observed system, which may have associated places). The observer uses boolean variables to encode the “state” of every operator in the formula: foundB is true after the search for \(B\) succeeds (the top operator is \([\rightarrow B] \ldots\)); startI is true after the beginning of the time restriction \((\downarrow_I)\), that is \(d_1\) u.t. after foundB becomes true, while endI is true after \(d_2\) u.t.; foundA is true after the search for \(A\) succeeds.
In our encoding, each variable is set by a distinct transition. Transitions have a precondition, pre, which is a predicate over variables; the precondition must be true for the transition to be enabled. Symmetrically, each transition has an action, act, that is evaluated when the transition is fired. In the observer of Fig. 1, SI and EI are transitions that belong to the observer, whereas A and B are transitions that will be composed with the “events” A and B in the observed system. We also make use of priorities (dashed arrows between transitions) in order to give precedence to transitions belonging to the observer.
The property holds if the search for $B$ fails or if we find an $A$ while the predicate ($\text{foundB} \land \text{startI} \land \neg \text{endI}$) is true. Therefore, to check if the pattern holds for the system, it is enough to check the reachability property (we express the property in LTL): ($\Diamond \text{foundB}$) $\Rightarrow$ ($\Diamond \text{foundA}$).
V. RELATED WORK AND CONTRIBUTIONS
We have defined the semantics, as well as both graphical and textual notations, for TGIL, an extension of the Graphical Interval Logic (GIL) of Dillon et al. [4]. Another real-time extension of the Graphical Interval Logic, called RTGIL, has been proposed by Dillon et al. [6]. RTGIL extends GIL by adding the equivalent of our “time length” search operator, \([\rightarrow_I A]\,\phi\). In comparison, TGIL is more expressive since it provides an operator for time restriction, \(\downarrow_I\,\phi\), that is not derivable in RTGIL. For example, the timed pattern Present \(A\) after \(B\) within \(I\) can be expressed in TGIL, but not in RTGIL.
Other works propose graphical notations for expressing behavioral properties. Most of these proposals are based on informal diagrammatic notations, such as UML, or are not concerned with verification.
Apart from the work on GIL, which we discussed extensively, Alfonso et al. [3] define Visual Timed event Scenarios (VTS), a graphical language to define complex requirements using annotations on a partial order of events. It is possible to express timing constraints using VTS, but some simple requirements cannot be expressed, such as the fact that a given event, say \(A\), should hold for a duration of \(d\). This requirement corresponds to the TGIL formula \(\Diamond_{[0,+\infty)}\,(\Box_{[0,d]}\,A)\). Concerning tooling, another reference is the TimeEdit tool [9], which is based on timeline diagrams. TimeEdit specifications can be compiled into Büchi automata, just like LTL, and used with the Spin model-checker. Nonetheless, timeline diagrams do not directly support the definition of timing constraints.
The usefulness of TGIL goes beyond the definition of timed patterns. It is also a good candidate for replacing timed extensions of temporal logic and for studying their decidable fragments. For example, the timed search operator of TGIL is reminiscent of the \(\triangleright_r\) operators defined in the State Clock Logic (SCL) of Raskin and Schobbens [8], a decidable real-time extension of PTL.
In this short abstract, we have defined a timed extension of Dillon et al.'s Graphical Interval Logic (TGIL) that is more expressive than previous proposals. The semantics of TGIL can easily be defined using an equivalent “textual notation”, which may facilitate formal developments (such as, e.g., proving the consistency of partial proof systems). We show how to apply TGIL to the definition of a real-time specification language and give an example of the synthesis of an observer. A limitation of this approach is that TGIL is essentially a linear time logic (it expresses constraints on traces), whereas some properties may require a branching time extension. In future work, we plan to enrich the logic with more expressive timing constraints and to study their interaction with a branching time variant of TGIL. We also plan to extend our compilation of diagrams into TTS observers to a larger subset of TGIL.
Object-Model Transfer in the General Video Game Domain
Alexander Braylan, Risto Miikkulainen
Department of Computer Science, The University of Texas at Austin
Abstract
A transfer learning approach is presented to address the challenge of training video game agents with limited data. The approach decomposes games into objects, learns object models, and transfers models from known games to unfamiliar games to guide learning. Experiments show that the approach improves prediction accuracy over a comparable control, leading to more efficient exploration. Training of game agents is thus accelerated by transferring object models from previously learned games.
Introduction
Reinforcement learning methods have achieved high levels of performance across a broad spectrum of games but often require large amounts of training data (Hausknecht et al. 2014; Mnih et al. 2015). Learning forward models of an agent’s environment can reduce the amount of required training and improve overall performance and flexibility. A forward model is a function that predicts the future state of an environment from its current state. However, when the data used to train a model is sparse, noisy, or high-dimensional, the model is at risk of suffering from generalization error in predictions made outside of the data seen during training. For example, the first few frames of a new game an agent observes may not be sufficient to inform the agent about how the game will behave later on.
One field of research that may help address the problem of generalization error is transfer learning (Taylor and Stone 2009; Pan and Yang 2010), the reuse of knowledge and skills learned in source tasks to accelerate and improve performance in a different target task. Applied to video games, the idea is that an agent with ample experience playing various source games can learn a better model of a new target game by transferring and combining knowledge from the source games. This paper considers the role of this combined transferred knowledge in forming an inductive bias — an assumption that constrains the space of possible models, and a way to guard against generalization error (Mitchell 1980).
The first challenge in transfer learning is mapping between variables in the source and target environments. A second challenge is integrating and applying the transferred knowledge. This paper responds to both questions in the context of general video game playing. The key step taken toward the first challenge is to decompose game environments into collections of objects. In an object-oriented formulation of a game environment, objects belong to object classes exhibiting similar behaviors across a wide variety of games (Diuk, Cohen, and Littman 2008). The variables of an object class are interpreted in the same way regardless of the game, simplifying the question of variable mapping.
The approach of this paper toward the second challenge is to construct transfer ensembles out of models transferred from source games and scratch models newly trained for the target game. Each transfer ensemble uses a weighted average of predictions from its constituent models to predict the behavior of a target object. The weights are calculated based on how well each constituent model describes the data observed for the target object class. An important final step is retraining the source models to better fit target data.
Experimental results show that agents using transfer ensembles as models of object classes generalize better than using scratch models. After observing small quantities of in-sample training data, transfer ensembles achieve greater accuracy than scratch models when predicting the behavior of objects in subsequent out-of-sample test data. Agents that use learned models to inform their actions in an exploration task are shown to perform better when using the transfer learning approach than when learning from scratch.
Altogether, the conclusion is that decomposing environments into objects and transferring object models across games is a promising approach for learning to play video games from small amounts of experience.
Background
This paper draws from research in general video game playing, model-based reinforcement learning, and transfer learning. Each is a broad field of research, so this section will review the topics most relevant to this work.
General Video Game AI
General Video Game AI (GVG-AI) is an open-source project that facilitates artificial intelligence research in general video game playing (Schaul 2013; Perez-Liebana et al. 2016). The GVG-AI project provides a framework for agents to interact with games and includes 60 games hand-coded in the Video Game Description Language (Ebner et al. 2013). The games are similar to games from the Atari 2600 console and other popular video games, including games inspired by Space Invaders, Frogger, Zelda, Lemmings, Seaquest, Sokoban, Dig Dug, Pacman, Star Fox, and Plants vs. Zombies, among many others. Borrowing from several genres of video games presents agents with a wide diversity of challenges.
Additionally, there are a few advantages to using GVG-AI over an Atari emulator. GVG-AI objects can exhibit stochastic behavior. For Atari, stochasticity can so far only be added artificially to the initial game state or to the actions input by the player (Hausknecht and Stone 2015). Furthermore, each game in GVG-AI includes several levels with different initial conditions. These features allow for straightforward out-of-sample testing, crucial for measuring generalization error. Therefore, the experiments in this paper use the GVG-AI framework and games.
**Model-Based Reinforcement Learning**
Reinforcement learning problems challenge agents to take actions in response to observations of an environment in order to accumulate rewards over time (Sutton and Barto 1998). In the most common case, the environment is formally a Markov decision process (MDP), which consists of a set of states, actions, and a probabilistic transition function. This function governs the distribution of subsequent states given every current state and action. Model-based reinforcement learning methods rely on an estimate of the transition function. In contrast to model-free methods, they have rich representations of the environmental dynamics. Such representations yield various benefits: data efficiency, better planning and exploration, and robustness against changes in the reward function (Atkeson and Santamaria 1997; Asmuth and Littman 2011). Most approaches to model learning for high-dimensional environments use factored state representations, learning approximate transition functions on a manageable number of features of the state space.
**Factored-State Model Learning for Video Games**
Because video games are high-dimensional environments, the only approaches that learn models of video games use factored state representations. One approach to learning models of Atari games by Bellemare et al. (Bellemare, Veness, and Bowling 2013) predicts patches of pixels from neighboring patches using a compression algorithm, taking advantage of the tendency of game objects to depend only on nearby objects. Alternatively, a deep learning approach by Oh et al. (Oh et al. 2015) uses convolutional neural networks on pixel inputs from the entire game screen to predict future pixel values.
While some research exists on learning factored models to make the most out of few training samples (Degris, Sigaud, and Wuillemin 2006; Hester and Stone 2013; Jong and Stone 2007), both papers on model learning for video games focus on the scalability and power of the models rather than on sample efficiency. The neural networks used by Oh et al. trained on 500,000 frames per game, while the models in Bellemare et al. trained on 10 million frames. This paper investigates training on as few as 10 frames.
**Object-Oriented Markov Decision Process**
An Object-Oriented Markov Decision Process (OO-MDP) is a factorization that exploits the object-oriented nature of many reinforcement learning problems by re-framing the environment as a collection of objects (Diuk, Cohen, and Littman 2008). Compared to the high dimensionality of the full game state, the object-oriented environment is represented only by the relatively few attributes of each object. These attributes include the object’s internal state variables and variables representing relations with other objects. For example, geographic relationships are encoded by first-order propositions \( \text{on}(o_1, o_2), \text{touch}^\text{x}(o_1, o_2), \text{touch}^\text{y}(o_1, o_2), \text{etc.} \). Each object belongs to an object class; all instances of the same object class are assumed to follow the same transition function, thus only one model is needed for each object class. The assumption that many of these object classes are similar over multiple games is one motivation for choosing the object-oriented factorization for transfer learning.
**Transfer Learning**
The transfer of models between tasks is related to the theory behind choosing a good inductive bias. When a learner can sample from multiple related tasks, an inductive bias that works well for several of those tasks can be expected to work well in other related tasks (Baxter 2000). For example, learning multiple related tasks at the same time with some shared learning parameters can be better than learning each task individually (Caruana 1997). Similarly, source knowledge can inform the selection of inductive bias in target tasks. Some such approaches involve the use of an ensemble model, a weighted combination of source models where the weights depend on how well each source model predicts the target data (Dai et al. 2007; Gao et al. 2008; Eaton and DesJardins 2009). This is the type of approach taken in this paper.
**Approach**
This section first presents a method for learning a forward model of the transition function of each object class from scratch in GVG-AI games. It then presents a transfer learning method for reusing scratch models to learn models more quickly for new objects in target games.
**Learning Object Models from Scratch**
A forward model \( F_j \) of an object class \( j \) is a function that generates a prediction \( \hat{S}_t^i = F_j(S_{t-1}^i) \) for the state \( S_t^i \) of object instance \( i \) (belonging to object class \( j \)), given its previous state \( S_{t-1}^i \). The state \( S_{t-1}^i \) includes the object instance’s internal variables as well as global state variables such as the player action \( A_{t-1} \). Learning a model involves using observed data to alter the parameters of the model so as to improve its prediction accuracy. The three major decisions for specifying a model learner are on the model variables, the model’s functional form, and the learning algorithm, described in detail in the rest of this subsection.
**Model Variables: Object Class Attributes**
In addition to a visual display, GVG-AI reports a list of all objects in the game at each frame. For each of these objects, it discloses the occupied tile (its x and y position) as well as a token representing the object's class, used for grouping different instances of the same class within a game.
The above position and object class information are sufficient to extract a set of attributes that capture most of the observable object behaviors in GVG-AI games. The most common behaviors encountered include deterministic, stochastic, and player-controlled movement; death; and spawning of other objects on the same tile. Spawning is a novel extension of the OO-MDP formulation to capture the effect of a new object instance appearing in a game.
The predicted next state of an object instance g consists of the following output attributes:
- Directional movement (North/South/East/West) at time t, $M_t = \{m_{t}^{N}, m_{t}^{S}, m_{t}^{E}, m_{t}^{W}\}$;
- Whether the object is alive and on screen, $e_t$; and
- New spawns of other objects on the tile of this object, $N_t = \{n_{t}^{C(i)} : \text{spawn}_t(i), \text{on}_t(g, i)\}$.
To clarify how the spawn attributes are managed, the proposition $\text{spawn}_t(i)$ denotes whether an object $i$ is a spawn – a newly observed object instance in a game – at time $t$. Every spawn observation is recorded as $n_{t}^{C(i)} = 1$ for every other object on the same tile as the spawn, with $C(i)$ denoting the object class of object $i$. For example, when a new bomb object appears on the same tile as an alien object, that alien object takes a value of 1 for the attribute $n_{t}^{\text{bomb}}$.
In addition to the above output attributes, the following input attributes account for factors upon which the predicted behaviors are conditioned:
- Directional movement at time $t-1$, $M_{t-1} = \{m_{t-1}^{N}, m_{t-1}^{S}, m_{t-1}^{E}, m_{t-1}^{W}\}$;
- Other objects touching the object $H_{t-1} = \{h_{t-1,D,C(i)} : \text{touch}_t(g, i), D \in \{N, S, E, W, ON\}\}$;
- Whether the object was facing in the direction of its last movement, $f_{t-1}$; and
- Action input by the player, $A_{t-1} = \{a_{t-1}^{\text{NIL}}, a_{t-1}^{\text{UP}}, a_{t-1}^{\text{DOWN}}, a_{t-1}^{\text{LEFT}}, a_{t-1}^{\text{RIGHT}}, a_{t-1}^{\text{SHOOT}}\}$.
For example, whenever an object instance is adjacent or overlapping another object instance of a different class, its $h$ attribute corresponding to the other object’s class and relative position takes a value of 1.
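As a concrete illustration of this encoding, the sketch below collects the attributes above into a single C structure; the type and field names, and the N_CLASSES bound, are illustrative rather than taken from the paper's implementation.

```c
#define N_CLASSES 32   /* illustrative bound on object classes per game */

/* One object instance's attribute vector, as described above.
 * All attributes are 0/1 indicators. */
typedef struct {
    int move[4];               /* m^N, m^S, m^E, m^W: directional movement */
    int alive;                 /* e: object is alive and on screen */
    int spawn[N_CLASSES];      /* n^c: an object of class c spawned on this tile */
    int touch[5][N_CLASSES];   /* h_{D,c}: contact with class c, D in {N,S,E,W,ON} */
    int facing;                /* f: facing the direction of last movement */
    int action[6];             /* a: NIL/UP/DOWN/LEFT/RIGHT/SHOOT player input */
} ObjectAttributes;
```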
In addition to object class models, termination models can be learned for predicting whether the game is won or lost at each frame from some global game variables. Termination models are not deeply explored in this paper but are helpful for experiments involving action selection. The two termination models, $P(\text{WIN}|X)$ and $P(\text{LOSE}|X)$, are conditioned on the following inputs:
- Existence of at least one live object instance of each class in the game, $X_{t-1} = \{x_{t-1}^{j} : \text{exist}_{t-1}(j)\}$.
Each $x^j_t$ represents whether any instance of the game’s object class $j$ exists at all at the given time. This input is used because termination often coincides with the total disappearance of one of the game’s object classes.
**Functional Form and Learning Algorithm** In a factored state model, the prediction $\hat{S}_t$ of the next state of an object is decomposed into predictions for each output variable $s^k_t \in \hat{S}_t$. All of the specified object variables take values of either 0 or 1. The values of variables not observed by an object are 0 by default. The factored-state model produces a prediction between 0 and 1 for each output variable of an object instance. Each prediction represents the probability of the output variable taking a value of 1 given the observed input values. A logistic regression model is trained for each output variable of each object class using observations of all instances of the object class in a game, depicted in the first two thirds of Figure 1.
A logistic regression output is a sigmoidal function of its weighted inputs, taking the form $\hat{s}_{t}^{k} = \frac{1}{1 + e^{-W^{k} \cdot x_{t}}}$. The weight vector $W^{k}$ consists of coefficients for the input variables and an intercept term, which are trained through gradient descent. The gradient descent algorithm iteratively decreases a cost computed from the values of the observed $S_t$ and predicted $\hat{S}_t$. Weights are gradually changed in the direction opposite to the partial derivative of this cost with respect to the weights so as to reduce the cost. The cross-entropy error $E_t = -\sum_{k} \left( s_{t}^{k} \ln \hat{s}_{t}^{k} + (1 - s_{t}^{k}) \ln (1 - \hat{s}_{t}^{k}) \right)$ is used as the cost function to ensure convergence near a global minimum.
During gradient descent training, data points are presented in random order at each iteration to avoid biasing the learned model.
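To make the functional form concrete, here is a minimal C sketch of the prediction and of one stochastic gradient step on the cross-entropy error; the function names and the flat weight layout (intercept stored separately) are illustrative assumptions, not the paper's code.

```c
#include <math.h>

/* Predicted probability that output s^k = 1: sigmoid of the weighted inputs. */
double predict(const double *w, double bias, const double *x, int n) {
    double z = bias;
    for (int i = 0; i < n; i++) z += w[i] * x[i];
    return 1.0 / (1.0 + exp(-z));
}

/* One gradient descent step on the cross-entropy error for one sample.
 * For a sigmoid output, dE/dz = (predicted - observed), so each weight
 * moves against the gradient by lr * (p - s) * x[i]. */
void sgd_step(double *w, double *bias, const double *x, double s_observed,
              int n, double lr) {
    double p = predict(w, *bias, x, n);
    double g = p - s_observed;
    for (int i = 0; i < n; i++) w[i] -= lr * g * x[i];
    *bias -= lr * g;
}
```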
**Object Model Transfer**
The transfer learning approach in this paper relies on a simple and intuitive assumption: some object classes encountered in a target should behave similarly to other object classes previously encountered in sources. Therefore, when reasoning about an unknown target object, knowledge of previously seen similar source objects can help constrain and shape the distribution of predictions for the target object's behavior. The measure of similarity depends on what is known about the target object, what is known about the source objects, and the ability to establish relationships between attributes of the different objects. This assumption forms an inductive bias which should help trained models generalize better to unseen target data.
The bottom third of Figure 1 is a sketch of how this approach uses source models to train transfer models. The following example serves to illustrate more tangibly how object class models can be transferred.
**An Illustrative Example of Walls and Avatars** In the game Chase, the player-controlled avatar must chase goats by moving freely in four directions except when blocked by wall objects. These movement rules for the avatar are common in several other games, such as the game Escape. In Escape, the avatar is again moved in four directions by the player and is blocked by walls. The Escape avatar can also push away box objects and disappear through hole objects. A transfer-learning agent who has played many games of Chase but has only seen a few frames of Escape should be able to reuse specific knowledge from Chase to make more accurate predictions about Escape than a total novice.
Upon encountering a wall for the first time in Escape, a novice agent with no Chase experience would have low certainty about the outcome of an attempt to move the avatar into the wall. In contrast, a transfer learning agent could notice some similar behavior between the Chase and Escape avatars, such as how the player inputs move both of them in similar directions, and reason that the interaction with the wall is also likely to be the same in both games.
The transfer learning method presented in this paper produces models that make predictions as described above when the source is Chase and the target is Escape. However, transferring object class models is not always so simple for all sources and targets. The following subsections explain additional challenges encountered and how they are addressed.
**Source Knowledge Selection** One objective for a transfer learning algorithm is the ability to choose automatically what knowledge to transfer from a potentially large pool of source models. Transferred knowledge may harm rather than improve performance in the target task, an outcome called negative transfer (Taylor and Stone 2009). In order to reduce negative transfer, the transfer learning algorithm may select its sources according to their expected contribution to performance.
This paper uses a measure of one-frame forward prediction accuracy to evaluate learned models, both for guiding source selection and for overall evaluation. Prediction accuracy of an object class model $F$ on object transition data $S = \{S_1, S_2, \ldots, S_T\}$ is calculated as $\text{accuracy}(F, S) = \frac{1}{T} \sum_{t=2}^{T} \text{equal}(S_t, F(S_{t-1}))$, where $\text{equal}(S, \hat{S}) = 1$ if $s_k = \text{round}(\hat{s}_k)$ for all output attributes $k$, and 0 otherwise. This measure can also serve to evaluate the goodness of fit of a source model $F_{\text{SRC}}$ to target data $S_{\text{TRG}}$, referred to in this paper simply as the fitness of SRC to TRG: $\text{fitness}(\text{SRC}, \text{TRG}) = \text{accuracy}(F_{\text{SRC}}, S_{\text{TRG}})$. These accuracy measures range from 0 to 1, with higher values denoting better models.
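A minimal C sketch of this accuracy computation is given below; the State and Model types and the model_predict hook are placeholders for whatever representation the learner actually uses.

```c
#define MAX_OUTPUTS 16

typedef struct { int out[MAX_OUTPUTS]; } State;   /* observed 0/1 outputs */
typedef struct Model Model;                        /* opaque model type */

/* Assumed hook: fills pred[k] with P(out[k] = 1) given the previous state. */
void model_predict(const Model *F, const State *prev, double *pred);

/* One-frame forward prediction accuracy of F on transitions S_1..S_T:
 * a frame counts as correct only if every rounded output matches. */
double accuracy(const Model *F, const State *S, int T, int n_outputs) {
    int hits = 0;
    for (int t = 1; t < T; t++) {          /* compare S[t] against F(S[t-1]) */
        double pred[MAX_OUTPUTS];
        model_predict(F, &S[t - 1], pred);
        int all_match = 1;
        for (int k = 0; k < n_outputs; k++)
            if ((pred[k] >= 0.5 ? 1 : 0) != S[t].out[k]) { all_match = 0; break; }
        hits += all_match;
    }
    return (double)hits / T;               /* normalized by T, as in the text */
}
```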
The fitness measure serves to estimate which source models are likely to transfer well to target object classes. A source selection algorithm might additionally use other measures, such as the visual similarity of the icon used to represent the object or the frequency at which the object class model successfully transfers to other object classes. Such additional measures could further improve performance on source selection but are left to future research.
**Target Model as Ensemble of Source Models** The algorithm for transferring object class models is described in detail in this subsection. The algorithm starts with several trained object class models from source games. Then it observes some frames in a new target game. Some of the source models should predict later observations of the target game objects more accurately than new models trained from scratch on the observations made so far. Specifically, the assumption is that source models with high fitness to the target data should be more useful than those with low fitness. Therefore, for each object class in the target game, the algorithm builds a transfer ensemble out of both the pool of source models and the new scratch target model. The basic ensemble used is a forward model that makes predictions based on the weighted sum of its constituent forward models' predictions. Each of its constituents is assigned a weight as follows (a sketch of this computation is given after the list):
1. The scratch target model gets a nominal weight of 1.
2. Each source model $j$ gets a nominal weight equal to $(b_j - a/2)$, where $b_j$ is the source model's fitness and $a$ is the scratch accuracy. Subtracting a portion of the scratch accuracy increases the relative strength of weights given to fitter source models.
3. Source models with non-positive weights are dropped.
4. The final weights are normalized by their sum.
5. The source models are retrained by adjusting their internal coefficients through the same gradient descent method used for their initial training, minimizing prediction error on the new target data while leaving intact the parts of the source models uninformed by target data.
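The following C sketch shows steps 1–4 of the weight computation (retraining, step 5, is omitted); the array layout, with the scratch model at index 0, is an illustrative choice.

```c
/* Compute normalized ensemble weights. b[j] is source model j's fitness,
 * a is the scratch model's accuracy. w_out[0] is the scratch weight;
 * kept_src[i] records which source model each kept weight belongs to.
 * Returns the total number of models kept (scratch included). */
int ensemble_weights(const double *b, int n_sources, double a,
                     double *w_out, int *kept_src) {
    int kept = 1;
    double sum = 1.0;
    w_out[0] = 1.0;                       /* step 1: scratch gets weight 1 */
    kept_src[0] = -1;                     /* index 0 is the scratch model */
    for (int j = 0; j < n_sources; j++) {
        double w = b[j] - a / 2.0;        /* step 2: nominal source weight */
        if (w > 0.0) {                    /* step 3: drop non-positive weights */
            w_out[kept] = w;
            kept_src[kept] = j;
            sum += w;
            kept++;
        }
    }
    for (int i = 0; i < kept; i++) w_out[i] /= sum;   /* step 4: normalize */
    return kept;
}
```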
The transfer ensemble is expected to predict target objects in out-of-sample data better than the scratch target model alone because of the inductive bias: the ensemble is biased toward models that work in other games. Retraining improves the accuracy of the transfer ensembles and reduces the number of cases and severity of negative transfer.
Experiments and Results
Out of the 60 GVG-AI games, 30 were used for exploratory testing and tuning of the system, while the other 30 were withheld for experiments. The reason for this division was to prevent bias from corrupting the results of the experiments.
The first experiments test the generalization ability of transfer ensembles compared to models learned from scratch. The hypothesis is that, after observing a small amount of in-sample training data from target games, transfer ensembles achieve higher accuracy on out-of-sample target data than scratch models. Initially, source models are learned from scratch using 500 frames of each source game. Then, for each of the 30 target games, scratch and transfer models are trained on 10 frames of target data. Each transfer model is an ensemble composed of object models from one of three disjoint sets of six randomly selected source games. The target game is never in the set of source games used for transfer. The ensemble is built according to the method described in the section above, using target training data both to calculate each source model’s fitness score and to retrain the models. After the 10 frames of training, 100 out-of-sample testing frames are produced from a different level of the target game, and accuracy is measured for each object class model produced by the scratch and transfer methods. All player actions are selected randomly.
The main measure of success is the outperformance in forward prediction accuracy of transfer models over scratch models in the testing frames for each object class in each target game. To reject the null hypothesis of the differences being due to chance, a $t$-statistic is used to compute a one-sided $p$-value. A total of 500 object class models from 30 games are tested.

Figure 2: Test accuracy for scratch models versus transfer models. Points represent object classes, which come from all of the target games. Points appearing on the line or in the top-left half of the plot indicate transfer does as well or better than scratch. Transfer outperforms scratch for many object classes, such as the avatar in Bait and the portal in Aliens, and never significantly reduces accuracy.
Table 1 shows that the average increase in accuracy is statistically significant, and it can be concluded that the transfer ensemble approach for learning models of object classes is sound. Figure 2 displays how the transfer ensemble models compare in out-of-sample accuracy against models trained from scratch. Each dot represents one object class model; scratch and transfer perform equally when a dot falls on the line, transfer outperforms when a dot is in the upper-left, and scratch outperforms when a dot is in the lower-right.
Table 1: Mean test accuracy after 10 training frames ($T$) for each of the three source-game sets ($S$): scratch ($\mu_s$) versus transfer ($\mu_t$) over all object classes, and over avatar classes only ($\mu_s^a$, $\mu_t^a$), with the $t$-statistic and one-sided $p$-value.
<table>
<thead><tr><th>$T$</th><th>$S$</th><th>$\mu_s$</th><th>$\mu_t$</th><th>$\mu_s^a$</th><th>$\mu_t^a$</th><th>$t$</th><th>$p$</th></tr></thead>
<tbody>
<tr><td>10</td><td>1</td><td>0.90</td><td>0.92</td><td>0.74</td><td>0.80</td><td>3.53</td><td>&lt;1%</td></tr>
<tr><td>10</td><td>2</td><td>0.92</td><td>0.94</td><td>0.75</td><td>0.82</td><td>3.46</td><td>&lt;1%</td></tr>
<tr><td>10</td><td>3</td><td>0.91</td><td>0.92</td><td>0.78</td><td>0.85</td><td>3.70</td><td>&lt;1%</td></tr>
</tbody>
</table>
Figure 2 also shows that the improvement is consistent across many games, with rare occurrences of negative transfer.
As shown in Table 1, the average difference between scratch and transfer performance is only about two percent. However, this average difference understates the significance of the improvement. Many object classes are easy enough to model that scratch achieves perfect accuracy. For example, wall objects never move and are modeled with perfect accuracy by scratch in all the tested target games. More important is the improvement in object classes that are hard to model, such as the avatar, which behaves with varying complexity depending on the game. Transfer achieves higher accuracy of about seven percent for the avatar models. Furthermore, Table 1 shows that the improvement is consistent using all three sets of source games. Overall, these results strongly support the hypothesis that the transfer method of this paper leads to improved out-of-sample accuracy for many object classes, with very little negative transfer.
The final experiments test how well scratch and transfer models perform relative to each other and relative to a random action-taking agent on the task of exploring the environment. Agents perform this task on three levels of the game Labyrinth, in which the agent must guide the avatar to reach a destination through maze-like levels containing a few spike tiles fatal to the avatar.
First, agents are given either 10, 50, or 100 frames of training before being evaluated on a fresh 500 frames. If the avatar dies or reaches the goal at any time, the game is restarted with the avatar in its original position. In all setups, the transfer agent is built using an ensemble of six random source games other than Labyrinth, with 500 frames of training for each source, and retrained on the 10/50/100 frames of target data. Termination models are trained in addition to object class models for both scratch and transfer in source and target games in order to help predict death.
After training, agents go through a testing phase of another 500 frames. During this phase, agents use their forward models to choose one-step actions most likely to take them to novel or least-recently visited states. Their decision-making works as follows. The agent remembers each unique game state it visits and the time frame at which it was last visited. For each action the agent predicts the next game state by using its object class models to predict the next state of each game object. Model outputs are treated as probabilities of setting the corresponding object variables to one.
Treating forward predictions as probabilistic samples in this way helps agents avoid getting stuck. The value of each action considered at each frame by an agent is calculated as $1 - t_{\text{last}}/t_{\text{now}}$, where $t_{\text{now}}$ is the current time frame of the game and $t_{\text{last}}$ is the time frame at which the predicted next state was last visited ($t_{\text{last}} = 0$ if the state has never been visited). If the agent predicts death, the value is $-1$. At each frame the agent chooses whichever action has the maximum value. At the end of the testing phase, the total number of unique states visited is counted and used as the metric of evaluation. After the testing phase, an additional 500-frame phase is run to measure prediction accuracy as in the previous experiments. During this phase, all agents take random actions rather than informed ones, in order to ensure a fair comparison. The purpose is to determine the relationship between model accuracy and actual performance on an important task requiring action selection. Results are averaged over five experiments on each of the three levels of Labyrinth and each of the three training setups.
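In C, the per-action value described above reduces to a few lines; the predicts_death flag stands in for the agent's death prediction and is an illustrative simplification (t_now is assumed positive during play).

```c
/* Value of an action whose predicted next state was last visited at frame
 * t_last (0 if never visited), evaluated at the current frame t_now.
 * Novel states score 1, just-visited states score near 0, and predicted
 * deaths score -1. */
double action_value(int t_last, int t_now, int predicts_death) {
    if (predicts_death) return -1.0;
    return 1.0 - (double)t_last / (double)t_now;
}
```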
Table 2: Average improvement in accuracy (Acc) and exploration (Exp) over random actions, by level and training size (N), for the scratch (S) and transfer (T) approaches. Transfer outperforms scratch in exploration even when they are tied for accuracy, as in the results of the 100-frame training scenarios. The conclusion is that transfer leads to better accuracy and exploration performance.
<table>
<thead><tr><th>Map</th><th>N</th><th>Acc$_S$</th><th>Acc$_T$</th><th>Exp$_S$-$R$</th><th>Exp$_T$-$R$</th></tr></thead>
<tbody>
<tr><td>L0</td><td>10</td><td>0.71</td><td>0.86</td><td>-3.2</td><td>45.8</td></tr>
<tr><td>L1</td><td>10</td><td>0.77</td><td>0.84</td><td>4.4</td><td>24.4</td></tr>
<tr><td>L2</td><td>10</td><td>0.69</td><td>0.92</td><td>2.4</td><td>12.6</td></tr>
<tr><td>L0</td><td>50</td><td>0.84</td><td>0.93</td><td>12.4</td><td>27.8</td></tr>
<tr><td>L1</td><td>50</td><td>0.89</td><td>0.91</td><td>-3.6</td><td>32.8</td></tr>
<tr><td>L2</td><td>50</td><td>0.88</td><td>0.94</td><td>3.4</td><td>13.4</td></tr>
<tr><td>L0</td><td>100</td><td>0.95</td><td>0.86</td><td>41.8</td><td>39.2</td></tr>
<tr><td>L1</td><td>100</td><td>0.91</td><td>0.90</td><td>12.2</td><td>26.6</td></tr>
<tr><td>L2</td><td>100</td><td>0.93</td><td>0.95</td><td>10.6</td><td>25.6</td></tr>
</tbody>
</table>
Figure 3: Trajectory maps of avatar during test phase, five runs overlaid for Level 0, agents trained on 10 frames. The maps show more space explored by transfer agents.
Table 2 shows how scratch and transfer agents perform in exploring three levels of Labyrinth given 10, 50, and 100 initial frames of training. Figure 3 shows an example of the avatars’ trajectories. The agents trained from scratch on only ten frames of the game are not highly accurate in out-of-sample experience and struggle to perform better than random exploration. In contrast, the transfer agents are more accurate, supporting the results of the previous experiments, and are also able to explore much more efficiently.
As the number of training frames increases to 100, scratch models catch up in accuracy to transfer models. Interestingly, the transfer agents still explore more efficiently on average than the scratch agents, despite not being any more accurate. One possible explanation for this outperformance, unexplained by overall accuracy, is that the transfer agents may be more accurate specifically in the predictions that matter most.
Discussion and Future Work
The methods explored in this paper (object-oriented factorization, transfer ensembles, and model retraining) help improve the sample efficiency of agents learning GVG-AI games. In these experiments, transfer-learning agents were more accurate than scratch agents when predicting future states. They were also more efficient at exploration, which is a widely useful ability for learning games. Future work will investigate the ultimate task of maximizing score, which is outside the scope of this paper because it requires the integration of planning and value approximation methods.
GVG-AI games contain diverse challenges that test how well the learning approach generalizes across games. Crucially, its games also have stochastic behaviors and multiple levels, which test how well agents generalize across experiences. However, there are some challenges that are not covered by the GVG-AI domain, and an important path for future work is to improve the robustness of this approach by adapting it to other domains. For example, using transfer to reduce generalization error could be useful in domains with noisy or high-dimensional observation spaces.
Conclusion
This paper demonstrated a model-based transfer learning approach for training video game agents from very little data. The approach constructs an ensemble out of source object models and uses the limited target data both to choose the ensemble weights and to retrain the final model. Although both scratch and transfer models achieve global minima in prediction errors during training, experiments showed consistently higher out-of-sample performance for transfer models across diverse GVG-AI games. Transfer agents showed particular improvement in modeling important objects such as avatars, which was useful for more quickly exploring unfamiliar game maps. Artificial agents can use this approach to accelerate early-stage learning and quickly adapt to novel situations.
RAM: array database management through relational mapping
Ballegooij, A.R.
Chapter 2
A History of Arrays
Simply put, arrays are multi-dimensional structures with elements aligned across a discrete, rectangular grid. An array's elements are stored in an orderly fashion and each element can be uniquely identified by a numerical index. Especially in the area of high-performance computing, array structures have been, and remain, a popular tool. Arrays and array operations are expressive enough to effectively model many real world (computational) problems, yet their structure is simple enough to reason about. The level of abstraction introduced by use of bulk types (such as arrays) has driven high performance compiler technology by facilitating automatic vectorization and parallelism [1].
The expressive power of array-expressions is explained by their similarity to basic mathematical structures: vectors and matrices. These basic linear algebra structures are structurally equivalent to one-dimensional and two-dimensional arrays. Linear algebra is a successful field of mathematics: its techniques can be applied in many other fields of mathematics, engineering, and science. An often applicable approach to problem solving is to express the problem in terms of linear algebra problems with known means of solution.
In computer science, the popularity of the array structure initially had little to do with the relation to linear algebra. At the physical level, the hardware of a computer memory uses linear addressing to identify its different elementary slots. This linear addressing scheme is visible to the computer programmer in most programming languages. In such languages, the natural way to store multiple values of the same type is a sequence of elements stored at consecutive addresses in memory: a one-dimensional array. Sequential storage of data is not merely convenient, computer hardware has developed to a state in which maximum efficiency for number crunching often requires sequential memory access [2].
The benefit of highly efficient processing for a paradigm that allows many mathematical problems to be concisely expressed is appealing. For this reason, the scientific community that deals with large-scale computational problems favours the proven technology of low-level programming languages over generic database management systems. Yet, scientific instruments and computer simulations are creating vast volumes of data to be organized, managed, and analyzed: these are the primary tasks of a database management system. The lack of use of database technology in scientific programming can be attributed to the failure of most DBMS systems to support ordered data collections natively [3].
This chapter discusses various incarnations of the “array” throughout computer science from the bottom-up. Section 2.1 starts by discussing different interpretations of the array in various programming languages. Section 2.2 continues the discussion by touching upon the mathematical formalization of arrays. Finally, Section 2.3 discusses the difficult relation between general-purpose database technology and the array structure.
2.1 Programming Languages
As mentioned, the different elementary slots in a computer's memory are physically addressed through a linear addressing scheme. This linear addressing scheme is visible to the computer programmer in low-level imperative programming languages.
The C programming language [4], famous for its use in the UNIX operating system, is one of these low-level languages. The C standard provides rudimentary support for multi-dimensional arrays, primarily as a syntactic construct to facilitate type checking; at its core, an array in C is a block of consecutive memory slots [5]. This close relation between the language and the computer system is by design: the C language is minimalistic, close to the hardware, and portable; these features allow for a generic implementation of low-level operating system components and applications across different computer architectures. However, this low-level interpretation of the “array” provides little abstraction for its users, for example:
**Example 2.1 (Arrays in C).** This small C program defines a two-by-two array, A, and makes a transposed copy, B, of it:
```c
char A[2][2] = {{'a','b'},{'c','d'}};
char B[2][2];
int i, j;
for(i=0;i<2;i++)           /* Explicit iteration over the axes */
    for(j=0;j<2;j++)
        B[i][j] = A[j][i]; /* Processing per single array element */
```
The example clearly shows the imperative nature of the language: the nested “for-loops” explicitly instruct the computer to iterate, in a particular order, over the array axes and process a single element at each step. Multi-dimensional arrays in C are stored in a single block of memory. The compiler translates multi-dimensional indexes to linear memory addresses:
```c
char A[4] = {'a','b','c','d'};
char B[4];
int i, j;
for(i=0;i<2;i++)                 /* Iteration over the axes */
    for(j=0;j<2;j++)
        B[i*2 + j] = A[j*2 + i]; /* Address computation */
```
Note the straightforward mapping function that translates the multi-dimensional array indexes to linear addresses: This function is commonly referred to as the “polynomial indexing function”.
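The polynomial indexing function generalizes directly to any number of dimensions. The following sketch is illustrative (it is not taken from the original text): it computes the row-major linear offset of an n-dimensional index using Horner-style evaluation.

```c
#include <stddef.h>

/* Row-major polynomial indexing: map an n-dimensional index to a
   linear memory offset. shape[k] is the length of axis k, idx[k] the
   index along that axis. */
size_t linear_offset(size_t n, const size_t shape[], const size_t idx[])
{
    size_t offset = 0;
    for (size_t k = 0; k < n; k++)
        offset = offset * shape[k] + idx[k]; /* Horner-style evaluation */
    return offset;
}
```

For the two-by-two example above, `linear_offset(2, (size_t[]){2,2}, (size_t[]){i,j})` evaluates to `i*2 + j`, the same address computation written out manually.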
2.1.1 Array Oriented
The programming language FORTRAN [6], also imperative and considered low-level, offers more abstraction than the C language does: Its arrays are defined as a collection type over basic elements, and are supported by a small set of built-in functions. A notable innovation is the rich “subscripting” functionality provided: In a single statement, range selections over axes can be expressed that produce a new array containing a subset of the elements in the original. Arrays in FORTRAN can also be “reshaped”: reshaping reorders array elements by serializing a multi-dimensional array and subsequently de-serializing the produced sequence with different shape parameters.
An important difference with arrays in the C language, as discussed above, is that the FORTRAN language definition does not specify the storage scheme for arrays. The collection-type abstraction of arrays allows for different implementations on different platforms; however, the presence of the reshape operator does reflect assumptions about computer architecture: when an array is stored column-major in a linear memory area, reshaping is a cost-free operation that merely alters an array’s shape parameters. A key abstraction in the language is the FORALL statement, which performs an action on all elements of an array without specifying the order in which this is done. The FORTRAN language has led to efficient compiler implementations that exploit “single instruction multiple data” (SIMD) type parallelism on hardware architectures that support vectorized operations: an important contribution to FORTRAN’s popularity in computationally intensive problem domains.
**Example 2.2 (Arrays in FORTRAN).** This small FORTRAN program defines a two-by-two array, \( A \), and makes a transposed copy, \( B \), of it.
```fortran
INTEGER :: I
INTEGER :: J
CHARACTER, DIMENSION(2,2) :: A
CHARACTER, DIMENSION(2,2) :: B
A = RESHAPE((/'a','b','c','d'/),(/2,2/))
FORALL (I=1:SIZE(A,2))
FORALL (J=1:SIZE(A,1))
B(I,J) = A(J,I)
END FORALL
END FORALL
```
The **RESHAPE** intrinsic in the example is necessary because FORTRAN only supports one-dimensional array literals: array \( A \) is created by reshaping a sequence of characters into a two-dimensional array. Although visually similar to the C example presented earlier, this example does not specify the order in which the elements are to be processed, which leaves the compiler additional degrees of freedom for its code generation.
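To make concrete why reshaping a linearly stored array can be cost-free, consider the following hedged sketch (the descriptor layout and names are illustrative, not taken from any FORTRAN implementation): when the elements already lie in serialization order, a reshape only rewrites the shape metadata and never touches the data.

```c
#include <stddef.h>

/* An array descriptor: shape metadata plus a pointer to linear storage. */
typedef struct {
    size_t ndims;
    size_t shape[8]; /* at most 8 axes, for illustration */
    double *data;    /* elements stored contiguously in serialization order */
} Array;

/* Reshape in O(1): only the shape metadata changes; the element block is
   shared. The caller must ensure the new shape has the same element count. */
Array reshape(Array a, size_t ndims, const size_t shape[])
{
    Array r = a; /* shares a.data */
    r.ndims = ndims;
    for (size_t k = 0; k < ndims; k++)
        r.shape[k] = shape[k];
    return r;
}
```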
Matlab is a software package that is very popular among scientists working on, for example, multimedia analysis or applied mathematics in other fields [7]. Matlab uses a syntax closely related to the FORTRAN syntax to allow manipulation of its basic unit: the matrix. Matrices are structurally equivalent to two-dimensional arrays, as the suitability of the FORTRAN array primitives for matrix manipulation demonstrates. The Matlab language is interpreted and as such does not provide the same raw processing performance that made FORTRAN popular. Instead, its popularity stems from its ease of use, a rich library of efficient mathematical primitives, and visualization tools.
Another language influenced by FORTRAN is the FAN query language for arrays [8]. It combines the syntaxes of imperative array-oriented programming languages, notably FORTRAN, into a simple query language over arrays with a focus on subscripting. Subscripting allows the selection of sub-arrays by specifying projections for each of the array’s axes. FAN is a query language in the strictest sense of the word: it allows users to denote concisely the subset of data in a file that they are interested in, nothing more – no computation or other non-trivial combination of data from different sources. The main contribution of this work is the realization that parallels can be drawn between array processing in programming languages and database technology. FAN focuses on typical data management aspects: platform-independence, persistence, and data-independence. The language is now part of a low-level software library used to store (large) arrays in files: netCDF [9]. NetCDF is an example of a file format designed to store large arrays in a platform-independent way. It is commonly used in scientific computation applications [10].
2.1.2 Array Comprehension
The functional programming paradigm performs computation through the evaluation of mathematical functions. Programs in this paradigm are a collection of function definitions rather than a sequence of commands. The strength of the paradigm is that its functions are free of side-effects: evaluation of functions produces results without affecting a global program state, which is particularly useful for proving program correctness. As functional languages have no persistent variables, data structures are typically defined recursively. For example, a list is defined as a head value followed by the tail of the list. Modern functional languages, however, offer a convenient method to specify collection types: comprehensions.
The language of comprehensions uses a concise syntax to specify a collection of data [11]. These comprehension syntaxes can be defined for a whole hierarchy of collection types ranging from unordered data to highly structured: sets, lists, and multi-dimensional arrays. Set comprehension is based on the selection of the subset desired given a larger set of values; it is commonly used in mathematics and closely related to common database query languages such as SQL [12, 13]. List comprehension is a construction found in functional programming languages such as Miranda [14] and Haskell [15]. Comprehension of lists is based on generation in combination with filtering to produce the list required. Array comprehension extends list comprehension by associating array elements with (multi-dimensional) indices. Various proposals exist for an array comprehension syntax; they differ mostly in syntax, not semantics.
A comprehension-based array constructor defines the shape of the array and a function that specifies the value of each cell given its array index. Examples of array comprehension are the array support for the programming language Haskell, the query language AQL (see Section 2.3.3), the query language supporting the RasDaMan system (see Section 2.3.3), and the query language of our own RAM system (see Chapter 3).
A set-comprehension \( \{ x \in D \mid C_1, C_2, \ldots, C_n \} \) (easily recognized in its SQL variant `SELECT * FROM D WHERE C_1 AND C_2 AND ... AND C_n`) specifies which elements from \( D \) are part of the result through selection conditions \( C_1, C_2, \ldots, C_n \), whereas the array-comprehension requires specification of the process that generates the result from its index values. This distinction in style is best demonstrated through an example. To specify the even numbers smaller than 10, an array-comprehension forces us to make explicit our knowledge about generating the five even numbers: \( \{ (2 \cdot x) \mid x < 5 \} \). The set-comprehension approach instead specifies a superset of the desired result (\( \mathbb{N}_0 \)) and reduces it to the desired result through the appropriate selection criteria: \( \{ x \in \mathbb{N}_0 \mid x < 10, \text{isEven}(x) \} \).
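The stylistic difference is easy to see in imperative code as well. The sketch below (illustrative only, not from the original text) computes the even numbers below 10 twice: once by generation, in the spirit of array comprehension, and once by filtering a superset, in the spirit of set comprehension.

```c
#include <stdio.h>

int main(void)
{
    /* Generation: construct exactly the five even numbers below 10. */
    for (int x = 0; x < 5; x++)
        printf("%d ", 2 * x);
    printf("\n");

    /* Selection: enumerate a superset and filter it down. */
    for (int x = 0; x < 10; x++)
        if (x % 2 == 0)
            printf("%d ", x);
    printf("\n");
    return 0;
}
```

Both loops print `0 2 4 6 8`; they differ only in whether the result is generated directly or selected from a larger candidate set.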
Array comprehension is a declarative, monolithic approach to functional language arrays: It defines all elements at once at the time the array is created. Comprehension syntax, however, is simple enough to allow straightforward implementation in an imperative setting. The straightforward imperative evaluation of array comprehensions, nested iteration over each of the source collections, is not always the most efficient solution. The problem is that imperative languages over-specify evaluation order of array elements, which makes it hard for a compiler to optimize the program. Functional languages under-specify evaluation order by focusing on what should be computed, rather than how it should be computed. Minimal imposed execution order is an advantage for any optimization process; recall that bulk operators have been introduced in imperative languages (e.g., FORALL in FORTRAN) precisely to ease optimization.
**Example 2.3 (Array Comprehension).** An array comprehension consists of an array shape and a function that specifies the value of each cell given its location in the array. This example specifies a \( 5 \times 5 \) array, where each element has an index tuple \((x, y)\) and a value defined by \( f(x, y) \):
\[
A = \{ ((x, y), f(x, y)) | x < 5, y < 5 \}
\]
The straightforward translation of this comprehension in an imperative program explicitly iterates over the axes and evaluates the function for each cell:
```c
double f(int x, int y); /* the cell-value function from the comprehension */

double A[5][5];
int x, y;
for(y=0;y<5;y++)
    for(x=0;x<5;x++)
        A[y][x] = f(x,y);
```
---
\(^1\)This example uses the RAM syntax for array comprehension; see Chapter 3.
Anderson and Hudak have shown that it is feasible to construct a compiler that removes the main sources of inefficiency in functional programming and realizes performance comparable to native FORTRAN programs through analysis of Haskell array comprehensions [16]. The optimizations partially overcome basic problems imposed by lazy functional language design, and are partially related to the Haskell array comprehension construct. Functional programming languages are notoriously costly to execute when heavy use is made of lazy evaluation, so for efficient evaluation of a Haskell array comprehension it is important to avoid lazy evaluation. This is achieved through appropriate scheduling of the evaluation order of array elements, based on analysis of dependencies between different array elements. Haskell arrays are conceptually modeled as lists of index-value pairs, which requires verification that every index within an array's domain exists and occurs exactly once. These checks can often be resolved at compile time, through analysis of the program, avoiding costly runtime checks.
2.1.3 Array Centric
Array comprehension is similar to array support as offered in imperative languages: it requires algorithms over arrays to be expressed at the individual-element level. A Programming Language, APL [17], takes array orientation as its central concept. APL is built around a mathematical notation developed to reason about ordered structures (arrays). It supports numbers and characters as basic types with arrays as the sole method to provide structure. Arrays are supported through the introduction of over a hundred new operators, each of which has unique and clearly defined semantics. Most of these basic operations take whole arrays as input to produce whole arrays as output, rather than single elements.
The practical problem with APL is that it is a very high-level language, designed to be concise and elegant, but not to match computer hardware characteristics closely. In addition, the original language introduced new graphical symbols for each of its operations: the characters needed for APL’s many operators, and their ASCII equivalents, are standardized [18, 19]. The large number of operations and symbols introduced makes the language hard to implement and master, yet it is applicable at each of the many different layers in computer architecture. It is well suited for technically low-level tasks, such as microprogramming of inner CPU functionality. As Iverson himself demonstrates in his book [17], low-level interaction can be modeled by realizing that at the lowest level, memory is no more than an array of binary values, bits. CPUs manipulate these arrays of bits through basic operations, such as shifting and the various boolean combinators, easily expressed in APL.
**Example 2.4 (Arrays in APL).** This small APL program defines a two-by-two array, $A$, and makes a transposed copy, $B$, of it:
$$
A \leftarrow \begin{pmatrix} a & b \\ c & d \end{pmatrix}
$$
$$
B \leftarrow \overset{\nwarrow}{A}
$$
The example immediately demonstrates that APL is a graphical language: the expressions clearly resemble mathematical formulas. The arrow over the variable $A$ in the second statement is the APL operator for transposition; the direction of the arrow denotes the axis over which to transpose. In this case the northwestern direction indicates the diagonal of the matrix.
The elegance of APL has led to a number of other array-centric languages. These languages aim at solving some of the shortcomings in the original language. For example, J [20] is a successor to APL, developed by Iverson and other APL developers. J eliminates the non-functional elements in APL and provides a purely functional language. Its focus is to offer the benefits of modern, functional, high-level language design for the concise expression of bulk computation. It also breaks with the symbolic language of APL through a syntax that requires only the standard ASCII character set to express its operations. Another example is the language K [21], developed by Arthur Whitney, an influential member of the APL community. The K language also provides a high-level, array-oriented programming language with an ASCII-based syntax. It focuses primarily on usability by providing both efficiency and simplicity for mathematical analysis with a comprehensive GUI framework. It specifically targets business-domain applications, such as analysis and predictions based on financial data.
2.1.4 Array Shape
The FISh programming language compiler goes one step further, using static program analysis that separates array shape from the actual values [22]. FISh, "Functional = Imperative + Shape", is a functional array programming language designed to take advantage of shape theory (see Section 2.2.1). Shape is a separate type in FISh; every expression has both a value and a shape that can be independently manipulated. This strict separation between shape and value results in an environment with reduced complexity of the individual primitive operations, which allows for better scheduling of operations. For example, by moving around shape-manipulating operations, data reductions can be pushed down, avoiding operations over values that would otherwise be discarded. At the same time, shape-independent operations allow for parallelization and vectorization of execution.
An interesting aspect of FISh is that the language operates on nested regular (dense and rectangular) arrays. Shape is considered a part of the array type; therefore, despite the fact that arrays can be nested, array structures in FISh are always rectangular: arrays must be of the same type, and therefore shape, when they are contained within the same array.
2.2 Formalization
It is the intuitive nature of arrays that makes them such commonly used structures in computer programs. Of course, array structures are defined operationally in the specifications of programming languages that support array processing. However, fundamental theoretical foundations for the structure have been developed to study and get a grip on their mathematical properties. Different formalizations strive to maximize the elegance of the methods that describe complex array transformations. Typically, these frameworks focus on the relation between array shape and content: the index and value of each cell.
2.2.1 Shape Separation
Shape theory separates the notion of shape from the actual data [23]. Even though arrays are a prime example of a structure that allows such analysis, the theory unifies many different structures in a common theme. It is important to realize that shape has semantics: a set of numbers carries different meaning than a matrix composed of the same numbers. A separation between shape-modifying operations and content-manipulating operations often coincidentally results from efforts to realize elegant formalizations. Jay and Streckler [23] stress the importance of this separation from a conceptual point of view.
The theory separates shape from content by differentiating between shape-modifying and content-manipulating operators, called shapely and shape-polymorphic operations respectively. Many different shapely operations can be devised depending on the data type operated on. Shape-polymorphic operations, however, are far less common. In practice, the map operation, the application of a function to each element in a collection, is the only shape-polymorphic function. Moreover, many functions rely on both shape and value to produce their results, which implicitly reflects the semantics of the shape component. Nevertheless, in those cases where a clear distinction can be made between a shapely and a shape-polymorphic component in a computation, both classes of operations are independent. Independent operations can generally be executed in any order, which provides optimization opportunities.
Arrays are a suitable data-type for the application of shape theory, since arrays allow for a wide variety of shapely operations that are meaningful, which eases shape and content separation. For example, the Google map-reduce technique applies the inherent parallelism in set-oriented bulk processing of data to parallelize complex analysis tasks over thousands of computers [24]. Shape theory is applied to arrays in the development of FISh [22], an array-centric programming language discussed earlier in Section 2.1.4.
2.2.2 APL Inspired
Theoretical foundations for array structures are typically inspired by the apparent universal applicability of the structure in computer programming. APL especially, itself intended as a mathematical framework, has inspired formalization of the array structure and its operations. Two of these theoretical foundations for array structures are the theory of arrays [25] and the mathematics of arrays [1].
The theory of arrays combines arrays with arithmetic and functions to produce an axiomatic theory in which theorems hold for all arrays having any finite number of axes of arbitrary length. The theory is initially defined over lists, arrays restricted to a single dimension, and subsequently extended to multi-dimensional arrays. It is built partially on top of the operations defined for LISP [26] and APL [17], both examples of programming languages that take ordered structures, lists and multi-dimensional arrays respectively, as basic units. Interestingly, in the theory of arrays, sets and set-based operations are defined using array-based primitives as a basis: the reverse of the traditional mathematical approaches that define arrays as a special type of set.
In the array theory, arrays are nested, rectangular structures with finite valence and axes of countable length. Nesting is included into the theory to compensate for the restrictions that the rectangularity constraint imposes on the structure. Contrary to the arrays in shape theory, discussed above, in the theory of arrays it is valid to nest arrays of arbitrary shape, which allows for the construction of non-rectangular structures by nesting arrays of differing shape in one array. This work has motivated the extension of arrays in APL to support nesting in APL2 [27] and forms the basis for the programming language Nial [28].
This theory builds array processing on the principles of counting and valence as the basis for location and shape: these properties follow from array indexes only, not the value of array elements. Arrays have axes of countable length, therefore the elements in an array can be serialized into a list using row-major ordering, and any location within a multi-dimensional array can be reached by counting the elements in this list representation. Reshaping of arrays is also formalized through serialization: its semantics correspond to serialization of one array into a list that is subsequently de-serialized, with a different shape, to produce a new array. A notable example of a formal proof made using the theory is that any sequence of reshaping operations can always be collapsed to a single reshaping operation. The wealth of formal proofs that provide inspiration for rewriting of array-expressions is the main contribution of More’s work.
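A hedged sketch of the serialization-based definition (illustrative C, not taken from the theory itself): for contiguous row-major storage, serializing and de-serializing are plain copies that preserve element order, which is exactly why a chain of reshapes collapses to the last one: the intermediate shapes never influence the serialized order.

```c
#include <stddef.h>
#include <string.h>

/* Reshape by explicit serialization, following the formal definition:
   serialize src row-major into a list, then de-serialize that list under
   the new shape. For contiguous row-major storage both steps reduce to a
   copy that preserves element order, so for any conforming shapes s1, s2:
   reshape(reshape(A, s1), s2) == reshape(A, s2). */
void reshape_serialize(const double *src, double *dst, size_t nelems)
{
    memcpy(dst, src, nelems * sizeof *src); /* element order is preserved */
}
```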
Like the theory of arrays, the mathematics of arrays (MOA) [1] is based on the operations found in APL. Arrays are simple yet effective structures. But where the theory of arrays attempts to leverage their potential by showing that this natural simplicity makes the structure a suitable basis for mathematics [29], the mathematics of arrays was developed to provide a firm mathematical reasoning system for algorithms involving flat arrays of numbers. Instead of extending APL array support with additional complexities, such as nesting, MOA axiomatizes a subset of the structuring and partitioning operations found in APL.
MOA describes all partitioning operations and linear transformations on arrays in terms of their shape and the n-dimensional indexing function $\psi$. The algebra defined in MOA consists of a small number of operators and allows symbolic rewriting through rules defined on the basis of functional equality. The theory is used to express and exploit parallelism at different levels of granularity, such as the fine-grained SIMD type parallelism found in vector-processors and the coarse grained parallelism offered by systems with multiple processors. It has been successfully applied to prove theorems about register transfer operations in low-level hardware design. It has also been used to describe partitioning strategies of linear-algebra operations for parallel systems.
Another formalization of arrays is based on category theory [30]. This formalization is built on flat (non-nested) arrays as a basic unit, while nesting has been added as an extension to the framework in subsequent work [31]. The approach develops a framework based on array constructors. Operations over arrays are expressed in terms of basic array constructors, and different operations are related to each other on the basis of these constructors. The advantage of this approach over other frameworks, such as More’s array theory or the original APL, is that the precise semantics of the operations follow automatically from the constructor semantics, whereas existing approaches define each operation in isolation.
Two interesting array constructors produce complete arrays from a few parameters: basis and grid. Given a shape, the basis function results in a list of array axes each of which is represented as a list of possible indexes. The grid function takes this a step further and produces an array filled with self-indexes. These simple operations are remarkably useful for the formalization of array operations: they are used to construct an array from scratch, and they allow index-variables to be converted to values for use in computations by resolving indexes in their grid.
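A hedged sketch of the two constructors (the C signatures are invented for illustration; the theory defines them abstractly):

```c
#include <stddef.h>

/* basis: given a shape, each axis is represented by the list of its
   possible indexes. Here for a single axis of length len. */
void basis(size_t len, size_t axis[])
{
    for (size_t i = 0; i < len; i++)
        axis[i] = i;
}

/* grid: a rows-by-cols array filled with self-indexes; the cell at (i,j)
   holds the pair (i,j). Index variables in an expression can then be
   converted to ordinary values by reading them off the grid. */
void grid(size_t rows, size_t cols, size_t out[][2])
{
    for (size_t i = 0; i < rows; i++)
        for (size_t j = 0; j < cols; j++) {
            out[i * cols + j][0] = i;
            out[i * cols + j][1] = j;
        }
}
```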
While claims are made about the benefits of this formalization for applications such as compiler technology, its applicability is limited. Processing arrays by recursively applying the various constructors is impractical; the theory could however give insight into the relation between the higher-level operators commonly found in array processing.
2.3 Arrays in Database Technology
Maier and Vance [3] identified the failure of most DBMS systems to support ordered data collections natively. The authors hypothesize that the mismatch in domains between scientific problems, often based on ordered structures, and database systems, based on unordered sets, explains why DBMSes are not used widely in general science. The mismatch in domains causes unnatural encoding of inherently ordered scientific data in a DBMS, encouraging users to implement client-side processing while using a DBMS only as a persistent data store.
The relation between the (multi-dimensional) array structure and database management systems has long been a difficult one. Relational database technology owes its popularity in the business domain to the high degree of abstraction it offers: separating the application logic from data-management details [32]. Array structures, however, typically occur in a context where minute details about the physical processing are important. Yet, these two requirements are not mutually exclusive: it is possible to provide a high-level interface for array-based processing that allows smooth application integration in the domain of arrays, while at the same time exploiting in-depth knowledge of the structure and its properties at the low level to realize efficient processing.
Trends in the evolution of database technology address the challenges posed by very large scientific data sets [33]. Relational query processing techniques are independently making their way into high performance computing systems, such as the previously mentioned map-reduce in Google’s search technology. This is similar to the techniques used to push the performance envelope of distributed database technology [34]. At a lower level, basic linear algebra operations at the core of many scientific computing problems have been shown to benefit from data abstraction. By utilizing generic relational data access methods and efficient join algorithms, matrix operations over complex storage schemes can be accelerated [35].
2.3.1 Ordered Structures in Databases
In spite of the overwhelming evidence that arrays are a useful construct, SQL-99 [36], the current standard for database query languages, has only limited array support [37]. Relational database management systems operate on unordered data. Yet, it is known that order, inherent to the physical representation of data, is an important issue for efficient query processing. For example, it may be cost-effective to physically sort data in preparation for subsequent operations such as joining: even though sorting in itself is a costly operation, using a “sort-merge” algorithm instead of a naive nested-loop join can be worth the initial investment. Another well-known example is the propagation of order through a query plan to efficiently handle top-N type queries. Explicit knowledge about order can be valuable for a wide range of query optimizations.
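For instance, once both key columns are sorted, a merge join finds all matches in a single linear pass instead of the quadratic comparisons of a nested loop. The sketch below is a minimal illustration (it ignores duplicate keys and payload columns, which a real join implementation must handle):

```c
#include <stdio.h>

/* Merge join of two sorted integer key arrays: advance the cursor with
   the smaller key; emit a pair whenever the keys match. */
void merge_join(const int *l, int nl, const int *r, int nr)
{
    int i = 0, j = 0;
    while (i < nl && j < nr) {
        if (l[i] < r[j])
            i++;
        else if (l[i] > r[j])
            j++;
        else {
            printf("match: %d\n", l[i]);
            i++;
            j++;
        }
    }
}
```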
A recurring approach in database literature is the introduction of ordered storage types at the relational algebra level. By treating relations as sets stored in lists and re-defining the relational algebra over these lists of tuples it is possible to explicitly model physical data order in the query process [38]. Wolniewicz and Graefe take the opposite approach, adding scientific data types and associated operations into a database framework by implicitly modelling those datatypes using the existing set primitive [39]. Both approaches are complementary: explicit addition of ordered types to a database kernel may facilitate efficient query processing, also for conceptually unordered data structures, while modelling new types using existing primitives provides convenient interfaces to existing technology.
The SEQ model [40, 41] differentiates between record-oriented operations and positional operations. Positional operations are supported by a sequence data-model: (nested) sequences are explicitly added to the relational data model. This data-model allows for specialized operators that simplify the expression of operations based on order and the order-aware optimization of such queries.
Another example, the AQuery system, is based on “arrables”, or “array tables”. These arrables are vertically decomposed tables (aligned one-dimensional arrays) that are explicitly ordered on some ORDER BY clause [42]. By keeping track of this order explicitly, the AQuery system can optimize queries that are based on order. Moreover, the explicit storage of arrables as decomposed one-dimensional arrays allows for more efficient low-level operators to be implemented.
Explicitly taking notice of such physical order to implement efficient storage and processing primitives is also done in the MonetDB Database system [43]. MonetDB explicitly decomposes tables into one-dimensional arrays (called void-BATs) in order to allow the use of more efficient positional primitives.
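A hedged illustration of why such decomposition pays off (the column names are invented for the example): once a table is stored as aligned one-dimensional arrays, the row number doubles as a virtual object identifier, and fetching a row's values is pure positional access, with no key comparison or tuple reconstruction.

```c
/* A vertically decomposed table: each column is a dense array, and the
   arrays are aligned so that index i across all columns forms row i,
   much like MonetDB's void-headed BATs. */
struct decomposed_table {
    double *price;    /* column 1, indexed by row number */
    int    *quantity; /* column 2, aligned with column 1 */
    int     nrows;
};

/* O(1) positional lookup: the row number is the array index. */
double row_total(const struct decomposed_table *t, int i)
{
    return t->price[i] * t->quantity[i];
}
```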
2.3.2 Conceptual Arrays in Databases - OLAP
Online analytical processing (OLAP) systems are based on the notion of data-cubes, structures that store data of interest over multiple dimensions (for an overview see [44, 45]). Data-cubes closely resemble multi-dimensional arrays.
OLAP systems come in two flavours, ROLAP and MOLAP, implemented either using a relational engine or on top of a specialized multi-dimensional data-cube engine. Alternatively, systems exist that use a combination of both techniques. The conceptual model of data-cubes is, however, independent of the underlying implementation. This independence is made explicit by Cabibbo and Torlone [46], whose MD model defines mappings to both relational and multi-dimensional backends.
2.3.3 Multidimensional Arrays
Multidimensional array data, however, differs from data that fits in data-cubes in a fundamental way: it is shaped. This property of array data leads to a distinct class of array operations based on the manipulation of array indices [47]. Support for these kinds of operations differentiates array database efforts from OLAP systems.
The array query language (AQL) proposed in [48] has been an important contribution toward the development of array support in database systems. AQL is a functional array language geared toward scientific computation. It adds some syntactic sugar to NRCA, a nested relational calculus (NRC) extended to support arrays as well as multisets. The proposed language takes the point of view that an array is a function rather than a collection type, and is based on a comprehension-like syntax defining arrays of complex objects.
Although a prototype system enriched with AQL is reported, the main contributions are of a theoretical nature. NRCA supports most traditional set-based operations, such as aggregation, through the manipulation of complex objects, basically nested collections. The authors prove that the inclusion of array support in their nested relational language amounts to the addition of two functions: an operator to produce aggregation functions and a generator for intervals of natural numbers.
The array manipulation language (AML) is more restrictive and no prototype appears to exist [49, 50]. An interesting characteristic of AML is its alternative definition of arrays and an unconventional set of operators, supposedly designed to express image manipulation efficiently. In AML arrays are defined having infinite valence \((x \times y \times z \times 1 \times 1 \times \cdots)\) and sub-sampling is achieved through bit patterns over axes rather than explicit index numbers. A point of concern, however, is that AML is not always applicable. For example, a seemingly simple array operation, matrix-transposition, cannot be expressed elegantly – the source must be decomposed entirely and the transposed matrix explicitly (re-)built.
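The bit-pattern style of subsampling can be sketched as follows (an illustration of the idea only, not AML's actual syntax or semantics): a repeating pattern over one axis decides which positions are kept.

```c
#include <stddef.h>

/* Subsample a one-dimensional array by a repeating bit pattern: position
   i is kept when pattern[i % plen] is nonzero. The pattern {1,0} keeps
   every other element, {1,1,0} keeps two out of every three, and so on.
   Returns the number of elements written to dst. */
size_t subsample(const double *src, size_t n,
                 const int *pattern, size_t plen, double *dst)
{
    size_t out = 0;
    for (size_t i = 0; i < n; i++)
        if (pattern[i % plen])
            dst[out++] = src[i];
    return out;
}
```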
A deductive database approach with array support, DATALOGA, proposed in [51], provides many viable opportunities for (array) query optimization. Unfortunately, the query language itself requires users to explicitly encode nested-loop-style evaluation of array operations in a Prolog-like language.
Sarawagi et al. [52] have added support for large multi-dimensional arrays to the POSTGRES database system. Multidimensional arrays are stored in specialized data structures, which are integrated into the core of the database system. The focus of this work is the low-level management of large arrays, where arrays are split into chunks (using a regular grid) that are distributed over blocks on the storage device. In addition to discussing disk-based storage, the work focuses on specific problems that tertiary storage poses, proposing optimizations that minimize, for example, the need for a tape robot to swap tapes. Although some rules are derived to optimize the fragmentation process, the process itself is only partially automated, and human intervention is required to instruct the system which particular fragmentation strategy to follow.
The RasDaMan DBMS is a domain-independent array database system [53, 54, 55]. Its RasQL query language is an SQL/OQL-like query language based on a low-level array algebra, the “RasDaMan Array Algebra”. This algebra consists of three operators: an array constructor, an aggregation operation, and a sorting operation. The constructor is similar to the AQL array constructor in that it defines a shape and a function to compute the value for each array cell. The aggregation construct reduces an array to a scalar value; the sorter facilitates the sorting of hyper-planes over a single dimension.
The RasDaMan DBMS provides an example of an operational array based multi-dimensional DBMS. Although RasDaMan is intended as a general purpose framework for “multi-dimensional discrete data” (basically sparse arrays), its primary application so far has been image databases. An interesting contribution of their work is an optimized arbitrary tiling system for the storage manager. The RasDaMan storage manager fragments arrays into “tiles” and optimizes the fragmentation pattern automatically to best match observed access patterns.
A similar effort is based on the AMOS-II functional DBMS [56, 57]. This system is also implemented in an OO-DBMS and offers a functional matrix query language supported by a comprehensive library of foreign functions with matrix operations. In addition, the system supports various matrix-storage schemes, such as “full”, “sparse”, and “skyline” representations. The system takes care of selecting the appropriate functions, order of application, and appropriate storage scheme for a given task.
2.4 Summary
Throughout this chapter a large spectrum of areas has been discussed; each of these areas provides its own perspective on arrays. The RAM system, presented in this thesis, derives inspiration from many of these areas.
Its query language is constructed around a comprehension-style array constructor following in the footsteps of functional programming languages and multi-dimensional array query languages such as AQL and RasQL.
Its optimizer is inspired by classical-relational database query-optimizer technology for its design, while its transformation rules are inspired by work from a variety of areas mentioned throughout this chapter, specifically the work on array programming languages and array query languages.
Its query evaluation is delegated to existing relational-database technology. For its primary target platform, MonetDB, the RAM system makes explicit use of ordered structures and order-aware operators available in the native relational algebra, deriving inspiration from the work on ordered structures in databases.
ABSTRACT
Circuit breakers are crucial components for power system operation. The currently adopted time-directed maintenance strategy and the emerging condition-based strategy require a flexible information processing technique and software architecture. In this paper, mobile agent software has been applied in implementing circuit breaker maintenance and repair tasks. Several potential application scenarios are described and the relevant software features are discussed. The benefits of using the mobile agent techniques are discussed at the end.
I. INTRODUCTION
Circuit breakers are crucial components for power system operation. They play an important role in switching for routine network operation and in the protection of other devices in power systems. To ensure circuit breakers are in healthy condition, periodic inspection and preventive maintenance are typically performed. The maintenance schedules and routines usually follow the recommendations of circuit breaker vendors, although the recommended schedules may be conservative.
New maintenance techniques and methodologies are emerging, while circuit breakers keep improving in their designs and functions. As an example, some new circuit breakers have embedded monitoring instruments available to measure the coil current profiles and the operation timing. The recorded information can be used to monitor the condition of breakers during each operation. In this case, it may be more appropriate to replace the time-directed maintenance practice by condition-directed maintenance. When applied properly, both the size of the maintenance crew and the maintenance cost may be reduced greatly with this approach. Since the number of circuit breakers in a power system is usually very large, a small maintenance cost saving per circuit breaker can accumulate into a considerable benefit for the whole system. A more systematic solution is Reliability Centered Maintenance (RCM), which can be used to select the most appropriate maintenance strategy.
During maintenance or repair work, the maintenance crew will need to access information distributed across the utility and stored in different data formats. By equipping the crew with new information access methods to replace the old paper-based information exchange and logging method, efficiency may be improved, since less time will be spent on preparation, reporting, and logging. An information access method that is capable of handling heterogeneous information sources will help achieve this goal. Also, the new information access method should be secure and able to work on unreliable public networks.
Mobile agent software provides a flexible framework for mobile agent applications. An agent application program can travel through the internet/intranet to the computers where a mobile agent server or transporter is running. The mobile agent software also supports Distributed Events, Agent Collaboration, and Service Bridge. Compared with client-server systems, an agent can process the data locally and thus reduce the network traffic. Besides, the Java platform encapsulates the network layer from the agent, which makes the programming easier. Mobile agent software may fit very well in the circuit breaker maintenance scenario. In this paper, we consider how mobile agent software might be applied in circuit breaker maintenance and monitoring from the viewpoint of the maintenance crew.
II. CIRCUIT BREAKER MAINTENANCE TASKS
The maintenance of circuit breakers deserves special consideration because of their importance for routine switching and for the protection of other equipment. Electric transmission system breakups and equipment destruction can occur if a circuit breaker fails to operate because of a lack of preventive maintenance. The need for maintenance of circuit breakers is often not obvious, as circuit breakers may remain idle, either open or closed, for long periods of time. Breakers that remain idle for six months or more should be made to open and close several times in succession to verify proper operation and remove any accumulation of dust or foreign material on moving parts and contacts.
Circuit breakers mainly consist of the interrupter assembly (contacts, arc interrupters and arc chutes), operating mechanism, operation rod, control panel, sealing system, and breaking medium (SF6, oil, vacuum or air). To ensure the performance of a circuit breaker, all the components should be kept in good condition; therefore time-directed preventive maintenance has been widely adopted. The preventive maintenance tasks include periodic inspection, testing, replacement of worn or defective components, and lubrication of the mechanical parts. The maintenance intervals are usually determined from experience or by following the recommended schedules provided by the vendor or a standard.
The maintenance practices can be divided into three categories: corrective maintenance, preventive maintenance, and predictive maintenance.
The different strategies are summarized in Table I. Each maintenance strategy has its own advantages and disadvantages, and thus its most suitable application scenarios. A systematic solution is to utilize the Reliability Centered Maintenance (RCM) methodology. It performs analysis of the failure modes and the cause-effect impacts on the devices as it tries to find which strategy is the most cost-effective and appropriate for an application. The result of utilizing the RCM techniques and tools is an optimal maintenance schedule for a specific application scenario.
TABLE I
MAINTENANCE STRATEGIES
<table>
<thead>
<tr>
<th>Strategy</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>Run-to-failure maintenance (Corrective, repair only)</td>
<td>The repair and restoration of equipment or components that have failed or are malfunctioning and are not performing their intended function</td>
</tr>
<tr>
<td>Time-directed maintenance (Preventive)</td>
<td>The periodic and planned maintenance actions taken to maintain a piece of equipment within the expected operating condition. It extends the equipment life and is performed prior to equipment failure to prevent it. This includes technical specification surveillance, in-service inspection, and other regulatory forms of preventive maintenance</td>
</tr>
<tr>
<td>Condition-directed maintenance (Predictive)</td>
<td>The continuous or periodic monitoring and diagnosis in order to forecast component degradation so that as needed planned maintenance can be performed prior to equipment failure. Not all equipment conditions and failure modes can be monitored; therefore, predictive maintenance must be selectively applied.</td>
</tr>
</tbody>
</table>
The information needed to perform maintenance may reside in the enterprise maintenance system, the substation data concentrators, or the maintenance crew’s computer.
Information about spare parts, test procedures, historical maintenance records, instruction manuals, etc. is typically accessible in the enterprise maintenance system. The enterprise maintenance system will also usually utilize an RCM or conventional maintenance scheduling system to generate work orders. The work orders indicate when and where to perform what kind of maintenance on which devices.
Information about the substation equipment may be retrieved from the substation computers or concentrators. With the introduction of continuous monitoring of circuit breakers, real-time data becomes available for access in the substation concentrators. The continuous monitoring instruments may measure the coil current profiles and switching timing during normal operation. The condition of a circuit breaker can then be assessed using signal processing and artificial intelligence techniques. In this way, time-directed preventive maintenance may be replaced by condition-directed predictive maintenance. The real-time data in the substation concentrators is also a useful complement to the historical information stored in the enterprise maintenance system. The data may be utilized to automatically update or populate the enterprise maintenance database.
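As a concrete illustration of such a condition check, consider the deliberately simplified sketch below; the measurement fields and threshold values are invented, and a real system would apply signal processing to the full coil current profile rather than two scalar features.

```c
#include <stdbool.h>

/* Hypothetical per-operation measurements taken by the monitoring
   instrument; field names and limits are illustrative only. */
struct breaker_operation {
    double peak_coil_current_a; /* peak coil current, amperes */
    double operation_time_ms;   /* operation timing, milliseconds */
};

/* Flag the operation for maintenance when either measurement drifts
   outside its (assumed) acceptable band. */
bool needs_attention(const struct breaker_operation *op)
{
    const double max_current_a = 10.0; /* assumed vendor limit */
    const double max_time_ms   = 50.0; /* assumed vendor limit */
    return op->peak_coil_current_a > max_current_a
        || op->operation_time_ms   > max_time_ms;
}
```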
The maintenance crew may have the inspection or test report stored on a mobile computer. Also, the crew may need to update the status of the work order stored on the computer as well. Since the maintenance information is distributed among different systems, a software technique that has the flexibility of interfacing with multiple heterogeneous information systems is desired.
The software should have the following characteristics to meet the maintenance information exchange requirements:
- Security support (encrypted data transmission, user authentication and authorization)
- Efficient network bandwidth usage
- Robust and fault-tolerant communication over unreliable environment and portable personal communication devices
- Ability to integrate with heterogeneous systems
- Automatic software update to ease the user burden
III. MOBILE AGENT SOFTWARE
There are different definitions of what a software agent is. An agent is a proactive software component, which is capable of acting reasonably to accomplish tasks on behalf of the user. An agent should be autonomous and have sound intelligence. A good software agent should be able to adapt to a changing environment; it may also be helpful to have the ability to exchange knowledge with other agents. Agent-based programming offers greater flexibility and adaptability than component-based programming. Compared with object-oriented software engineering, agent-oriented software engineering uses a set of high-level, flexible abstractions to represent systems. Agents communicate with each other by passing messages or by synchronization. Depending on their functions, we can classify agents into several categories: personal agents, mobile agents, collaborative agents, etc.
Mobile agents are small software entities that can travel around the network, performing their functions on behalf of users. As the next-generation middleware infrastructure for developing distributed applications, they meet all the requirements mentioned above. Since mobile agents travel to the locations of the data sources and process the data locally, network bandwidth consumption is minimized. The built-in support for security, event notification, and agent collaboration can greatly improve programming efficiency.
As shown in Fig. 1, mobile agents can travel to devices that have mobile agent servers or lightweight transporters running. The mobile agent server can run on any platform where the Java runtime environment is available, and devices without a Java Virtual Machine (JVM) are supported through a communication node. The communication node can use any proprietary protocol to talk with the mobile devices. As long as the communication node has the mobile agent server running and exposes the communication functions to the agent through some programming interface, the agent can communicate with the mobile devices. Since the Java environment cannot cover the whole range of devices, the mobile agents need to know the programming interface in order to communicate with certain mobile devices.
Since the mobile agent software is built on the Java platform, other functions supported by the Java platform are also available to the mobile agents. Among them, the Java Database Connectivity (JDBC) interface for database access, the Remote Method Invocation (RMI) facility for distributed objects, and the Extensible Markup Language (XML) support are the most notable.
IV. APPLICATION SCENARIOS
To reflect the distributed character of the data sources, three computers are used to represent the enterprise maintenance system, the substation concentrator, and the maintenance crew respectively, as shown in the figure below.
The enterprise maintenance system may contain the maintenance history database, the RCM system, the warehouse inventory system, and other information. The substation concentrator is in charge of collecting data from the sensors installed on the circuit breakers. Some analysis software will be running, and a status report describing the circuit breaker operation can be generated. The maintenance crew uses a mobile computer to access the information and prepares reports utilizing software mobile agents. The number of possible application scenarios is large. Only a few of them are given here to illustrate the benefits of utilizing mobile agent software in circuit breaker maintenance practice.
A. Information Storage and Retrieval
The mobile agent can help store and retrieve all the information needed to perform maintenance or repair work. Mobile agent software supports accessing data saved in heterogeneous systems. The information related to maintenance may be saved in heterogeneous databases and files. The heterogeneity may be reflected platform-wise (differences in protocol, differences in format), concept-wise (differences in schema and vocabulary, relative incompleteness), or both. Also, an information source may use a slow network connection, which means long network delays, and a source may be operating only part-time. Mobile agent software provides a framework to work in heterogeneous environments. First, the Java platform is highly portable, which makes the mobile agent server run on a plethora of platforms. Java also has standard Application Programming Interfaces (APIs) to access data sources in relational databases and Extensible Markup Language (XML) files. Second, the mobile agent server will save the status of mobile agents, therefore providing reliable transfers over slow or intermittently connected networks.
Agents can do sophisticated searches and improve efficiency and scalability. The mobile agents travel to the location where the data is stored to do the processing and return with the final results only. In this way, the bandwidth consumption is minimized. Also, the mobile agents have abilities beyond using only the SQL or XML API. They can utilize heuristic knowledge to do more complicated searches. What an agent can do is limited by its intelligence. For example, in a database table, a field may be named “phone” instead of “telephone”. When an agent tries to search for a field named “telephone”, it will fail. But if the agent knows the relationship between “phone” and “telephone” and finds the “phone” field in the database metadata, it will try to use the “phone” field. Of course, the semantics may differ, but it is better to return some results to the user than to just report “nothing found”.
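A minimal sketch of such a field-matching heuristic follows; the synonym table, field names, and function are invented for illustration.

```c
#include <stddef.h>
#include <string.h>

/* Invented synonym pairs an agent might carry in its knowledge base. */
static const char *synonyms[][2] = {
    { "telephone", "phone" },
    { "address",   "addr"  },
};

/* Resolve a requested field name against the fields actually present in
   a table's metadata, falling back on known synonyms before giving up. */
const char *resolve_field(const char *wanted,
                          const char *fields[], size_t nfields)
{
    for (size_t i = 0; i < nfields; i++)
        if (strcmp(fields[i], wanted) == 0)
            return fields[i]; /* exact match */
    for (size_t s = 0; s < sizeof synonyms / sizeof synonyms[0]; s++)
        if (strcmp(synonyms[s][0], wanted) == 0)
            for (size_t i = 0; i < nfields; i++)
                if (strcmp(fields[i], synonyms[s][1]) == 0)
                    return fields[i]; /* synonym match */
    return NULL; /* nothing found */
}
```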
Agents can help make the location and format of the information transparent to users. In general, to utilize an information source, users are required to know the types and locations of the information. “Intelligent” agents should be able to comprehend the user’s requirement and automatically find the appropriate information and services. In a simple implementation, every agent has a knowledge base about the location and format of certain information, so the crew no longer needs to remember those details. A crew member can ask the agent to actually do something instead of specifying how to do it. The knowledge about the information locations can be acquired via a central agency or through mutual knowledge exchange among agents. Currently, all the information about locations is stored at a centralized place, and the agent populates its location knowledge base from it at first. After that, the agent can use its own knowledge base to retrieve the information.
When the location or format of an information source changes, the central database will be updated. An agent using its own outdated copy of the knowledge base will then fail; in that case, the agent will consult the central agency again to update its knowledge base.
B. Creating Circuit Breaker Failure Reports
No maintenance activity can guarantee that failures will never happen. When a circuit breaker failure does occur, immediate action and a follow-up investigation are required. As an example, to file an IEEE Std-1325 compliant power circuit breaker failure report, the user needs to collect the circuit breaker information (manufacturer, type, voltage, etc.), the operating environment data, the description of the trouble, the effect of the failure, the single line substation diagram, the operation and timing sequence, line condition, oscillogram, etc. Gathering all this information and compiling a report may be time consuming and error prone, since the information may be distributed among different data sources and stored in varied formats. Therefore an automated method is preferred.
A mobile agent builds an itinerary according to its knowledge about the locations of information sources, travels to each source, and collects the information. Alternatively, the agent may delegate tasks to a group of second-level agents and assemble the results they return; this second method may be faster when there are many sources to visit. In that case, the software agent that helps the user create the report creates and sends out additional mobile agents to the distributed information sources to collect the data. The collaboration mechanism provided by the mobile agent software is utilized to exchange information among agents. The agent may also utilize heuristic rules to help determine the possible causes of the failure.
The generated report is in Extensible Markup Language (XML) format and contains only the necessary information about the failure. Given corresponding Extensible Stylesheet Language (XSL) stylesheets that define the appearance, reports in different formats (e.g., HTML or PDF) can be generated automatically from a single data set using XSL Transformation (XSLT) tools. Using XML to represent the report separates the content from the presentation, which makes the data more accessible and exchangeable. In fact, XML has been selected as the proposed standard for exchanging real-time system information among control centers. By saving only the content, storage space is also optimized.
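A minimal sketch of such a transformation with the standard Java XSLT API (JAXP); the file names are assumptions for illustration.

```java
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;

public class ReportRenderer {
    public static void main(String[] args) throws Exception {
        // One XML data set, rendered to HTML by the given XSL stylesheet;
        // a different stylesheet would yield a different output format.
        Transformer t = TransformerFactory.newInstance()
                .newTransformer(new StreamSource("failure-report.xsl"));
        t.transform(new StreamSource("failure-report.xml"),
                    new StreamResult("failure-report.html"));
    }
}
```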
C. Circuit Breaker Monitoring
The distributed event mechanism is helpful in monitoring the status and events of circuit breakers. The user can select the events of interest to monitor. Once monitoring starts, the selected events are registered with the mobile agent server running on the corresponding substation concentrator. The concentrator obtains real-time information about the circuit breakers by communicating with the sensors, and it notifies the user when the selected type of event happens.
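The registration and notification logic on the concentrator side might look like the following sketch; the event and subscriber types are illustrative assumptions, not the actual product API.

```java
import java.util.List;
import java.util.concurrent.CopyOnWriteArrayList;
import java.util.function.Predicate;

public class BreakerEventMonitor {
    public record BreakerEvent(String breakerId, String type, double value) {}

    public interface Subscriber { void onEvent(BreakerEvent e); }

    private record Registration(Predicate<BreakerEvent> filter, Subscriber s) {}

    private final List<Registration> registrations = new CopyOnWriteArrayList<>();

    // Called when a user selects an event type of interest to monitor.
    public void register(Predicate<BreakerEvent> filter, Subscriber s) {
        registrations.add(new Registration(filter, s));
    }

    // Called by the concentrator whenever sensor data yields a new event;
    // every subscriber whose filter matches is notified.
    public void onSensorEvent(BreakerEvent e) {
        for (Registration r : registrations) {
            if (r.filter().test(e)) {
                r.s().onEvent(e);
            }
        }
    }
}
```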
D. Security Consideration
Two apparent security problems arise when applying mobile agents. First, the mobile agents need to be authenticated and authorized at the servers. Second, to ensure the integrity of the data, it must be transmitted over secure communication channels.
Every mobile agent must first be authenticated to identify whom it represents. Secure Agent supports user authentication using username/password pairs. Once identified, mobile agents can be checked against the security policy to see whether they are authorized to perform certain actions at a server. The Administrator tool provides a user-friendly interface for server security and service management.
The mobile agent server can control an agent's access to resources depending on both the user identification and the server's permits. A user can be created for each maintenance crew. Alternatively, a maintenance crew group can be used to represent all the crews. In Figure 5, an mcrew group has been created with two members, jack and janet. The mcrew group has the permissions to access maintenance-related services.
A user interface is provided to assign different types of permissions to users or groups. The permissions are divided into groups for agents, classes, events, files, etc. For example, a permission in the agent group decides whether an agent with the user's or group's identity can arrive at or be launched from this server. The file group permissions determine whether a user or group can access local files.
Secure communication channels among the mobile agent servers may become important, especially when the data passes through a public network or wireless channels. The mobile agent software provides options to encrypt data in transit, thus preventing others from tampering with it. A digital envelope is used to protect *Sealed Agents* while travelling.
All the above security measures are supported directly by the mobile agent software and thus greatly simplify the programming work. Other security-related features are provided as well. For example, the mobile agents can work through firewalls, which is important when the company intranet must be accessed from the public Internet.
**E. Logging and Experience Sharing**
Software agents may help in logging and sharing maintenance experience. In the maintenance and repair industry, experience is often extremely important. An effective way to accumulate and distribute experience helps avoid common mistakes and improves work efficiency. Currently, most experience is still passed on and shared by word of mouth, so valuable experience may be lost over the years and through personnel changes. The quality of this kind of experience sharing depends on people's willingness to share and on their ability to express themselves. It is also difficult to search for a solution to a given problem, since it is hard to know who has the corresponding experience. Agents may help by recording the maintenance process and converting it into a standard format (e.g., XML with a standardized schema). The recorded experience can be saved in a case-based reasoning system for future retrieval.
**F. Cost-benefit analysis**
Agents can assist users in performing standard analyses. For example, to determine whether in-service monitoring of a circuit breaker is justified, a cost-benefit analysis based on the risks and the return on investment may be carried out automatically. A three-stage decision-making sequence has been recommended for this purpose. Historical data helps the user assign a more appropriate score when assessing each failure mode.
**G. Other**
Some other applications of agent techniques have been proposed. For example, agents can improve the usability of software by providing a friendly user interface with speech recognition and synthesis capabilities.
V. SUMMARY
By utilizing mobile agent techniques, the convenience and speed of accessing and generating maintenance data are improved. With the increasing complexity of circuit breaker equipment, more information is needed to perform the maintenance work, so efficient data exchange becomes very important. Mobile agents can process information locally with less network bandwidth consumption. Their ability to provide reliable transmission over disconnected or low-quality networks is another important merit.
Mobile agent computing easily facilitates the integration and automation of the maintenance process, starting from the generation of work orders to the completion of the maintenance report. The entire process may involve multiple entities and steps. Thus, an automated procedure is highly desirable. With the ability to collaborate and update agent itineraries adaptively, the mobile agent software provides an ideal framework for modeling and supporting the maintenance workflow.
Furthermore, mobile agent computing provides the means by which real-time and off-line data can easily be integrated into a single distributed maintenance management system. Using real-time monitoring data, the maintenance system can evaluate the condition of devices, which makes predictive maintenance possible. The integration of real-time data can be utilized to improve maintenance management decisions. The platform independence of mobile agents provides interfaces and support for a great variety of devices.
VI. CONCLUSIONS
In this paper, mobile agent software has been applied to circuit breaker maintenance, and several representative application scenarios have been described. Mobile agent software appears well suited to circuit breaker maintenance practice due to its support for heterogeneous systems, security, distributed events, low bandwidth usage, etc. Using mobile agent software, the development work can be greatly simplified. Moreover, an agent-based software architecture makes the application more flexible and upgradable.
Abstract—Service Oriented Architecture (SOA) is a framework for making existing web services usable within applications. Business Process Execution Language (BPEL) is an XML language used for web service composition and implementation. However, BPEL exhibits limitations in providing security for web service composition. Companies will be satisfied only if their requirements for authentication, confidentiality, and integrity are fulfilled. In this paper, security is woven dynamically as an aspect in AO4BPEL, an aspect-oriented extension to BPEL. A service-oriented E-Vehicle system is taken as an example for applying the security aspect at runtime.
Index Terms— SOA, AO4BPEL, AOP, WS-Security.
I. INTRODUCTION
A web service composition [1] is a collection of web services published by various service providers. The services are composed according to clearly defined rules in such a way that they provide a more complex service. Composition languages follow workflow rules, whereby the composition is defined as a workflow process. These rules determine the web services that participate in the composition and the order of control flow; data is transferred between the process activities (data flow).
Aspect Oriented Programming (AOP) aims at enhancing the modularity and maintainability of a system, and consequently influences the evolution of the software. It helps separate core concerns from cross-cutting concerns: AOP achieves this separation by introducing aspects that crosscut the other modules. AOP is a programming methodology that is part of the overall paradigm of Aspect Oriented Software Development. AOP can be introduced into the Business Process Execution Language [2] to overcome BPEL's limitations in capturing web service compositions in a modular way and in supporting dynamic change.
An Aspect Oriented Extension to BPEL (AO4BPEL) [2] builds on BPEL, the most popular language for web service composition and one widely accepted in industry. It captures web service composition in a modular way, and the composition becomes more open to dynamic change. BPEL allows the specification of interactions among the web services that participate in the composition according to various control-flow patterns. It provides three activities for web service interaction: <invoke> for invoking an operation on a partner web service, <reply> for sending a response to a client, and <receive> for blocking until a client request is received. These activities produce a message-based interaction between the composite web service and its partners and specify the functional logic of the composite web service. The activities are specified in an XML process definition that encodes the composition logic. However, beyond the functional aspects, non-functional composition properties such as security, reliability, and quality of service are important for web service composition languages to keep their promises.
In this paper, we present a framework for integrating the specification of security features into the specification of web service compositions. The proposed framework consists of a security service, a process container, and a deployment descriptor. The security service is an aspect-based web service that provides operations to secure the interactions of the BPEL [3] process with its partners and clients. It provides functionality used by the web service orchestration engine to authenticate, ensure integrity, prevent repudiation, or make BPEL activities confidential. The Apache ODE (Orchestration Director Engine) software executes business processes written following the WS-BPEL standard. It talks to web services, sending and receiving messages, handling data manipulation and error recovery as described by the process definition. It supports both long- and short-living process executions to orchestrate all the services that are part of the application.
WS-BPEL (Business Process Execution Language) is an XML-based [4] language defining several constructs to write business processes. It defines a set of basic control structures such as conditions and loops, as well as elements to invoke web services and receive messages from services. It relies on WSDL to express web service interfaces. Message structures can be manipulated, assigning parts or the whole of them to variables that can in turn be used to send other messages.
The security process that wraps BPEL processes provides transparent security to the users of the composed web service. The process container is implemented as a set of aspects specified in AO4BPEL, an aspect-oriented extension to BPEL. The XML-based deployment descriptor specifies the security requirements of the process activities along with the parameters of the security features.
The signcryption security technique [5] is implemented and dynamically woven into an SOA application. This security service is a non-functional component that can be plugged in or unplugged at runtime. At process deployment time, the deployment descriptor file has to be specified in addition to the BPEL configuration.
The remainder of this paper is organized as follows. Sec. 2 explains the system architecture of the E-Vehicle system. Sec. 3 describes the application scenario, and Sec. 4 the scope of the aspect service in the E-Vehicle system. Sec. 5 gives a short overview of the security framework design and its implementation. We conclude the paper and outline areas for future work in Sec. 6.
II. SYSTEM ARCHITECTURE
The web services created initially are composed in a BPEL process using the workflow model and executed in the Apache Orchestration Director Engine [6]. Additional functionality implemented in services can be modeled as an aspect and woven into the existing BPEL process [7]; this is achieved at runtime using an aspect weaver. The aspect-oriented BPEL process is thus created, providing an efficient modularization of core and cross-cutting concerns.
Fig. 1. System Architecture.
It also supports changes to the composition logic during the runtime of the BPEL [8] process. The AO4BPEL process is driven by the promoters in the web service composition; they also work with public relations personnel to increase public awareness of the company and the products it offers. Often, a number of projects are in operation simultaneously, with some promotions being long-term and general while others are short-term and specific. The workflow for implementing this task is depicted in Figure 2.
Fig 2: Work flow for implementing the task
Dynamic reconfiguration of web services is implemented in an aspect-oriented environment. In the service-oriented E-Vehicle system, the offered web services are composed according to the promoters' needs. Additional functionalities or new services are implemented as aspects and woven at runtime; this information can be obtained from the registry, where the promoters add aspects based on the needs of the consumers. They compose web services along with the aspects dynamically in the composition [9]. The woven aspects can be unwoven at any time by the promoters based on changes in the workflow process.
Security features are added to the SOA application as an aspect: security is a non-functional component that may not be needed by every client of the application, yet some clients require the security web service, so this service is provided through the aspect mechanism.
III. APPLICATION SCENARIO
Effective communication between the customer, the mechanic, and the service station plays a vital role in providing good service for the customer's vehicle. Currently the communication mechanism is based on elaborate paperwork, which is not efficient in the modern era of electronic bookkeeping. The problems with this kind of communication mechanism are:
- The service station may not understand the parts list written down by the mechanic.
- The mechanic may not have clear history information about the services already performed on the vehicle.
- Anyone who knows the URL of a service or finds it in a UDDI registry can place orders or perform bank transfers.
In order to provide effective communication among the trio (customer, station, mechanic), the concept of a service-oriented e-vehicle system is introduced to the domain.

**Fig. 3. Web Service Composition in E-Vehicle Service.**
In the system, the registration, appointments, availability of services, and billing are all handled electronically. A UniqueID is generated for each customer who registers his credentials with the system. Security is provided by extending the features of the WS-Security and WS-Trust protocols to the set of services offered in the proposed system. Security features are modeled as an aspect and dynamically recomposed during the runtime of the application.

**Fig. 4: Aspect based Service Oriented E-Vehicle Service system**
The tasks involved in the project are web service creation, web service composition, and deployment of aspects. These tasks are implemented using specific modules in the application.
**Web Service Creation**
The following services were created for the e-vehicle service system:
a) RegistrationWS - This service registers the customer details and provides a unique ID to the customer.
b) NeededPartsWS - This service helps the mechanic suggest parts to the customer and sends control back to the Repair Service. It also returns the payment made by the customer to the mechanic.
c) SurveyPartsWS - This service returns the best manufacturer for the given parts.
d) StockVerificationWS - This service checks the availability of the given parts and returns their status (available or unavailable in stock).
e) OrderIdWS - This service returns the order ID, which helps in transferring control to the next branch.
f) RepairINWS - This service generates the rate for each of the parts suggested by the mechanic.
g) PrepareInvoiceWS - This web service obtains the payment data from the mechanic and the parts list from the Repair Service. Based on the list and the needed parts obtained from the mechanic, a bill is generated and the amount payable by the customer is calculated.
h) VehicleAdminWS - This service sends a notification regarding the retrieval of the net pay by the service station.
**Web Service Composition**
BPEL is used to compose the web services, and the result is deployed in the Apache Orchestration Director Engine. In this project, two compositions are built, namely E-RepairService and E-VehicleService. The services used in the composition of E-RepairService are:
- StockVerificationWS
- OrderIDWS
- SurveyParts
- RepairInWS
The services involved in E-VehicleService are:
- NeededpartsWS
- RepairInWS
- SurveyParts
- PrepareInvoiceWS
- E-RepairService
- VehiclesAdminWS
**Deployment Of Aspect**
Apache ODE is used for weaving the aspect into BPEL. In order to support runtime process change, the security functionality is injected into the result of the OrderWS. It can be plugged in or unplugged at any point in time without modifying the existing BPEL process.
IV. SCOPE OF ASPECT SERVICE IN E-VEHICLE SERVICE SYSTEM
For the security requirements of web service composition, we consider the e-vehicle service as an example scenario. The service providers define a BPEL process that composes a set of basic services. The process orders items from the supplier and subsequently invokes the bank's payment web service to pay the transaction amount. The operations of the partner web services require authentication, since it is not acceptable that anyone who knows the URL of the service or finds it in a UDDI registry can place orders or perform bank transfers. The supplier and bank web services must be accessible only to business partners with appropriate credentials. This means that the web service composer has to know the security policy of a partner service before writing the BPEL process that invokes it.
With authentication mechanisms, the partner web service can be sure of the identity of the caller. The next step is to decide what the caller is allowed to do, which is the focus of authorization. Furthermore, it is also important that the factory which passed the order to the supplier cannot deny having done so (non-repudiation) and that nobody can claim a misused identity; digital signatures and signature verification can be used to implement this. A further requirement is data integrity: both parties need appropriate support for integrity, i.e., if the factory orders one hundred items, then the security infrastructure must make sure that nobody can tamper with the data on its way to the target web service and change the order position. Appropriate security mechanisms [3] are also needed to avoid replay attacks: if a malicious third party copies the message for ordering car parts and resends it later, the order should not be accepted a second time. This is usually implemented by means of timestamps. When invoking the bank's payment web service, it is essential that nobody can see the sensitive information transferred from the composite service to that partner (confidentiality). Both parties have to negotiate and agree on the mechanisms (usually key-based encryption and decryption) used to ensure confidentiality.
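A minimal sketch of the timestamp-based replay check described above; the five-minute window and the message-id scheme are assumptions, and a full implementation would also evict old entries.

```java
import java.time.Duration;
import java.time.Instant;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class ReplayGuard {
    private static final Duration WINDOW = Duration.ofMinutes(5);
    private final Map<String, Instant> seenMessageIds = new ConcurrentHashMap<>();

    // Accept a message only if its timestamp is fresh and its unique id has
    // not been seen before; a copied-and-resent order is thus rejected.
    public boolean accept(String messageId, Instant timestamp) {
        Instant now = Instant.now();
        if (timestamp.isBefore(now.minus(WINDOW))
                || timestamp.isAfter(now.plus(WINDOW))) {
            return false; // stale or implausibly future-dated
        }
        return seenMessageIds.putIfAbsent(messageId, timestamp) == null;
    }
}
```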
The security requirements discussed so far focus on the <invoke> activity used to interact with partner web services. The <receive> and <reply> activities have slightly different requirements for security specifications.
A BPEL orchestration engine that executes the <receive> activity waits until a client request matching that <receive> arrives and then starts the process interpretation. From the client’s perspective, the composite web service looks like any other web service with its WSDL interface. From the security perspective, it should be clear who is allowed to invoke the composite web service, what kind of authentication, encryption, or signing mechanisms are required by the composite service. The security policy of the composite web service can be expressed using the WS-Policy specification; this policy will have to be enforced by the orchestration engine. Unfortunately, current BPEL engines do not yet support policies. For the <reply> activity, the orchestration engine on which a composite web service runs has to secure the response as required by the security policies of both the composite web service and its client. The security framework should make sure that the response is encrypted if the client requires an encrypted response and that the orchestration engine supports one of the encryption algorithms specified in the client policy.
The issues mentioned so far are not covered by BPEL: there is no means to express the security capabilities and requirements of the process or of a given activity. However, this is not a limitation of BPEL itself, because non-functional concerns should be addressed by other specifications for a better separation of concerns and a more modular composition specification. If BPEL were extended with new constructs for each non-functional concern of the composition, it would evolve into a very complex language, which in turn would limit its acceptance. Furthermore, mixing the specification of the core composition logic with specifications of security features and other non-functional concerns in one unit would make the composition specification too complex and hard to maintain and evolve.
In this paper, we present a framework for securing web service compositions based on AOP. The logic needed to ensure security features is plugged into the composition logic using a set of aspects. These aspects are generated from a generic aspect library at deployment time according to the deployment descriptor, which specifies the security requirements of BPEL activities along with the required security parameters such as keys and certificates. A web service is used by the orchestration engine to add security to certain <invoke>, <reply>, and <receive> activities. The integration of the BPEL process and the security service is tackled by the generated aspects.
Composition with BPEL
BPEL is a workflow-based web service composition language. It specifies the composition as a process, which declares the web services participating in the composition (partners), data containers (variables), and a set of activities with specific patterns of control and data flow. The building blocks of BPEL processes are activities. There are primitive activities such as <invoke> and <assign>, and structured activities such as <sequence> and <flow>; structured activities manage the order of execution of their enclosed activities. BPEL processes can run on any BPEL-compliant orchestration engine, which orchestrates the invocations of the partner web services according to the process specification. For illustration, we present a skeleton of the BPEL process that corresponds to the car manufacturer scenario in the code below; some unessential constructs are omitted due to space restrictions.
```
<process name="OrderProcess">
  <partnerLinks>
    <partnerLink name="supplier"/>
    <partnerLink name="bank"/>
    <partnerLink name="factory"/>
  </partnerLinks>
  <variables>
    <variable name="clientrqst" messageType="orderInMT"/>
    <variable name="clientrspse" messageType="orderOutMT"/>
    <variable name="payrequest" messageType="payInMT"/>
  </variables>
  <sequence name="Main">
    <receive partnerLink="factory" operation="order" variable="clientrqst" createInstance="yes"/>
    <invoke partnerLink="supplier" operation="putOrder" inputVariable="supplyrequest"/>
    <invoke partnerLink="bank" operation="pay" inputVariable="payrequest"/>
    <reply partnerLink="factory" operation="order" variable="clientrspse"/>
  </sequence>
</process>
```
WS-Security and WS-Policy
Signcryption [5] is a new paradigm in public key cryptography that simultaneously fulfills both the functions of digital signature and public key encryption in a logically single step, and with a cost significantly lower than that required by the traditional signature-then-encryption approach. Compared with signature-then-encryption based on the RSA cryptosystem, signcryption costs on average 50 percent less in computation time and 91 percent less in message expansion. The signcryption algorithm [5] can be implemented using ElGamal's shortened digital signature scheme.
The Schnorr signature scheme ensures that the sent message cannot be forged, that its contents remain confidential, and that non-repudiation holds. The computation and communication overhead of the signcryption and unsigncryption algorithms is much smaller than that of signature-then-encryption schemes. The receiver can only obtain the message m by decrypting it with his private key Xb. Any change made to the message m will be reflected in the next step of signcryption: the one-way keyed hash of m will no longer match the value r. Even an attacker who holds all three components of the signcrypted message (c, r, s) cannot obtain any partial information about m without also knowing the receiver's private key and the parameters p and q (known only to sender and receiver).
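For comparison, the sketch below shows the traditional signature-then-encryption baseline that signcryption improves upon, using only the standard Java Cryptography Architecture; the key pairs and the short message are assumptions for illustration, and the framework itself weaves signcryption [5], not this baseline.

```java
import java.nio.charset.StandardCharsets;
import java.security.KeyPair;
import java.security.KeyPairGenerator;
import java.security.Signature;
import javax.crypto.Cipher;

public class SignThenEncrypt {
    public static void main(String[] args) throws Exception {
        // Hypothetical key pairs: the sender signs, the receiver decrypts.
        KeyPairGenerator gen = KeyPairGenerator.getInstance("RSA");
        gen.initialize(2048);
        KeyPair sender = gen.generateKeyPair();
        KeyPair receiver = gen.generateKeyPair();

        byte[] message = "order: 100 items".getBytes(StandardCharsets.UTF_8);

        // Step 1: sign the message with the sender's private key.
        Signature signer = Signature.getInstance("SHA256withRSA");
        signer.initSign(sender.getPrivate());
        signer.update(message);
        byte[] signature = signer.sign();

        // Step 2: encrypt the message with the receiver's public key.
        // (RSA encrypts only short payloads; real systems use hybrid encryption.)
        Cipher cipher = Cipher.getInstance("RSA/ECB/OAEPWithSHA-256AndMGF1Padding");
        cipher.init(Cipher.ENCRYPT_MODE, receiver.getPublic());
        byte[] ciphertext = cipher.doFinal(message);

        // Signcryption performs both functions in one logically single step,
        // which is where the reported cost savings come from.
        System.out.println("signature bytes: " + signature.length
                + ", ciphertext bytes: " + ciphertext.length);
    }
}
```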
Implementation of AO4BPEL
The pointcut language of AO4BPEL is XPath: XPath expressions are used to select the activities where the advice code should be executed. Pointcuts can span several processes. An advice in AO4BPEL is a BPEL activity that specifies some crosscutting behavior to execute at certain join points. Like AspectJ [10], we support before, after, and around advice; that is, the behavior defined in an advice can be executed before, after, or instead of a join point activity, and the around advice allows replacing one activity with another. The activity of integrating aspects into base functionality is called weaving, and a weaver is a tool that integrates a base program's execution with aspects. In the case of AO4BPEL, the base program is the BPEL process. AO4BPEL supports dynamic weaving, i.e., aspects can be deployed or undeployed at process interpretation time. We have implemented AO4BPEL as an aspect-aware orchestration engine for BPEL.
Cross-Layer Pointcuts
To implement the security framework, it was necessary to extend AO4BPEL with pointcut designators that, in addition to capturing BPEL activities such as <invoke> and <reply>, also capture so-called internal join points. The latter are points in the interpretation of an activity, i.e., points at the interpretation level rather than at the composition specification level. To implement security logic we need to capture execution points during the orchestration engine's interpretation of a BPEL activity where SOAP messages are sent or received; we need to modify a SOAP message before it is sent out in the course of interpreting certain BPEL activities, in order to invoke the security service.
V. SECURITY FRAMEWORK
In this section, we present the design and the implementation of our security framework.

The architecture shown in Figure 5 depicts the overall structure of the proposed security framework. The core components of our security framework are the deployment descriptor, the security service and the process container.
The XML-based deployment descriptor relates process activities to their security requirements and the necessary parameters. For example, the deployment descriptor could declare that any <invoke> activity that calls the pay operation of the payment web service requires caller authentication. Based on the descriptor, the security header of the SOAP message produced by the BPEL process is determined. The deployment information is passed to the BPEL engine through the interpretation function, which generates a SOAP [11] message and hands it to the security service. The SOAP message with the security header is then sent over the network, the action is performed, and the result is given back to the BPEL process.
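A sketch of how the engine might consult such a descriptor before interpreting an activity; the descriptor keys and feature names are assumptions, not the framework's actual schema.

```java
import java.util.Map;
import java.util.Set;

public class SecurityDescriptor {
    // Maps "partner.operation" keys to required security features, as they
    // would be read from the XML deployment descriptor at deployment time.
    private final Map<String, Set<String>> requirements = Map.of(
            "bank.pay", Set.of("authentication", "confidentiality"),
            "supplier.putOrder", Set.of("authentication", "integrity"));

    // Consulted before an <invoke> is interpreted; if any feature is
    // required, the generated aspect calls the security service.
    public Set<String> requirementsFor(String partner, String operation) {
        return requirements.getOrDefault(partner + "." + operation, Set.of());
    }
}
```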
VI. CONCLUSIONS AND FUTURE WORK
In this paper, we presented a framework for securing web service compositions. The core components of this framework are the aspect-based process container, the security service, and the deployment descriptor. The process container intercepts certain points in the process interpretation and plugs in logic to make BPEL interactions through messaging activities safer; the container is implemented as a set of aspects specified in AO4BPEL. The deployment descriptor enables easier usage of the framework and hides the implementation details from other users. The remainder of this project work involves bringing in further security functionality as aspects, encapsulated as separate entities.
Eventizing Applications in an Adaptive Middleware Platform
Andreas Frei, Andrei Popovici, and Gustavo Alonso
Department of Computer Science
Swiss Federal Institute of Technology Zürich
CH-8092 Zürich, Switzerland
{frei, popovici, alonso}@inf.ethz.ch
ABSTRACT
Adaptive middleware is increasingly being used to provide applications with the ability to adapt to changes such as software evolution, fault tolerance, autonomic behavior, or mobility. It is only by supporting adaptation to such changes that these applications will become truly dependable. In this paper we discuss the use of event based systems as a platform for developing adaptive middleware. Events have the advantage of supporting loosely coupled architectures, which raises the possibility of orthogonally extending applications with the ability to communicate through events. We then use this ability to change the behavior of applications at run time in order to implement the required adaptations. In the paper we briefly describe the mechanisms underlying our approach and show how the resulting system provides a very flexible and powerful platform in a wide range of adaptation scenarios.
1. INTRODUCTION
Advances in hardware architectures and the widespread availability of wireless networks have radically changed the computing environments a software application must face during its active lifetime. A way to cope with the increasingly dynamic nature of computing environments (e.g., mobile or pervasive computing) is to use adaptive software architectures. Of the several possibilities for implementing such architectures [9], event-based systems offer a wide range of advantages [11]. The main one is the high degree of decoupling between components: by using events as the way to communicate, components are independent of each other and can therefore change and evolve independently. This property fits rather well with the need to cope with unexpected changes in the computing environment. One can treat changes as new types of events being thrown into the system and adaptation as the ability to dynamically react to or generate new events.
The type of changes and the range of adaptations we have in mind can be illustrated with a few examples. As a first example, consider changes in network technology that result in an increase in available bandwidth. Applications that today exchange few and very compressed events to avoid cluttering the network may have to cope later with a larger number of more complex events that are generated taking advantage of the additional bandwidth. Ideally, we would like older applications to be able to deal with the new stream of events without having to redesign or change them in a significant manner. One way to do this is to dynamically add a software layer that deals with the new events and acts as a translator for the older, less capable application.
A second example is changes in policy that may force applications to cope with new types of events not foreseen at the time they were designed. An automobile control system, for instance, may be required to generate an event when the combustion in the engine is less than perfect. The event can then be used to monitor the pollution level caused by the automobile. The ability to monitor the combustion is present in most modern cars, but the software that turns this information into an event is probably not. Ideally, it should be possible to dynamically add the ability to generate such an event without having to change the software already installed in the automobile.
These examples are similar in that they require dynamically extending already deployed and possibly already running applications. In this paper we outline an architecture that allows eventizing an application without prior knowledge of events: an application that has already been designed and is running can be extended to publish relevant events and to react to events produced by its surrounding infrastructure.
The architecture is based on the peer-to-peer concept of JXTA [10], which allows discovering and activating new application extensions. At activation time of an application extension, the underlying dynamic AOP system based on PROSE [15] extends the running application with the new functionality. The dynamic AOP system is responsible for crosscutting the running application as defined by an aspect inside the application extension. Once a crosscut occurs, the application extension takes over the task of handling events, such as generating an appropriate event and publishing it. The underlying event system is then responsible for sending the event and handling the subscriptions.
The rest of this paper is structured as follows. Section 2 discusses the system's architecture, including an explanation of how eventization of applications can be achieved with aspect-oriented programming and an event system. Section 3 gives some examples of how the adaptations can be done. Section 4 tests the viability of the approach we propose. Section 5 concludes with examples where the proposed architecture can be used.

*The work presented in this paper was supported (in part) by the National Competence Center in Research on Mobile Information and Communication Systems (NCCR-MICS), a center supported by the Swiss National Science Foundation under grant number 5005-67322.
2. SYSTEM ARCHITECTURE
2.1 Overview
Figure 1 depicts the basic architecture of the system. We assume an application is running on the JVM and that this is the application to be adapted as needed. On top of the JVM, a first layer implements the basic resources of the system; these constitute the JXME peer-to-peer layer and include a platform for dynamic AOP. A second layer is devoted to the dynamic resources acquired at runtime. The first dynamic resource required is the event system, which application extensions can use to publish and subscribe to events. Application extensions may include a dynamic AOP aspect and the behavior of the extension itself: the aspect inserts crosscuts into the application at activation time, while the extension's behavior is responsible for creating and publishing events and for subscribing to relevant events and reacting accordingly.
The adaptations that we consider are of three generic types. The first type of adaptation endows an application with the ability to generate events (either because it did not generate any events at all or because it did not generate the necessary ones). The second type transforms applications into consumers of events (again, either because the application did not accept any events to start with or because it did not accept the necessary ones). The third type affects the loaded application extensions themselves, and therefore also the event system; this allows the dynamic resources themselves to be extended for further changes.
The way the system works is as follows. The JXME layer is informed when a new dynamic resource is available and loads the discovered application extension. When the application extension is activated, its AOP aspect is inserted into the AOP platform. The dynamic AOP platform intercepts the execution of the application and monitors its progress. Whenever it reaches selected points in the execution, the dynamic AOP platform redirects the execution to the appropriate application extension. Once the extension has executed, the control flow returns to the application, which then resumes its work (exactly where and how work is resumed depends on the nature of the extension being executed).
The basic resources have been designed as a minimal library required when the application is started. The total size of this architecture is currently about 1 MByte.
2.2 Basic Resources
Dynamic AOP Support
The basis for adaptation in our architecture is dynamic Aspect-Oriented Programming (AOP). AOP is a software design technique that allows the separation of orthogonal concerns within an application. These orthogonal concerns are programmed as separate aspects rather than being scattered across many different places in the code. The main advantage of AOP is precisely the possibility of abstracting out concerns that crosscut the application (i.e., appear in many different places throughout the code). These aspects can then be treated as separate software modules, thereby increasing the modularity of the design. For instance, Zhang and Jacobsen argue that aspect-oriented refactorization can enhance the modularity of middleware and reduce structural complexity.
An aspect defines what to do (e.g., invoke an additional method) when a particular point is reached in the code (e.g., when invoking methods with certain signatures, when modifying a variable, etc.). Conventional AOP uses a weaver to add the aspect code to the base code of a program at compile time, e.g., AspectJ. In dynamic AOP, the aspect code is added (woven) at run time by executing it whenever the specified point in the execution is reached. Aspects can also be dynamically withdrawn (unwoven) leaving the application in its original state.
In our system, we use PROSE as the platform for dynamic AOP. PROSE hooks into the JVM and intercepts method calls for the points in the execution where aspects are to be executed.
Figure 2 shows how PROSE is used to introduce extensions that deal with events. The figure assumes an application that is not aware of events and depicts how extensions can be used to turn the application into a consumer and/or producer of events. PROSE uses the debugger interface for the interception of method calls; therefore, the application has to run on a PROSE-enabled JVM. Figure 2 shows PROSE first as an extension to the JVM and second as a library inside the basic resources, which is used by the application extensions.
JXME Overview
The second basic resource required by our system is based on the peer-to-peer concept of JXTA. Since we are mainly interested in pervasive and mobile computing applications, we have chosen a decentralized solution, unlike, e.g., Jini, where a centralized lookup service is responsible for distributing services. An independent lookup mechanism is a significant advantage in ad hoc application scenarios where it is not feasible to have a centralized server available at all times. Similarly, JXTA uses peer groups to combine peers with similar services or behavior; a peer may reside in different peer groups at the same time. We use this functionality to apply extensions to entire groups rather than to individual nodes. Being able to do so is a significant advantage when dealing with realistic scenarios where several dozen devices are involved in the interaction.
As JXTA was built for a wired Internet peer-to-peer network, it is much too heavy for smaller devices. Since we require a discovery mechanism only in a wireless environment, a stripped-down JXTA has been implemented. This new JXME implementation uses multicast to send out messages while listening for messages at the same time. This basic messaging allows a service provider to advertise services. Any other node in the multicast domain may then discover such service advertisements and download the service from the provider. With this basic messaging we are able to download the event system as a first service.
By implementing the JXME layer directly on top of UDP, we are independent of remote procedure call infrastructures like RMI or CORBA. This leads to a smaller infrastructure and the possibility of extending the concept to further environments not based on a particular RPC infrastructure.
2.3 Dynamic Resources
An application extension found through JXME is delivered as a jar file containing a manifest with the extension's meta information. To allow dynamic resources to be unloaded again, every application extension is loaded in its own classloader; after removal of an application extension, its classloader gets garbage collected. Extensions can use classes from other dynamic resources by specifying a dependency list on other extensions in their manifest. This allows, for example, AppExt 1 in Figure 2 to use the EventSystem extension. The activating class is specified through the Main-Class entry in the manifest. By adopting Java's jar manifest specification to describe an application extension, the extension can be activated inside JXME through its main class's main method.
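A minimal sketch of this activation mechanism; the names are illustrative, and the actual JXME layer adds discovery, dependency resolution, and unloading on top of it.

```java
import java.io.File;
import java.lang.reflect.Method;
import java.net.URL;
import java.net.URLClassLoader;
import java.util.jar.JarFile;

public class ExtensionActivator {
    // Loads an extension jar in its own classloader and invokes its
    // Main-Class, so the extension can later be unloaded by discarding
    // the loader.
    static void activate(File jar) throws Exception {
        String mainClass;
        try (JarFile jf = new JarFile(jar)) {
            mainClass = jf.getManifest().getMainAttributes().getValue("Main-Class");
        }
        URLClassLoader loader =
                new URLClassLoader(new URL[] { jar.toURI().toURL() });
        Class<?> c = Class.forName(mainClass, true, loader);
        Method main = c.getMethod("main", String[].class);
        main.invoke(null, (Object) new String[0]);
        // Dropping all references to 'loader' later allows its classes to
        // be garbage collected, effectively unloading the extension.
    }
}
```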
The event system, designed as a dynamic resource itself, has to be loaded and activated by a node before eventization is requested by application extensions. The application extensions are then able to use the event system to publish and subscribe to events.
Event System Implementation
The event system we use is based on a publish/subscribe model [4,12,6,7]: a producer node publishes an event, and a consumer node may subscribe for this event and be notified of its occurrence. TPS [2] is a type-based publish/subscribe model built on top of JXTA [10,3]. We use a similar approach to TPS in our implementation; as we rely only on JXME, we have implemented our own event system on top of this discovery and messaging layer.
The event system interface is shown in the code example below. This is also the interface which can be used by the application extensions.
```java
interface Event {
    void init(Message msg);
    Message toMessage(Class clazz);
}

interface Filter {
    boolean matches(Event event);
}

interface EventListener {
    void processEvent(Event e);
}

interface EventSystem {
    void subscribe(Filter filter, EventListener listener);
    void unsubscribe(Filter filter);
    void publish(Event event);
}
```
The implementation of the EventSystem interface again uses the JXME messaging layer to send an event to other nodes. Before an event is sent, it is serialized into the JXME message format, which is then sent as a multicast datagram packet. On the other nodes the message is deserialized into the event type. The subscription to events is done through an event filter following the type- and attribute-based subscription model introduced in [13,2].
As we are targeting a decentralized infrastructure, each node manages its own subscriptions. A subscription is therefore made only locally and does not cause any traffic on the wireless channel. On the other hand, a published event has to be sent to all nodes, where it is checked against the registered subscriptions.
With some specific filtering attributes we are able to mimic the peer group concept defined by JXTA. New application extensions or messages can then be published by including the peer group attribute in the event.
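A minimal usage sketch of the interfaces above; MeetingDate is an illustrative event type, and Message comes from the JXME messaging layer.

```java
// Illustrative event type; serialization to/from the JXME Message format
// is stubbed out here.
class MeetingDate implements Event {
    public void init(Message msg) { /* deserialize fields from msg */ }
    public Message toMessage(Class clazz) { return null; /* serialize */ }
}

// Type-based subscription: accept all events of type MeetingDate.
class MeetingDateFilter implements Filter {
    public boolean matches(Event event) {
        return event instanceof MeetingDate;
    }
}

class CalendarListener implements EventListener {
    public void processEvent(Event e) {
        System.out.println("meeting event received: " + e);
    }
}

// In an application extension, once the event system has been activated:
//   eventSystem.subscribe(new MeetingDateFilter(), new CalendarListener());
//   eventSystem.publish(new MeetingDate());
```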
3. EXAMPLES OF ADAPTATION
3.1 Producers of Events
Transforming an application into an event producer requires two things. The first is to detect the application state that should lead to the generation of an event. The second is to actually publish the event using whatever event system is available. We deal with these two problems using a single extension.
Once the designer has identified when events are to be generated (e.g., after a variable has been updated, when a method is invoked, etc.), an AOP aspect is created that traps that particular situation. Besides the aspect, the application extension also contains the logic necessary to publish the corresponding event. In Figure 2, step 1 indicates the points in the execution that are relevant for the generation of the events. When these points are reached, the extension is invoked and the event is passed on (step 2) to the underlying event system, which then publishes it (step 3).
As an example, consider a calendar application running on a PDA. The following PROSE extension can be used to automatically generate an event for every meeting entry the user makes in the calendar. This way, other calendars can be synchronized and notifications being sent to other persons or applications (e.g., to reserve a meeting room).
```java
class AddMeetingEventsAsProducer extends Pointcut {
    // Mechanism for publishing events.
    EventSystem eventSystem;

    // Definition of the pointcut where events should be fired.
    PointCutter pointCutter() {
        return Executions.before().AND(Within.methods("*"));
    }

    // Advice executed at the matched join points.
    void METHOD_ARGS(MeetingDates mdates, REST arguments) {
        // Constructor assumed to copy the meeting data into the event.
        Event e = new MeetingDate(mdates);
        eventSystem.publish(e);
    }
}
```
This aspect defines the operation to be intercepted in its pointCutter method; as explained, it corresponds to state changes in the MeetingDates class. The METHOD_ARGS method defines the extension to be executed: it creates an event object with the information about the meeting and then calls the EventSystem component to publish the event.
3.2 Consumers of Events
Transforming applications into consumers is done by including consumer behavior in an application extension. The extension does two things. First, it subscribes to events of interest. Then, upon arrival of an event, it calls the corresponding method(s) of the underlying application. The procedure is the reverse of the one used for event producers. In Figure 2, when an event arrives (step 3), the event system notifies the extension (step 4) which will then invoke the methods in the application (step 5).
Following the calendar example, this extension can be used to automatically generate entries in the calendars of users as a result of an entry being made in a particular calendar. Such an extension looks as follows:
```java
class CalendarBecomesEventConsumer implements EventListener {
    EventSystem eventSystem;
    Calendar calendar; // handle to the underlying calendar application

    // executed in the initialization phase of the extension
    void init() {
        eventSystem.subscribe(new MeetingDateFilter(), this);
    }

    // invoked by the event system for every matching event
    public void processEvent(MeetingDate received) {
        // add the received date to the calendar
        calendar.addEntry(received);
    }
}
```
When the extension is inserted, it subscribes during its initialization phase to MeetingDate events. When such an event arrives, the processEvent method is executed. This method inserts the date in the calendar and may notify the user of the new appointment or of potential conflicts.
4. BENCHMARKING
In order to test the viability of the approach, we have conducted a number of experiments with our architecture.
We test the different building blocks of the architecture as well as the scalability of the whole architecture for messaging and the event system in a wireless environment. The overhead of the dynamic AOP system has been measured in [13].
The local benchmarks were run on an IBM A31 laptop running Red Hat 9.0, which serves as our master machine. As remote benchmark machines we used IBM R32 laptops running Red Hat 7.3 with built-in wireless cards. In a first benchmark we measured the behavior of the architecture on a single node and across two nodes connected wirelessly in ad hoc mode. In the scalability benchmark we increased the number of involved nodes up to 9 and analyzed the behavior under various wireless parameters.

The measurements in the first benchmark have a standard deviation of less than 1%. In the scalability benchmark we achieved less than 5% deviation, as the access point was also being used by other people during the access point test.
4.1 Local and 2-Node Benchmark
For the first set of measurements, the benchmarked parts of the architecture were categorized as follows:
PROSE/JXME As PROSE and JXME run as basic resources on every node, we measured the time for sending messages between two nodes. In a first test, the two nodes ran on the same machine; the second test sent messages over a wireless connection in ad hoc mode.
Event System (ES) As the event system is used by any application extension we included an additional test to measure our type- and attribute-based subscription mechanism.
First, we analyze the overhead for the application when it is started with the basic resources but without any application extensions running. As PROSE uses the Java Debugger Interface on older JVMs, a significant performance overhead occurs there, because the whole application runs in debugger mode. With the newer JVM 1.4, debugging is possible at full speed and no overhead occurs when no aspects are inserted. The second basic resource, JXME, does not slow the application down either, as no messages have to be processed.

Second, we measured JXME and the event system extension (Figure 5). The round trip time (RTT) of a basic JXME message between two JVMs on the same machine is 3.5 ms, whereas between two machines over the wireless link it is 10 ms. With the event system included, an event round trip on the same machine takes 10.5 ms on average, significantly more than a basic message. The increase is caused by the serialization and deserialization of the event type into a basic message and, finally, by the filtering mechanism involved in both JVMs. An event round trip over wireless in ad hoc mode takes 13.7 ms.
4.2 Scalability Benchmark
In the scalability benchmark we analyze the behavior of many nodes in a wireless infrastructure publishing and subscribing to events. The infrastructure forms a star topology in which the master sends events and the other nodes respond immediately. The system architecture remains the same in all tests, with an increasing number of responding nodes. The master laptop sends out messages and events with delays of 50 ms and 500 ms. Besides the round trip time, the package loss is included in Figure 4. We also compared the access point (AP) infrastructure mode with the ad hoc (AH) infrastructure mode.
Figure 4 shows that the round trip time increases with the number of nodes. Since we measured in a star topology, the master node receives, for every message it sends, as many response messages as there are clients. These responses are queued until the measuring thread is able to process them and stop the clock. Comparing the wireless infrastructure modes shows that the round trip time in ad hoc mode is on average 14% lower than in access point mode, since in access point mode every message/event takes an additional hop over the access point. Package loss becomes significant with many nodes sending messages at a small time interval: switching the wireless infrastructure from ad hoc to access point mode improves the package loss for the 9-node infrastructure by 75%. A combination of access point mode and larger time intervals may scale better for more nodes.
5. CONCLUSION AND FUTURE WORK
In this paper we have outlined an adaptive middleware platform based on events. The platform treats events and the management of events as aspects that can be changed at run time in response to new requirements or necessary adaptations. We are currently completing the implementation of the platform and exploring more advanced forms of adaptation, including dynamic changes to the event management system itself. Furthermore, the platform targets resource-constrained devices such as iPAQs, where the current implementation is still too heavy and where we will take advantage of lighter virtual machines.
We are also using the first prototype of the platform in a variety of applications. One of them is a cooperating robots scenario where autonomous robots with wireless communication capabilities coordinate their movements through the exchange of events. Through the adaptive platform described in the paper, we can separate the software that controls the movement of the robot from the software that deals with events and the coordinated behavior. This is a significant advantage over existing designs, as it greatly simplifies development and maintenance and offers much more flexibility in terms of adaptation. The management of events and the coordinated behavior are treated as dynamic extensions and can be changed at any time. For instance, a set of extensions implements a train formation movement where all robots follow a leading robot. The leading robot is remotely controlled with a joystick and its movements are communicated to all other robots via events. These events are interpreted by extensions on each robot that control the movement of the robot as a function of its position in the formation and the movement of the leader.
The advantage of using this software architecture is that we can change at any time the extensions and completely modify the behavior of the system. As an example, the train formation extensions can be exchanged for a line formation extension where all robots move parallel to each other. The extensions can also be exchanged for new ones that implement more sophisticated behavior by generating new types of events such as robots randomly leaving the formation, speed control events, movement filters that prevent the formation from entering certain areas, etc.
Abstract
This paper describes the architecture, design and implementation of the system for project development support and all additional modules we have used or developed to integrate into the CESNET infrastructure and to streamline its administration. The support currently includes using Redmine ticket tracking system and providing hosting of multiple GIT source code repositories and APT, RPM and TAR package repositories.
Keywords
Project development support, Redmine, Git, APT, RPM, repository hosting, Charon
Introduction
Motivation
A major part of CESNET's work consists of research and development. There are many teams in many departments working in various fields. So far, each of these teams has had its own source code repository installed somewhere and has had to manage it and take care of its security and backup. They have usually used some kind of ticket tracking system different from the systems used by other teams. Thus, working in more than one team meant one had to learn more than one system. This approach was both time consuming and a waste of resources, so we came up with the idea of creating a single system that would provide the teams with ticket tracking and repository hosting services.
System requirements and system design goals can be summarized as follows:
1. A flexible ticket tracking system with the following key features:
    1. Support for multiple projects, project hierarchy
    2. A good administration interface enabling delegation of responsibility for project management to project managers
    3. Per-project tools: wiki, roadmap, Git repository read access
2. Multiple Git repository hosting with good user access control
3. Multiple package (APT, RPM, TAR) repository hosting
**System overview**
Figure 1 below depicts the system architecture. Currently, there are five services provided by the system:
1. Redmine [3] project management and ticket tracking system
2. Git repositories [4]
3. APT package repositories
4. RPM package repositories
5. TAR package repositories

Those five services are accessible through three entry points: SSH, HTTP(S) and FTP(ES). SSH is used for authenticated read and write access to all Git repositories and allows users to invoke a restricted set of commands on the host system (see below for more information). HTTP(S) is used for read-only access to the public Git repositories and to the APT, RPM and TAR package repositories. Anonymous FTP is used for read-only access to the APT, RPM and TAR package repositories. Finally, authenticated FTPES is used for write access to the APT, RPM and TAR package repositories; users upload their packages to the host system through this access point.
**Design**
**Redmine**
Redmine is a mature, stable, feature-rich and easy to use project management and ticket tracking system, chosen mainly for the following reasons:
1. Support for management of multiple projects, project hierarchy, public and private projects
2. Rich set of per-project tools: task management, time tracking, roadmap, forums, wiki
3. Read access to a long list of VCS repositories including Git, with the possibility of referencing source code in the repository from a wiki page or task description.
4. Clean user interface, powerful administration interface with possibility of delegating responsibility for project administration to project managers
5. Easy implementation of federated authentication
Installing the Redmine system was straightforward. The installation process for various platforms is well described in the official documentation [5]. The system deployment is depicted in Figure 2. We used the Apache web server [6] for content serving. Since Redmine is written in Ruby, we used Phusion Passenger [7] as the actual application server and container. We use a MySQL database as the persistent storage backend. Another possibility was PostgreSQL; both are equivalent in performance and features with respect to Redmine's requirements, and we opted for the one we are more used to.
The only tricky part of the installation was implementing federated SAML user authentication. After some research we came across the *HTTP Authentication Plugin* [8], which is capable of authenticating the Redmine user from environment variables previously set up by Apache authentication modules. It was then an easy task to use Apache's `mod_shib2` [9] authentication module and thus shift the responsibility for user authentication to Apache. Using this approach, we avoided customizing Redmine's source code, so future system upgrades remain easy and painless. The only customization we made was in the *HTTP Authentication Plugin*, and it was cosmetic: we changed the string on the login button. Authentication through the local mechanism is still possible, but this option is reserved for administrator accounts only.
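For illustration, the Apache side of such a setup may look roughly like the following sketch. The `/redmine` location and the exact settings are assumptions for the example, not our precise configuration; the point is that `mod_shib2` authenticates the session and exports its attributes as environment variables, which the *HTTP Authentication Plugin* then reads:

```
<Location /redmine>
    AuthType shibboleth
    ShibRequestSetting requireSession 1
    Require valid-user
</Location>
```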
Redmine was thus modified to use federated SAML authentication. However, all user accounts still need to exist in the local database, because Redmine needs various account metadata for general system functionality. To spare the administrator from having to look up all the necessary information and create each account manually, we created a very simple registration page. When accessing this page, the user is first redirected to the WAYF (Where Are You From) server and then authenticated with a selected IdP (Identity Provider). Once the authentication is complete, the database is checked and a non-existent account is automatically created in Redmine's database. It is, however, created as locked, and the system administrators are informed about this event. After reviewing the request, an administrator can manually unlock and enable the account from Redmine's administration interface.
**SSH publishing service**
The SSH service is used for authenticated read and write access to the Git repositories and for invoking a specific set of repository management related commands. Figure 3 depicts the SSH related part of the project support system architecture. We use a standard OpenSSH server [18].
SSH is the only (reasonable) way of securely accessing a remote Git repository for writing [10]. But SSH itself cannot restrict one user from writing into a repository s/he should not have access to. The first obvious solution is to use Linux file system permissions; these are easy to configure provided the groups of users that should access different repositories are disjoint. Once the group memberships begin to overlap, a configuration nightmare begins, which can be mitigated by using extended file system attributes. This approach, however, does not solve the bigger issue with SSH access: users have access to the shell and can potentially harm the system.
More elegant solutions are the Gitosis [10,12] and Gitolite [10,13] projects. These systems are configured to be executed instead of the login shell after a successful SSH authentication; they deny interactive usage of SSH and restrict the set of commands a user may remotely invoke on the host system. We tested the Gitosis utility for a while, but in the end we developed our own solution, which is not Git-specific like Gitosis or Gitolite and enables users to execute a defined set of arbitrary commands. This resource access layer is called *charon* and is described in detail in a separate technical report called *Charon – Resource Access Layer* [19].
But let us start from the top of the architecture scheme. Security was a big concern, so the first line of defence is the *AllowUsers* directive in the OpenSSH server's configuration file. After a successful authentication, *charon* is executed instead of the login shell. It works exactly like Gitosis: it forbids users to run in interactive mode and expects the command name to be passed as an argument. *Charon* then looks into the ACL (Access Control List) and verifies whether the user is allowed to execute the given command. If yes, the command is executed on behalf of the user; otherwise the user's request is denied. Part of the ACL configuration comes from a text configuration file, but the following data is also pulled directly from Redmine's database:
1. List of projects with assigned Git repository
2. List of user accounts
3. List of groups and their members
4. Roles of users or groups within the projects
User roles from Redmine are mapped to Charon access types, and Redmine account names are mapped to local system accounts. All ACL configurations are finally merged together. For more information about *charon*, please refer to the technical report *Charon – Resource Access Layer* [19].
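To illustrate the mechanism, the following is a simplified sketch of such a login-shell replacement, not charon's actual code. When sshd starts the login shell with a remote command, the shell receives `-c` and the command string as arguments; a charon-like gate inspects them against an allow-list (the real charon consults the merged per-user ACL instead of a hard-coded list):

```
#!/bin/sh
# simplified sketch of a charon-like command gate
if [ $# -lt 2 ] || [ "$1" != "-c" ]; then
    echo "interactive access denied" >&2
    exit 1
fi
case "$2" in
    "git-upload-pack "*|"git-receive-pack "*|ssh-key|status)
        # a per-user ACL lookup would happen here before executing
        exec /bin/sh -c "$2" ;;
    *)
        echo "command not allowed" >&2
        exit 1 ;;
esac
```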
To secure access to the Git repositories, *charon* controls the execution of the following binaries:

1. `git-upload-pack` and `git upload-pack` (reading from the repository)
2. `git-receive-pack` and `git receive-pack` (writing to the repository)

Another advantage of using *charon* as a proxy layer is that it performs some translations and substitutions for the user. For example, the following two lines are equivalent, currently both fully functional, and point to the same repository, but the latter makes use of the translation feature, is more convenient for the user, and hides the actual repository location from him/her:
```
1: git clone [user]@[server]:/var/reps/git/test.git
2: git clone [user]@[server]:test
```
Besides restricting access to the Git-related binaries, we also allow users to execute several other utility scripts:

```
1: ssh [user]@[server] ssh-key
2: ssh [user]@[server] status
3: ssh [user]@[server] apt-update
4: ssh [user]@[server] rpm-update
```
The command on line 1 lets the user upload and set a public SSH key for his/her account. The second command lets the user view the list of all resources s/he has access to. The last two commands regenerate the APT and RPM repository metadata after new packages have been uploaded. For more details see the appropriate chapters below.
\textbf{HTTP(S) publishing service}
The web service is used to access the Redmine interface, for unauthenticated read access to public Git repositories, and for unauthenticated and authenticated access to APT, RPM and TAR package repositories. Figure 4 depicts the HTTP(S)-related part of the system architecture. As stated before, we are using the Apache HTTP server.
For security reasons, the Apache web server is configured to redirect plain HTTP to secure HTTPS. Access to private Git repositories is blocked in the Apache configuration file. To prevent the administrator from making mistakes by manually editing the configurations, all necessary configurations are generated by the charon-admin administration utility (see the chapter Charon-admin utility below for more information).
**FTP(ES) publishing service**
The anonymous FTP service is used for read access to public Git repositories and to all APT, RPM and TAR package repositories. Authenticated FTPES (FTP over explicit SSL) is used for write access to all package repositories; users may upload new packages to the server through this entry point. Figure 5 depicts the FTP(ES)-related part of the system architecture. We chose vsftpd [14] as the FTP server because we have been using it successfully in our other systems.

**Figure 5: FTP(ES) publishing service scheme**
Again, the **vsftpd** configuration files for anonymous and local users are generated by the **charon-admin** administration utility (see the appropriate chapter below for more information). We are using the **deny_file** and **hide_file** configuration directives to restrict anonymous and local users from accessing repositories they should not have access to; at the time of writing, this was the only way, as **vsftpd** was not able to grant access selectively to only a part of the file system tree. Another tricky part was how to restrict directory access for local users. **Vsftpd** is capable of using per-user configuration files: those files must reside in the `/etc/vsftpd/` directory, the name of each configuration file must match the name of the user account, and all settings in a user file take precedence over the settings in the general config file. To prevent the user from wandering through the file system, we used the **local_root** directive and locked him/her in the home directory. However, this **chroot** operation also forbids the user from accessing the repositories stored in the `/var/reps` file system subtree, so we had to find a solution. Our current solution is to use the bind feature of the standard Linux **mount** utility:
mount --bind /some/source /some/destination
By invoking the above command, one can mount a local folder /some/source onto another local folder /some/destination. The charon-admin administration utility takes care of mounting and unmounting the repositories in user home directories (see the chapter Charon-admin utility below for more information).
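As a side note, a plain `mount --bind` does not survive a reboot. If persistence is desired, the same bind can be expressed as an `/etc/fstab` entry; the paths below are an assumed example, not necessarily part of our setup:

```
/var/reps/git  /home/[user]/git  none  bind  0  0
```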
Charon-admin utility
Charon-admin is a part of the Charon package and is described in detail in a separate technical report called Charon – Resource Access Layer [19]. We use this tool to consolidate all administration scripts and hide them behind a single interface. Below is the list of available charon-admin commands:
1: charon-admin repository git create repository_id
2: charon-admin repository apt create repository_id gpg_key
3: charon-admin repository rpm create repository_id gpg_key
4: charon-admin repository tar create repository_id
5: charon-admin repository git|apt|rpm|tar repair
6: charon-admin view allowed [resource | -] [permission_type]
7: charon-admin view rights [account | -] [permission_type]
8: charon-admin do apt-update|rpm-update
9: charon-admin user create login alias Full Name
10: charon-admin user delete login
11: charon-admin ssh key add login
12: charon-admin service http|ftp|ssh|all start|stop|restart
We created six charon-admin modules to facilitate administration tasks. The first module, called repository, provides commands for repository management tasks (lines 1-5 in the example above). So far there are commands for creating all repository types and for repairing corrupted file system permissions; this module is responsible for creating all necessary file system objects. The second module is called view (lines 6-7 in the example above); using it, an administrator can check the ACL for a specific user or resource: which users have what type of access to a particular resource (or all resources), or which resources are accessible to a particular user. The do module (line 8 in the example above) provides commands for regenerating APT and RPM repository metadata and re-signing the packages with the GPG key after new packages have been uploaded successfully. This module is rarely used by the administrator directly; instead, users execute this script via SSH and charon once they have uploaded their packages, as mentioned in the chapter SSH publishing service. Next, the user module (lines 9-10) is used to create local user accounts. The ssh module (line 11) is just a shortcut for setting a user's public SSH key. Finally, the service module (line 12) can be used to start, stop and restart the publishing services. When a service is started or restarted, the appropriate configuration files are first regenerated using Charon's ACL configurations.
Git repositories
Git is a very fast and popular distributed version control system. We use it to aid developing many projects. On the project support system, we can host multiple Git repositories and define very fine access restrictions for specific repositories. There is a possibility of having either public or private repositories. Both of these types are accessible for reading and writing to a defined set of users via SSH. Public repositories are also accessible for reading via HTTPS and anonymous FTP. Configuring all three (SSH, FTP, HTTPS) publishing services consistently is done by the charon-admin tool which generates the appropriate configurations based on the ACLs defined in Charon.
APT, RPM and TAR package repositories
APT repositories
APT package repositories are created and managed using the reprepro [15] tool. There is a great tutorial [16] in the Debian wiki documentation on using this tool, which we followed during the implementation, but we automated and simplified the creation process with a charon-admin module that generates the necessary file system tree objects and configuration files for us. The directory structure looks as follows:
```
/var/reps/apt/repository_id:
-rw-r--r-- root root APT-GPG-KEY-repository_id
drwxr-xr-x root root conf
-rw-r--r-- root root distributions
-rw-r--r-- root root options
-rw-r--r-- root root override.squeeze
drwxrwxr-x charon charon db
drwxrwxr-x charon charon dists
drwxrwxr-x charon charon incoming
drwxr-xr-x root charon sid
drwxrwxr-x root charon squeeze
-rw-r--r-- root root repository_id.list
-rw-r--r-- root root repository_id-unstable.list
drwxrwxr-x charon charon pool
```
The conf directory contains the configuration for reprepro. This configuration describes the repository structure, its components, architectures, distribution codename and suite (stable, unstable), the GPG key for signing packages, etc. Users upload their packages to the appropriate subdirectory of the incoming directory. Once all packages have been uploaded, the user must invoke the apt-update command via SSH. This wakes up the processing daemon: all packages are then read, signed and moved to the appropriate places within the repository structure. The repository root folder also contains the GPG key and prepared sources.list files for the APT package manager. From the user's point of view, using the repository is a matter of simply invoking the following commands as root:
```
wget -O /etc/apt/sources.list.d/[reponame].list \
    https://server.domain/apt/[reponame]/[reponame].list
wget -O - https://server.domain/apt/[reponame]/APT-GPG-KEY-[reponame] | apt-key add -
aptitude update
```
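On the server side, the apt-update command hands the uploaded packages over to reprepro's incoming processing. Assuming a rule set named `incoming` in reprepro's configuration (an assumption in line with the tutorial [16], not necessarily our exact naming), the invocation may look like:

```
reprepro -b /var/reps/apt/[repository_id] processincoming incoming
```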
**RPM repositories**
RPM package repositories are created and managed using the `createrepo` [17] tool. `Createrepo` is not as powerful a tool for RPM repositories as `reprepro` is for APT repositories: the administrator must create the whole directory structure himself, and users are responsible for uploading packages to the correct places within the directory tree. `Createrepo` only creates the repository metadata files; it is not capable of signing the packages or moving them to the appropriate places. Below is an example of a possible RPM repository structure, which can be customized to suit actual needs:
```
/var/reps/rpm/repository_id:
-rw-r--r-- root root RPM-GPG-KEY-repository_id
drwxr-xr-x root root devel
drwxr-xr-x charon charon i386
drwxr-xr-x charon charon noarch
drwxr-xr-x charon charon SRPMS
drwxr-xr-x charon charon x86_64
drwxr-xr-x root root stable
drwxr-xr-x charon charon i386
drwxr-xr-x charon charon noarch
drwxr-xr-x charon charon SRPMS
drwxr-xr-x charon charon x86_64
```
After uploading packages to the correct locations, the user must invoke the `rpm-update` command via SSH, which will resign all packages and regenerate the repository metadata.
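For illustration, an rpm-update style script may run something like the following for each distribution/architecture directory. This is a hedged sketch, not the actual charon script (which is described in [19]), and it assumes the signing GPG key is already configured for rpmsign:

```
# resign the uploaded packages and regenerate the repository metadata
rpmsign --addsign /var/reps/rpm/[repository_id]/stable/x86_64/*.rpm
createrepo --update /var/reps/rpm/[repository_id]/stable/x86_64
```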
**TAR repositories**
TAR package repositories are just a fancy name for a shared folder in which users may upload anything they need.
**Conclusions**
We developed a central project support system to remove some of the burden from our research teams. For our staff, we provide the services of the mature, feature-rich and stable ticket tracking system Redmine, which also allows read access to the Git repositories. Further features of our project support system are secure hosting of public and private Git repositories and hosting of APT, RPM and TAR package repositories for the distribution of finished software packages. To prevent duplication, user access configuration is done by merging authorization data from Redmine and from text files. Redmine accounts are mapped to local accounts, and configurations for the SSH, HTTP and FTP publishing services are automatically generated from the merged authorization data.
Currently our Redmine installation supports tens of projects, most of which also have a Git repository created and assigned to them. There are a few projects without a Git repository and a few Git repositories without Redmine support. At the time of writing there were only a handful of active APT, RPM and TAR repositories, but their numbers have been growing slowly. The whole system is used by some tens of users, a majority of our staff.
Acknowledgements
This work is supported by the research intent MSM6383917201 of the Ministry of Education, Youth and Sports of the Czech Republic.
References
[1]: SAML, http://saml.xml.org/
[8]: HTTP Authentication Plugin (Adam Lantos), https://github.com/AdamLantos/redmine_http_auth
[9]: mod_shib2, http://shibboleth.net/
[17]: createrepo, http://createrepo.baseurl.org/
[18]: OpenSSH, http://www.openssh.org/
LiveCode 9.0.0-dp-4 Release Notes
Overview
Known issues
Platform support
- Windows
- Linux
- Mac
- iOS
- Android
- HTML5
Setup
- Installation
- Uninstallation
- Reporting installer issues
- Activating LiveCode Indy or Business edition
- Command-line installation
- Command-line uninstallation
- Command-line activation for LiveCode Indy or Business edition
Engine changes
- Maximum text length on iOS native input fields (9.0.0-dp-4)
- Throw error when changing behavior from behavior script (9.0.0-dp-4)
- Support for loading multi-module bytecode files (9.0.0-dp-4 - experimental)
- Ensure CMYK JPEGs display correctly on Mac (9.0.0-dp-4)
- Calling JavaScript from HTML5 (9.0.0-dp-2)
- Re-written LCB VM (9.0.0-dp-2)
- Undocumented multi-file libUrlMultipartFormAddPart removed (9.0.0-dp-2)
- libURLSetStatusCallback no longer requires a target object for the message (9.0.0-dp-1)
- Platform support end-of-life (9.0.0-dp-1)
- Field tab alignments in htmlText and styledText (9.0.0-dp-1)
- Specific engine bug fixes (9.0.0-dp-4)
- Specific engine bug fixes (9.0.0-dp-3)
- Specific engine bug fixes (9.0.0-dp-2)
- Specific engine bug fixes (9.0.0-dp-1)
IDE changes
- Show up to 10 nested behaviors in the Project Browser (9.0.0-dp-4)
- Reinstate store tab of extension manager (9.0.0-dp-4)
- SVG icon support in the Extension Builder (9.0.0-dp-4)
- <Shift+Tab> reformats entire script (9.0.0-dp-4)
- Create script only stack behavior (9.0.0-dp-3)
- Allow substack to become a mainstack via property inspector (9.0.0-dp-2)
- Drag and drop stackfiles (9.0.0-dp-2)
- Specific IDE bug fixes (9.0.0-dp-4)
- Specific IDE bug fixes (9.0.0-dp-3)
Overview
LiveCode 9.0 enables access to libraries and platform APIs written in many other languages thanks to the community-funded 'Infinite LiveCode' project.
This includes a greatly improved LiveCode Builder virtual machine.
LiveCode 9.0 contains many additional improvements to support LiveCode app developers, including:
- A new "spinner" widget
- OAuth2 authentication library for use with web APIs (e.g. Facebook, Google and GitHub)
- A command argument parser library for building command-line standalones
- Updates and performance improvements for existing widgets
Known Issues
- The installer will currently fail if you run it from a network share on Windows. Please copy the installer to a local disk before launching on this platform.
- The browser widget does not work on 32-bit Linux.
- 64-bit standalones for Mac OS X do not have support for audio recording or the revVideoGrabber external.
**Platform support**
The engine supports a variety of operating systems and versions. This section describes the platforms that we ensure the engine runs on without issue (although in some cases with reduced functionality).
**Windows**
LiveCode supports the following versions of Windows:
- Windows 7 (both 32-bit and 64-bit)
- Windows Server 2008
- Windows 8.x (Desktop)
- Windows 10
**Note:** On 64-bit Windows installations, LiveCode runs as a 32-bit application through the WoW layer.
**Linux**
LiveCode supports the following Linux distributions, on 32-bit or 64-bit Intel/AMD or compatible processors:
- Ubuntu 14.04 and 16.04
- Fedora 23 & 24
- Debian 7 (Wheezy) and 8 (Jessie) [server]
- CentOS 7 [server]
LiveCode may also run on Linux installations which meet the following requirements:
- Required dependencies for core functionality:
- glibc 2.13 or later
- glib 2.0 or later
- Optional requirements for GUI functionality:
- GTK/GDK 2.24 or later
- Pango with Xft support
- esd (optional, needed for audio output)
- mplayer (optional, needed for media player functionality)
- lcms (optional, required for color profile support in images)
- gksu (optional, required for privilege elevation support)
**Note:** If the optional requirements are not present then LiveCode will still run but the specified features will be disabled.
**Note:** The requirements for GUI functionality are also required by Firefox and Chrome, so if your
Linux distribution runs one of those, it will run LiveCode.
**Note:** It may be possible to compile and run LiveCode Community for Linux on other architectures but this is not officially supported.
### Mac
The Mac engine supports:
- 10.9.x (Mavericks) on Intel
- 10.10.x (Yosemite) on Intel
- 10.11.x (El Capitan) on Intel
- 10.12.x (Sierra) on Intel
### iOS
iOS deployment is possible when running LiveCode IDE on a Mac, and provided Xcode is installed and has been set in LiveCode Preferences (in the Mobile Support pane).
Currently, the supported versions of Xcode are:
- Xcode 6.2 on MacOS X 10.9
- Xcode 6.2 and 7.2 on Mac OS X 10.10
- Xcode 8.1 on MacOS X 10.11
- Xcode 8.1 on MacOS 10.12
It is also possible to set other versions of Xcode, to allow testing on a wider range of iOS simulators. For instance, on MacOS 10.12 (Sierra), you can add Xcode 6.2 in the Mobile Support preferences, to let you test your stack on the iOS Simulator 8.2.
We currently support deployment for the following versions of iOS:
- 8.2 [simulator]
- 9.2
- 10.1
### Android
LiveCode allows you to save your stack as an Android application, and also to deploy it on an Android device or simulator from the IDE.
Android deployment is possible from Windows, Linux and Mac OSX.
The Android engine supports devices using ARMv7 or ARMv8 processors. It will run on the following versions of Android:
- 4.1-4.3 (Jelly Bean)
- 4.4 (KitKat)
- 5.0-5.1 (Lollipop)
- 6.0 (Marshmallow)
To enable deployment to Android devices, you need to download the Android SDK, and then use
the 'Android SDK Manager' to install:
- the latest "Android SDK Tools"
- the latest "Android SDK Platform Tools"
You also need to install the Java Development Kit (JDK). On Linux, this is usually packaged as "openjdk". LiveCode requires JDK version 1.6 or later.
Once you have set the path of your Android SDK in the "Mobile Support" section of the LiveCode IDE's preferences, you can deploy your stack to Android devices.
Some users have reported successful Android Watch deployment, but it is not officially supported.
**HTML5**
LiveCode applications can be deployed to run in a web browser, by running the LiveCode engine in JavaScript and using modern HTML5 JavaScript APIs.
HTML5 deployment does not require any additional development tools to be installed.
LiveCode HTML5 standalone applications are currently supported for running in recent versions of Mozilla Firefox, Google Chrome or Safari. For more information, please see the "HTML5 Deployment" guide in the LiveCode IDE.
**Setup**
**Installation**
Each version of LiveCode can be installed in its own separate folder. This allows multiple versions of LiveCode to be installed side-by-side. On Windows (and Linux), each version of LiveCode has its own Start Menu (or application menu) entry. On Mac OS X, each version has its own app bundle.
On Mac OS X, install LiveCode by mounting the .dmg file and dragging the app bundle to the Applications folder (or any other suitable location).
For Windows and Linux, the default installation locations when installing for "All Users" are:
| Platform | Path |
| -------- | ---- |
| Windows | `<x86 program files folder>/RunRev/LiveCode <version>` |
| Linux | `/opt/livecode/livecode-<version>` |
The default installation locations when installing for "This User" are:
| Platform | Path |
| -------- | ---- |
| Windows | `<user roaming app data folder>/RunRev/Components/LiveCode <version>` |
| Linux | `~/.runrev/components/livecode-<version>` |
**Note:** If installing for "All Users" on Linux, either the `gksu` tool must be available, or you must
manually run the LiveCode installer executable as root (e.g. using `sudo` or `su`).
Uninstallation
On Windows, the installer hooks into the standard Windows uninstall mechanism. This is accessible from the "Add or Remove Programs" applet in the Windows Control Panel.
On Mac OS X, drag the app bundle to the Trash.
On Linux, LiveCode can be removed using the `setup.x86` or `setup.x86_64` program located in LiveCode's installation directory.
Reporting installer issues
If you find that the installer fails to work for you then please report it using the LiveCode Quality Control Centre or by emailing support@livecode.com.
Please include the following information in your report:
- Your platform and operating system version
- The location of your home or user folder
- The type of user account you are using (guest, restricted, admin etc.)
- The installer log file.
The installer log file can be located as follows:
| Platform | Path |
| -------- | ---- |
| Windows 2000/XP | `<documents and settings folder>/<user>/Local Settings/` |
| Windows Vista/7 | `<users folder>/<user>/AppData/Local/RunRev/Logs` |
| Linux | `<home>/.runrev/logs` |
Activating LiveCode Indy or Business edition
The licensing system ties your product licenses to a customer account system, meaning that you no longer have to worry about finding a license key after installing a new copy of LiveCode. Instead, you simply have to enter your email address and password that has been registered with our customer account system and your license key will be retrieved automatically.
Alternatively it is possible to activate the product via the use of a specially encrypted license file. These will be available for download from the customer center after logging into your account. This method will allow the product to be installed on machines that do not have access to the internet.
Command-line installation
It is possible to invoke the installer from the command-line on Linux and Windows. When doing command-line installation, no GUI will be displayed. The installation process is controlled by arguments passed to the installer.
Run the installer using a command in the form:
```
<installer> install noui [OPTION ...]
```
where `<installer>` should be replaced with the path of the installer executable or app (inside the DMG) that has been downloaded. The result of the installation operation will be written to the console.
The installer understands any of the following `OPTION`s:
| Option | Description |
| ------ | ----------- |
| `-allusers` | Install the IDE for "All Users". If not specified, LiveCode will be installed for the current user only. |
| `-desktopshortcut` | Place a shortcut on the Desktop (Windows-only) |
| `-startmenu` | Place shortcuts in the Start Menu (Windows-only) |
| `-location LOCATION` | The folder to install into. If not specified, the LOCATION defaults to those described in the "Installation" section above. |
| `-log LOGFILE` | The file to which to log installation actions. If not specified, no log is generated. |
**Note:** the command-line installer does not do any authentication. When installing for "All Users", you will need to run the installer command as an administrator.
As the installer is actually a GUI application, it needs to be run slightly differently from other command-line programs.
On Windows, the command is:
```
start /wait <installer> install noui [OPTION ...]
```
### Command-line uninstallation
It is possible to uninstall LiveCode from the command-line on Windows and Linux. When doing command-line uninstallation, no GUI will be displayed.
Run the uninstaller using a command of the form:
```
<uninstaller> uninstall noui
```
where `<uninstaller>` is `.setup.exe` on Windows and `.setup.x86` on Linux. This executable, on both platforms, is located in the folder where LiveCode is installed.
The result of the uninstallation operation will be written to the console.
**Note:** the command-line uninstaller does not do any authentication. When removing a version of
LiveCode installed for "All Users", you will need to run the uninstaller command as an administrator.
**Command-line activation for LiveCode Indy or Business edition**
It is possible to activate an installation of LiveCode for all users by using the command-line. When performing command-line activation, no GUI is displayed. Activation is controlled by passing command-line arguments to LiveCode.
Activate LiveCode using a command of the form:
```
<livecode> activate -file LICENSEFILE -passphrase SECRET
```
where `<livecode>` should be replaced with the path to the LiveCode executable or app that has been previously installed.
This loads license information from the manual activation file `LICENSEFILE`, decrypts it using the given `SECRET` passphrase, and installs a license file for all users of the computer. Manual activation files can be downloaded from the [My Products] page in the LiveCode account management site.
It is also possible to deactivate LiveCode with:
```
<livecode> deactivate
```
Since LiveCode is actually a GUI application, it needs to be run slightly differently from other command-line programs.
On Windows, the command is:
```
start /wait <livecode> activate -file LICENSE -passphrase SECRET
start /wait <livecode> deactivate
```
On Mac OS X, you need to do:
```
<livecode>/Contents/MacOS/LiveCode activate -file LICENSE -passphrase SECRET
<livecode>/Contents/MacOS/LiveCode deactivate
```
**Engine changes**
**Maximum text length on iOS native input fields (9.0.0-dp-4)**
It is now possible to set and get the maximum number of characters that can be entered into an iOS native single-line field.
Throw error when changing behavior from behavior script (9.0.0-dp-4)
Previously it was theoretically possible to change the behavior of an object from that object’s existing behavior script. This will now result in an execution error
```
parentScript: can't change parent while parent script is executing
```
This change was necessary as the engine would occasionally crash when changing a behavior this way, and would be guaranteed to crash when stepping over the behavior script line that changes the behavior.
Support for loading multi-module bytecode files (9.0.0-dp-4 - experimental)
The `load extension` command is now able to load LiveCode Builder bytecode files (.lcm files) that contain multiple modules' bytecode.
The first module in each .lcm file is treated as the "main module" of the module (i.e. the library or widget), and other modules are treated as support modules.
Support modules only remain loaded if they are used by the main module, and support modules must be submodules of the main module. For example, if the main module is "com.livecode.newbutton", then all other modules in the bytecode file must have names like "com.livecode.newbutton.<something>".
**Important:** This feature is currently experimental. This means it may not be complete, or may fail in some circumstances that you would expect it to work. Please do not be afraid to try it out as we need feedback to develop it further.
Ensure CMYK JPEGs display correctly on Mac (9.0.0-dp-4)
This fixes the incorrect rendering of CMYK JPEGs containing an ICC profile on Mac.
Calling JavaScript from HTML5 (9.0.0-dp-2)
JavaScript has been added to the `alternateLanguages` on the HTML5 platform.
It is now possible to call JavaScript code from HTML5 standalones by using the `do <script> as <alternateLanguage>` form of the `do` command.
This allows HTML5 standalones to interact with the browser within which they are running. The value of the JavaScript expression will be placed in the `result` variable:
```plaintext
local tDocTitle
do "document.title" as "JavaScript"
put the result into tDocTitle
```
**Re-written LCB VM (9.0.0-dp-2)**
The "virtual machine" used to run LiveCode Builder code has been re-written from scratch. This new VM provides a framework enabling better extensibility, better error reporting and, in future, more comprehensive optimizations.
Most existing LCB code should run without any changes. There may be some code that worked on the previous VM but doesn't in the new VM due to more comprehensive run-time checking; this is usually fixable with only very minor changes to the source code.
**Undocumented multi-file libUrlMultipartFormAddPart removed (9.0.0-dp-2)**
Previously, the `libUrlMultipartFormAddPart` command had the undocumented capability to accept multiple file names separated by commas. The handler failed to work for files that had commas in the name, however. The undocumented behaviour has been removed. To add multiple files to a form, call `libURLMultipartFormAddPart` once for each file.
**libURLSetStatusCallback no longer requires a target object for the message (9.0.0-dp-1)**
Passing an object reference as a second parameter to `libURLSetStatusCallback` is no longer required. If no object is passed in then the message will be sent to `revLibURL` itself and you can handle the message anywhere in the message path.
**Platform support end-of-life (9.0.0-dp-1)**
As announced on the LiveCode blog, running LiveCode on the following platforms is no longer officially supported from LiveCode 9.0 onwards:
- Windows XP
- Windows Server 2003
- Windows Vista
- Android Gingerbread (2.3.3-2.3.7)
- Android Ice Cream Sandwich (4.0)
- OS X Snow Leopard (10.6)
- OS X Lion (10.7)
- OS X Mountain Lion (10.8)
- iOS Simulator 6.1
- iOS Simulator 7.1
Field tab alignments in htmlText and styledText (9.0.0-dp-1)
The styledText and htmlText of a field now include tab alignment information. The htmlText uses a new tabalign attribute with a list of alignments, e.g.
```html
<p tabalign='left,center,right'>left&#09;middle&#09;right&#09;</p>
```
The styledText stores tab alignment in a "tabalign" key in each paragraph's "style" array, e.g.
```
get tStyledText[1]["style"]["tabalign"]
```
Specific engine bug fixes (9.0.0-dp-4)
11039 Throw error when changing behavior from behavior script
13055 Improve formatting of try syntax description
13150 Ensure tabStops property docs describe relationship with indent properties
13151 Correct example of setting "listIndent" for whole field
13696 The "volumes" function is only supported on Mac & Windows
13880 Fix formatting in scrollbarWidth property documentation
14172 Clarify that the "combine" command works in lexicographic key order
14247 Clarify insertion point location when field is focused
14363 The "startup" message is sent to the first card of the initial stack
14473 Provide a complete example for revZipAddItemWithData
14801 Correct examples in "split" command documentation
14867 The tabStops property can't be set to a boolean
15117 The "lineOffset" function can search for multiline substrings
15470 Correct "tool" property docs to be clear that it is not a stack property
15604 Update dictionary links to PCRE pattern documentation
16511 Make examples of "borderPixel" use its main synonym
16658 Document the fact that "the environment" may be "server"
18264 Don't fail standalone build completely if unlicensed platforms are selected
18277 Calculate the height of the mac desktop space correctly
18652 Fix occasional crash when getting the clipboarddata["text"] on Windows.
18738 Fix data loss when cr inserted into a styledText run
18852 Fix exception thrown in IDE when saving standalone with more than one stack
18853 Support for loading multi-module bytecode files (experimental)
18923 Ensure CMYK JPEGs display correctly on Mac
18925 Prevent crashes on memory exhaustion
18948 Make 'obj of me' consistent across all control types
Specific engine bug fixes (9.0.0-dp-3)
13370 Correct terminology in "convert" command documentation
14080 Fix find command not finding in specified field when not on current card
18295 Fix empty specialFolderPath("resources") on Windows
18392 Ensure "ceiling" is listed as a reserved word
18686 Fix a crash related to the message watcher
18755 Fix loss of BMP as supported clipboard image format
18762 Fix a rare crash on saving after cloning a field
18873 Fix documentation of valid values for lineSize property
18890 Fix crash when playing non-imported audioclip
18893 Fix formatting in description of stack mode property
Specific engine bug fixes (9.0.0-dp-2)
12196 Correct documentation for "do" command
18147 The scriptExecutionErrors property not listed in dictionary
18231 Fixed documentation formatting issues for binaryEncode and binaryDecode
18350 Fix spurious type errors for repeat variables in LCB
18353 Remove duplicated urlResponse documentation
18495 Undocumented multi-file libUrlMultipartFormAddPart removed
18539 Don't change the defaultFolder on startup
18600 Fix crash when quitting from script editor
18632 Mark the copyResource function as deprecated
18651 Ensure "10 garbage" is never a number
18666 Fix crash when find command matches text in sharedText field on non-current card
18724 Fix incorrect cross-references in lockLocation dictionary entry
18743 Fix missing cross-references in "keys" dictionary entry
18774 Fix errors in "write to file" dictionary entry
18821 Report all LCB stack frames in LCS error info
Specific engine bug fixes (9.0.0-dp-1)
14645 Field tab alignments in htmlText and styledText
14651 There is no documentation entry for "currentcard"
15865 Fixed Dictionary description for "is not among"
16211 Fix compilation errors with MacOSX SDK 10.10 and higher
18111 Make PDF user guide typography match dictionary view
18125 Fix Dictionary example for is within
18254 Improve efficiency of equality operators on binary data
18297 Broken references in "filename of stack" dictionary entry
Show up to 10 nested behaviors in the Project Browser (9.0.0-dp-4)
It is now possible to view up to 10 nested behaviors of an object in the PB. The behaviors are shown using oval graphics. Clicking on the graphic takes you to the script of the behavior. The tooltip of the graphic shows the long name of the behavior.
Reinstate store tab of extension manager (9.0.0-dp-4)
The store tab of the extension manager has been reinstated and the revBrowser implementation has been replaced with a browser widget.
SVG icon support in the Extension Builder (9.0.0-dp-4)
The 'Extension Builder' now displays LiveCode Builder extensions' SVG icons, if present. You can add an SVG icon to an LCB extension by setting its "svgicon" metadata to an SVG path that could be displayed by the 'SVG Icon' widget.
When an extension provides an SVG icon, packaging the extension no longer requires you to choose bitmap icon files.
<Shift+Tab> reformats entire script (9.0.0-dp-4)
Holding down the Shift key while pressing the Tab key will reformat the entire script in the Script Editor.
Create script only stack behavior (9.0.0-dp-3)
The menu for assigning a behavior to a control has two additional options:
- Create behavior from new script only stack
- Create behavior using control script and script only stack

Either option will prompt you for a stack name and a location for the script only stack. The new stack will be saved, assigned as the behavior of the control, and then added to the stackFiles property of the control's stack.
Allow substack to become a mainstack via property inspector (9.0.0-dp-2)
The property inspector Basic pane for substacks now has a button beneath the mainstack labelled "Make mainstack". Note that once pressed this button will disappear, as the stack will no longer be a substack.
Drag and drop stackfiles (9.0.0-dp-2)
You can now drag and drop stack files onto the stackFiles field in the PI.
Specific IDE bug fixes (9.0.0-dp-4)
17889 Repaired confusing layout of fill gradient control in Property Inspector
18930 Reinstate store tab of extension manager
18932 SVG icon support in the Extension Builder
18937 <Shift+Tab> reformats entire script
18956 Make sure oauth2 library is loaded correctly
18966 Remove size limitation for creating graphics
18981 Added tooltip to iOS icon and splash screen selection
18987 Reinstate fixedLineHeight for tableField
Specific IDE bug fixes (9.0.0-dp-3)
15917 Font Size for Project Browser can now be set from LiveCode Preferences -> Project Browser
18037 Apply property defaults from metadata when testing widgets
18586 Make sure the Project Browser stack/card/group view can always expand
18897 "Show Sections" for Project Browser can now be set from LiveCode Preferences -> Project Browser
18920 Reinstate that a single char can be selected with the mouse in ScriptEditor
Specific IDE bug fixes (9.0.0-dp-2)
18302 Retain custom prop changes when clicking on tree view in editor
18393 [Project Browser] Change "Sort controls by number" to "Sort controls by layer" to avoid confusion + make sure they are sorted numerically
18491 Allow substack to become a mainstack via property inspector
18595 Clicking left of text now moves caret to the beginning of text
18631 Only use development team preferences when running from the repository
18637 Fix searching in "Stack File and its stack files" from the script editor
18644 Deactivate breakpoints correctly
18701 Prevent over-enthusiastic save prompts
18726 Make sure the Bug Report checkmark appears at the correct place in Standalone Settings
18804 Update locked inspectors when mainstack names change
18835 linkVisitedColor and linkHiliteColor can now be set from property inspector
Setting stackFiles in PI causes an error if you "cancel" the file dialog or select multiple files.
Drag and drop stackfiles
Specific IDE bug fixes (9.0.0-dp-1)
- Fix issue creating breakpoints via the new breakpoint dialog
- Improve user feedback for invalid breakpoint conditions
- Add warning about numerical names to user guide.
- Bring script editor and documentation stacks to front if the stack is already open when navigating to content
- textFont of control does not get set when tabbing out of textFont comboBox in P.I.
LiveCode Builder changes
LiveCode Builder Standard Library
Assertions
- Checks for handler preconditions can now be included using the new `expect` statement. For example, the following statement will throw an error if the value `pProbability` is out of the valid range for probabilities:
```plaintext
expect that (pProbability >= 0 and pProbability <= 1) \
because "Probabilities must be in the range [0,1]"
```
The `that` keyword and the `because <Reason>` clauses are optional.
LiveCode Builder Language
Identifiers
- Unqualified identifiers are now expected to match `[A-Za-z0-9_]+`. The `.` symbol is interpreted as a namespace operator.
Namespaces
- Identifiers in LCB can now be qualified, to disambiguate between symbols defined in different namespaces.
Literals
- Base 2 (binary) integer literals can now be specified by using a "0b" prefix, e.g.
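```
0b1010
0b11010101
```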
- Base 16 (hexadecimal) integer literals can now be specified by using a "0x" prefix, e.g.
```
0xdeadbeef
0x0123fedc
```
### LiveCode Builder Tools
**lc-compile**
**Errors**
- A new error has been added for identifiers in declaration context that contain `.` - identifiers should always be declared without qualification.
- Parsing of numeric literals, in general, has been tightened up. In particular, the compiler will detect invalid suffixes on numeric literals, meaning you cannot accidentally run a number directly into an identifier.
```
1.344foo -- ERROR
0xabcdefgh -- ERROR
0b010432 -- ERROR
```
**Messages**
- Errors, warnings and informational messages now display the affected line of code and visually indicate the position where the problem was found. For example, the output might look like:
```
foo.lcb:2:26: error: Identifier 'InvalidExpression' not declared
constant kBadConstant is InvalidExpression
                         ^
```
**lc-run**
- **lc-run** now has the *experimental* ability to load and run bytecode assemblies containing multiple LCB modules. To construct a multi-module bytecode assembly, simply concatenate several `.lcm` module files together. The first module found in a bytecode assembly is treated as its main module.
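Since a multi-module assembly is just the byte-wise concatenation of `.lcm` files, any tool that appends files will do (a shell `cat a.lcm b.lcm > all.lcm` works). The Python sketch below, with hypothetical file names, builds one, putting the intended main module first:

```python
# Concatenate .lcm module files into one bytecode assembly for lc-run.
# File names here are hypothetical; the first module listed becomes the
# assembly's main module.
with open("assembly.lcm", "wb") as assembly:
    for module_file in ["main.lcm", "support.lcm"]:
        with open(module_file, "rb") as module:
            assembly.write(module.read())
```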
LiveCode Builder Documentation
Style guide
- Updated naming guide for handlers and types
- Added indentation and wrapping guidelines
- New section with widget-specific recommendations
Specific LCB bug fixes (9.0.0-dp-4)
18107 Do not permit namespace operator in unqualified identifiers.
18929 Update LiveCode Builder ABI version for LiveCode 9
Specific LCB bug fixes (9.0.0-dp-2)
18856 Use cached numeric values when converting numbers to/from strings
Specific LCB bug fixes (9.0.0-dp-1)
18086 Improve and expand LCB style guide
18385 lc-run: Load multi-module bytecode assemblies.
18463 Show correct error position when source line includes tabs
LiveCode extension changes
Spinner widget
A new spinner, or activity indicator, widget has been implemented. Spinners provide visual feedback to users while an activity of unknown duration is in progress, such as processing a large amount of data or preparing a complex user interface.
Line Graph widget
Markers
- The new markerScale property controls the size of graph point markers.
- Any named icon from the SVG icon library can now be used as a graph point marker.
Tree View widget
Performance
- Previously, when an array was expanded in the Tree View widget, all of the display calculations were done before the next redraw. Now the keys are sorted (as before) but the display calculations are made for a maximum of 1000 rows. When more rows are needed due to scrolling, another 1000 are calculated at that point.
This provides a near-continuous scrolling experience for arrays with large numbers of keys, and ties the expense of expanding an array to that of sorting its keys.
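The strategy is easiest to see in sketch form. The Python below uses invented names and is not the widget's actual implementation; it sorts all keys up front but computes per-row display data only in 1000-row batches, on demand:

```python
# Illustrative sketch of chunked, on-demand row calculation (not the widget's
# actual code): sort every key immediately, but build display data lazily.
class LazyRows:
    CHUNK = 1000

    def __init__(self, array):
        self.array = array
        self.keys = sorted(array)   # the full sort still happens up front
        self.rows = []              # display rows computed so far

    def row(self, index):
        # Compute further 1000-row chunks only when scrolling asks for them.
        while len(self.rows) <= index and len(self.rows) < len(self.keys):
            chunk = self.keys[len(self.rows):len(self.rows) + self.CHUNK]
            self.rows.extend((key, str(self.array[key])) for key in chunk)
        return self.rows[index]
```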
Segmented Control widget
Appearance and theming
- Dividers between segments are no longer drawn when the `showBorder` property is `false`.
Properties
- Setting the `itemCount` now updates all other properties immediately, rather than at the next redraw.
- All list-like properties now contain exactly `itemCount` items at all times (see the sketch after this list).
- The `itemNames` property may now include duplicated and/or empty segment names.
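A minimal sketch of this invariant (Python, with invented names; not the widget's source): whenever `itemCount` changes, each list-like property is padded with empty items or truncated so that it always holds exactly `itemCount` entries:

```python
# Pad with empty strings or truncate so a list-like property always has
# exactly item_count entries (illustrative only).
def resize_property(items, item_count, filler=""):
    return (items + [filler] * item_count)[:item_count]

resize_property(["On", "Off"], 4)  # ['On', 'Off', '', '']
resize_property(["On", "Off"], 1)  # ['On']
```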
JSON Library
JSON parser improvements
- `JsonImport()` no longer incorrectly accepts garbage at the end of a JSON file.
- `JsonImport()` no longer incorrectly accepts unescaped control characters in strings.
- "null" is a valid JSON file, and `JsonImport("null")` no longer throws an error. It returns `nothing` in LCB and the empty string in LiveCode Script.
- A number by itself is a valid JSON file, and `JsonImport("25")` now returns 25, rather than throwing a syntax error.
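For comparison, Python's standard `json` module treats these "lonely" documents the same way, which can be a handy cross-check when testing:

```python
import json

json.loads("null")  # returns None (LCB's JsonImport returns nothing here)
json.loads("25")    # returns 25
```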
JSON parser security fixes
- Some crafted JSON files could cause `JsonImport` to use excessive amounts of CPU time. The `JsonImport` function will now reject inputs with more than 500 levels of structure nesting.
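The general shape of such a guard is a depth counter maintained while scanning the input. The Python sketch below illustrates the technique only; it is not LiveCode's implementation:

```python
# Reject JSON text whose structural nesting exceeds a fixed limit
# (illustrative sketch of the general technique, not LiveCode's code).
MAX_DEPTH = 500

def check_nesting(text, max_depth=MAX_DEPTH):
    depth = 0
    in_string = False
    escaped = False
    for ch in text:
        if in_string:                  # brackets inside strings don't nest
            if escaped:
                escaped = False
            elif ch == "\\":
                escaped = True
            elif ch == '"':
                in_string = False
        elif ch == '"':
            in_string = True
        elif ch in "[{":
            depth += 1
            if depth > max_depth:
                raise ValueError("too many levels of structure nesting")
        elif ch in "]}":
            depth -= 1

check_nesting("[" * 501 + "1" + "]" * 501)  # raises ValueError
```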
oauth2 script library
OAuth2 dialog library
A new library has been implemented for presenting an OAuth2 authorization dialog for any web service that supports the OAuth2 Authorization Code Flow.
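As a sketch of what such a dialog automates, the first leg of the Authorization Code Flow is a browser visit to an authorization URL like the one constructed below (Python; the endpoint and client values are placeholders, not the library's API):

```python
# Build the authorization URL for the first leg of the OAuth2
# Authorization Code Flow. All parameter values here are placeholders.
from urllib.parse import urlencode

def authorization_url(endpoint, client_id, redirect_uri, scope, state):
    return endpoint + "?" + urlencode({
        "response_type": "code",   # selects the Authorization Code Flow
        "client_id": client_id,
        "redirect_uri": redirect_uri,
        "scope": scope,
        "state": state,            # echoed back; protects against CSRF
    })
```

After the user approves, the service redirects to `redirect_uri` with a `code` parameter, which the application then exchanges for an access token; presumably the dialog library hosts this round trip in a browser so script code receives the outcome.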
getopt script library
Command-line option parsing support
The new **getopt** library provides support for parsing Linux-style command-line options.
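The flavor of Linux-style parsing (short `-v` options and long `--verbose` options) can be illustrated with Python's standard `getopt` module; the LiveCode library's own handler names are not shown here:

```python
# Parse "-v -o out.txt input.txt" in the classic Linux getopt style.
import getopt

opts, args = getopt.getopt(
    ["-v", "-o", "out.txt", "input.txt"],  # stand-in for sys.argv[1:]
    "vo:",                                 # -v is a flag, -o takes a value
    ["verbose", "output="],                # long-option equivalents
)
# opts == [('-v', ''), ('-o', 'out.txt')], args == ['input.txt']
```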
Specific extension bug fixes (9.0.0-dp-3)
18908 Fix parsing of JSON files containing only a single-digit integer
Specific extension bug fixes (9.0.0-dp-2)
18500 Ensure color properties are documented correctly
18693 Prevent long delays when expanding arrays with many keys
18697 Fix parsing of "lonely number" JSON files
18707 Fix possible denial of service via crafted JSON inputs
18714 Ensure all itemNames, itemLabels etc. can be set to empty
18779 Do not draw borders when showBorder is disabled
Previous release notes
- LiveCode 8.1.1 Release Notes
- LiveCode 8.1.0 Release Notes
- LiveCode 8.0.2 Release Notes
- LiveCode 8.0.1 Release Notes
- LiveCode 8.0.0 Release Notes
- LiveCode 7.1.4 Release Notes
- LiveCode 7.1.3 Release Notes
- LiveCode 7.1.2 Release Notes
- LiveCode 7.1.1 Release Notes
- LiveCode 7.1.0 Release Notes
- LiveCode 7.0.6 Release Notes
- LiveCode 7.0.4 Release Notes
- LiveCode 7.0.3 Release Notes
- LiveCode 7.0.1 Release Notes
- LiveCode 7.0.0 Release Notes
- LiveCode 6.7.9 Release Notes
- LiveCode 6.7.8 Release Notes
- LiveCode 6.7.7 Release Notes
- LiveCode 6.7.6 Release Notes
- LiveCode 6.7.4 Release Notes
- LiveCode 6.7.2 Release Notes
- LiveCode 6.7.11 Release Notes
- LiveCode 6.7.10 Release Notes
- LiveCode 6.7.1 Release Notes
- LiveCode 6.7.0 Release Notes
- LiveCode 6.6.2 Release Notes
- LiveCode 6.6.1 Release Notes
- LiveCode 6.6.0 Release Notes
- LiveCode 6.5.2 Release Notes
- LiveCode 6.5.1 Release Notes
- LiveCode 6.5.0 Release Notes
- LiveCode 6.1.3 Release Notes
- LiveCode 6.1.2 Release Notes
- LiveCode 6.1.1 Release Notes
- LiveCode 6.1.0 Release Notes
- LiveCode 6.0.2 Release Notes
- LiveCode 6.0.1 Release Notes
- LiveCode 6.0.0 Release Notes
|
{"Source-Url": "https://downloads.livecode.com/livecode/9_0_0/LiveCodeNotes-9_0_0_dp_4.pdf", "len_cl100k_base": 8092, "olmocr-version": "0.1.50", "pdf-total-pages": 20, "total-fallback-pages": 0, "total-input-tokens": 40217, "total-output-tokens": 9234, "length": "2e12", "weborganizer": {"__label__adult": 0.0002751350402832031, "__label__art_design": 0.00023448467254638672, "__label__crime_law": 0.00014221668243408203, "__label__education_jobs": 0.00021076202392578125, "__label__entertainment": 5.3882598876953125e-05, "__label__fashion_beauty": 9.41753387451172e-05, "__label__finance_business": 0.00010955333709716796, "__label__food_dining": 0.00017642974853515625, "__label__games": 0.0005984306335449219, "__label__hardware": 0.00055694580078125, "__label__health": 0.00010591745376586914, "__label__history": 8.624792098999023e-05, "__label__home_hobbies": 4.3332576751708984e-05, "__label__industrial": 0.00015401840209960938, "__label__literature": 0.0001112818717956543, "__label__politics": 8.040666580200195e-05, "__label__religion": 0.00023746490478515625, "__label__science_tech": 0.0007948875427246094, "__label__social_life": 4.8220157623291016e-05, "__label__software": 0.0194854736328125, "__label__software_dev": 0.97607421875, "__label__sports_fitness": 0.00013780593872070312, "__label__transportation": 0.00013315677642822266, "__label__travel": 0.0001112818717956543}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 32902, 0.04871]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 32902, 0.18141]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 32902, 0.77718]], "google_gemma-3-12b-it_contains_pii": [[0, 1648, false], [1648, 2452, null], [2452, 4027, null], [4027, 5593, null], [5593, 7653, null], [7653, 9979, null], [9979, 12063, null], [12063, 13712, null], [13712, 15640, null], [15640, 17519, null], [17519, 19706, null], [19706, 21746, null], [21746, 23345, null], [23345, 25448, null], [25448, 26848, null], [26848, 28083, null], [28083, 29254, null], [29254, 31015, null], [31015, 32438, null], [32438, 32902, null]], "google_gemma-3-12b-it_is_public_document": [[0, 1648, true], [1648, 2452, null], [2452, 4027, null], [4027, 5593, null], [5593, 7653, null], [7653, 9979, null], [9979, 12063, null], [12063, 13712, null], [13712, 15640, null], [15640, 17519, null], [17519, 19706, null], [19706, 21746, null], [21746, 23345, null], [23345, 25448, null], [25448, 26848, null], [26848, 28083, null], [28083, 29254, null], [29254, 31015, null], [31015, 32438, null], [32438, 32902, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, false], [5000, 32902, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 32902, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 32902, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 32902, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 32902, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 32902, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 32902, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 32902, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 32902, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 32902, null]], 
"pdf_page_numbers": [[0, 1648, 1], [1648, 2452, 2], [2452, 4027, 3], [4027, 5593, 4], [5593, 7653, 5], [7653, 9979, 6], [9979, 12063, 7], [12063, 13712, 8], [13712, 15640, 9], [15640, 17519, 10], [17519, 19706, 11], [19706, 21746, 12], [21746, 23345, 13], [23345, 25448, 14], [25448, 26848, 15], [26848, 28083, 16], [28083, 29254, 17], [29254, 31015, 18], [31015, 32438, 19], [32438, 32902, 20]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 32902, 0.03876]]}
|
olmocr_science_pdfs
|
2024-11-30
|
2024-11-30
|
3c06af6ffcedaf723b79237c6ca4ab7df7121eca
|
Computing Requirements: Cognitive Approaches to Distributed
Requirements Engineering
Sean W. Hansen
Rochester Institute of Technology
shansen@saunders.rit.edu
William N. Robinson
Georgia State University
wrobinson@cis.gsu.edu
Kalle J. Lyytinen
Case Western Reserve University
kalle@case.edu
Abstract
We present a study of the goal-oriented modeling of RE processes executed by a practicing systems development team. The research combines an empirical case study of RE practices with the evaluation and simulation capability of i* modeling. Our analysis focuses on a system implementation project at a mid-size U.S. university and applies the theory of distributed cognition to generate a range of design insights for goal identification and process enhancement.
1. Introduction
Since its inception in the 1970s, the bulk of requirements engineering (RE) research has focused on mechanisms for requirements specification – documenting and formally encoding software requirements. This orientation emphasizes the generation of models that are complete, consistent, and which support downstream design tasks. Despite this persistent focus, RE has remained a challenging facet of software development, engendering significant impediments to project success and failing to live up to its original expectations [3, 20, 53].
Importantly, the strong orientation towards specification correctness and completeness is closely tied to traditional sequential development approaches. The recent success of less conventional methodologies, most notably open source software development [OSSD; 48] and agile development [21, 39], highlights the potential of approaches that rely on natural language, lightweight documentation, constant intertwining of requirements and design, and ongoing discussions of requirements for bringing about better outcomes.
In this article, we suggest that one of the pitfalls of the traditional approaches to RE is that scholars have rarely analyzed how requirements actually get computed as an ongoing cognitive activity within a complex and distributed socio-technical system.
Furthermore, researchers have given meager attention to how properties of such a cognitive system either impede or enable ‘effective’ computation of requirements for the artifact being designed. In this paper we seek to address this oversight by proposing a socio-technical model to analyze and account for RE as a distributed cognitive process. We base our treatment primarily on Hutchins’s [24] theory of distributed cognition. We contend that in any RE undertaking a socio-technical system consisting of people and artifacts engages in an ongoing computation of a varying requirements set through tasks of requirements identification, specification, negotiation, and validation.
In developing a distributed process model we outline how the set of requirements is determined using a set of requirements-oriented meta-goals (i.e., organizational goals, general RE goals, and the goals of effective distributed cognitive work environments), the system properties that are represented in a requirements set, and the goals for a system design process. We outline how the satisfaction of goals at different levels can be analyzed using goal-oriented i* models [60, 61]. These models enable us to determine how specific meta-requirements for RE systems can be derived for an effective requirements computation process based on the theory of distributed cognition. We use scenarios from a relatively complex RE case study involving the adoption, modification, and implementation of an ERP system within a university context. Our analysis demonstrates how the use of a socio-technical computational model can help identify pitfalls in the ongoing process and reveal reasons for observed positive and less positive outcomes. Based on this analysis we discuss reasons why open source or agile development processes may experience higher success rates in light of the developed model of requirements computation. Several recommendations for future research on requirements engineering are outlined.
2. Requirements Engineering
For the present study, two facets of RE research warrant a brief discussion: 1) a general overview of approaches to RE, and 2) goal-oriented RE. We therefore offer background on RE research to frame the present discussion of RE tasks and goal-oriented modeling. We then introduce the theoretical foundations of distributed cognition used in this study.
2.1. Traditional Approaches to RE
A wide array of textbooks and reviews are available, advising practitioners on effective approaches to RE [e.g., 11, 23, 29, 50]. By comparison, a relatively small percentage of the literature has focused on advancing a theoretical or empirical understanding of how requirements are discovered, defined, negotiated, and managed, and why these processes are so difficult [30]. Moreover, the prescriptive modeling and process methodologies have seldom been subjected to empirical scrutiny [55].
As Hansen et al. [19] note, most research approaches to RE betray a number of assumptions about requirements and the role of stakeholders. These assumptions include beliefs that 1) RE facets (e.g., elicitation, specification) can be distinguished in an unproblematic manner, 2) distinct information system components and functionality can be readily delineated, 3) ISD projects are time-bound efforts for the creation of an artifact that is “complete” at some point in time, and 4) designers can be regarded as an outside party in the application domain [19].
To illustrate the tenacity of these assumptions underlying established approaches to RE, we next briefly review the ways in which the field has been segmented. Just as RE represents one facet of ISD, so too have RE processes been divided into a number of distinct discourses. While researchers have posited anywhere from two to seven primary requirements tasks [12], a widely-employed categorization of the RE process suggests three core facets: 1) elicitation, 2) specification, and 3) validation & verification [38].
Requirements elicitation is generally framed as the first component of a design effort – the process by which a designer determines what organizational or customer needs must be addressed by the proposed artifact [16, 33, 38]. While a variety of terms (e.g., discovery, determination, identification) has been used to indicate this facet of the design, the label of elicitation is most commonly employed, reflecting the assumption that knowledge about requirements fundamentally rests with users and must be teased out by the designer.
Requirements specification is the process by which the design team acquires, abstracts, and represents the requirements for a design effort [38, 55]. In this context, modeling refers to the creation of abstracted representations of the real world through the use of established and formalized symbol systems [41]. The resulting specification represents a transition point where the needs of stakeholders are extended with functional and technical implications that flow from them. A specification must support ease of interpretation and understanding by all stakeholders, while presenting a sufficient foundation for the subsequent technical development. No subject has received more attention within the RE literature than requirements modeling and formulation of formal notation techniques for the specification [53]. In fact, it has been argued that modeling lies at the heart of the RE undertaking [5].
Requirements validation and verification ensure that the requirements 1) are of high quality, 2) address the users’ needs, 3) are appropriate for the design effort, and 4) contain no inconsistencies or errors [4]. Validation and verification address questions of whether or not the designers have conducted the RE processes effectively and the degree to which the specifications will support a productive design effort.
Through the strict segmentation of requirements processes and a focus on related solutions, RE researchers have tended to promote a techno-centric approach to RE tasks [37, 49]. For example, during specification the primacy of the designer’s perspective on the development process has been reinforced [19]. This has led to a heavy emphasis on formal notation systems and modeling approaches within the RE research [53].
2.2. Goal-oriented RE
A recent extension to the development of modeling frameworks has been to relate approaches or to formulate models that describe, guide, or organize RE tasks using system ontologies, notations, processes, or goals [31]. An important element in this trend is the emergence of goal-oriented requirements engineering (GORE) [46, 43, 62, 10, 52]. The GORE research focuses on modeling the objectives, or goals, under which a system development effort is undertaken [52]. Consequently, several GORE languages have been developed, such as i* [59, 61], GRL [60, 1], KAOS [54, 10], and Tropos [7, 42]. A GORE approach also allows for the identification of distinct types of system goals, such as the distinction between functional (i.e., relating to services that the system will provide) and non-functional (i.e., relating to quality characteristics or constraints to which the system must conform) goals [52].
3. Theory of Distributed Cognition
Distributed cognition (DCog) is a branch of cognitive science pioneered by Edwin Hutchins and his colleagues in the 1990s [22, 24, 26, 25, 27, 28]. The central tenet of the theory asserts that cognitive processes, such as memory, decision making, and reasoning, are not limited to the mental states of an individual. The development of the theory was motivated by research on teams engaged in complex tasks. In these settings, information processing activities are not localized to individuals, but are distributed across members. Furthermore, a significant portion of the cognitive workload is “shouldered” by the technical artifacts employed by group members.
By conceptualizing cognition as “the propagation of representational state across representational media” [24: p. 118], distributed cognition expands the unit of cognitive analysis from that of the individual to that of the entire team attending to a specific task. With this fundamental shift in perspective on cognitive activity, the theory lends itself to at least three significant assertions [25]: 1) the thought process is distributed among members of social groups, 2) cognition employs both internal and external structures, and 3) cognitive processes are distributed over time.
The DCog theory contends that cognitive processes are distributed across members of a group. Each member may play a specific role with respect to the processing of information and the initiation of cognitive action. This idea of social distribution has obvious ramifications for the study of distributed RE processes. Nearly all software design efforts are executed through a team structure [18]. Moreover, one essential characteristic of development teams is the diversity of knowledge [8, 36, 57]. While addressing complex design challenges, teams must bring together individuals from a wide variety of technical and functional domains. The cognitive task of arriving at a stable requirements set, which we referred to above as the computation of requirements, cannot be localized to any one of these participants, such as a designer (as is often assumed). Rather, it resides in the holistic process of cognitive computation that enables requirements to emerge as a quality of the social system.
The second implication of distributed cognition is that cognitive processes intertwine internal and external structure. While traditional cognitive perspectives focus on the internal states of mind, a DCog approach highlights the ways in which individuals and groups integrate external material elements of the environment as part of their thought processes. The distribution of cognitive activity through the use of external structure is readily apparent in prevailing RE practice. Indeed, the development of formal models in RE can be seen as creating external structures that support subsequent cognitive processes necessary to design. Some of these representations can be materialized in CASE tools that support and integrate cognitive processes embedded in requirements capture and software design [34, 56]. Consequently, existing artifacts serve as a significant external source of computing design requirements – setting the initial conditions which both enable and constrain design [2, 63, 19].
Finally, DCog theory contends that cognitive processes may be distributed not only in social and spatial terms, but also with respect to time; i.e., cognition is path dependent. Earlier actions influence the cognitive processes enacted later. Temporal distribution of cognition is present in any context where heuristics have been formulated for generating appropriate cognitive activity. Design efforts draw heavily upon requirements and artifacts inherited from earlier projects. For example, formal information architectures (e.g., enterprise and product architectures) often act as a mechanism to ensure consistency across multiple designs [19]. An extensive literature on requirements reuse suggests multiple approaches to distribute requirements computation over time [9, 35, 44]. In addition, some researchers have investigated temporal distribution of requirements while emphasizing iteration and evolution [e.g., 2, 14, 32].
The three facets of distributed cognition (i.e., social distribution, the use of external structure, and temporal extension) have often been highlighted in isolation. Naturally, they are closely linked in practice: the distribution of cognition over time implies the use of both social transmission (e.g., project team interaction) and material artifacts (i.e., legacy systems, enterprise architecture) to support memory. Similarly, socially-distributed cognitive processes are likely to employ both internal and external structures during individually-intensive cognitive tasks. In all, we posit that the theory of DCog offers a fruitful lens for assessing the ways in which requirements computation is distributed across individuals, organizations, and artifacts in today’s design environments.
4. Research Approach
The present study leverages two complementary research approaches. The initial phase of research was centered on a case study of a complex systems development project. The case analysis provides us the grounding for the identification of development goals that inform a distributed cognitive perspective. In the second phase of the research, we build upon the case to develop a simulation of goal satisfaction using a GORE/i* modeling tool.
4.1. Case Study Design
As noted, DCog processes form an emergent phenomenon that is not subject to straightforward manipulation. Accordingly, a case study approach is warranted to provide an occasion for rich exploration of the practical activities of designers and other stakeholders during RE [13, 58]. Indeed, for this very reason, case studies have been a favored approach for empirical work in RE research [17, 45], and several scholars have employed case studies in their attempts to generate rich theory-yielding insights about RE processes and outcomes [64].
Therefore, we conducted an exploratory case study of RE-related cognition within a development team focusing on the modification and implementation of a large enterprise resource planning (ERP) system at a mid-size university in the Midwestern U.S. The case inquiry was conducted in accordance with prevailing case study field procedures, including the development of a case study protocol prior to data collection, triangulation using multiple sources of evidence, and the maintenance of a chain of evidence [58]. The data collection included interviews, direct observation of project interactions, and documentary review (e.g., specification documents, customization requests, business process models, design mock-ups). Interview transcripts and observational field notes were coded using Atlas.ti. The coding centered on a thematic analysis of the data [6] and was conducted in conformance with principles of grounded theory [15, 51], including constant comparison and open, axial, and selective coding. Our approach differs from some interpretations of grounded theory in that the final analysis was informed by constructs from RE research, such as goal differentiation, and the DCog theory.
4.2. Case Summary: University SIS Project
In 2006, a mid-sized Midwestern U.S. university initiated the acquisition, customization, and implementation of the PeopleSoft Student Information System (SIS) ERP. The SIS Project was intended to integrate all student information and student-facing administrative functions across the university’s nine distinct schools. Key functions supported by the envisioned platform included admissions, financial aid, course selection and enrollment, grading, degree tracking, and transcript management. The initial roll-out of the system was completed in fall 2008, with additional functionality rolled out over the course of the subsequent academic year. The installation of the SIS platform was considered a successful effort, including the management of platform requirements.
The organization is a mid-size private university. The university serves nearly 10,000 students (4,200 undergraduate, 2,200 graduate, and 3,500 professional students) across seven distinct schools. Traditionally, each school managed its own student records, with some aggregation of basic student information in the university’s legacy student information system. Different administrative functions were managed using a collection of distinct software applications. The SIS Project was undertaken in an effort to integrate various student-related data sources and functions across the entire university.
The SIS was the third phase of a broader ERP installation program. The university had selected Oracle’s PeopleSoft platform as the ERP package. In 2005 and 2006, the university had rolled out two installations of the platform, covering the Financial and Human Capital Management components. The SIS was the final major installation necessary for the achievement of a comprehensive enterprise-level information system serving the university.
4.3. Simulation
In the second phase of the research, we built upon the case analysis findings to analyze the interplay of various goal types and the RE-oriented activities of the SIS project team. Specifically, we created goal-oriented models of the SIS development process using an Eclipse-based i* modeling tool, jUCMNav [40, 47]. These models incorporated the requirements processes identified in the case analysis as well as the goal taxonomy developed from the initial analysis. The jUCMNav tool was used to create a model of mutual dependencies between requirements tasks and identified goals. The models were then used to conduct a series of simulations to assess the impact of variable execution of requirements tasks on goals at varying levels.
4.4. Goal Model Evaluation
Simulating the evaluation of goal models involves four steps. The first step is to specify goal models from common perspectives [1]. We did so for general project goals, SIS-specific project goals, requirements engineering goals, and distributed cognition goals. A portion of the distributed cognition goals model is shown in Figure 1. (With regard to goal modeling, you may ignore the tasks that are associated with the goals at the bottom of the figures.) Each model includes goals and relationships as identified in the literature.
Although incomplete, the models represent the most relevant goals for the problem.
Each oval represents a softgoal, while the links represent contributions that are positive (+) or negative (-) to the satisfaction of the goal at the arrowhead. Goal satisfaction is calculated by propagating values through the goal graph according to the specified node satisfaction value and the weighted contributions [1].
The models include quantitative weights that indicate the contributions goals make to each other. We chose to model the subgoals of each goal as totaling a 100 percent contribution, which allows us to analyze the relative influence of goals and their realized tasks. This implies a complete decomposition; however, our model is incomplete, so when a new contributing goal is added, the contributions are modified to re-establish their aggregation to 100 percent.
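To make the propagation concrete, the following Python sketch evaluates a root softgoal from weighted contributions; the graph, weights, and goal names are invented for illustration and are not the paper's actual SIS model:

```python
# Weighted goal-satisfaction propagation over a goal graph (illustrative;
# the goals and weights below are invented, not the SIS model's values).
def evaluate(goal, contributions, leaf_values, cache=None):
    """Satisfaction (0-100) of `goal`; weights per goal sum to 1.0."""
    if cache is None:
        cache = {}
    if goal in cache:
        return cache[goal]
    if goal not in contributions:        # a task: included (100) or omitted (0)
        value = leaf_values.get(goal, 0)
    else:
        value = sum(w * evaluate(sub, contributions, leaf_values, cache)
                    for sub, w in contributions[goal])
    cache[goal] = value
    return value

contributions = {
    "System adoption": [("Reqs. adequacy", 0.6), ("Training", 0.4)],
    "Reqs. adequacy":  [("Design review", 0.5), ("Walkthrough", 0.5)],
}
leaf_values = {"Training": 100, "Walkthrough": 100, "Design review": 0}
evaluate("System adoption", contributions, leaf_values)  # 70.0
```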
The project specific (SIS) model provides a place to add goals that are not common to most projects. Thus, the modeling is a combination of:
1. Creating a project specific (SIS) model for project specific goals.
2. Linking the project specific goals to the existing (pre-defined) goal models
3. Adding project specific tasks and linking them to the new project goal model and the existing (pre-defined) goal models
The second step is to specify goal models for the problem. We did so for the SIS project. The model consists of a goal model and a task model. The third step is to specify scenarios for the problem. The variable tasks that we considered are based on observations of the SIS case. These tasks are associated with the goal models. The final step is to evaluate the impact that each scenario has on the goal models. We take this up in the following section on Findings.
5. Findings
Given the two-phase structure of the research effort, we report the findings for each phase separately. As noted above, the design and execution of the simulation phase of the research built upon the findings from the case analysis phase.
5.1. SIS Project RE Activities
The University SIS project supported a number of findings regarding both the nature of the RE tasks pursued and the goals implicit in the processes. The SIS project reflected both higher-level design processes focused on the discovery, specification, and validation of project requirements and lower-level tasks variably employed within the broader RE-oriented processes. At the higher-level, the project employed a four-stage process for progressive elaboration of user requirements. Importantly, our findings revealed that the processes were not executed in a universal manner – i.e., some RE-oriented activities were omitted or bypassed at various times. This variable execution of tasks is relevant for our later simulation of RE outcomes.
Interactive design and prototyping. The initial effort at requirements discovery in the SIS project was called the Interactive Design and Prototyping (IDP) process. The IDP process sought to inform key stakeholders about the functionality of PeopleSoft and to elicit statements of need for customization or modification. Thus, IDP was at its core a gap analysis. The IDP process consisted of JAD-style focus group discussions scheduled with each of the over 100 functional offices on campus. The IDP sessions included the project leadership, functional area leads, and technical experts, and focused on the input of office personnel regarding the appropriateness of the PeopleSoft system for their business functions. The result of each session was the articulation of desired modifications.
Interactive engagement with users. While not formally labeled by the project team, the second core RE task focused on iterative discussion between project functional leads/consultants and user representatives for distinct business units or schools. We have labeled this process Iterative Engagement with Users (IEU). The IEU discussions centered on review of the document developed as part of the IDP process and discussion of specific functional modifications desired by the users. As an outcome of the IEU process, the functional leads/consultants developed a Preliminary Specification Document and submitted it for review and validation by the users. As the name implies, the IEU process was repeated until users felt that their desired modifications were appropriately captured.
Structured walkthrough. Consensus around specifications and change requests on the part of the project team members was achieved through the third RE task, Structured Walkthrough. The walkthroughs were attended by the leadership of the project team, including the Project Director; Functional, Technical, and Project Management Leads; the consulting Project Manager and lead functional and technical consultants; and training team representatives. No users, functional SMEs, or technical experts were in attendance. During the walkthroughs, a specification developer would guide the participants through a detailed discussion of a requested change. Questions were raised and debated by the entire project team. The walkthroughs generally resulted in one of three outcomes: 1) the specification was accepted and the Technical Lead took responsibility for scheduling modifications, 2) the discussion raised sufficient problems with the current status of the specification so that a decision was made to revise the specification, or 3) the specification was tabled for later discussion.
**Design review.** The final core RE task employed on the SIS project was the Design Review. In a Design Review, a technical developer or consultant met with user representatives to review a proposed resolution. Generally, this task centered on review of a solution prototype that the developer had created based on the specification accepted by the project team leadership. If users were satisfied with the resolution, the developers would proceed to final implementation of the modification. Conversely, if users desired additional changes to the proposed resolution, the developer pursued additional prototyping of the solution until satisfaction was achieved. Importantly, of all the RE tasks outlined, Design Review was the most variable, with the option of prototyping and review left largely to the discretion of individual developers.
**Ancillary RE Tasks.** In addition to the four high-level RE activities, the SIS project entailed several detail-level RE tasks that were again variably executed over the course of requirements determination. Observed lower-level tasks included the following:
- **Business Process Modeling:** The development of business process models for distinct schools or individual business units. When executed, the business process modeling was generally associated with the IDP process, and intended to support an understanding of a business unit’s current state.
- **Scenario Development:** The generation of multiple scenarios for design modifications. This task was most commonly observed in the structured walkthrough process, and provided a mechanism for the design team to explore users’ stated requirements at a deeper level.
- **Mock-ups:** The creation of mock-ups or “throw-away” prototypes to illustrate modification options. This rapid prototyping was generally employed as a requirements validation technique and most frequently associated with the Design Review process.
As noted above, all of these RE activities (both higher-level formal processes and detailed tasks) were variably executed on the SIS project. We did not observe any “hard” rules for when a given activity would or would not be executed; rather, the execution of RE tasks appeared largely to reflect individual preferences or design expertise.
### 5.2. A Goal Taxonomy for the SIS Project
In addition to illustrating the different types of RE tasks executed, the analysis of the SIS case revealed the distinct categories of goals that were relevant to the design effort. Specifically, we identified four distinct categories of goals within the SIS project:
- **Common project goals:** This class of goals represented project objectives that are relevant for almost all IT implementation projects. These goals largely relate to the project management triple constraint of time, cost, and quality/functionality.
- **Idiosyncratic project goals:** In this class of goals, we identified objectives that appear to be specific to the SIS project or projects of a similar focus.
- **RE goals:** These are goals associated with commonly-held measures of requirements quality.
- **DCog Goals:** Perhaps most critically for the present analysis, we identified a number of goals that are implied by the application of DCog theory. These are characteristics of a cognitive system that will support system effectiveness and robustness, ensuring that the socio-technical system (i.e., people and supporting artifacts) can react to changing conditions and reconfigure its computational structure when necessary (e.g., if a given individual or artifact is removed).
A summary of the resulting goal taxonomy is provided in Table 1.
#### Table 1. Summary of SIS Goal Taxonomy

| Goals | Descriptions |
| --- | --- |
| **Common Project Goals** | |
| System adoption | Ensuring that users accept and use the functionality provided in the system |
| Minimize duration | Seeking adherence to project timelines and positive schedule variance |
| Maximize implemented functionality | Implementing as much system functionality as possible within time and budgetary constraints |
| Minimize project costs | Managing the project budget to ensure cost-effective implementation |
| Accuracy of status reporting | Keeping executive management informed about the status of the project |
| Supporting collaboration | Ensuring effective collaborative work among project team members |
| **Idiosyncratic Project Goals** | |
| Minimize platform modifications | Keeping modifications of the platform to the minimum required for desired functional support |
| Training effectiveness | Ensuring that users were adequately trained for system use |
| Ensure integration | Achieving data integration between the vendor platform and legacy systems |
| Minimize business process change | Limiting business process changes to those that were absolutely necessary for effective system use |
| **RE Goals** | |
| Completeness | Ensuring that all substantial requirements are identified and addressed in the design |
| Consistency | Ensuring that requirements did not conflict with one another |
| Adequacy | Ensuring that requirements will meet the information needs of stakeholders |
| Clarity | Avoiding ambiguous requirements (i.e., competing interpretations) |
| Correctness | Ensuring that stated requirements actually reflect the intent of users |
| Traceability | Ensuring that requirements can be traced to both relevant business objectives and designed features |
| **DCog Goals** | |
| Maintaining common knowledge | Creating common understanding of system requirements and business processes; knowledge redundancy |
| Clarity of processes | Ensuring that project team members know the processes for requirements identification and incorporation |
| Transparency of action | Enabling team members to “see” what other members of the system are doing |
| Common language | Reinforcing shared mechanisms for communicating requirements |
| Temporal distribution | Embedding requirements knowledge in artifacts; requirements reuse |
### 5.3. Simulation Results
Having established a goal taxonomy for the SIS project, we used the jUCMNav tool to create an i* model of the SIS project. The resulting i* models incorporated the goals identified, their inter-relationship, and their impact on the RE-oriented tasks executed on the project (see Sections 4.4. and 5.1.). Importantly, the tool also enabled us to model the relationships between the goals themselves. While length restrictions prohibit a full presentation of the models generated, Figure 1 presents a portion of the distributed cognitive goals model for illustration.
In addition to modeling the relationships between goals, the jUCMNav tool enabled goal model evaluation based on the four-step process outlined in Section 4.4. For the simulation exercise, we focused on the RE-tasks observed to be most variable in the SIS case: design review, mock-up generation, business process modeling, and individual specification review for structured walkthroughs.
Here we evaluate the impact that each scenario has on the goal models. Table 2 summarizes the values for the root nodes of the three goal models, indicating how much the perspectives are satisfied by each scenario. (Note: The SIS-specific goal model links into the other three models, so the evaluation is reflected in the three other models). Figure 1 illustrates the DCog goal model for one scenario (No Design Review). The value of the goal analysis is in the relative impact that different scenarios have on goal satisfaction.
In reviewing Table 2, each row represents a scenario. In the first row, all tasks are included in the development process, while the last row represents no tasks in the development process. The intervening rows show results for the other scenarios evaluated. The scenario “Except Design Review” includes all tasks except the design review task. Notice that its average evaluation of the three root nodes is 58%, which is the lowest average evaluation. Thus, this scenario has the greatest impact on the development process, according to the goal models.
**Table 2. Goal satisfaction values of scenarios**
| Scenarios | System Adoption <<PM>> | Maintain Common Knowl. <<DC>> | Reqs. adequacy <<RE>> | Avg. |
| --- | --- | --- | --- | --- |
| All | 100% | 100% | 100% | 100% |
| Except Design Review | 46% | 75% | 52% | 58% |
| Except BPM | 100% | 91% | 88% | 93% |
| Except Ind. Spec. Review | 100% | 92% | 95% | 96% |
| Except Scenarios | 100% | 92% | 95% | 96% |
| Except UI Mockups | 100% | 98% | 97% | 98% |
| No Tasks | 0% | 0% | 0% | 0% |
6. Discussion and Conclusions
This research presents the initiation of a broader program of study focusing on the role of distributed cognitive processes in the practice of contemporary information systems design. While the study represents a proof-of-concept around the modeling of distributed cognitive dynamics in formal goal models, we believe it suggests several significant contributions to RE research and practice.
First, the study combines the empirical insights of *in situ* case analysis with the simulation and goal evaluation capabilities of i* modeling. In this way, the study illustrates the potential for reorienting RE research from a purely prescriptive outlook to one grounded in the experiences of practicing IS designers.
Second, the research extends the theory of distributed cognition through a focus on the practical design principles (i.e., DCog goals) that can be derived from the theory’s application as an analytical tool. By applying the theory to an existing IS design context and deriving distinct goals implied by its perspective on socio-technical cognitive systems, we have generated a series of preliminary concepts for subsequent IS development process design and a mechanism for evaluation of their relative efficacy.
Third, the research calls attention to the value of analyzing RE as a socio-technical process which must be approached with an eye to the intricate web of interactions between diverse social actors and the artifacts which they employ. This systems-oriented perspective offers us insights for both addressing persistent challenges to effective RE and capitalizing on opportunities for greater innovation and design breakthroughs.
Finally, the combined case analysis and goal-oriented modeling approach creates a common basis for evaluation of distinct IS development methods. The analysis and modeling process outlined here may be extended to the evaluation of emergent approaches, such as OSSD and agile development. In particular, we are interested in modeling the different computational structures that are implied by these diverse approaches to IS design.
8. References
|
{"Source-Url": "https://jyx.jyu.fi/bitstream/handle/123456789/80162/Computing_Requirements_Cognitive_Approaches_to_Distributed_Requirements_Engineering.pdf;jsessionid=23997F0C6DF235160B6390E2CB099D3F?sequence=1", "len_cl100k_base": 7038, "olmocr-version": "0.1.50", "pdf-total-pages": 11, "total-fallback-pages": 0, "total-input-tokens": 35005, "total-output-tokens": 11138, "length": "2e12", "weborganizer": {"__label__adult": 0.0005197525024414062, "__label__art_design": 0.0009751319885253906, "__label__crime_law": 0.00035881996154785156, "__label__education_jobs": 0.00850677490234375, "__label__entertainment": 0.00012302398681640625, "__label__fashion_beauty": 0.0002872943878173828, "__label__finance_business": 0.0009899139404296875, "__label__food_dining": 0.0004508495330810547, "__label__games": 0.0009326934814453124, "__label__hardware": 0.0007677078247070312, "__label__health": 0.0008096694946289062, "__label__history": 0.0004944801330566406, "__label__home_hobbies": 0.0001461505889892578, "__label__industrial": 0.0006461143493652344, "__label__literature": 0.000885009765625, "__label__politics": 0.00033020973205566406, "__label__religion": 0.0006594657897949219, "__label__science_tech": 0.061279296875, "__label__social_life": 0.00019371509552001953, "__label__software": 0.0080718994140625, "__label__software_dev": 0.9111328125, "__label__sports_fitness": 0.0003616809844970703, "__label__transportation": 0.0006785392761230469, "__label__travel": 0.0002446174621582031}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 48296, 0.03304]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 48296, 0.25498]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 48296, 0.90232]], "google_gemma-3-12b-it_contains_pii": [[0, 0, null], [0, 4093, false], [4093, 9300, null], [9300, 14696, null], [14696, 19727, null], [19727, 24860, null], [24860, 30272, null], [30272, 34715, null], [34715, 39536, null], [39536, 44168, null], [44168, 48296, null]], "google_gemma-3-12b-it_is_public_document": [[0, 0, null], [0, 4093, true], [4093, 9300, null], [9300, 14696, null], [14696, 19727, null], [19727, 24860, null], [24860, 30272, null], [30272, 34715, null], [34715, 39536, null], [39536, 44168, null], [44168, 48296, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 48296, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 48296, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 48296, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 48296, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 48296, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 48296, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 48296, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 48296, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 48296, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 48296, null]], "pdf_page_numbers": [[0, 0, 1], [0, 4093, 2], [4093, 9300, 3], [9300, 14696, 4], [14696, 19727, 5], [19727, 24860, 6], [24860, 30272, 7], [30272, 34715, 8], [34715, 39536, 9], [39536, 44168, 10], [44168, 48296, 11]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": 
[[0, 48296, 0.16923]]}
|
olmocr_science_pdfs
|
2024-11-29
|
2024-11-29
|
e705d929ad564a87857959dfc122c5a3ce96b567
|
Internship Report
New Delhi - India
Writer: Jens Langner (10895)
Supervisor: Prof. Dr.-Ing. Koch
Period: August 2000 - February 2001
Contents
1 Introduction
1.1 Where I did the internship
1.2 Why abroad?
1.3 The company I worked for
2 Responsibilities
2.1 Network and system administrator
2.2 Java developer
3 Network administration
3.1 Network planning & installation
3.2 Internet connectivity
3.3 Administration
3.3.1 Network administration
3.3.2 Server administration
3.3.3 System administration
4 Java development
4.1 The project
4.2 The procurement process
4.3 Approval process
5 Summary
1 Introduction
1.1 Where I did the internship
As part of the degree program at the University of Applied Sciences in Dresden, every student has to complete a half-year internship at a company of his or her choice.
Working for newtron AG Dresden since a year now, gave me the chance to strengthen my experiences in the fields of network administration and JaVA programming in a foreign country.
newtron AG, a one years old e-commerce company located at Dresden, was actually in the progress in founding a new branch in New Delhi, India when I asked if they are able to send students abroad. To strengthen my english and computer science skills in mind I was offered to help newtron AG building up their branch office in india in the fields of network planning and administration.
1.2 Why abroad?
As I have always been interested in foreign cultures and countries, it was clear to me that as soon as I reached the term of my internship I wanted to apply for a position abroad. India was a very good opportunity for me to improve my English by working together with English-speaking people.
As India has a famous culture and way of living, I was also very interested in gaining a deeper knowledge of this culture and of how the people live and work there. The fact that India is quite prominent in the field of computer science also strengthened my decision to do my internship abroad.
1.3 The company I worked for
newtron IT Labs Pvt. Ltd., located in the southern part of New Delhi (Gurgaon), was founded just days before I arrived in India. It is a branch of newtron AG, Dresden, for whom I have been working since October 1999. newtron AG Dresden was founded in October 1999 as a B2B eCommerce company and maintains several internet portals for B2B auctions & trading.
2 Responsibilities
In this chapter I would like to give a short overview of the different responsibilities I had in India and of the fields in which I strengthened my knowledge.
2.1 Network and system administrator
Having worked as an independent network administrator for several companies since 1993, I joined newtron AG in late 1999 as a consultant. As newtron AG was also founded in 1999, I was heavily involved in the general network planning and setup of all other branches (Dresden, Frankfurt, Singapore, Vienna).
With this experience in mind, newtron AG was very pleased that I was interested in working in India. In coordination with Germany, I was responsible for the network planning and installation of the Indian office. Setting up the whole server park was also one of my responsibilities there.
As my half-year internship progressed, it also became my responsibility to hire and train an Indian network administrator, so that after my internship this administrator would be able to take over my work.
Working as the main network administrator for the Indian office was thus the main part of my internship, because I brought eight years of experience in networks and communications into this responsibility.
2.2 Java developer
Besides the job as a network administrator, I was also interested in strengthening my knowledge in the fields of object-oriented programming & design, and I finally got the chance to get involved in newtron's new main project in India. As newtron develops only Java-based technology, this was my opportunity to learn Java in a real software project.
I got involved in a brand-new project which newtron was starting in India. It is called "newtron Procurement Desk" and is an intranet-based procurement solution for MRO goods.
MRO goods are all types of goods in a procurement process whose price is lower than the cost of the process they cause.
It was one of my responsibilities to design and program one module of this software project and also to train an Indian developer to take over this module after my return to Germany.
3 Network administration
As pointed out in earlier chapters, my main responsibility was to plan and administrate the network in the new office in New Delhi. In this chapter I want to go into some details of my daily work in Delhi and the experiences I made there.
3.1 Network planning & installation
Starting from the first week after my arrival in India, I was responsible for planning the internal network (LAN) in our office. I searched for companies who were able to do the wiring of the office rooms as fast as possible with quality in mind.
I chose a 100 MBit Twisted Pair (TP) network for connecting the computers. Besides approaching different companies for the network installation, it was my job to specify the server architecture that should be used in the office.
As only software development takes place in the Indian office, it was only required to acquire an office solution server as a PDC and file sharing server. After some deeper market analysis I chose an NT 4 server from Hewlett-Packard with a 30 GB RAID-5 system and 512 MB RAM.
This server's only job was to serve the developers as an Exchange, file sharing and Primary Domain Controller server.
The internet connectivity also required a separate server as an internet gateway. As I had configured and designed the networks of the other branches as well, it was clear that the network structure in India should meet the same requirements. This led to the decision to have a separate Linux server as the primary internet gateway and as a secondary firewall system behind the hardware firewall which I brought from Germany.
Later on we also received a Sun Solaris server as the primary development database server for India. This decision was made because the software our Indian colleagues were developing would be sold to run on an Oracle database, which normally runs on Solaris. So it was also my responsibility to configure and install this server in India.
3.2 Internet connectivity
As the main purpose of the Indian office is software development and not internet-based services, I decided to search for an internet provider in that region that could offer us a leased line with a bandwidth of at least 512 kbps. In the first weeks I quickly recognized that there are only few internet service providers (ISPs) in India, especially in the region where the office was operating.
The problem is that India is still in the process of allowing private ISPs to operate, and the government still runs the largest ISP. Apart from the infrastructure problems, this ISP is not able to provide a fast connection to Europe, which was absolutely necessary for newtron because of the VPN connections between the different offices.
Another problem is that the whole of India has only one deep-sea connection, which goes directly to the US, and none to Europe. Direct connections to Europe were only possible through satellite links, which result in a minimum response time of 500 ms because of the long distance between the satellite and Earth.
The first offer from an ISP came at the start of August 2000, and after some first tests and discussions newtron and the new ISP (delDSL) negotiated a one-year contract. delDSL was a newly founded company from Delhi which focused especially on the region where we had our office (Gurgaon). The connectivity from their gateway to our office was provided through a direct microwave link. This was necessary because the infrastructure in Delhi suffers a lot and it was not possible for them to guarantee a stable connection over normal land lines.
After installing this microwave link we were able to access their microwave hub via a 512 kbps SDSL connection. Over a period of half a year we discovered many problems with that provider, because they were not able to provide 24/7 connectivity. So after receiving another offer from an ISP (spectranet) we decided to switch to this provider, because they were also able to provide us with a deep-sea connection to the US. This gave us a response time of around 300 ms, which was necessary for our direct VPN connection to Germany, which we used for database connections.
In general I have to say that all ISPs in Delhi suffer from the bad infrastructure there. The cause is not only the missing land line connections, but also the unstructured work of different organisations, where it can easily happen that one company cuts another company's connection, and not always by accident. The very bad power situation in Delhi was also the cause of many internet cut-offs during the first three months. Sometimes our office was completely closed because of a long power breakdown. This power situation is also the reason why the whole office is connected via uninterruptible power supplies (UPS), whereas normally only servers are connected to such supplies.
3.3 Administration
3.3.1 Network administration
In the first three months of my internship I was very busy setting up the whole network, including the server connectivity. This included the network and server planning along with acquiring all necessary network hardware. Another daily job was to check the performance of the network and to fix problems where users could not access different resources.
Because of the bad infrastructure in Delhi I was frequently busy calling our local ISP (delDSL) because of internet breakdowns and bad connections. The installation and configuration of the firewall systems in the Indian office was also part of my job there.
Another main responsibility was the configuration of our virtual private network (VPN) connection to Germany. As newtron runs VPN connections to all other branches, it was also required to set up a direct VPN connection to our German branch. With this VPN connection we were able to access the newtron-wide intranet, which includes newtron-internal information and contacts.
As I had set up the VPN connections in all other branches as well, my experience in this field was quite helpful. It took me only several hours to completely connect our Indian branch to the newtron VPN\(^1\).
3.3.2 Server administration
As described earlier, I also acquired the servers for the office in Delhi, so administrating and installing these servers was my responsibility as well. This included the complete installation of Windows NT 4 Server as a PDC and MS Exchange 5.5, as well as configuring it as a normal file sharing server.
Besides this NT-based office solution server, I installed and administrated a Linux server whose main task was to serve the users as an internet gateway. As I have been administrating Linux since 1996, it took me only a few hours to get this Linux server running with all needed network connections. This server also acts as a development version control server. As newtron uses CVS (Concurrent Versions System) as the revision control system for development, and as I am the main CVS administrator of newtron, it was also my responsibility to install and configure the Linux server in India as the second main CVS server. newtron has two main CVS servers, and it was also my job to make these two servers synchronise their source code once a day. I did this via several shell scripts that are run via a cronjob.
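To illustrate the kind of synchronisation script involved (the actual scripts are not reproduced in this report), here is a minimal sketch; the host name cvs-dresden, the repository path and the use of rsync are my assumptions for illustration, not the original implementation:

    #!/bin/bash
    # Hypothetical nightly CVS repository sync (illustrative sketch only).
    REMOTE="cvs-dresden"          # assumed name of the peer CVS server
    REPO="/var/cvsroot"           # assumed repository location on both hosts
    LOG="/var/log/cvs-sync.log"
    {
      echo "=== CVS sync started: $(date) ==="
      # Mirror the peer's repository files into the local one.
      rsync -az --delete "${REMOTE}:${REPO}/" "${REPO}/"
      echo "=== CVS sync finished: $(date) ==="
    } >> "$LOG" 2>&1

A crontab entry such as "0 3 * * * /usr/local/bin/cvs-sync.sh" would run such a script once a day, matching the daily schedule described above.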
This server is also responsible for acting as a second firewall system behind the main hardware firewall which is installed in every branch of newtron. The firewall on the Linux server is an ipchains-based software firewall which only restricts access from outside. The server has two network cards, one directly connected to the internal network (LAN) and the other directly to the hardware firewall. With this two-firewall solution, which every newtron branch has, the office in India is also well protected against attacks. In addition, the server is configured to serve the workstations as a transparent web proxy/cache, so that every HTTP request is cached on the local drive, which results in faster transfers from the internet. This Linux server is also configured to act as a Samba file sharing server.
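As a rough illustration of such an ipchains rule set (the actual configuration is not part of this report), the following sketch denies incoming traffic by default and trusts the internal interface; the interface names and the specific rules are assumptions for illustration only:

    #!/bin/bash
    # Hypothetical ipchains rules (illustrative sketch, not the original rule set).
    # Assumed: eth0 faces the hardware firewall, eth1 the internal LAN.
    ipchains -F input                     # flush existing input rules
    ipchains -P input DENY                # default policy: deny incoming packets
    ipchains -A input -i lo -j ACCEPT     # allow loopback traffic
    ipchains -A input -i eth1 -j ACCEPT   # trust the internal LAN interface
    # Allow replies to outbound TCP connections (non-SYN packets only).
    ipchains -A input -i eth0 -p tcp ! -y -j ACCEPT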
Another server I was responsible for is the Sun Solaris based Ultra 10L server, which serves the development team as the main Oracle database server. After a complete installation and configuration of Solaris 7 & Oracle 8, this server acts as a development platform for testing the database connectivity of the software newtron is developing in India.
---
\(^1\)VPN: Virtual Private Network, a fully encrypted transparent layer for private communication over the internet.
To achieve all of the above, it was also necessary to create some enhancements and scripts for the servers, for which I mainly used Perl and bash.
3.3.3 System administration
Besides the servers, I had to administrate, acquire and configure all workstations in the office in Delhi. As we had good contacts with Hewlett-Packard India, we ordered a total of 30 workstations with P-III 700 MHz CPUs, 17" monitors and 128 MB RAM as development stations.
These workstations came preinstalled with Windows 98, but we needed Windows 2000 on all clients, so I created an image with Norton Ghost and installed it on every client. I also had to install several software packages (e.g. Borland JBuilder 3) to give the developers the ability to work properly with their computers.
Helping the developers with daily problems with their workstations was also my responsibility. In fact, I was the representative for all technical questions concerning newtron IT Labs, India.
4 Java development
In the second part of my internship I was also involved in a new software project that newtron IT Labs had started in India. As newtron AG, the founder of newtron IT Labs, works on eBusiness web applications for companies, the new project was an intranet-based web application that gives companies the ability to automate their procurement processes via an intranet software solution.
4.1 The project
"newtron Procurement Desk" is a web application based on the internal web framework of newtron AG. This procurement solution should provide companies with a cheap and easy way to handle all the different types of transactions in a procurement process via a web-based software solution located in the intranet of the company, so that every employee can access it directly from his desk without having to install any special software apart from a web browser.
There exist many other procurement solutions on the market, but the newtron Procurement Desk focuses on the low-cost market, where it specializes in the MRO market.
4.2 The procurement process
To give you a deeper view into the newtron Procurement Desk project, you should have a good overview of the traditional procurement process and how it still happens in many companies.
In former days companies had one central procurement department where all requisitions came in, and this was also the central point where information got stuck. Normally every employee had to go to his boss if he needed some material for his job, and his boss could either accept or reject the requisition. If his boss approved the requisition, it was transferred to the purchase department. All this was mainly based on manual interaction and communication.
This is where such a procurement solution comes in. As the traditional procurement process explained above is mainly a manual process, it should also be clear that such a process costs a lot of administrative money. The idea of eProcurement solutions is based on that fact, because with such an electronic equivalent a company has the ability to automate this process and reduce its costs.
4.3 Approval process
In a procurement process, approval is one of the most important modules, because it is very important that requisitions get approved correctly and that the processes are built exactly upon the hierarchical structure of the company.
In my internship it was my responsibility to design this part of the "newtron Procurement Desk" and to make sure that it would be well developed before I left India.
The development of this approval module had several different steps, especially because newtron uses object-oriented software engineering tools like ObjectiF.
In the first step I had to analyse the requirements and the features needed for such an approval engine. This phase took me about two weeks, during which I created a separate document that I am going to attach to this report. After this first analysis I had to design the workflows with the help of ObjectiF and UML\(^1\). These workflows are also described in a non-UML manner in the attached document.
As in every other software engineering process, I also had to design the classes in UML. Normally ObjectiF could also be used to automatically create the needed source code from the class design, but as ObjectiF and every other software engineering tool falls short here, I had to create the classes and their corresponding methods manually. But I
\(^1\)UML: Unified Modelling Language
was also quite happy to do this, because that way I had no problems strengthening my Java knowledge.
As the newtron framework also supplies a separate database part, I was also responsible for the database design used by the approval module. This database design is described in further detail in the attached document.
Not only is the database framework of newtron a powerful tool; the framework also includes its own web service, which supports features like load balancing and caching of frequently used parts.
One can compare the newtron framework with public alternatives like JSP/ASP\(^2\). Learning more about such a web/database framework gave me the opportunity to compare these framework technologies myself.
As I work for newtron under a non-disclosure agreement, I am sorry to say that it is not possible to show any part of the source code or give any detailed information about the source of the approval module I was responsible for. But to give you a better overview of the approval module itself, I hope that the attached specification, which I wrote in November 2000, will be enough for you to understand what this is all about.
\(^2\)JSP/ASP: Java Server Pages / Active Server Pages
5 Summary
In summary, I have to say that the experiences I made during the internship in India were very interesting, but sometimes also quite shocking.
Besides the experience as a software engineer and network administrator, I had the chance to live in a completely different culture for six months, which was totally different from the ideas I had before I arrived in India.
Not only was the experience of working together with Indian software developers different from my expectations; the whole life as a European there, with all the very poor people around me, was also very different and hard.
I also often worked around 12-14 hours a day, including weekends, because there are only very few places where you can go as a European to spend your spare time.
But overall I would still say that it was very interesting to make this experience and to work together with Indian software developers, even if they are not as good as I had heard and thought before.
|
{"Source-Url": "https://jens-maus.de/ftp/IReport.pdf", "len_cl100k_base": 4116, "olmocr-version": "0.1.50", "pdf-total-pages": 13, "total-fallback-pages": 0, "total-input-tokens": 21698, "total-output-tokens": 4677, "length": "2e12", "weborganizer": {"__label__adult": 0.001758575439453125, "__label__art_design": 0.0012969970703125, "__label__crime_law": 0.001537322998046875, "__label__education_jobs": 0.4111328125, "__label__entertainment": 0.0002913475036621094, "__label__fashion_beauty": 0.0008039474487304688, "__label__finance_business": 0.0044097900390625, "__label__food_dining": 0.0010576248168945312, "__label__games": 0.0015306472778320312, "__label__hardware": 0.0016775131225585938, "__label__health": 0.000926971435546875, "__label__history": 0.0010728836059570312, "__label__home_hobbies": 0.0006546974182128906, "__label__industrial": 0.0012216567993164062, "__label__literature": 0.0011339187622070312, "__label__politics": 0.0006999969482421875, "__label__religion": 0.0012798309326171875, "__label__science_tech": 0.004322052001953125, "__label__social_life": 0.0026683807373046875, "__label__software": 0.0066986083984375, "__label__software_dev": 0.548828125, "__label__sports_fitness": 0.0012788772583007812, "__label__transportation": 0.002140045166015625, "__label__travel": 0.0013513565063476562}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 20757, 0.02979]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 20757, 0.02294]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 20757, 0.97113]], "google_gemma-3-12b-it_contains_pii": [[0, 136, false], [136, 1429, null], [1429, 3204, null], [3204, 5294, null], [5294, 7708, null], [7708, 10846, null], [10846, 13960, null], [13960, 14964, null], [14964, 16254, null], [16254, 17157, null], [17157, 18518, null], [18518, 19780, null], [19780, 20757, null]], "google_gemma-3-12b-it_is_public_document": [[0, 136, true], [136, 1429, null], [1429, 3204, null], [3204, 5294, null], [5294, 7708, null], [7708, 10846, null], [10846, 13960, null], [13960, 14964, null], [14964, 16254, null], [16254, 17157, null], [17157, 18518, null], [18518, 19780, null], [19780, 20757, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, false], [5000, 20757, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 20757, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 20757, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 20757, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 20757, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 20757, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 20757, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 20757, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 20757, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 20757, null]], "pdf_page_numbers": [[0, 136, 1], [136, 1429, 2], [1429, 3204, 3], [3204, 5294, 4], [5294, 7708, 5], [7708, 10846, 6], [10846, 13960, 7], [13960, 14964, 8], [14964, 16254, 9], [16254, 17157, 10], [17157, 18518, 11], [18518, 19780, 12], [19780, 20757, 13]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 20757, 0.0]]}
|
olmocr_science_pdfs
|
2024-11-30
|
2024-11-30
|
b5ada35520923584bb43c8cd85a28dde4eea2fec
|
Quiz 1
Solutions to Sample Problems
Problem 1 What will Scheme print in response to the following statements? Assume that they are each evaluated in order in a single Scheme buffer. Write your answer below each statement. You may write "procedure" if a procedure [object] would be returned, or "error" if an error message would be returned.
To generate the solutions for this problem, we typed each expression to the Scheme interpreter (yes, yes, that’s cheating, but we wanted to make sure we had the Right Answer). Anywhere the interpreter returned a “[compound procedure …]” expression, we accepted the answer “procedure,” and anywhere an error was triggered, we accepted “error.” The prefix “;Value: ” was not required in your answer.
```
(define x 2)
;Value: "x --> 2" (read: “x is bound to 2”; the answer “x” was also accepted)
x
;Value: 2
(x)
;The object 2 is not applicable
;Type D to debug error, Q to quit back to REP loop:
(define (y) (* x 2))
;Value: "y --> #[compound-procedure 2 y]"
y
;Value: #[compound-procedure 2 y]
(y)
;Value: 4
```
(define (a) (lambda (x) (+ x 1)))
;Value: "a --> #[compound-procedure 3 a]"
a
;Value: #[compound-procedure 3 a]
(a)
;Value: #[compound-procedure 4]
(let ((x 1)
(y 2)
(z (+ x 4)))
(+ x y z))
;Value: 9
(define (if a b c) (+ a b c))
;SYNTAX: define: redefinition of syntactic keyword if
;Type D to debug error, Q to quit back to REP loop:
Many students erroneously thought that if could be redefined (it cannot be, as it is a special form). This question was not penalized, because it is linked to the question below: if you got this one wrong, you only lost points on the one below.
(if 2 3 4)
;Value: 3
Problem 2 Write a function called new-add which returns the sum of two integers. Do not use the internal functions + and -, but instead use inc (an already defined Scheme procedure which takes one argument and returns the sum of that argument and 1) and dec (a similar procedure which returns the sum of its argument and -1).
All solutions to this problem require an iteration variable counting through one argument (without loss of generality, the first, a); as that variable counts, the other argument (the second, b) is incremented or decremented an equal number of times.
The most elegant solution we could come up with (during discussions with students directly after the exam) is based on the observation that arranging the iteration variable to count from 0 up to a, whether a is positive or negative, allows us to use the same counting operation (inc or dec) on the iteration variable as will be used on the second argument b. We capture the counting operator op inside a let, and write a small helper function to iterate through the range 0 to a.
```
(define (new-add a b)
  (let ((op (if (< 0 a)                ; determine the counting operator
                inc                    ; if going up, use increment
                dec)))                 ; if going down, use decrement
    (define (new-add-helper i sum)
      (if (= i a)                      ; done? yes, return the computed sum
          sum
          (new-add-helper (op i) (op sum)))) ; no, recurse (inc/dec i *and* sum)
    (new-add-helper 0 b)))             ; start running sum with b
```
However, we accepted the following solution (and, like many of you, it was the first one we wrote):
```
(define (new-add a b)
  (cond ((= a 0) b)                    ; return b if a has made it to zero
        ((< a 0)                       ; counting up to 0 (a is negative)?
         (new-add (inc a) (dec b)))    ; yes, increment a, decrement b
        (else                          ; otherwise, counting down to 0
         (new-add (dec a) (inc b)))))  ; so, decrement a, increment b
```
The first solution is advantageous because only one comparison on a is ever made, and it might be considered more elegant all told. The second solution, despite the fact that a is examined for being above/below 0 on every iteration, is perhaps easier to understand.
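As a quick sanity check (these example calls are not from the original handout), either definition should behave like the built-in +:

```
(new-add 3 4)    ; => 7
(new-add -2 5)   ; => 3
(new-add 3 -5)   ; => -2
```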
Is your procedure recursive?
yes
Is your procedure tail-recursive?
yes
Is the process it generates recursive or iterative?
iterative
Problem 3 Assume that we have defined `sum` and `square` as follows:
```scheme
(define (sum term a next b)
(if (> a b) 0
(+ (term a) (sum term (next a) next b))))
(define (square x) (* x x))
```
Determine the order of growth in time for the following functions using Θ notation. (Hint: you will only need to use one or more of the following for your answers: Θ(1), Θ(log n), Θ(n), Θ(n^2) and Θ(2^n). All classes might not be used.) Write your answer in the blanks to the right of each function.
```scheme
(define (integrate f a b dx)
  (sum (lambda (x) (* (f x) dx)) a (lambda (x) (+ x dx)) b))
```
The astute reader would realize that the answer here really depends on the order of f, which has not been specified (bonus points for those who wrote something about that). Our answer assumes that f is a constant-time function.
- Θ(n)
```scheme
(define (number-of-bits-in n)
(if (< n 2) 1 (+ 1 (number-of-bits-in (/ n 2)))))
```
- Θ(log n)
```scheme
(define (times-5 x)
(* 5 x))
```
- Θ(1)
```scheme
(define (exp a b)
(cond ((< b 0) (error "Oops! b cannot be negative"))
((= b 0) 1)
(else (if (odd? b)
(* a (exp a (- b 1)))
(square (exp a (/ b 2)))))))
```
- Θ(log n)
```scheme
(define (sum-of-squares x y)
(+ (square x) (square y)))
```
- Θ(1)
```scheme
(define (triangle-sum n)
(sum (lambda (m) (sum (lambda (x) x) 1 inc m)) 1 inc n))
```
- Θ(n^2)
Problem 4 Write a procedure power-close-to that takes two non-zero positive integers (b and n) as arguments and returns the smallest power of b that is greater than n. That is, it should return the smallest integer i such that b^i > n. You may use the Scheme procedure (expt b i), which raises b to the power i.
Our answer is a standard tail-recursive function which increments a counter i at each iteration, checking whether the desired condition has been met. If so, the counter is returned as the value (not the value (expt b i)); if not, the counter is incremented and recursive execution continues.
A fully-scored answer (with bonus) checked for the degenerate condition when b is 1 (1^i = 1 for all integers i > 0), where the algorithm would otherwise enter an infinite loop.
```
(define (power-close-to b n)
  (define (pct i)
    (if (> (expt b i) n)
        i
        (pct (inc i))))
  (if (= b 1)
      (error "Uh, sorry, there is no answer when b is 1!"))
  (pct 1))
```
To assist in evaluating answers, the following helper procedure was written. It uses the function format, which has not been discussed in class (but is described in the Scheme Info entry!), to format the output in a nice, readable way.
```
(define (check-pct)
  (define (helper n)
    (if (< n 10)
        (let* ((b 2)
               (r (power-close-to b n)))
          (format #t
                  "~A^~A = ~2A and should be greater than ~A~%"
                  b r (expt b r) n)
          (helper (inc n)))))
  (newline)
  (helper 1)
  #t)
```
Correctly running code should produce the following output (without checking on the degenerate case of \(b = 1\)).
```
(check-pct)
2^1 = 2 and should be greater than 1
2^2 = 4 and should be greater than 2
2^2 = 4 and should be greater than 3
2^3 = 8 and should be greater than 4
2^3 = 8 and should be greater than 5
2^3 = 8 and should be greater than 6
2^3 = 8 and should be greater than 7
2^4 = 16 and should be greater than 8
2^4 = 16 and should be greater than 9
;Value: #t
```
Does your procedure generate an iterative process or a recursive process? iterative
Problem 5 A local bookstore has contracted a university to provide an inventory system for their web site. We can create a database of books using Scheme. The constructor for a single book will be called make-book and takes the name of a book and its price as parameters.
```
(define (make-book name price)
  (cons name price))
```
Write the selectors book-name and book-price.
```
(define (book-name b) (car b))
(define (book-price b) (cdr b))
```
The inventory of books will be stored in a list. The selectors for our inventory data structure are first-book and rest-books, defined as follows:
```
(define first-book car)
(define rest-books cdr)
```
Write the constructor make-inventory.
Any of
```
(define make-inventory list)
(define (make-inventory . books) books)
(define make-inventory (lambda books books))
```
would work.
Draw the box-and-pointer diagram that results from the evaluation of
```
(define store-inventory
  (make-inventory (make-book 'sicp 60)
                  (make-book 'collecting-pez 15)
                  (make-book 'the-little-schemer 35)))
```
Problem 5 (continued) Write a procedure called `find-book` which takes the name of a book and an inventory as parameters and returns the book's data structure (name and price) if the book is in the store's inventory, and `[nil]` otherwise.
```
(define (find-book name inventory)
  (cond ((null? inventory) nil)
        ((eqv? (book-name (first-book inventory)) name)
         (first-book inventory))
        (else
         (find-book name (rest-books inventory)))))
```
*Points were commonly lost here for breaking the abstraction barriers and for passing a book instead of the book's name.*
The bookstore has asked us to change our system to include a count of the number of copies of each book the store has on hand. We redefine our book constructor as follows:
```
(define (make-book name price num-in-stock)
(list name price num-in-stock))
```
```
(define book-name car)
(define book-price cadr)
(define book-stock caddr)
```
Will `find-book` need to be changed to accommodate our new representation?
*no (unless barriers were broken above)*
Problem 5 (continued) Now that we are storing the number of copies in stock, write a procedure called `in-stock?` that takes a book name and an inventory as the parameters, and returns #t if at least one copy of the book is in stock, or #f otherwise. If the book is not listed in the inventory at all, `in-stock?` should also return #f. You may want to use your `find-book` procedure from above.
```lisp
(define (in-stock? name inventory)
  (let ((book (find-book name inventory)))
    (and book
         (> (book-stock book) 0))))
```
Notice the use of `and` instead of `if`; remember that `and` evaluates its arguments in left-to-right order, each in turn, and only until the first evaluated argument is false (or it runs out of arguments). If all arguments evaluate non-false, then the value returned is the value of the last argument. If you used `if`, that would have been just fine.
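As a small illustration (not from the handout) of that left-to-right, short-circuit behavior:

```
(and #f (error "never evaluated"))   ; => #f, the error is never raised
(and 1 2 3)                          ; => 3, the value of the last argument
```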
Problem 6 Write a function add-n of one argument n that returns a procedure. The returned procedure takes one argument x and returns the sum of x and n.
```
(define (add-n n)
  (lambda (x) (+ x n)))
```
Using add-n, and without using the built-in Scheme procedure *, write mult which takes two integer arguments a and b and returns their product.
There were three kinds of answers to this question. The first was along the lines of the formulation of new-add from the first quiz; the second used repeated (which worked only for positive values of the variable chosen for iteration, and thus lost points); the third was a very nice recursive formulation to deal with negative numbers. The first approach is shown below.

```
(define (mult a b)
  (let ((op-b (add-n b))               ; save the operator for b
        (op-i (if (< a 0) dec inc)))   ; same for the iterator
    (define (m-helper i prod)          ; the helper function to iterate
      (if (= i a)                      ; are we done?
          prod                         ; yes, return the answer
          (m-helper (op-i i) (op-b prod)))) ; no, advance counter and result
    (m-helper 0 0)))                   ; start out at zero
```
Problem 7 Assume the following expressions have been evaluated in the order they appear.
```
(define a (list (list 'q) 'r 's))
(define b (list (list 'q) 'r 's))
(define c a)
(define d (cons 'p a))
(define e (list 'p (list 'q) 'r 's))
```
Complete the table below with the result of applying the functions eq?, eqv?, and equal? to the two expressions on the left of each row. For example, the elements of the top row will represent the result from evaluating (eq? a c), (eqv? a c), and (equal? a c). Your result should be written as #t, #f or undefined [or unspecified].
| ⟨operand₁⟩ | ⟨operand₂⟩ | eq? | eqv? | equal? |
|------------|------------|-----|------|--------|
| a          | c          | #t  | #t   | #t     |
| a          | b          | #f  | #f   | #t     |
| a          | (cdr d)    | #t  | #t   | #t     |
| d          | e          | #f  | #f   | #t     |
| (car a)    | (car e)    | #f  | #f   | #f     |
| (car a)    | (cadr e)   | #f  | #f   | #t     |
| (caar a)   | (caadr e)  | #t  | #t   | #t     |
Problem 8 Write occurrences, a procedure of two arguments s and tree that returns the number of times the first argument (an atom) appears in the second (a tree). You may find accumulate-tree, shown below, to be helpful.
```
(define (accumulate-tree tree term combiner null-value)
  (cond ((null? tree) null-value)
        ((not (pair? tree)) (term tree))
        (else (combiner (accumulate-tree (car tree) term combiner null-value)
                        (accumulate-tree (cdr tree) term combiner null-value)))))
```
Many, but not all of the students used accumulate-tree in their answer, as suggested by the hint.
```
(define (occurrences s tree)
  (accumulate-tree tree
                   (lambda (x) (if (eqv? x s) 1 0))
                   +
                   0))
```
Those who did not use accumulate-tree ended up writing a procedure with the same functionality.
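One possible direct formulation, a sketch of what such an answer might look like rather than a quote from any student's paper, recurses over the tree by hand:

```
(define (occurrences s tree)
  (cond ((null? tree) 0)                              ; empty tree: no occurrences
        ((not (pair? tree)) (if (eqv? tree s) 1 0))   ; leaf: count a match
        (else (+ (occurrences s (car tree))           ; otherwise add the counts
                 (occurrences s (cdr tree))))))       ; from both subtrees
```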
Problem 9 Draw the box and pointer diagrams for the following structures.
'(1)
'(1 2 3 4)
'((1))
'((1 . 2) (3 . 4))
'(1 (2 3) (4 (5) (6 7)))
|
{"Source-Url": "http://www.cs.uml.edu/~holly/91.301/Fall2002/Q1sample-solutions.pdf", "len_cl100k_base": 4540, "olmocr-version": "0.1.53", "pdf-total-pages": 13, "total-fallback-pages": 0, "total-input-tokens": 23538, "total-output-tokens": 5313, "length": "2e12", "weborganizer": {"__label__adult": 0.0004634857177734375, "__label__art_design": 0.0007343292236328125, "__label__crime_law": 0.00037217140197753906, "__label__education_jobs": 0.0182037353515625, "__label__entertainment": 0.00016105175018310547, "__label__fashion_beauty": 0.0002334117889404297, "__label__finance_business": 0.00025081634521484375, "__label__food_dining": 0.0006852149963378906, "__label__games": 0.0012836456298828125, "__label__hardware": 0.0010652542114257812, "__label__health": 0.00048828125, "__label__history": 0.00047206878662109375, "__label__home_hobbies": 0.00028014183044433594, "__label__industrial": 0.0006456375122070312, "__label__literature": 0.0007410049438476562, "__label__politics": 0.0002849102020263672, "__label__religion": 0.0007872581481933594, "__label__science_tech": 0.0238800048828125, "__label__social_life": 0.00029850006103515625, "__label__software": 0.00846099853515625, "__label__software_dev": 0.93896484375, "__label__sports_fitness": 0.0003743171691894531, "__label__transportation": 0.0006928443908691406, "__label__travel": 0.00025153160095214844}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 15677, 0.0211]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 15677, 0.42007]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 15677, 0.82784]], "google_gemma-3-12b-it_contains_pii": [[0, 37, false], [37, 1093, null], [1093, 1729, null], [1729, 4195, null], [4195, 5633, null], [5633, 7885, null], [7885, 9008, null], [9008, 10140, null], [10140, 11029, null], [11029, 13531, null], [13531, 14582, null], [14582, 15533, null], [15533, 15677, null]], "google_gemma-3-12b-it_is_public_document": [[0, 37, true], [37, 1093, null], [1093, 1729, null], [1729, 4195, null], [4195, 5633, null], [5633, 7885, null], [7885, 9008, null], [9008, 10140, null], [10140, 11029, null], [11029, 13531, null], [13531, 14582, null], [14582, 15533, null], [15533, 15677, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, false], [5000, 15677, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 15677, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 15677, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 15677, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, true], [5000, 15677, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 15677, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 15677, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 15677, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, true], [5000, 15677, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 15677, null]], "pdf_page_numbers": [[0, 37, 1], [37, 1093, 2], [1093, 1729, 3], [1729, 4195, 4], [4195, 5633, 5], [5633, 7885, 6], [7885, 9008, 7], [9008, 10140, 8], [10140, 11029, 9], [11029, 13531, 10], [13531, 14582, 11], [14582, 15533, 12], [15533, 15677, 13]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 15677, 
0.02875]]}
|
olmocr_science_pdfs
|
2024-12-07
|
2024-12-07
|
86a26c6f7c0b879b5110442b853fbb3384b96a4c
|
The iflang package
Heiko Oberdiek*
2018/01/21 v1.7
Abstract
This package provides expandable checks for the current language based on the macro \languagename or on hyphenation patterns.
Contents
1 Documentation
2 Implementation
2.1 Reload check and package identification
2.2 Tools
2.2.1 Provide some basic macros of \LaTeX
2.2.2 Expandable existence check for macros
2.2.3 Macros for messages
2.2.4 Support for etex.src
2.3 \IfLanguagePatterns
2.4 \IfLanguageName
2.5 Check plausibility of \languagename
3 Installation
3.1 Download
3.2 Bundle installation
3.3 Package installation
3.4 Refresh file name databases
3.5 Some details for the interested
4 Acknowledgement
5 History
[2007/04/10 v1.0]
[2007/04/11 v1.1]
[2007/04/12 v1.2]
[2007/04/26 v1.3]
[2007/11/11 v1.5]
[2016/05/16 v1.6]
[2018/01/21 v1.7]
*Please report any issues at https://github.com/ho-tex/oberdiek/issues
1 Documentation
Package babel defines \iflanguage. As first argument it takes a language name and executes the second or third argument depending on the current language. This language test is based on hyphenation patterns. However, it is possible that different languages or dialects share the same patterns. In such cases \iflanguage fails.
However, package babel and some other packages such as german or ngerman store the language name in the macro \languagename if \selectlanguage is called.
\IfLanguageName{⟨lang⟩}{⟨then⟩}{⟨else⟩}
Macro \IfLanguageName compares the language ⟨lang⟩ with the current setting of the macro \languagename. If both contain the same name, then the ⟨then⟩ part is called, otherwise the ⟨else⟩ part.
The macro is expandable. Thus it can safely be used inside \edef or \csname. In case of errors, such as an undefined \languagename, the ⟨else⟩ part is executed.
Note: Macro \IfLanguageName relies on the fact that \languagename is set correctly:
Package babel:
- Full support of \languagename in its language switching commands.
Format based on babel (language.dat):
- If package babel is not used (or not yet loaded), then babel's hyphen.cfg has set \languagename to the last language in language.dat, but \language (the current patterns) is zero and points to the first language. Thus the value of \languagename is basically garbage. Package iflang warns if \language and \languagename do not fit. This can be fixed by loading package babel beforehand.
Format based on ε-TeX's etex.src (language.def):
- Unhappily it does not support \languagename. Thus this package hooks into \uselanguage to get \languagename defined and updated there. At package loading time the changed \uselanguage has not been called yet. Thus package iflang tries USenglish, which is the definite default language of etex.src. If the current patterns do not suit this default language, \languagename remains undefined and a warning is given.
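A small usage sketch (this example is not part of the original documentation; the language name ngerman and the macro \mycontents are just illustrations):

```latex
% Because \IfLanguageName is expandable, it may be used inside \edef:
\edef\mycontents{\IfLanguageName{ngerman}{Inhaltsverzeichnis}{Contents}}
```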
\IfLanguagePatterns{⟨lang⟩}{⟨then⟩}{⟨else⟩}
This macro behaves similarly to \IfLanguageName, but the language test is based on the patterns currently in force (\language). This macro is also expandable; in case of errors the ⟨else⟩ part is called.
The following naming conventions for the patterns are supported:
babel/language.dat: \l@⟨language⟩
etex.src/language.def: \lang@⟨language⟩
Package iflang looks for \et@xp@t@xpatterns (defined in etex.src) to find out the naming convention in use.
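A usage sketch for the pattern-based test (illustrative only; USenglish is just an example name):

```latex
% Executes the first branch if the USenglish patterns are currently in force:
\IfLanguagePatterns{USenglish}{US English patterns active}{other patterns active}
```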
2 Implementation
2.1 Reload check and package identification
Reload check, especially if the package is not used with \LaTeX.
\begin{verbatim}
\begingroup\catcode61\catcode48\catcode32=10\relax%
  \catcode13=5 % ^^M
  \endlinechar=13 %
  \catcode35=6 % #
  \catcode39=12 % '
  \catcode44=12 % ,
  \catcode45=12 % -
  \catcode46=12 % .
  \catcode47=12 % /
  \catcode58=12 % :
  \catcode64=11 % @
  \catcode123=1 % {
  \catcode125=2 % }
\endgroup
\end{verbatim}
Package identification:
\begin{verbatim}
\begingroup\catcode61\catcode48\catcode32=10\relax%
  \catcode13=5 % ^^M
  \endlinechar=13 %
  \catcode35=6 % #
  \catcode39=12 % '
  \catcode44=12 % ,
  \catcode45=12 % -
  \catcode46=12 % .
  \catcode47=12 % /
  \catcode58=12 % :
  \catcode64=11 % @
  \catcode123=1 % {
  \catcode125=2 % }
\endgroup
\end{verbatim}
\makeatletter
\begin{verbatim}
\expandafter\expandafter\expandafter\edef\expandafter\@tempa
\expandafter\expandafter\expandafter{%}
\expandafter\edef\expandafter\@tempb
\expandafter\expandafter\expandafter{%}
\end{verbatim}
\makeatother
\@firstoftwo
\@secondoftwo
2.2.2 Expandable existence check for macros
\IfLang@ifDefined
2.2.3 Macros for messages
\IfLang@prefix
2.2.4 Support for etex.src
\IfLang@prefix
The first `\uselanguage` that is executed as the last line in `language.def` cannot be patched this way. However, `language.def` is very strict: it forces the first added and used language to be `USenglish`. Thus, if `\languagename` is not defined, we can quite safely assume `USenglish`. As an additional safety precaution the actually used patterns are checked.
```latex
\begingroup\expandafter\expandafter\expandafter\endgroup
\expandafter\ifx\csname languagename\endcsname\relax
  \begingroup\expandafter\expandafter\expandafter\endgroup
  \expandafter\ifx\csname lang@USenglish\endcsname\relax
    \@PackageWarningNoLine{iflang}{%
      \string\lang@USenglish\space is missing%
    }%
  \else
    \@PackageWarningNoLine{iflang}{%
      \string\languagename\space is not set,\MessageBreak
      current language is unknown%
    }%
  \fi
\fi
\begingroup\expandafter\expandafter\expandafter\endgroup
\expandafter\ifx\csname languagename\endcsname\relax
  \@PackageInfoNoLine{iflang}{%
    \string\languagename\space is not set%
  }%
\fi
```
2.3 \IfLanguagePatterns

```latex
\def\IfLanguagePatterns#1{%
  \IfLang@IfDefined{\IfLang@prefix#1}{%
    \expandafter\ifnum\csname\IfLang@prefix#1\endcsname=\language
      \expandafter\@firstoftwo
    \else
      \expandafter\@secondoftwo
    \fi
  }{%
    \@secondoftwo
  }%
}%
```
2.4 \IfLanguageName

At load time the package checks whether \pdf@strcmp is available; if so, the string comparison is based on it, otherwise the fallback defined below is used.

```latex
\begingroup\expandafter\expandafter\expandafter\endgroup
\expandafter\ifx\csname pdf@strcmp\endcsname\relax
  \expandafter\@firstoftwo
\else
  \expandafter\@secondoftwo
\fi
```
We do not have \pdf@strcmp. Thus we must define our own expandable string comparison. The following implementation is based on a \TeX\ pearl from David Kastrup, presented at the conference Bacho\TeX\ 2005: \url{http://www.gust.org.pl/projects/pearls/2005p/david-kastrup/bachotex2005-david-kastrup-pearl1.pdf}
The original code allows macros inside the second string. Because \languagename might also consist of further macros, we need a variant that allows macros in the first string, too.
\begin{verbatim}
\def\IfLang@StrNil{\relax}%
\def\IfLang@StrEqual#1{%
\number\IfLang@StrEqualStart{}{}#1\IfLang@StrNil
}
\def\IfLang@StrEqualStart#1#2#3{%
\ifx#3\IfLang@StrNil
\IfLang@StrEqualStop
\fi
\ifcat\noexpand#3\relax
\IfLang@StrExpand{#1}{#2}#3%
\fi
\IfLang@StrEqualStart{\if#3#1}{#2\fi}%
}%
\def\IfLang@StrEqualStop\fi#1\IfLang@StrEqualStart#2#3#4{%
\fi
#2#4\relax'#313%
}%
\def\IfLang@StrExpand#1#2#3\fi\IfLang@StrEqualStart#4#5{%
\fi
\IfLang@@StrExpand{#1}{#2}#3%
}%
\def\IfLang@@StrExpand#1#2#3\IfLang@StrNil{%
\expandafter\IfLang@@@StrExpand#3\IfLang@StrNil{#1}{#2}%
}%
\def\IfLang@@@StrExpand#1\IfLang@StrNil#2#3{%
\IfLang@StrEqualStart{#2}{#3}#1\IfLang@StrNil
}%
\end{verbatim}
\texttt{\IfLanguageName}
\begin{verbatim}
\def\IfLanguageName#1{%
\ifnum\IfLang@IfDefined{languagename}{%
\if\expandafter\IfLang@StrEqual\expandafter%
{\languagename}{#1}%
0%
\else
1%
\fi
\expandafter\@firstoftwo
}%
\end{verbatim}
2.5 Check plausibility of \languagename
\begingroup\expandafter\expandafter\expandafter\endgroup
\expandafter\ifx\csname\IfLang@prefix\languagename\endcsname\relax
\else
  \IfLanguagePatterns{\languagename}{}{%
    \PackageWarningNoLine{iflang}{%
      Mismatch between \string\language\space (patterns)\MessageBreak
      and setting of \string\languagename
    }%
  }%
\fi
\IfLang@AtEnd
⟨/package⟩
3 Installation
3.1 Download
Package. This package is available on CTAN\textsuperscript{1}:
\url{CTAN:macros/latex/contrib/oberdiek/iflang.dtx} The source file.
\url{CTAN:macros/latex/contrib/oberdiek/iflang.pdf} Documentation.
Bundle. All the packages of the bundle ‘oberdiek’ are also available in a TDS compliant ZIP archive. There the packages are already unpacked and the documentation files are generated. The files and directories obey the TDS standard.
\url{CTAN:install/macros/latex/contrib/oberdiek.tds.zip}
TDS refers to the standard “A Directory Structure for \TeX\ Files” (\url{CTAN:pkg/tds}). Directories with texmf in their name are usually organized this way.
3.2 Bundle installation
Unpacking. Unpack the oberdiek.tds.zip in the TDS tree (also known as texmf tree) of your choice. Example (linux):
unzip oberdiek.tds.zip -d ~/texmf
3.3 Package installation
Unpacking. The .dtx file is a self-extracting docstrip archive. The files are extracted by running the .dtx through plain \TeX:
tex iflang.dtx
\textsuperscript{1}CTAN:pkg/iflang
TDS. Now the different files must be moved into the different directories in your installation TDS tree (also known as texmf tree):
- `iflang.sty` → `tex/generic/oberdiek/iflang.sty`
- `iflang.pdf` → `doc/latex/oberdiek/iflang.pdf`
- `iflang.dtx` → `source/latex/oberdiek/iflang.dtx`
If you have a `docstrip.cfg` that configures and enables `docstrip`’s TDS installing feature, then some files can already be in the right place, see the documentation of `docstrip`.
3.4 Refresh file name databases
If your \TeX\ distribution (\TeX\ Live, MiK\TeX\, …) relies on file name databases, you must refresh these. For example, \TeX\ Live users run `texhash` or `mktexlsr`.
3.5 Some details for the interested
Unpacking with \LaTeX. The `.dtx` chooses its action depending on the format:
plain \TeX: Run `docstrip` and extract the files.
\LaTeX: Generate the documentation.
If you insist on using \LaTeX\ for `docstrip` (really, `docstrip` does not need \LaTeX\), then inform the autodetect routine about your intention:
```latex
latex \let\install=y\input{iflang.dtx}
```
Do not forget to quote the argument according to the demands of your shell.
Generating the documentation. You can use both the `.dtx` or the `.drv` to generate the documentation. The process can be configured by the configuration file `ltxdoc.cfg`. For instance, put this line into this file, if you want to have A4 as paper format:
```
\PassOptionsToClass{a4paper}{article}
```
An example follows of how to generate the documentation with pdf\LaTeX:
```bash
pdflatex iflang.dtx
makeindex -s gind.ist iflang.idx
pdflatex iflang.dtx
makeindex -s gind.ist iflang.idx
pdflatex iflang.dtx
```
4 Acknowledgement
I wish to thank:
Markus Kohm Useful hints for version 1.2.
5 History
[2007/04/10 v1.0]
• First public version.
[2007/04/11 v1.1]
• Line ends sanitized.
[2007/04/12 v1.2]
• Initialization of \languagename in case of etex.src.
• Some sanity tests added.
• Documentation improved.
[2007/04/26 v1.3]
• Use of package infwarerr.
[2007/09/09 v1.4]
• Bug fix: \IfLang@StrEqual → \IfLangStrEqual (Gabriele Balducci).
• Catcode section rewritten.
[2007/11/11 v1.5]
• Use of package pdftexcmds for LuaTEX support.
[2016/05/16 v1.6]
• Documentation updates.
[2018/01/21 v1.7]
• Fix test for etex.src.
Lecture 2:
Memory in C
Office Hours
• Hank’s OH: (tentative)
– Tues 1:30pm-2:30pm
– Fri 1pm-2pm (but not today!!)
• Hank’s OH Location: 301 Deschutes Hall
• Kewen's Office Hours: Weds 3:30pm-5:00pm
• Brent's Office Hours: Mon 3:30pm-5:00pm
• Sam's Office Hours: Thurs 4:00pm-5:30pm
• TA OH Location: 232 Deschutes Hall
Piazza + Windows
Ubuntu VM Workshop TUESDAY APRIL 5TH 2PM DESCHUTES 100
Based on the poll
What exactly do we turn in?
"This is my first experience with Unix and Vim, so when we are turning in the assignment do we just create a file through ..." (Piazza post; truncated in the screenshot)
Ubuntu Virtual Machine for Windows
http://www.wikihow.com/Install-Ubuntu-on-VirtualBox ("I can set up a time in Deschutes 100 for a group setup." Piazza post; truncated in the screenshot)
Note on Homeworks
- Project 1A: assigned Weds, due on Monday
  - "due Monday" → submitted by 6am Tues
  - Will discuss again in 10 slides
- Project 2A: assigned today, due in class on Weds
- Project 1B and more: assigned next week
Plan for today
• Quick review of Unix basics
• Project 1A
• Baby steps into C and gcc
• Memory
Plan for today
• Quick review of Unix basics
• Project 1A
• Baby steps into C and gcc
• Memory
Files
• Unix maintains a file system
– File system controls how data is stored and retrieved
• Primary abstractions:
– Directories
– Files
• Files are contained within directories
Directories are hierarchical
• Directories can be placed within other directories
• “/” -- The root directory
– Note “/”, where Windows uses “\”
• “/dir1/dir2/file1”
– What does this mean?
File file1 is contained in directory dir2, which is contained in directory dir1, which is in the root directory
Home directory
• Unix supports multiple users
• Each user has their own directory that they control
• Location varies over Unix implementation, but typically something like “/home/username”
• Stored in environment variables
```
fawcett:~ childs$ echo $HOME
/Users/childs
```
File manipulation
New commands: mkdir, cd, touch, ls, rmdir, rm
cd: change directory
- The shell always has a “present working directory”
- directory that commands are relative to
- “cd” changes the present working directory
- When you start a shell, the shell is in your “home” directory
Unix commands: mkdir
• mkdir: makes a directory
– Two flavors
• Relative to current directory
– mkdir dirNew
• Relative to absolute path
– mkdir /dir1/dir2/dirNew
» (dir1 and dir2 already exist)
Unix commands: rmdir
• rmdir: removes a directory
– Two flavors
• Relative to current directory
– rmdir badDir
• Relative to absolute path
– rmdir /dir1/dir2/badDir
» Removes badDir, leaves dir1, dir2 in place
• Only works on empty directories!
– “Empty” directories are directories with no files
Most Unix commands can distinguish between absolute and relative path, via the “/” at beginning of filename.
(I’m not going to point this feature out for subsequent commands.)
Unix commands: touch
• touch: “touch” a file
• Behavior:
– If the file doesn’t exist
• → create it
– If the file does exist
• → update time stamp
Time stamps record the last modification to a file or directory
Why could time stamps be useful?
Unix commands: ls
• **ls**: list the contents of a directory
– Note this is “LS”, not “is” with a capital ‘i’
• Many flags, which we will discuss later
– A flag is a mechanism for modifying a Unix program's behavior.
– Convention of using hyphens to signify special status
• “ls” is also useful with “wild cards”, which we will also discuss later
Important: “man”
• Get a man page:
• → “man rmdir” gives:
```
RMDIR(1)              BSD General Commands Manual              RMDIR(1)

NAME
     rmdir -- remove directories

SYNOPSIS
     rmdir [-p] directory ...

DESCRIPTION
     The rmdir utility removes the directory entry specified by each
     directory argument, provided it is empty.

     Arguments are processed in the order given.  In order to remove
     both a parent directory and a subdirectory of that parent, the
     subdirectory must be specified first so the parent directory is
     empty when rmdir tries to remove it.

     The following option is available:

     -p      Each directory argument is treated as a pathname of which
             all components will be removed, if they are empty,
             starting with the last most component.  (See rm(1) for
             fully non-discriminant ...
```
File Editors
• Brent taught you ‘vi’, so I’m not going to
• But ask me for tips any time you see me editing
vi / vim graphical cheat sheet
Main command line commands ('ex'):
:w (save), :q (quit), :q! (quit w/o saving)
:e f (open file f),
:%s/x/y/g (replace 'x' by 'y' filewide),
:h (help in vim), :new (new file in vim),
Other important commands:
CTRL-R: redo (vim),
CTRL-F/-B: page up/down,
CTRL-E/-Y: scroll line up/down,
CTRL-V: block-visual mode (vim only)
Visual mode:
Move around and type operator to act on selected region (vim only)
Notes:
(1) use "x before a yank/paste/del command to use that register ('clipboard') (x=a..z,*)
(e.g.: "ay$ to copy rest of line to reg 'a')
(2) type in a number before any action to repeat it that number of times
(e.g.: 2p, d2w, 5i, d4j)
(3) duplicate operator to act on current line
(4) ZZ to save & quit, ZQ to quit w/o saving
(5) zt: scroll cursor to top,
zb: bottom, zz: center
(6) gg: top of file (vim only),
gf: open file under cursor (vim only)
For a graphical vi/vim tutorial & more tips, go to www.viemu.com - home of ViEmu, vi/vim emulation for Microsoft Visual Studio
Plan for today
• Quick review of Unix basics
• Project 1A
• Baby steps into C and gcc
• Memory
Project 1A
• Practice using an editor
• Must be written using editor on Unix platform
– I realize this is unenforceable.
– If you want to do it with another mechanism, I can’t stop you
• But realize this project is simply to prepare you for later projects
Project 1A
• Write ≥ 300 words using editor (vi, emacs, other)
• Topic: what you know about C programming language
• Can’t write 300 words?
– Bonus topic: what you want from this course
• How will you know if it is 300 words?
– Unix command: “wc” (word count)
Unix command: wc (word count)
```
fawcett:~ childs$ vi hanks_essay
fawcett:~ childs$ wc -w hanks_essay
     252 hanks_essay
fawcett:~ childs$ wc hanks_essay
      63     252    1071 hanks_essay
fawcett:~ childs$
```
(63 = lines, 252 = words, 1071 = characters)
CIS 330: Project #1A
Assigned: March 30, 2016
Due April 4, 2016
(which means submitted by 6am on April 5th, 2016)
Worth 1% of your grade
Assignment:
1) On a Unix platform (including Mac), use an editor (vi, emacs, other) to write a 300 word “essay”
a. The purpose of the essay is to practice using an editor.
i. Grammar will not be graded
b. I would like to learn more about what you know about C and want from this class ... I recommend you each write about that.
c. If you run out of things to say, you don’t have to write original words (do a copy/paste using vi commands: yyp)
Do not write this in another editor and copy into vi.
Also, do not put more than 100 characters onto any given line. (I want you to practice having multiple lines and navigating.)
How to submit
• Canvas
• If you run into trouble:
– Email me your solution
Plan for today
• Quick review of Unix basics
• Project 1A
• Baby steps into C and gcc
• Memory
GNU Compilers
• GNU compilers: open source
– gcc: GNU compiler for C
– g++: GNU compiler for C++
Our first gcc program
```c
#include <stdio.h>
int main()
{
printf("hello world!\n");
}
```
(Screenshot annotations) The demo showed: a Unix command that prints the contents of a file; invoking the gcc compiler; the name of the file to compile; and the default name for output programs (a.out, which gcc produces when no output name is given). You should use this for Proj 2A.
Plan for today
- Quick review of Unix basics
- Project 1A
- Baby steps into C and gcc
- Memory
Why C?
• You can control the memory
• That helps get good performance
• If you don’t control the memory (like in other programming languages), you are likely to get poor performance
• ... so let’s talk about memory
Motivation: Project 2A
Assignment: fill out this worksheet.
<table>
<thead>
<tr>
<th>Location</th>
<th>0x8000</th>
<th>0x8004</th>
<th>0x8008</th>
<th>0x800c</th>
<th>0x8010</th>
<th>0x8014</th>
<th>0x8018</th>
</tr>
</thead>
<tbody>
<tr>
<td>Value</td>
<td>0</td>
<td>1</td>
<td>1</td>
<td>2</td>
<td>3</td>
<td>5</td>
<td>8</td>
</tr>
<tr>
<td>Location</td>
<td>0x801c</td>
<td>0x8020</td>
<td>0x8024</td>
<td>0x8028</td>
<td>0x802c</td>
<td>0x8030</td>
<td>0x8034</td>
</tr>
<tr>
<td>Value</td>
<td>13</td>
<td>21</td>
<td>34</td>
<td>55</td>
<td>89</td>
<td>144</td>
<td>233</td>
</tr>
<tr>
<td>Location</td>
<td>0x8038</td>
<td>0x803c</td>
<td>0x8040</td>
<td>0x8044</td>
<td>0x8048</td>
<td>0x804c</td>
<td>0x8050</td>
</tr>
<tr>
<td>Value</td>
<td>377</td>
<td>610</td>
<td>987</td>
<td>1597</td>
<td>2584</td>
<td>4181</td>
<td>6765</td>
</tr>
</tbody>
</table>
Code:
int *A = (int *) 0x8000;
Note: “NOT ENOUGH INFO” is a valid answer.
<table>
<thead>
<tr>
<th>Variable</th>
<th>Your Answer</th>
<th>Variable</th>
<th>Your Answer</th>
</tr>
</thead>
<tbody>
<tr>
<td>A</td>
<td>0x8000</td>
<td>(A+6)-(A+3)</td>
<td></td>
</tr>
<tr>
<td>&A</td>
<td>NOT ENOUGH INFO</td>
<td>*(A+6)-*(A+4)</td>
<td></td>
</tr>
<tr>
<td>*A</td>
<td></td>
<td>(A+6)-B[0]</td>
<td></td>
</tr>
</tbody>
</table>
Important Memory Concepts in C (1/9): Stack versus Heap
- You can allocate variables that only live for the invocation of your function
- Called stack variables (will talk more about this later)
- You can allocate variables that live for the whole program (or until you delete them)
- Called heap variables (will talk more about this later as well)
Important Memory Concepts in C (2/9): Pointers
• Pointer: points to memory location
– Denoted with ‘*’
– Example: “int *p”
• pointer to an integer
– You need pointers to get to heap memory
• Address of: gets the address of memory
– Operator: ‘&’
– Example:
```c
int x;
int *y = &x;
```
Important Memory Concepts in C (3/9): Memory allocation
• Special built-in function to allocate memory from heap: **malloc**
– Interacts with Operating System
– Argument for malloc is how many bytes you want
• Also built-in function to deallocate memory: **free**
free/malloc example
The #include line lets the compiler see declarations for functions that aren't defined in this file. More on this next week.
```c
#include <stdlib.h>
int main()
{
/* allocates memory */
int *ptr = malloc(2*sizeof(int));
/* deallocates memory */
free(ptr);
}
```
sizeof is a built-in operator in C. It returns the number of bytes for a type (typically 4 bytes for an int).
You don't have to say how many bytes to free ... the heap's memory manager keeps track.
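A minimal sketch (not from the slides) you can compile to check the sizes on your own machine; the numbers in the comments assume a typical 64-bit system:

```c
#include <stdio.h>

int main()
{
    /* %zu is the printf format for size_t, the type that sizeof yields */
    printf("sizeof(int)    = %zu\n", sizeof(int));    /* typically 4 */
    printf("sizeof(int *)  = %zu\n", sizeof(int *));  /* typically 8 on 64-bit */
    printf("sizeof(double) = %zu\n", sizeof(double)); /* typically 8 */
    return 0;
}
```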
Important Memory Concepts in C (4/9): Arrays
- Arrays lie in contiguous memory
- So if you know address to one element, you know address of the rest
- `int *a = malloc(sizeof(int)*1);`
- a single integer
- ... or an array of a single integer
- `int *a = malloc(sizeof(int)*2);`
- an array of two integers
- first integer is at ‘a’
- second integer is at ‘a+4’
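A minimal sketch (not from the slides) that prints the two addresses so you can see the contiguity; the 4-byte gap assumes a 4-byte int:

```c
#include <stdio.h>
#include <stdlib.h>

int main()
{
    int *a = malloc(sizeof(int)*2);  /* an array of two integers */
    /* %p prints an address; the elements are sizeof(int) bytes apart
       because array memory is contiguous */
    printf("first  integer at %p\n", (void *)&a[0]);
    printf("second integer at %p\n", (void *)&a[1]);
    free(a);
    return 0;
}
```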
Important Memory Concepts in C (5/9): Dereferencing
• There are two operators for getting the value at a memory location: *, and []
– This is called dereferencing
• * = “dereference operator”
• int *p = malloc(sizeof(int)*1);
• *p = 2; /* sets the memory p points to, to have value 2 */
• p[0] = 2; /* sets the memory p points to, to have value 2 */
Important Memory Concepts in C (6/9): pointer arithmetic
- `int *p = malloc(sizeof(int)*5);`
- Compiler allows you to modify pointer with math operations
- Called pointer arithmetic
- "Does the right thing" with respect to type
- `int *p = malloc(sizeof(int)*5);`
- `p+1` is 4 bytes bigger than `p`!!
- Then:
- "p+4" is the same as "&(p[4])" (ADDRESSES)
- "*(p+4)" is the same as "p[4]" (VALUES)
Important Memory Concepts in C (7/9): Pointers to pointers
- `int **p = malloc(sizeof(int *)*5);`
- `p[0] = malloc(sizeof(int)*50);`
- `...`
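Filling in the `...`: a minimal sketch (not from the slides) of allocating, using, and freeing a pointer-to-pointer structure. Note the declaration is `int **p`, and everything allocated must eventually be freed:

```c
#include <stdlib.h>

int main()
{
    int i;
    int **p = malloc(sizeof(int *)*5);  /* 5 pointers ("rows") */
    for (i = 0; i < 5; i++)
        p[i] = malloc(sizeof(int)*50);  /* each row holds 50 ints */

    p[2][10] = 7;  /* row 2, element 10 */

    /* free the rows first, then the array of pointers itself */
    for (i = 0; i < 5; i++)
        free(p[i]);
    free(p);
    return 0;
}
```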
Important Memory Concepts in C (8/9): Hexadecimal address
- Addresses are in hexadecimal
Important Memory Concepts in C (9/9)
NULL pointer
• int *p = NULL;
• often stored as address 0x00000000
• used to initialize something to a known value
– And also indicate that it is uninitialized...
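A minimal sketch (not from the slides) of the initialize-to-NULL idiom, using NULL as the "not set up yet" marker:

```c
#include <stdio.h>
#include <stdlib.h>

int main()
{
    int *p = NULL;                 /* known value: points nowhere yet */
    if (p == NULL)                 /* detectably uninitialized */
        p = malloc(sizeof(int));
    if (p != NULL) {               /* malloc itself can return NULL */
        *p = 42;
        printf("p = %p, *p = %d\n", (void *)p, *p);
    }
    free(p);                       /* free(NULL) would also be safe */
    return 0;
}
```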
Project 2A
• You now know what you need to do Project 2A
– But: practice writing C programs and testing yourself!!
– Hint: you can printf with a pointer
Project 2A
• Assigned Saturday AM
• Worksheet. You print it out, complete it on your own, and bring it to class.
• Due Weds, 10am, in class
– Graded in class
• No Piazza posts on this please
• Practice with C, vi, gcc, printf
Memory Segments
- Von Neumann architecture: one memory space, for both instructions and data
- So break memory into “segments”
- ... creates boundaries to prevent confusion
- 4 segments:
- Code segment
- Data segment
- Stack segment
- Heap segment
Code Segment
• Contains assembly code instructions
• Also called text segment
• This segment is modifiable, but that's a bad idea
– “Self-modifying code”
• Typically ends in a bad state very quickly.
Data Segment
• Contains data not associated with heap or stack
– global variables
– statics (to be discussed later)
– character strings you’ve compiled in
```c
char *str = "hello world\n";
```
Stack: data structure for collection
• A stack contains things
• It has only two methods: push and pop
– Push puts something onto the stack
– Pop returns the most recently pushed item (and removes that item from the stack)
• LIFO: last in, first out
Imagine a stack of trays.
You can place on top (push).
Or take one off the top (pop).
Stack
- Stack: memory set aside as scratch space for program execution
- When a function has local variables, it uses this memory.
- When you exit the function, the memory is lost
Stack
• The stack grows as you enter functions, and shrinks as you exit functions.
– This can be done on a per variable basis, but the compiler typically does a grouping.
• Some exceptions (discussed later)
• Don’t have to manage memory: allocated and freed automatically
Heap
- Heap (data structure): tree-based data structure
- Heap (memory): area of computer memory that requires explicit management (malloc, free).
- Memory from the heap is accessible any time, by any function.
- Contrasts with the stack
Memory Segments
- text (fixed size)
- data (fixed size)
- stack (growth toward the free region)
- free
- heap (growth toward the free region)
Source: http://www.cs.uwm.edu/classes/cs315/Bacon/
# Stack vs Heap: Pros and Cons
<table>
<thead>
<tr>
<th></th>
<th>Stack</th>
<th>Heap</th>
</tr>
</thead>
<tbody>
<tr>
<td>Allocation/Deallocation</td>
<td>Automatic</td>
<td>Explicit</td>
</tr>
</tbody>
</table>
How stack memory is allocated into Stack Memory Segment
```c
void foo()
{
int stack_varA;
int stack_varB;
}
int main()
{
int stack_varC;
int stack_varD;
foo();
}
```
How stack memory is allocated into Stack Memory Segment
```c
int doubler(int A)
{
int stack_varA;
stack_varA = 2*A;
return stack_varA;
}
int main()
{
int stack_varC;
int stack_varD = 3;
stack_varC = doubler(stack_varD);
}
```
(Stack diagram) Memory segments during the call to doubler:
- Code
- Data
- Stack: stack_varC, stack_varD, <info for how to get back to main>, A (= 3), <Location for RV>
- Free
- Heap
The return value is copied into the location specified by the calling function.
This code is very problematic ... why?
```c
int *foo()
{
    int stack_varC[2] = { 0, 1 };
    return stack_varC;
}
int *bar()
{
    int stack_varD[2] = { 2, 3 };
    return stack_varD;
}
int main()
{
    int *stack_varA, *stack_varB;
    stack_varA = foo();
    stack_varB = bar();
    stack_varA[0] *= stack_varB[0];
}
```
foo and bar are returning addresses that are on the stack ... they could easily be overwritten.
(bar's stack_varD overwrites foo's stack_varC in this program)
Nested Scope

```c
int main()
{
    int stack_varA;
    {
        int stack_varB = 3;
    }
}
```

You can create new scope within a function by adding '{' and '}'.

(Stack diagram) Memory segments: Code, Data, Stack (holding stack_varA, then stack_varB while the inner scope is active), Free, Heap.
# Stack vs Heap: Pros and Cons
<table>
<thead>
<tr>
<th></th>
<th>Stack</th>
<th>Heap</th>
</tr>
</thead>
<tbody>
<tr>
<td>Allocation/Deallocation</td>
<td>Automatic</td>
<td>Explicit</td>
</tr>
<tr>
<td>Access</td>
<td>Fast</td>
<td>Slower</td>
</tr>
</tbody>
</table>
Memory pages associated with the stack are almost always immediately available.
Memory pages associated with the heap may be located anywhere ... there may be caching effects.
## Stack vs Heap: Pros and Cons
<table>
<thead>
<tr>
<th></th>
<th>Stack</th>
<th>Heap</th>
</tr>
</thead>
<tbody>
<tr>
<td>Allocation/Deallocation</td>
<td>Automatic</td>
<td>Explicit</td>
</tr>
<tr>
<td>Access</td>
<td>Fast</td>
<td>Slower</td>
</tr>
<tr>
<td>Variable scope</td>
<td>Limited</td>
<td>Unlimited</td>
</tr>
</tbody>
</table>
Variable scope: stack and heap
```c
int *foo()
{
int stack_varA[2] = { 0, 1 };
return stack_varA;
}
int *bar()
{
int *heap_varB;
heap_varB = malloc(sizeof(int)*2);
heap_varB[0] = 2;
heap_varB[1] = 2;
return heap_varB;
}
int main()
{
int *stack_varA;
int *stack_varB;
stack_varA = foo(); /* problem */
stack_varB = bar(); /* still good */
}
```
bar returned memory from heap
The calling function – i.e., the function that calls bar – must understand this and take responsibility for calling free.
If it doesn’t, then this is a “memory leak”.
Memory leaks
It is OK that we are using the heap ... that's what it is there for
The problem is that we lost the references to the 49 allocations on heap
The heap’s memory manager will not be able to re-claim them ... we have effectively limited the memory available to the program.
```c
{
    int i;
    int *stack_varA;
    for (i = 0 ; i < 50 ; i++)
        stack_varA = bar();
}
```
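One way to fix it, sketched under the assumption that bar() is the version from the earlier slide: free each allocation before the reference to it is lost.

```c
#include <stdlib.h>

/* bar() as on the earlier slide: returns a 2-int heap allocation */
int *bar()
{
    int *heap_varB = malloc(sizeof(int)*2);
    heap_varB[0] = 2;
    heap_varB[1] = 2;
    return heap_varB;
}

int main()
{
    int i;
    int *stack_varA;
    for (i = 0 ; i < 50 ; i++)
    {
        stack_varA = bar();
        free(stack_varA);   /* done with it: the memory manager can reuse it */
    }
    return 0;
}
```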
Running out of memory (stack)
```c
int endless_fun()
{
endless_fun();
}
int main()
{
endless_fun();
}
```
stack overflow: when the stack runs into the heap.
There is no protection for stack overflows.
(Checking for it would require coordination with the heap's memory manager on every function call.)
Running out of memory (heap)
```c
int *heaps_o_fun()
{
int *heap_A = malloc(sizeof(int)*1000000000);
return heap_A;
}
int main()
{
int *stack_A;
stack_A = heaps_o_fun();
}
```
If the heap memory manager doesn’t have room to make an allocation, then malloc returns NULL .... a more graceful error scenario.
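A minimal sketch (not from the slides) of handling that graceful failure, checking malloc's result before using it:

```c
#include <stdio.h>
#include <stdlib.h>

int main()
{
    int *heap_A = malloc(sizeof(int)*1000000000);
    if (heap_A == NULL)
    {
        fprintf(stderr, "malloc failed: not enough memory\n");
        return 1;            /* bail out instead of crashing later */
    }
    heap_A[0] = 1;
    free(heap_A);
    return 0;
}
```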
## Stack vs Heap: Pros and Cons
<table>
<thead>
<tr>
<th></th>
<th>Stack</th>
<th>Heap</th>
</tr>
</thead>
<tbody>
<tr>
<td>Allocation/Deallocation</td>
<td>Automatic</td>
<td>Explicit</td>
</tr>
<tr>
<td>Access</td>
<td>Fast</td>
<td>Slower</td>
</tr>
<tr>
<td>Variable scope</td>
<td>Limited</td>
<td>Unlimited</td>
</tr>
<tr>
<td>Fragmentation</td>
<td>No</td>
<td>Yes</td>
</tr>
</tbody>
</table>
Memory Fragmentation
- Memory fragmentation: the memory allocated on the heap is spread out across the memory space, rather than being concentrated in a particular address range.
Memory Fragmentation
```c
int *bar()
{
int *heap_varA;
heap_varA = malloc(sizeof(int)*2);
heap_varA[0] = 2;
heap_varA[1] = 2;
return heap_varA;
}
int main()
{
int i;
int *stack_varA[50];
for (i = 0 ; i < 50 ; i++)
stack_varA[i] = bar();
for (i = 0 ; i < 25 ; i++)
free(stack_varA[i*2]);
}
```
Negative aspects of fragmentation?
(1) can’t make big allocations
(2) losing cache locality
Even if there is lots of memory available, the memory manager can only accept your request if there is a big enough contiguous chunk.
## Stack vs Heap: Pros and Cons
<table>
<thead>
<tr>
<th></th>
<th>Stack</th>
<th>Heap</th>
</tr>
</thead>
<tbody>
<tr>
<td>Allocation/Deallocation</td>
<td>Automatic</td>
<td>Explicit</td>
</tr>
<tr>
<td>Access</td>
<td>Fast</td>
<td>Slower</td>
</tr>
<tr>
<td>Variable scope</td>
<td>Limited</td>
<td>Unlimited</td>
</tr>
<tr>
<td>Fragmentation</td>
<td>No</td>
<td>Yes</td>
</tr>
</tbody>
</table>
Memory Errors
• Array bounds read
```c
int main()
{
int var;
int arr[3] = { 0, 1, 2 };
var=arr[3];
}
```
• Array bounds write
```c
int main()
{
int var = 2;
int arr[3];
arr[3]=var;
}
```
Memory Errors
- Free memory read / free memory write
```c
int main()
{
int *var = malloc(sizeof(int)*2);
var[0] = 0;
var[1] = 2;
free(var);
var[0] = var[1];
}
```
When does this happen in real-world scenarios?
Memory Errors
• Freeing unallocated memory
```c
int main()
{
int *var = malloc(sizeof(int)*2);
var[0] = 0;
var[1] = 2;
free(var);
free(var);
}
```
When does this happen in real-world scenarios?
Vocabulary: “dangling pointer”: pointer that points to memory that has already been freed.
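A common defensive habit, sketched here (not from the slides): set a pointer to NULL right after freeing it, since free(NULL) is defined to do nothing.

```c
#include <stdlib.h>

int main()
{
    int *var = malloc(sizeof(int)*2);
    var[0] = 0;
    var[1] = 2;
    free(var);
    var = NULL;   /* no longer dangling */
    free(var);    /* free(NULL) is a no-op, so the double free is harmless */
    return 0;
}
```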
Memory Errors
- Freeing non-heap memory
```c
int main()
{
int var[2];
var[0] = 0;
var[1] = 2;
free(var);
}
```
When does this happen in real-world scenarios?
Memory Errors
• NULL pointer read / write
```c
int main()
{
char *str = NULL;
printf(str);
str[0] = 'H';
}
```
• NULL is never a valid location to read from or write to; accessing it results in a "segmentation fault"
– ... remember those memory segments?
When does this happen in real-world scenarios?
Memory Errors
• Uninitialized memory read
```c
int main()
{
int *arr = malloc(sizeof(int)*10);
int V2=arr[3];
}
```
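One way to avoid the error, sketched here (not from the slides): use calloc, which allocates and zero-fills, so every subsequent read is well defined.

```c
#include <stdio.h>
#include <stdlib.h>

int main()
{
    int *arr = calloc(10, sizeof(int));  /* 10 ints, all initialized to 0 */
    int V2 = arr[3];                     /* reads 0, not garbage */
    printf("%d\n", V2);
    free(arr);
    return 0;
}
```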
Summary
The SMITE system will support high assurance, yet flexible multi-level secure applications. The SMITE multi-processor capability computer is being developed, based on RSRE's Flex computer architecture, to provide a suitable environment. This paper describes the protection mechanisms provided by the microcoded hardware and introduces the security mechanisms built in software on top of these.
1. Introduction
The SMITE system will support flexible multi-level secure applications that offer a high degree of assurance that the security is upheld. To provide this environment the SMITE multi-processor capability computer is being developed. It is based on the Flex computer architecture [Foster et al. 82] which was the product of RSRE's research into software engineering.
Capabilities are used as the basic means of controlling access to objects. However this is augmented with first class procedures for information hiding and a typing mechanism for authentication purposes. These are combined with a modular compilation system and structured backing store to give a powerful set of primitive mechanisms with which secure systems can be built.
This paper describes the protection mechanisms provided by the microcoded hardware and system software, and introduces the security mechanisms built in software on top of these.
2. Pointers
The main memory of the SMITE computer is organised as a heap store. That is, it is divided into discrete blocks, of various sizes, each capable of containing a mixture of scalar data and pointers. A pointer is a primitive capability made up of the address of a block, and one access right. There are various types of block, and certain instructions only apply to particular block types.
SMITE has sixteen types of block. These are used for holding data, constants or instructions, and for representing procedures, abstract typed objects, types, hash tables, processes, semaphores, peripherals and storage resources.
Various instructions exist that create new blocks. The result of these is a pointer to the new block. Apart from copying existing pointers, this is the only way new pointers can be created. The computer maintains a distinction between scalar data and pointers, using hidden tag bits in the memory, thus preventing all software, however 'privileged', from treating scalar data as pointers or from otherwise forging pointers.
The SMITE instruction set offers no facility for deleting a block. Instead inaccessible storage is recovered by a garbage collector. This is performed by microprogram although it is invoked by software. The advantage of placing the garbage collector in the firmware, apart from improved performance, is that no software needs to be able to break the rules of capability protection. If it were performed by software, the garbage collector would have to be highly privileged and great care would be needed in controlling this privilege.
A block can therefore only be accessed if the program can obtain a pointer to it. This forms the basis of all protection mechanisms in the computer.
Unlike some other capability architectures, such as the Cambridge CAP [Needham & Walker 77] and Intel iAPX-432 [Tyner 81], pointers may be freely copied and used in any context without losing their meaning. Thus the computer has one uniform address space, even though the multi-processor hardware has separate physical memories, shared by all software. This plays a most important part in the provision of object oriented programming.
Pointers to some types of blocks have an access right associated with them. For example, pointers to blocks used for holding arbitrary data may have the right to write into the block. This right is initially granted, but an instruction exists to remove it from a particular pointer. A pointer without this right cannot be used to write data into the block to which it refers. Thus it is possible for 'read only' pointers to be used to give away limited access to data.
Such 'read only' access rights do not form a major part of SMITE's protection system, but often form a useful optimization when creating higher level protection mechanisms.
The pointer mechanism ensures that access to an object cannot be gained illicitly, however it does not provide a means of information hiding. The Read Only access right is of limited use in this respect because it does not prevent the user from reading further pointers from the block and writing into the blocks they refer to. Mechanisms to prevent such problems occurring, such as the 'sense keys' of the KeyKos system [Rajunas et al. 86], have not been included. This is because the problems can be solved at higher levels of abstraction, as described in section 4.
3. Procedures
Information hiding is achieved using blocks of type procedure. Procedure blocks are a closure [Landin64] of the code to be executed and the environment in which it must execute. Procedures are therefore First Class data objects, in that they can be freely passed around and remain valid in any context. Procedures are called by supplying suitable parameters and appropriate results are returned.
Procedures are blocks, two words long, which hold a pointer to the code and constants of the procedure and a pointer to a data block which holds the non-local data that comprises its environment. Neither of these words may be altered and the code is always read-only, though the contents of the environment block could be changed.
All procedures in SMITE are implemented as closures, there is no subroutine mechanism provided to speed up calls to procedures declared in the same module, as is found in all other capability computers. This is because the calling mechanism is quite fast, largely due to the way procedure calling is incorporated into the high level language oriented instruction set.
Procedure blocks may only be called, no instruction exists which will read or write their contents (except for a highly privileged part of the backing store software). The information contained within them is therefore hidden, unless it is made available before the procedure was created or by the code of the procedure.
Abstract data objects may be implemented using procedures to provide the required abstract functions while hiding the underlying data from the user. Some care must be taken to ensure the implementation does not inadvertently pass the user a capability to access the hidden data, especially if exceptions occur, but this is relatively straightforward to verify.
Note that unlike earlier capability computers which have many general purpose registers, such as the Plessey PP250 [England75], each SMITE computer has only one, though this is capable of containing an object of arbitrary size. This register is used to pass parameters to procedures and return results or exceptions. Therefore it is not possible to mistakenly return sensitive capabilities in 'unused' registers.
For example, suppose make_buffer is a procedure which creates a simple buffer. Its environment contains a pointer to the system supplied procedure for
making n-valued semaphores (the instruction set only supplies binary semaphores). The code of make_buffer calls this twice to create two new semaphores, one to control filling the buffer and the other emptying it. A suitable buffer is then created, along with appropriate index variables for the buffer.
In figure 3a, make_buffer is shown to be a procedure that takes no parameters and delivers a structure of two procedures. The first of these takes no parameter and delivers an integer, while the second takes an integer and delivers nothing.
```
make_buffer : Void -> ( Void -> Int x Int -> Void )
```
Fig3a: Procedures bind together the code and non-local environment, and hide them from the user.
Next two procedures are declared, these are for moving data into and out of the buffer. The declarations actually consist of some executable code. This creates the environment for the procedures and creates two closures by binding this with the code for put and get. The environment is simply a data block containing pointers to the buffer, buffer index variables and controlling semaphores.
The result of the call of make_buffer is the two procedures, put and get. These may be called to put data into the buffer, or take it out. However they do not give arbitrary access to the underlying data structure of the buffer, because they can only be called.
Note that with buffers implemented in this way, the operations put and get do not take a parameter specifying which buffer to operate on. Instead this is bound into them and for each buffer that has been created, different procedures for put and get are created. This is not inefficient because all versions, including those used by other processes, are able to share the code, since all software operates in one uniform address space.
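To make this concrete, here is a rough C sketch of the make_buffer pattern (this is not SMITE code: C has no first-class procedures, so the "closure" is a struct pairing an environment with code pointers, the hiding is only by convention, and the semaphores are omitted; all names are illustrative):

```c
#include <stdlib.h>

typedef struct {
    int *data;        /* the hidden buffer */
    int  head, tail;  /* hidden index variables */
    int  capacity;
} buffer_env;         /* plays the role of the environment block */

typedef struct {
    buffer_env *env;                  /* the bound environment */
    void (*put)(buffer_env *, int);   /* "code" pointers */
    int  (*get)(buffer_env *);
} buffer_ops;                         /* the pair of procedures delivered */

static void put_impl(buffer_env *e, int v) { e->data[e->tail++ % e->capacity] = v; }
static int  get_impl(buffer_env *e)        { return e->data[e->head++ % e->capacity]; }

buffer_ops make_buffer(int capacity)
{
    buffer_env *e = malloc(sizeof(buffer_env));
    e->data = malloc(sizeof(int) * capacity);
    e->head = e->tail = 0;
    e->capacity = capacity;
    buffer_ops ops = { e, put_impl, get_impl };
    return ops;  /* callers get put/get; by convention they never touch env */
}
```

In SMITE the hardware makes the environment genuinely inaccessible; the struct above can only model the shape of the mechanism, not its protection.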
4. Typed Objects
While information hiding is provided by procedures, users cannot, in general, determine what kind of procedure they possess. This is because procedures carry no distinguishing type information that can be interrogated by the caller. Without such a mechanism, a procedure received as a parameter cannot be passed sensitive data, in case it is not of the correct type and misuses that data. This severely restricts the functionality of the abstract types that can be implemented securely.
Fig 4a: The right to access data in a keyed block can be denied. However this right can be restored if the key, which is generally a pointer, is known.
Typed objects can be provided by using Keyed Blocks. Pointers to a keyed block have an access right associated with them. This is the right to access the data in the block. Without this right no access is possible, not even read, though there is an instruction which will grant the right and allow full read/write access. However, this Open operation must be provided with the key to the block, otherwise it will fail. The key is a single word value, which is stored in the first word of the block.
If the key were a scalar value, the user could quickly try all 2^32 values and gain access to the protected data. However, the key may be a pointer and these cannot be forged or guessed. The pointer will be kept hidden by the Type Manager software, which will use it to create new data objects of the type and to gain access to the underlying data of objects passed to it.
The keyed block mechanism allows type information to be attached to an object and hides the underlying data from its users. The type is represented by a pointer to some block. This pointer is used as the key for all keyed blocks which represent objects of that type. Procedures which create the typed objects and access their representation keep the key in their non-local environment and do not disclose it.
For example, consider an alternative form of the implementation for the simple buffer described in section 3, in which a buffer is made a typed object. Four procedures would be supplied, for creating new buffers, moving data to and from a buffer and for deciding whether an object is a buffer. Note that the latter procedure will be needed only if the programming language does not provide type abstraction or cannot be trusted to enforce it properly.
```
make_buffer : Void -> Buffer
put : Buffer * Int -> Void
get : Buffer -> Int
is_buffer : Buffer -> Bool
```

**Fig4b:** Keyed blocks hide the representation of a buffer and attach type information. Procedures hide the key to the type.
The procedures for manipulating buffers are created when the type manager is
brought into existence. At this time a key is made which will represent the type 'buffer'. This key is simply a capability, which is created by generating a new block of store, which is not distributed to any but the four buffer procedures. In this example the block is not used to hold data and so will have zero size.
The procedures put, get and is_buffer all have the same environment, which is simply a block containing the key (a pointer) that represents the type Buffer. Procedure make_buffer also requires a pointer to the procedure for making n-valued semaphores.
A call of make_buffer generates space for the buffer and creates the controlling semaphores. Then a keyed block is generated and pointers to these are stored in it. The buffer type key is stored in the first word and a pointer to the new keyed block, with the access right denied, is delivered as a result of the call. No access to the underlying data that implements the buffer is permitted using this pointer. The access right may be restored if the key can be presented, but this is not given to the users of the type. Therefore users cannot gain access to the underlying data structure of the buffer.
The procedure is_buffer takes a pointer as a parameter. It attempts to grant the access right on the pointer, supplying the buffer key as a further operand to the instruction. If this succeeds the procedure simply returns true. If the instruction fails, either because the pointer did not refer to a keyed block or the key was wrong, false is returned. Note that the pointer which has the access right granted is not accessible to the caller.
The procedures put and get act similarly, first granting the access right on the pointer to the buffer and then performing the appropriate operation on the buffer. Note that, unlike the example given in section three which used procedures which had a buffer bound into them, the buffer to be manipulated must explicitly be passed as a parameter.
If a subsystem expects to receive a pointer to a buffer as a parameter, it can check that the pointer really does refer to a buffer using is_buffer. It can then use the buffer, safe in the knowledge that it will obey the rules about buffers. If the buffer was implemented using procedures, as in section 3, it would not be possible to determine whether the parameter (a procedure) gives access to a buffer.
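A rough C sketch of the keyed-block idea (again not SMITE code: the key here is the address of a private object, which is unguessable in practice but not hardware-protected; names are illustrative):

```c
#include <stdlib.h>
#include <stdbool.h>

static char buffer_key;  /* the address of this object serves as the type key */

typedef struct {
    void *key;           /* first word of the keyed block: the key */
    int  *data;          /* the protected representation */
} keyed_block;

void *make_buffer(void)
{
    keyed_block *b = malloc(sizeof(keyed_block));
    b->key  = &buffer_key;            /* stamp the object with the Buffer key */
    b->data = malloc(sizeof(int) * 16);
    return b;                         /* handed out as an opaque pointer */
}

bool is_buffer(void *p)
{
    /* the analogue of the Open instruction: succeeds only with the right key */
    return p != NULL && ((keyed_block *)p)->key == &buffer_key;
}
```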
The protection provided by keyed blocks is equivalent to seals [Redell 74]. However the implementation in SMITE is quite straightforward, since the type information is held in the object rather than in the capabilities (pointers). Therefore the pointers are all of fixed length, occupying just one 32 bit word.
5. Module Loading
Programs on SMITE are constructed using a modular compilation system which can allow mixed language working. However, programs are not 'linked' to form executable images which are then copied into store and run. Instead a form of linking loader is used which is much better suited to the use of capabilities. It also has the advantage that out of date images cannot be run.
A module is essentially a procedure which creates some data structure and delivers a capability to access it. A module may require the values created by other modules, but each module in a program must only be loaded once. This is achieved by passing a Loader to a module. The Loader is a procedure which, given a module, returns the value it keeps. The Loader records the value kept by each module it loads. If it is requested to load a module again, it simply returns the value associated with it.
A module must ensure that the parameter it is given is a proper Loader, and not just some arbitrary procedure even if it appears to act in the same way. This is because a program consisting of several modules may perform checks in one module and perform sensitive operations in another, based on the result of the check. By providing a special spoof loader, a user could construct a program which incorporated a fake checking routine, which would allow illegitimate use of the sensitive operation.
This could be prevented by typing the Loader using a keyed block. However, since modules are invariably brought in from backing store, so must the procedure for checking the type. This would seriously slow up the system, so special provision is made for the type Loader in the instruction set.
The ability to create a Loader is carefully controlled, whilst changing a Loader into a procedure of type Module X → X is an instruction that is universally available. In practice only the trusted type manager for loaders may produce new loaders. When a module needs to access the data structure kept by another, it first checks that the procedure it was handed as a parameter is a true Loader. The instruction to do this fails if it is not, but otherwise delivers the underlying procedure. This is called, passing the module to be 'loaded' as a parameter. The result is a read-only pointer to the object kept by the module.
A more detailed description of the SMITE modular compilation system, including details of language specific modules, is given in [Harrold88].
6. Modes
A frequently used form of abstract data type is where a copy of part of the underlying object is freely accessible. The implementation of Mode objects follows this pattern, in that the mainstore representation of a mode may be read at any time, while the alias on backing store must be kept hidden to prevent forgery.
A mode, which is the Algol68 term for type, is treated as an object in the Ten15 algebraic abstract machine [Core&Foster86], which is a generalisation of the ideas developed during the Flex project. Ten15 is effectively a strongly typed language system whose types include the universal union of all types. Mode objects are provided to implement this in such a way that the users cannot fabricate their own illegal modes, yet modes may be examined.
Mode objects could be implemented using a procedure which delivers the accessible part of the object while keeping the rest hidden, however they are made a special case because they are often used. A special kind of block, similar to the keyed block is provided. Here the access right gives full read/write access, but if a pointer is denied the right it can still be used to read all but the first two words of the block. The first word is the key for the block and the second word is the protected data. This may be a pointer to further protected data.
7. Revocation
Most capability systems provide a low level mechanism for revoking capabilities once they have been distributed. The use of such mechanisms can lead to complications, especially in parameter validation. This is because the mechanisms provided revoke access to data blocks, which are at the lowest level of abstraction. What is generally required is to revoke access to highly abstract objects, such as files and messages. By using a low level revocation mechanism, the high level object becomes inconsistent, as parts of it are removed, which causes obscure exceptions to be raised.
In SMITE revocation can be provided where it is needed, by building appropriate checks in the procedures which access the underlying data of an object. This is in contrast to the unwieldy low level mechanisms that have been proposed by others [Redell & Fabry 74], [Gligor 79], [Corsini et al. 84].
An example of revocation is found in classified files. Access to the underlying file is only permitted if the user has the necessary clearances. Whenever users attempt to gain a copy of the classified material in a file, a check is made to ensure that they have sufficient clearances. Revocation can be achieved by reducing a user's clearance or altering the file's classification or distribution list.
8. Process Context
Processes running in a system generally need access to generic resources, such as "my terminal" and "my current directory", which are provided by the operating system. In SMITE capabilities are used to control access to such resources, which are constructed as abstract data types. However, it is still necessary for a program which is loaded from backing store to gain access to its version of these capabilities.
The capabilities that refer to these generic resources are stored in the context of each process. The context is an association between unique identifiers and values. Each unique identifier represents one of the generic resources. An instruction allows a program to find out the current value attached to a particular identifier, and a special form of procedure call allows the context to be changed and restored in a stack like manner.
The unique identifiers will normally be pointers, so that they cannot be forged, which are kept hidden in much the same way as type keys. The capability for a generic resource will be obtained by calling a procedure that hides the key. This may be formed into a module so that it can be readily incorporated into programs.
When a process is launched, its initial context is set to that of the process which launches it. The lifetimes of processes are independent, therefore care must be taken when designing generic resources to ensure that they can safely be accessed after the process leaves the scope that designated them.
For example a procedure call may establish a new window as the "current window". When it exits the previous window will be restored to "current". Suppose a process was launched during the call and this remains active after the window is restored. This process will have inherited the new window and can access it even after it is supposed to be destroyed. Such disastrous results can be avoided easily by incorporating a revocation switch in the window description.
9. Write Once Structured Backing Store
SMITE provides a capability based structured backing store, organised in a write once fashion. Thus the backing store is a heap, able to store primitive objects (blocks of various types) much like main store, except that they cannot be overwritten. Procedures and an equivalent to keyed blocks may be stored, providing abstract type extension facilities.
The backing store is implemented entirely in software, using the abstract type mechanisms for protection. Access to objects in the backing store is governed by backing store capabilities, which are data objects constructed by the software. These are implemented as keyed blocks which contain the disc address and type information of the object on disc.
When a program stores data in the backing store it does not provide a destination address. Instead the backing store software places the data at some convenient free place and returns a backing store capability which can be used to access the data in the future. A program can only bring an object into main store if it possesses a capability for it. The result of doing this is a main store capability to a copy of the object in main store. This capability has read/write access revoked, so the copy cannot be altered. This facilitates sharing of data brought in from disc.
The backing store software does not provide an operation for overwriting objects. However, special objects called references, which can be atomically updated, are provided to support alterable objects such as directory structures and updatable program modules.
The advantage of such a backing store is that maintaining consistency is relatively simple. Directory structures can be built which cannot be damaged, even in the event of power failure. Also, since a garbage collector is used to recover inaccessible storage, complex structures can be implemented and maintained without "dangling reference" problems arising.
The write once organisation has a particular advantage for secure systems. Objects stored in the backing store are guaranteed to be unalterable, as long as they do not contain a reference. It is possible, using the other security mechanisms, to ensure that certain programs cannot use references. Therefore backing store objects produced by such a program cannot be used to communicate with that program.
In other words, untrusted software can share access to objects in backing store, and it is impossible for one to modify the object and thus send information to the other. Therefore the system offers separation between untrusted programs while having unrestricted sharing of backing store objects. Conventional systems cannot offer such flexibility because they do not provide a non overwriting file store.
Details of the implementation, including the on-the-fly garbage collection are given in [Wiseman88].
10. Security Mechanisms
The provision of multi-level computer security presents three separate problems: controlling access to classified information, preventing users being fooled into underclassifying information and preventing software altering access controls against the user's wishes. SMITE offers three solutions to these three problems: reference monitors, high water marks and the trusted path. A more detailed explanation is given in [WiseMay86a&b].
10.1 Reference Monitors
Reference Monitors are the most visible security mechanism in SMITE as they are responsible for controlling access to classified information. A reference monitor is placed between each function that accesses classified information and the users of that function. Its task is to check that the user is cleared to access the information, record the details of the access for later auditing and finally to perform the access.
**Fig 10.1:** A reference monitor encapsulates a classified object. It ensures that the users have the necessary clearances to access the information it contains and maintains auditing information.
The reference monitors are implemented as an abstract type whose underlying type is the original object. The interface provided is the same as that of the underlying object. That is the security checks are hidden from the user. This can be implemented using either procedures or keyed blocks as appropriate.
10.2 High Water Marks
While reference monitors control access to classified information, they do not prevent erroneous software from accessing data which the user did not wish to be accessed. For example consider a user who is cleared to access Secret information but is creating a document which is to be Unclassified. "Trojan Horse" software may access some Secret information and incorporate this into the document without the user realising. This would cause the user to distribute the Secret information in a document marked Unclassified. In effect the "Trojan Horse" has fooled the user into underclassifying information.
To prevent this from happening a system of High Water Marks is maintained for objects created by untrusted software. That is, the system maintains the classification of the most classified information that the object could conceivably contain. If the user gives these objects a permanent
classification, a check is made to ensure that this dominates the object's high water mark.
Fig 10.2: Objects accessed by untrusted software all have the same high water mark.
Maintenance of the high water marks is the responsibility of the reference monitor functions. Whenever untrusted software accesses classified information, the reference monitor ensures that the caller's high water mark is increased accordingly.
10.3 Trusted Path
Some functions that the users of a system need to perform alter the access control information. That is, they affect which users can access what information. Functions in this category include giving new information its classification, regrading existing information, altering access control lists or distribution lists and changing users' clearances.
While a reference monitor checks whether the user is allowed to invoke such a function, it cannot check whether the software invoking it is doing so because the user requested it, or because it contains a "Trojan Horse". To exclude the latter, these functions must only allow themselves to be invoked by the Trusted Path. This is interface software which is guaranteed not to contain a "Trojan Horse". It will at least comprise all software that controls the screen, keyboard and mouse, along with menu and window software.
The use of the type abstraction facilities offered by the SMITE architecture allows the trusted path to be implemented as a set of small independent modules. This will allow code level proofs of correctness to be tackled, giving the necessary high degree of assurance.
11. Summary
The microcoded hardware provides capability protection in the main memory of a SMITE computer. Possession of a capability for an object entitles the holder to access it, while it is impossible to access it without a capability. The objects in memory are typed, so each type of object is accessed in different ways. In particular, procedures and keyed blocks are offered as mechanisms for information hiding and for creating user defined abstract types.
These primitive protection mechanisms allow software to be written to provide flexible security mechanisms. Moreover, the fine granularity of protection allows the different concerns of security to be split up, allowing correctness to be established more easily, yet without loss of performance.
Acknowledgements
Thanks go to Michael Foster, Ian Currie and numerous others at RSRE who were involved in the Flex project, for the basic research which is being used as the foundation for SMITE. Also, special thanks are due to Phil Terry of TSL Communications, who has contributed a great deal towards understanding security issues within SMITE, and Peter Bottomley for commenting on earlier drafts of this paper.
References
P.W. Core & J.M. Foster
Ten15: An Overview
RSRE Memo 3977, September 1986
P. Corsini, G. Frosini & L. Lopriore
Distributing and Revoking Access Authorizations on Abstract Objects: A Capability Approach
Software - Practice and Experience, Vol 14, Num 10, pp. 931-943
October 1984
D.M. England
Capability Concept Mechanisms and Structure in System 250
Vol 9, Sept 75, pp. 47-62
J.M. Foster, I.F. Currie & P.W. Edwards
Flex: A Working Computer Based on Procedure Values
RSRE Memo 3500
Also in: Proc. Int. Workshop on High Level Language Computer Architecture
Fort Lauderdale, Florida, December 1982
V.D. Gligor
Review and Revocation of Access Privileges Distributed through Capabilities
C.L. Harrold
The SMITE Modular Compilation System
to appear, 1988
P.J. Landin
The Mechanical Evaluation of Expressions
Computer Journal, Vol 6, Num 4, pp. 308-329, January 1964
R.M. Needham & R.D.H. Walker
The Cambridge CAP Computer and its Protection System
Operating Systems Review, Vol 11, Num 5, Nov 77, pp. 1-10
S.A. Rajunas, N. Hardy, A.C. Bomberger, W.S. Frantz & C.R. Landau
Security in KeyKOS
Proc. IEEE Symposium on Security and Privacy
Oakland, California, April 1986
D.D. Redell
Naming and Protection in Extendible Operating Systems
MIT Project MAC Technical Report MAC-TR-140, November 1974
D.D. Redell & R.S. Fabry
Selective Revocation of Capabilities
Proc. IRIA Workshop, pp. 197-209, 1974
P. Tyner
Intel Corp., January 1981
S.R. Wiseman
A Secure Capability Computer System
Proc. IEEE Symposium on Security and Privacy
Oakland, California, April 1986
S.R. Wiseman
A Capability Approach to Multi-Level Security
Proc. IFIP/Sec '86, 4th International Conference on Computer Security
Monte Carlo, Monaco, December 1986
S.R. Wiseman
The SMITE Object Oriented Backing Store
to appear, 1988
1. Introduction
There are numerous applications which need to continuously monitor a body of data for the occurrence of certain activities. However, real world activities tend to be high level and can often be executed in many different ways. Models of activities that account for such uncertainty have been devised in various communities and are based on context free grammars [8, 9], graphical models [4, 6] and stochastic automata [1]. However, we are not aware of any work that addresses the problem of indexing large bodies of observations to make activity retrieval more efficient. For instance, web sites typically maintain very large web server logs to determine what activity a user is trying to perform. Detecting this early may allow the web site to prefetch data for her (predicted) goals. As many existing models of activities already exist in the literature, we start with a known stochastic automata-based activity model from the computer vision field [1]. Our choice was based on the fact that this model is clearly useful for some applications (in vision) and is relatively simple. Moreover, we show that its graph structure provides a good opportunity to track multiple activities simultaneously.
The contributions of this paper may be summed up as follows. We develop: (i) a structure called a multi-activity graph and a multi-activity graph index that allow multiple activity graphs to be merged together and indexed; (ii) algorithms to build such indexes; (iii) algorithms to solve two types of queries defined in [1]. The Evidence problem tries to find all sequences of observations that validate the occurrence of an activity with a minimum probability threshold. The Identification problem tries to identify the most probable activity occurring in an observation sequence. The first problem is exponential - hence we introduce two different restrictions that allow it to be computed effectively. Finally, we evaluate MAGIC experimentally on both a synthetic and a real, depersonalized travel data set, and show that the MAGIC index can be built quickly, occupies reasonable amounts of memory, and efficiently solves the two problems above.
2. Stochastic Activity Modeling: Overview
We adapt the stochastic automaton activity model for video described in [1] to the case of observation data.
Definition 1 (Stochastic activity) A stochastic activity is a labeled graph \((V, E, \delta)\) where: \(V\) is a finite set of action symbols; \(E \subseteq V \times V\); \(\exists v \in V\) s.t. \(\nexists v' \in V\) with \((v', v) \in E\), i.e., there exists at least one start node in \(V\); \(\exists v \in V\) s.t. \(\nexists v' \in V\) with \((v, v') \in E\), i.e., there exists at least one end node in \(V\); \(\delta : E \rightarrow [0, 1]\) is a function that associates a probability distribution with the outgoing edges of each node, i.e., \(\forall v \in V, \sum_{(v, v') \in E} \delta(v, v') = 1\).
We now present a small example of a stochastic activity.
Example 1 Figure 1 shows the stochastic activity associated with ordering products from an online store. A user first accesses the product catalog page (catalog) and either inspects the details of desired items (itemDetails) or continues with a previously saved cart (cart). The checkout process requires the user to select a shipping method (shippingMethod), choose a payment method (paymentMethod), review the final details of the order (review) and finally confirm it (confirm). At each stage during the payment process, the user can cancel the sequence and return to the cart and from there on to one of the item details. The probabilities labeling the edges have the following intuitive meaning: once the catalog action has been taken, there is a 0.9 probability that the user will check details of some item (itemDetails) and a 0.1 probability that she will continue with a previously saved shopping cart (cart).
Example 2 The activity in Figure 1 occurs in the web server log of Table 1, as the latter contains the sequence of observations with identifiers 1, 4, 7, 10, 13, and 14, corresponding to the activity instance \{catalog, cart, shippingMethod, paymentMethod, review, confirm\}. The span of this activity occurrence is [1, 10].
Table 1. Example web log
<table>
<thead>
<tr>
<th>id</th>
<th>ts</th>
<th>action</th>
<th>id</th>
<th>ts</th>
<th>action</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>1</td>
<td>catalog</td>
<td>2</td>
<td>2</td>
<td>itemDetails</td>
</tr>
<tr>
<td>2</td>
<td>2</td>
<td>itemDetails</td>
<td>3</td>
<td>2</td>
<td>cart</td>
</tr>
<tr>
<td>4</td>
<td>3</td>
<td>cart</td>
<td>4</td>
<td>4</td>
<td>itemDetails</td>
</tr>
<tr>
<td>5</td>
<td>5</td>
<td>itemDetails</td>
<td>6</td>
<td>5</td>
<td>shippingMethod</td>
</tr>
<tr>
<td>7</td>
<td>5</td>
<td>shippingMethod</td>
<td>8</td>
<td>6</td>
<td>cart</td>
</tr>
<tr>
<td>9</td>
<td>7</td>
<td>shippingMethod</td>
<td>10</td>
<td>11</td>
<td>confirm</td>
</tr>
</tbody>
</table>
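To make Definition 1 concrete, the following sketch encodes a fragment of the activity of Example 1 as an adjacency map and checks the conditions of the definition. Only the 0.9/0.1 probabilities on catalog's outgoing edges are taken from the example; the remaining edge probabilities (and the omission of the cancel edges) are simplifying assumptions of ours.

```python
# A stochastic activity as an adjacency map: node -> {successor: probability}.
# Probabilities other than catalog's 0.9/0.1 are illustrative assumptions.
activity = {
    "catalog":        {"itemDetails": 0.9, "cart": 0.1},
    "itemDetails":    {"cart": 1.0},
    "cart":           {"shippingMethod": 1.0},
    "shippingMethod": {"paymentMethod": 1.0},
    "paymentMethod":  {"review": 1.0},
    "review":         {"confirm": 1.0},
    "confirm":        {},                      # end node: no outgoing edges
}

def check_definition_1(act):
    nodes = set(act)
    has_incoming = {v for out in act.values() for v in out}
    assert any(v not in has_incoming for v in nodes), "needs a start node"
    assert any(not act[v] for v in nodes), "needs an end node"
    for v, out in act.items():
        if out:                                # delta must be a distribution
            assert abs(sum(out.values()) - 1.0) < 1e-9, f"{v}: probabilities must sum to 1"

check_definition_1(activity)
```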
Proposition 1 Given an observation table \(O\) and a stochastic activity \(A\), the problem of finding all occurrences of \(A\) in \(O\) takes exponential time, w.r.t the size of \(O\).
The reason for this is that there may be an exponential number of identifiable occurrences of \(A\) in \(O\), due to interleaving of activities. This result tells us two things. First, it is not feasible in practice to try to find all occurrences. Instead, we will impose restrictions on what constitutes a valid occurrence in order to greatly reduce the number of possible occurrences. We will propose two constraints applicable in most real-world scenarios. Second, due to the size of the search space, it is important to have a data structure that enables very fast searches for activity occurrences. We will propose the MAGIC index structure that allows us to answer the Evidence and Identification problems efficiently.
Consider the activity in Figure 1 and the following set of observations: \{catalog, cart, shippingMethod, paymentMethod, review, confirm, confirm\}. This leads to two activity occurrences, one for each of the confirm actions. Our first restriction requires that if two occurrences \(O_1\) and \(O_2\) are found in the observation sequence and the span of \(O_2\) is contained within the span of \(O_1\), we will discard \(O_1\) from the set of results. This is called the minimal span restriction (MS for short).
Definition 4 (MS restriction) Let \(O_1 = \{o_1, \ldots, o_k\} \subseteq O\) and \(O_2 = \{o'_1, \ldots, o'_l\} \subseteq O\) be two occurrences of the same activity. We say that the span of \(O_1\) is less than or equal to the span of \(O_2\) (written \(\text{span}(O_1) \leq \text{span}(O_2)\)) iff \(o_1.ts \geq o'_1.ts\) and \(o_k.ts \leq o'_l.ts\). Under the MS restriction, we only consider occurrences that are minimal w.r.t. span.
The MS restriction, by itself, may still allow exponentially many occurrences of an activity. This is because multiple occurrences may have the same span.
Our earliest action (EA for short) restriction requires that when looking for the next action symbol in an activity occurrence, we always choose the first possible successor in the sequence. For instance, consider the activity definition $v_1 \xrightarrow{1} v_2 \xrightarrow{1} v_3$ and the observation sequence $\{v_1^1, v_2^2, v_1^3, v_2^4, v_3^5\}$, where $v_i^j$ denotes the fact that action $v_i$ was observed at time $j$. There are two occurrences starting with $v_1^1$, namely $\{v_1^1, v_2^2, v_3^5\}$ and $\{v_1^1, v_2^4, v_3^5\}$. Under the EA restriction we only consider $\{v_1^1, v_2^2, v_3^5\}$, since $v_2^2$ is the first possible successor to $v_1^1$. This restriction makes the search space linear in the size of the observation sequence.
**Definition 5 (EA restriction)** An activity occurrence $\{o_1, \ldots, o_n\} \subseteq O$ is said to have the earliest action property if $\forall i \in [2, n]$, $\nexists w_i \in O$ s.t. $w_i.ts < o_i.ts$ and $\{o_1, \ldots, o_{i-1}, w_i, o_{i+1}, \ldots, o_n\}$ is an occurrence of the same activity.
We now formally state the types of queries we are interested in. The Evidence problem is stated as: given an observation table $O$, a set of activities $A$, a time interval $(t_s, t_e)$, and a probability threshold $p_t$, compute all the (possibly restricted) occurrences $X$ in $O$ of activities in $A$ such that $X$ occurs within the interval $(t_s, t_e)$ and $\text{prob}(X) \geq p_t$.
The Identification problem is stated as: given an observation table $O$, a set of activities $A$, and a time interval $(t_s, t_e)$, find the activity which occurs in $O$ in the interval $(t_s, t_e)$ with maximal probability among the activities in $A$. A solution to the identification problem could be biased by the fact that shorter activity occurrences will generally tend to have higher probabilities. To remedy this, we normalize occurrence probabilities as defined in [1], by introducing the relative probability $p^*$ of an occurrence $X$ of activity $A$ as $p^*(X) = \frac{\text{prob}(X) - p_{\min}}{p_{\max} - p_{\min}}$, where $p_{\min}, p_{\max}$ are the lowest and highest possible probabilities of any occurrence of $A$.
### 4. Multi-Activity Graph Index Creation
In order to monitor observations for occurrences of multiple activities, we first merge all activity definitions from $A = \{A_1, \ldots, A_k\}$ into a single graph. We use $\text{id}(A)$ to denote a unique identifier for activity $A$ and $I_A$ to denote the set $\{\text{id}(A_1), \ldots, \text{id}(A_k)\}$.
**Definition 6 (Multi-activity graph)** Let $A = \{A_1, \ldots, A_k\}$ be a set of stochastic activities, where $A_i = (V_i, E_i, \delta_i)$. A Multi-Activity Graph is a triple $G = (V_G, I_A, \delta_G)$ where: (i) $V_G = \cup_{i=1}^k V_i$ is a set of action symbols and (ii) $\delta_G : V_G \times V_G \times I_A \rightarrow [0, 1]$ is a function that associates a triple $(v, v', \text{id}(A_i))$ with $\delta_i((v, v'))$, if $(v, v') \in E_i$ and $0$ otherwise.
A multi-activity graph merges a number of stochastic activities. It can be graphically represented by labeling nodes with action symbols and edges with the id’s of activities containing them, along with the corresponding probabilities (we do not explicitly represent edges with probability $0$). Note that for a set $A$ of activities, the multi-activity graph can be computed in time polynomial in the size of $A$. Figure 2 shows two stochastic activities, $A_1$ and $A_2$, and the corresponding multi-activity graph.
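A minimal sketch of the merge of Definition 6, with hypothetical activities shaped like the $A_1$ and $A_2$ of Figure 2; absent keys in `delta_G` represent probability 0.

```python
# Merging activities into a multi-activity graph (Definition 6).
# delta_G maps (v, v', activity_id) -> probability; absent keys mean 0.
def merge(activities):
    """activities: dict mapping activity id -> adjacency map of probabilities."""
    V_G, delta_G = set(), {}
    for aid, act in activities.items():
        for v, out in act.items():
            V_G.add(v)
            for v2, p in out.items():
                V_G.add(v2)
                delta_G[(v, v2, aid)] = p
    return V_G, delta_G

A1 = {"a": {"b": 1.0}, "b": {"d": 1.0}, "d": {}}
A2 = {"a": {"c": 1.0}, "c": {"d": 1.0}, "d": {}}
V_G, delta_G = merge({"A1": A1, "A2": A2})
# Edges are labeled with the ids of the activities that contain them,
# e.g. delta_G[("a", "b", "A1")] == 1.0, while ("a", "b", "A2") is absent.
```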

If $A = (V, E, \delta)$ is an activity and $v \in V$, we use $A.p_{\max}(v)$ to denote the maximum product of probabilities on any path in $E$ between $v$ and an end node. The multi-activity graph index data structure allows us to efficiently monitor activity occurrences as new observations occur.
**Definition 7 (Multi-activity graph index)** Let $A = \{A_1, \ldots, A_k\}$ be a set of stochastic activities, where $A_i = (V_i, E_i, \delta_i)$, and let $G = (V_G, I_A, \delta_G)$ be the multi-activity graph built over $A$. A Multi-Activity Graph Index is a 6-tuple $I_G = (G, \text{start}_G, \text{end}_G, \text{max}_G, \text{table}_G, \text{completed}_G)$, where:
- $\text{start}_G : V_G \rightarrow 2^{I_A}$ is a function that associates with each node $v \in V_G$ the set of activity id's for which $v$ is a start node;
- $\text{end}_G : V_G \rightarrow 2^{I_A}$ is a function that associates with each node $v \in V_G$ the set of activity id's for which $v$ is an end node;
- $\text{max}_G : V_G \times I_A \rightarrow [0, 1]$ is a function that associates a pair $(v, \text{id}(A_i))$ with $A_i.p_{\max}(v)$ if $v \in V_i$, and with 0 otherwise;
- For each $v \in V_G$, $\text{table}_G(v)$ is a set of tuples of the form $(\text{current}, \text{activityID}, t_0, \text{probability}, \text{previous}, \text{next})$, where $\text{current}$ is a reference (pointer) to an observation, $\text{activityID} \in I_A$, $t_0, \text{probability} \in \mathbb{R}^+$, $\text{previous}$ and $\text{next}$ are references to tuples in $\text{table}_G$;
- $\text{completed}_G : I_A \rightarrow 2^P$ where $P$ is the set of tuples in $\text{table}_G$, is a function that associates an activity identifier $\text{id}(A)$ with a set of references to tuples in $\text{table}_G$ corresponding to a completed instance of activity $A$.
Note that $G$, $\text{start}_G$, $\text{end}_G$, $\text{max}_G$ can be computed a priori. All the tables that are part of the index are initially empty; as new observations are added, the index tables are updated accordingly. The MAGIC index tracks information about which nodes are start and/or end nodes of the original activities. For each node, it also stores (i) the maximum probability of reaching an end node for each activity in $\mathcal{A}$, and (ii) a table that tracks partially-completed activity instances, where each tuple points to an observation whose action is part of the activity instance, as well as to the previous and successor tuples. In addition, each tuple stores the probability of the activity occurrence so far, and the time at which the partial occurrence began.
4.1. MAGIC Insertion Algorithm
This section describes an algorithm to update the MAGIC index (when new observations occur) under the MS and EA restrictions. We briefly discuss the changes necessary to implement the unrestricted case. Figure 3(a) shows the algorithm under the MS, EA restrictions.
Example 3 Figure 4 shows the evolution of the index for the multi-activity graph of Figure 2, as new observations are collected. The first observation denotes action $a$. Since both activities $A_1$ and $A_2$ have $a$ as their start node, two tuples are added to the index table associated with action $a$, i.e. $\text{table}_G(a)$. The second observation again denotes action $a$; to apply the minimal span restriction, the tuples in $\text{table}_G(a)$ are updated to point to the new observation. The third observation has action $b$, so the insertion algorithm looks at $\text{table}_G(a)$, whose corresponding action is the only predecessor of $b$ in the multi-activity graph, to check whether the new observation can be linked to a partially-completed occurrence. The observation can in fact be linked to the first tuple in $\text{table}_G(a)$ (for activity $A_1$), thus a new tuple is added to $\text{table}_G(b)$ with probability equal to the product of the probability of the tuple in $\text{table}_G(a)$ and the probability of the edge from $a$ to $b$. The fourth observation has action $b$; to apply the earliest action restriction we do not link it to the first tuple in $\text{table}_G(a)$, which already has a successor. The fifth observation has action $c$, and can be linked to the second tuple in $\text{table}_G(a)$ (for activity $A_2$), so a new tuple is added to $\text{table}_G(c)$. The sixth observation has action $d$, so the insertion algorithm looks at $\text{table}_G(b)$, $\text{table}_G(c)$, and $\text{table}_G(e)$, and adds two new tuples to $\text{table}_G(d)$, the first linked to the one in $\text{table}_G(b)$ (for activity $A_1$) and the second to the one in $\text{table}_G(c)$ (for activity $A_2$). Moreover, as $d$ is an end node for both activities $A_1$ and $A_2$, pointers to the newly added tuples are added to $\text{completed}_G(\text{id}(A_1))$ and $\text{completed}_G(\text{id}(A_2))$, respectively.
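The following is a simplified sketch of the insertion step under the MS and EA restrictions, specialized to a single linear activity; the field names and the re-pointing of start-node tuples are our rendering of the behaviour described in Example 3, not the actual MAGIC implementation (which is written in C).

```python
# Simplified sketch of the insert step under the MS and EA restrictions,
# specialized to the single linear activity A1: a -> b -> d with all edge
# probabilities 1.0; field names are ours.
from dataclasses import dataclass
from typing import Optional

@dataclass
class Entry:                               # one tuple of table_G
    obs: int                               # reference to the observation (its id)
    t0: int                                # start time of the partial occurrence
    prob: float
    prev: Optional["Entry"] = None
    next: Optional["Entry"] = None

table = {"a": [], "b": [], "d": []}        # table_G
completed = []                             # completed_G for A1
edges = {("a", "b"): 1.0, ("b", "d"): 1.0} # delta for A1
preds = {"b": ["a"], "d": ["b"]}

def insert(obs_id: int, ts: int, action: str):
    if action == "a":                      # start node of A1
        fresh = [t for t in table["a"] if t.next is None]
        if fresh:                          # MS: re-point starts without successors
            for t in fresh:
                t.obs, t.t0 = obs_id, ts
        else:
            table["a"].append(Entry(obs_id, ts, 1.0))
        return
    for p in preds.get(action, []):
        for t in table[p]:
            if t.next is None:             # EA: link only the first successor
                new = Entry(obs_id, t.t0, t.prob * edges[(p, action)], prev=t)
                t.next = new
                table[action].append(new)
                if action == "d":          # end node: record completed occurrence
                    completed.append(new)
                break

for i, (ts, act) in enumerate([(1, "a"), (2, "a"), (3, "b"), (4, "d")], start=1):
    insert(i, ts, act)
print(len(completed))                      # 1 completed occurrence of A1
```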
**Proposition 2** Algorithm `insert` runs in time $O(|\mathcal{A}| \cdot \max_{v \in V_G} |\delta(v)| \cdot |O|)$, where $O$ is the set of observations indexed so far.
For space reasons, we omit a detailed description of how the algorithm in Figure 3(a) can be changed to compute unrestricted occurrences\(^1\).
The MAGIC-evidence algorithm (Figure 3(b)) finds all minimal sets of observations that validate the occurrence of activities in $\mathcal{A}$ with a probability exceeding a given threshold. For reasons of space, we omit a formal description of the MAGIC-id algorithm that solves the Identification problem.
\(^1\)Briefly, when looking at observations corresponding to start nodes, we no longer need to update start nodes, so we remove lines 1-5 and add a tuple to $\text{table}_G(v)$ for every activity id in $\text{start}_G(v)$, where $v$ is the new observation's action. When looking at observations corresponding to inner nodes in the activity graph, we can have multiple successors for a single MAGIC tuple, hence we remove the condition $t.\text{next} = \bot$. Moreover, when iterating over $\text{table}_G(v)$, we relax the condition of having maximal $t_0$.
---
**Figure 3.** (a) Multi-activity graph index maintenance algorithm (MS, EA); (b) Algorithm MAGIC-evidence
MAGIC-id simply identifies those tuples in $\text{completed}_G$ (and hence the set of associated activity IDs) that have maximum probability and are within the required time span.
**Proposition 3** Algorithms MAGIC-evidence and MAGIC-id terminate and return the correct answers.
We omit a proof of the above proposition for space reasons.
### 5. Experimental Results
Our MAGIC implementation consists of approximately 5400 lines of C code running on a 3.2 GHz Pentium 4 with 2 GB of RAM under SuSE 9.3. All experiments were averaged over 10 independent runs. We use paired t-tests and z-score tests (where applicable) to determine statistical significance.
**Data sets.** We evaluated MAGIC on (i) a synthetic dataset of 5 million observations generated automatically, using two separate Java programs; (ii) a third party depersonalized dataset consisting of travel information such as hotel reservations, passport and flight information, containing approximately 5.5 million observations. We enhanced the second dataset by adding bogus credit card transactions for purchases, transportation, etc. The final dataset consists of 7.5 million observations and requires 1.05 GB of memory/disk space. We manually developed 30 activity definitions from activities that occur in the dataset.
**MAGIC Index Creation Time and Memory Requirements.** Figures 5(a) and (b) show the time to build the index and the memory used by the index for the synthetic dataset. Note that the x-axis is on a log scale. As expected, it took approximately 10 minutes and 588 MB to build the unrestricted MAGIC structure for 50,000 observations - we stopped running experiments on the unrestricted version of MAGIC at this point. The restrictions defined in the paper yield significantly better results: we are able to process the entire dataset in 86 seconds under the MS restriction and in less than 1.5 seconds under the EA restriction. For reasons of space, we do not show similar figures for the travel data set - however, with 7.5M observations, the MAGIC structure is built in under 3 minutes with 150 MB of memory under the MS restriction.
**Query Time.** Finally, we look at the average query time on the synthetic dataset. We generated 75 evidence and 75 identification queries. Each query was run on 10 intervals generated uniformly at random encompassing between 1% and 75% of the data. Each evidence query was also run with 5 distinct thresholds selected uniformly at random. The running times reported in Figure 5(c) are an average over the entire set of queries. Query answering times are always below 2 seconds for any restrictions, showing that the MAGIC structure handles activity occurrences efficiently.
### 6. Related Work and Conclusions
There is a large body of work in the AI community on plan and activity recognition. Duong et al. [4] introduce the Switching Hidden Semi-Markov Model (S-HSMM), a two-layered extension of the Hidden Semi-Markov Model (HSMM). The bottom layer represents atomic activities and their duration using HSMMs, while the top layer represents a sequence of high-level activities, defined in terms of atomic activities. [7] uses non-stationary HSMMs to model the dependency of transition probabilities on the duration an agent has spent in a given state. Dynamic Bayesian networks can also be used for tracking and recognizing multi-agent activities [5]. The CFG-based representation of human activities and interactions [8, 9] makes it possible to formally define complex activities based on simple actions. The problem of recognizing multiple interleaved activities has been studied in [2], where the authors propose a symbolic plan recognition approach, relying on a hierarchical plan-based representation and a set of algorithms that can answer a variety of recognition queries.
However, to the best of our knowledge, the problem of indexing large amounts of observations for the purpose of quickly retrieving completed instances of activities has not been addressed before. Indexing has primarily been used as a method to retrieve past activities or plans to recognize activities in a new set of observations (or a new video). For instance, [3] proposes recognition of human activities in videos on top of multidimensional index structures that store body pose vectors in different frames.
Our work brings two important novel contributions. First, we define the multi-activity graph, which captures many activities in a single labeled stochastic automaton. Second, we define the MAGIC index structure, which can index millions of observations from interleaved activities and retrieve completed instances of these in just a few seconds. We also introduce two reasonable restrictions that reduce the overall complexity of the activity recognition problem to a manageable level. Finally, we experimentally evaluate our algorithms on both a synthetic and a third-party dataset and show that MAGIC is very fast and has reasonable memory consumption.
Acknowledgments. Researchers funded in part by grant N6133906C0149, ARO grant DAAD190310202, AFOSR grants FA95500610405 and FA95500510298, and NSF grant 0540216.
Figure 5. (a) MAGIC build time variation with the number of observations in the synthetic dataset; (b) MAGIC memory requirements for the synthetic dataset; (c) MAGIC query time.
TUPA at MRP 2019: A Multi-Task Baseline System
Daniel Hershcovich* and Ofir Arviv**
*University of Copenhagen, Department of Computer Science
**Hebrew University of Jerusalem, School of Computer Science and Engineering
hershcovich@di.ku.dk, ofir.arviv@mail.huji.ac.il
Abstract
This paper describes the TUPA system submission to the shared task on Cross-Framework Meaning Representation Parsing (MRP) at the 2019 Conference for Computational Language Learning (CoNLL). TUPA provides a baseline point of comparison and is not considered in the official ranking of participating systems. While originally developed for UCCA only, TUPA has been generalized to support all MRP frameworks included in the task, and trained using multi-task learning to parse them all with a shared model. It is a transition-based parser with a BiLSTM encoder, augmented with BERT contextualized embeddings.
1 Introduction
TUPA (Transition-based UCCA/Universal Parser; Hershcovich et al., 2017) is a general transition-based parser for directed acyclic graphs (DAGs), originally designed for parsing text to graphs in the UCCA framework (Universal Conceptual Cognitive Annotation; Abend and Rappoport, 2013). It was used as the baseline system in SemEval 2019 Task 1: Cross-lingual Semantic Parsing with UCCA (Hershcovich et al., 2019b), where it was outranked by participating team submissions in all tracks (open and closed in English, German and French), but was also among the top 5 best-scoring systems in all tracks, and reached second place in the English closed tracks.
Being a general DAG parser, TUPA has been shown (Hershcovich et al., 2018a,b) to support other graph-based meaning representations and similar frameworks, including UD (Universal Dependencies; Nivre et al., 2019), which was the focus of the CoNLL 2017 and 2018 Shared Tasks (Zeman et al., 2017, 2018); AMR (Abstract Meaning Representation; Banarescu et al., 2013), targeted in the SemEval 2016 and 2017 Shared Tasks (May, 2016; May and Priyadarshi, 2017); and DM (DELPH-IN MRS Bi-Lexical Dependencies; Ivanova et al., 2012), one of the target representations, among PAS and PSD (Prague Semantic Dependencies; Hajic et al., 2012; Miyao et al., 2014), in the SemEval 2014 and 2015 Shared Tasks on SDP (Semantic Dependency Parsing; Oepen et al., 2014, 2015, 2016). DM is converted from DeepBank (Flickinger et al., 2012), a corpus of hand-corrected parses from the LinGO ERG (Copestake and Flickinger, 2000), an HPSG (Pollard and Sag, 1994) grammar using Minimal Recursion Semantics (Copestake et al., 2005). EDS (Elementary Dependency Structures; Oepen and Lønning, 2006) is another framework derived from ERG, encoding English Resource Semantics in a variable-free semantic dependency graph.
The CoNLL 2019 Shared Task (Oepen et al., 2019) combines five frameworks for graph-based meaning representation: DM, PSD, EDS, UCCA and AMR. For the task, TUPA was extended to support the MRP format and frameworks, and is used as a baseline system, both as a single-task system trained separately on each framework, and as a multi-task system trained on all of them. The code is publicly available.1
2 Intermediate Graph Representation
Meaning representation graphs in the shared tasks are distributed in, and expected to be parsed to, a uniform graph interchange format, serialized as JSON Lines.2
The formalism encapsulates annotation for graphs containing nodes (corresponding either to text tokens, concepts, or logical predications), with the following components: top nodes, node labels, node properties, node anchoring, directed edges, edge labels, and edge attributes.
While all frameworks represent top nodes, and include directed, labeled edges, UCCA does not contain node labels and properties, AMR lacks node anchoring, and only UCCA has edge attributes (distinguishing primary/remote edges).
2.1 Roots and Anchors
TUPA supports parsing to rooted graphs with labeled edges, and with the text tokens as terminals (leaves), which is the standard format for UCCA graphs. However, MRP graphs are not given in this format, since there may be multiple roots and the text tokens are only matched to the nodes by anchoring (and not by explicit edges).
For the CoNLL 2019 Shared Task, TUPA was extended to support node labels, node properties, and edge attributes (see §3.1). Top nodes and anchoring are combined into the graph by adding a virtual root node and virtual terminal nodes, respectively, during preprocessing.
A virtual terminal node is created per token according to the tokenization predicted by UDPipe (Straka and Straková, 2017) and provided as companion data by the task organizers. All top nodes are attached as children of the virtual root with a Top-labeled edge.
Nodes with anchoring are attached to the virtual terminals associated with the tokens whose character spans intersect with their anchoring, with ANCHOR-labeled edges. Note that anchoring is automatically determined for training in the case of AMR, using the alignments from the companion data, computed by the ISI aligner (Pourdamghani et al., 2014). There is no special treatment of non-trivial anchoring for EDS: in case a node is anchored to multiple tokens (as is the case for multi-word expressions), they are all attached with ANCHOR-labeled edges, resulting in possibly multiple parents for some virtual terminal nodes.
During inference, after TUPA returns an output graph, the virtual root and terminals are removed as postprocessing to return the final graph. Top nodes and anchoring are then inserted accordingly.
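A sketch of this preprocessing step; the field names loosely follow the MRP serialization ("tops", "nodes", "edges", "anchors" with "from"/"to" character offsets), and the virtual node names are ours.

```python
# Sketch of the preprocessing step: attach top nodes to a virtual root
# with TOP edges, and anchor nodes to virtual terminals whose token
# spans intersect the nodes' character anchoring.
def add_virtual_nodes(graph: dict, token_spans: list) -> list:
    """token_spans: (start, end) character spans of the UDPipe tokens."""
    edges = list(graph["edges"])
    for top in graph["tops"]:                        # virtual root over top nodes
        edges.append(("VROOT", top, "TOP"))
    for i, (ts, te) in enumerate(token_spans):       # one virtual terminal per token
        for node in graph["nodes"]:
            # anchor the node to the terminal if their character spans intersect
            if any(a["from"] < te and a["to"] > ts for a in node.get("anchors", [])):
                edges.append((node["id"], f"VTERM{i}", "ANCHOR"))
    return edges

g = {"tops": [0],
     "nodes": [{"id": 0, "anchors": [{"from": 0, "to": 3}]}],
     "edges": []}
print(add_virtual_nodes(g, [(0, 3)]))   # [('VROOT', 0, 'TOP'), (0, 'VTERM0', 'ANCHOR')]
```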
2.2 Placeholder Insertion
The number of distinct node labels and properties is very large for most frameworks, resulting in severe sparsity, as they are taken from an open vocabulary.
Figure 2: The TUPA-MRP transition set. We write the stack with its top to the right and the buffer with its head to the left; the set of edges is also ordered with the latest edge on the right. \( \text{NODE}_X \) and \( \text{PROPERTY}_X \) require that \( x \neq \text{root} \); \( \text{CHILD}_X \), \( \text{PROPERTY} \), \( \text{LEFT-EDGE} \) and \( \text{RIGHT-EDGE} \) require that \( x \notin w_{1:n} \); \( \text{ATTRIBUTE}_X \) requires that \( y \notin w_{1:n} \); \( \text{LEFT-EDGE} \) and \( \text{RIGHT-EDGE} \) require that \( y \neq \text{root} \) and that there is no directed path from \( y \) to \( x \); and \( \text{SWAP} \) requires that \( i(x) < i(y) \), where \( i(\cdot) \) is the swap index (see §3.5).
3 Transition-based Meaning Representation Parser
TUPA is a transition-based parser (Nivre, 2003), constructing graphs incrementally from input tokens by applying transitions (actions) to the parser state (configuration). The parser state is composed of a buffer \( B \) of tokens and nodes to be processed, a stack \( S \) of nodes currently being processed, and an incrementally constructed graph \( G \). Some states are marked as terminal, meaning that \( G \) is the final output. The input to the parser is a sequence of tokens: \( w_1, \ldots, w_n \). Parsing starts with a (virtual) root node on the stack, and the input tokens in the buffer, as (virtual) terminal nodes.
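The overall control flow can be sketched as follows; only a fragment of the transition set is shown, and the classifier is stubbed out as a callback, so this is an illustration of the state machine rather than TUPA's actual code.

```python
# Minimal skeleton of the transition loop; only a fragment of the
# TUPA-MRP transition set is shown, and the classifier is a callback.
def parse(tokens, choose_transition):
    stack = ["VROOT"]                             # virtual root starts on the stack
    buffer = [f"VTERM{i}" for i in range(len(tokens))]
    edges = []                                    # incrementally constructed graph G
    while True:
        action = choose_transition(stack, buffer, edges)
        if action == "SHIFT":
            stack.append(buffer.pop(0))
        elif action == "REDUCE":
            stack.pop()
        elif action.startswith("RIGHT-EDGE_"):    # new labeled edge from s1 to s0
            edges.append((stack[-2], stack[-1], action.split("_", 1)[1]))
        elif action == "FINISH":                  # terminal state: G is the output
            return edges

script = iter(["SHIFT", "RIGHT-EDGE_ANCHOR", "FINISH"])
print(parse(["the"], lambda *state: next(script)))  # [('VROOT', 'VTERM0', 'ANCHOR')]
```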
Given a gold-standard graph and a parser state, an oracle returns the set of gold transitions to apply at the next step, i.e., all transitions that preserve the reachability of the gold target graph.\(^3\) A classifier is trained using the oracle to select
\(^3\)This type of oracle is similar to a dynamic oracle (Goldberg and Nivre, 2012; Goldberg, 2013), but in TUPA it only supports the case where the current parser state is valid, i.e., only gold transitions have been applied since the initial state. Training with exploration is thus not supported (yet).
Figure 3: Illustration of the TUPA model, adapted from Hershcovich et al. (2018a), at an intermediate point in the process of parsing the sentence “The fox gazed at the little prince for a long time.” Top: parser state (stack, buffer and intermediate graph) for each framework. Bottom: encoder architecture. Input feature embeddings are concatenated with BERT embeddings for each token. Vector representations for the input tokens are then computed by two layers of shared and framework-specific bidirectional LSTMs. At each point in the parsing process, the encoded vectors for specific tokens (from specific locations in the stack/buffer) are concatenated with embedding and numeric features from the parser state (for existing edge labels, number of children, etc.), and fed into the MLP for selecting the next transition. Note that parsing the different frameworks is not performed jointly; the illustration only expresses the parameter sharing scheme.
the next transition based on features encoding the parser’s current state, where the training objective is to maximize the sum of log-likelihoods of all gold transitions at each step. If there are multiple gold transitions, the highest-scoring one is taken in training. Inference is performed greedily: the highest-scoring transition is always taken.
Formally, the incrementally constructed graph $G$ consists of $(V, E, \ell_V, \ell_E, p, a)$, where $V$ is the set of nodes, $E$ is the sequence of directed edges, $\ell_V : V \rightarrow L_V$ is the node label function, $L_V$ being the set of possible node labels, $\ell_E : E \rightarrow L_E$ is the edge label function, $L_E$ being the set of possible edge labels, $p : V \rightarrow \mathcal{P}(P)$ is the node property function, $P$ being the set of possible node property-value pairs, and $a : E \rightarrow \mathcal{P}(A)$ is the edge attribute function, $A$ being the set of possible edge attribute-value pairs (a node may have any number of properties; an edge may have any number of attributes).
3.1 Transition Set
The set of possible transitions in TUPA is based on a combination of transition sets from other parsers, designed to support reentrancies (Sagae and Tsujii, 2008; Tokgöz and Eryiğit, 2015), discontinuities (Nivre, 2009; Maier, 2015; Maier and Lichte, 2016) and non-terminal nodes (Zhu et al., 2013). Beyond the original TUPA transitions (Hershcovich et al., 2017, 2018a), for the CoNLL 2019 Shared Task, transitions are added to support node labels, node properties, and edge attributes. Additionally, top nodes and node anchoring are encoded by special edges from a virtual root node and to virtual terminal nodes (corresponding to text tokens), respectively (see § 2).
The TUPA-MRP transition set is shown in Figure 2. It includes the following original TUPA transitions: the standard SHIFT and REDUCE operations (to move a node from the buffer to the stack and to discard a stack node, respectively), NODE$_X$ for creating a new non-terminal node and an $X$-labeled edge (so that the new node is a parent of the stack top), LEFT-EDGE$_X$ and RIGHT-EDGE$_X$ to create a new $X$-labeled edge, SWAP to handle discontinuous nodes (moving the second topmost stack node back to the buffer), and FINISH to mark the state as terminal.
Besides the original TUPA transitions, TUPA-MRP contains a \texttt{CHILD} transition to create unanchored children for existing nodes (like \texttt{NODE}, but the new node is a \texttt{child} of the stack top),\footnote{While UCCA contains unanchored (\textit{implicit}) nodes corresponding to non-instantiated arguments or predicates, the original TUPA disregards them as they are not included in standard UCCA evaluation. The CoNLL 2019 Shared Task omits implicit UCCA nodes too, in fact, but the \texttt{CHILD} transition is included to support unanchored nodes in AMR, and is not used otherwise.} a \texttt{LABEL} transition to select a label for an existing node (either the stack top or the second topmost stack node), a \texttt{PROPERTY} transition to select a property-value pair for an existing node, and an \texttt{ATTRIBUTE} transition to select an attribute-value pair for an existing edge (the last created edge).
The original TUPA transitions \texttt{LEFT-REMOTE}_X and \texttt{RIGHT-REMOTE}_X, creating new \textit{remote} edges (a UCCA-specific distinction), are omitted. Remote edges are encoded instead as edges with the \textit{remote} attribute, and are supported by the combination of \texttt{EDGE} and \texttt{ATTRIBUTE} transitions. In contrast to the original TUPA transitions, \texttt{EDGE} transitions are allowed to attach multiple parents to a node.
### 3.2 Transition Classifier
To predict the next transition at each step, TUPA uses a BiLSTM module followed by an MLP and a softmax layer for classification (Kiperwasser and Goldberg, 2016). The model is illustrated in Figure 3.
The BiLSTM module (illustrated in more detail in Figure 4) is applied before the transition sequence starts, running over the input tokenized sequence. It consists of a pre-BiLSTM MLP with feature embeddings (§3.3) and pre-trained contextualized embeddings (§3.4) concatenated as inputs, followed by (multiple layers of) a bidirectional recurrent neural network (Schuster and Paliwal, 1997; Graves, 2008) with a long short-term memory cell (Hochreiter and Schmidhuber, 1997).
While edge labels are combined into the identity of the transition (so that, for example, \texttt{LEFT-EDGE}_P and \texttt{LEFT-EDGE}_S are separate transitions in the output), there is just one transition for each of \texttt{LABEL}, \texttt{PROPERTY} and \texttt{ATTRIBUTE}. Each time one of these transitions is selected, an additional classifier is invoked with the set of possible values for the currently parsed framework. This hard separation is made due to the large number of node labels and properties in the MRP frameworks. Since there is only one possible edge attribute value (\textit{remote}, for UCCA), performing the \texttt{ATTRIBUTE} transition always results in this value being selected.
### 3.3 Features
In both training and testing, we use vector embeddings representing the lemmas, coarse POS tags (UPOS) and fine-grained POS tags (XPOS). These feature values are provided by UDPipe as companion data by the task organizers. In addition, we use punctuation and gap type features (Maier and Lichte, 2016), and previously predicted node and edge labels, node properties, edge attributes and parser actions. These embeddings are initialized randomly (Glorot and Bengio, 2010).
To the feature embeddings, we concatenate numeric features representing the node height, number of parents and children, and the ratio between the number of terminals to total number of nodes in the graph $G$. Numeric features are taken as they are, whereas categorical features are mapped to real-valued embedding vectors. For each non-terminal node, we select a \textit{head terminal} for feature extraction, by traversing down the graph, selecting the first outgoing edge each time according to alphabetical order of labels.
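A sketch of the head-terminal selection just described, over a hypothetical two-node graph of ours:

```python
# Sketch of head-terminal selection: walk down the graph, taking the
# outgoing edge with the alphabetically first label at each step.
def head_terminal(node, children):
    """children: node -> list of (edge_label, child); terminals have none."""
    while children.get(node):
        _, node = min(children[node])      # first outgoing edge by label order
    return node

children = {"n1": [("P", "n2"), ("A", "t1")], "n2": [("C", "t2")]}
assert head_terminal("n1", children) == "t1"   # "A" sorts before "P"
```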
### 3.4 Pre-trained Contextualized Embeddings
Contextualized representation models such as BERT (Devlin et al., 2019) have recently achieved state-of-the-art results on a diverse array of downstream NLP tasks, gaining improved results compared to non-contextual representations. We use the weighted sum of the last four hidden layers of a pre-trained BERT model as extra input features.\textsuperscript{5} BERT uses a wordpiece tokenizer (Wu et al., 2016), which segments all text into sub-word units, while TUPA uses the UDPipe tokenization. To maintain alignment between wordpieces and tokens, we use the summation of the BERT output vectors corresponding to the wordpieces of each token as its representation.
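A sketch of this alignment by summation; the array shapes and the toy wordpiece-to-token mapping are assumptions for illustration.

```python
# Sketch of the wordpiece-to-token alignment by summation.
import numpy as np

def token_vectors(wp_vectors: np.ndarray, wp_to_token: list) -> np.ndarray:
    """wp_vectors: (num_wordpieces, dim); wp_to_token: token index per wordpiece."""
    out = np.zeros((max(wp_to_token) + 1, wp_vectors.shape[1]))
    for vec, tok in zip(wp_vectors, wp_to_token):
        out[tok] += vec                    # sum the wordpiece vectors of each token
    return out

wp = np.random.randn(5, 8)                 # e.g. "un", "##happi", "##ly", "ever", "after"
print(token_vectors(wp, [0, 0, 0, 1, 2]).shape)   # (3, 8): one vector per token
```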
### 3.5 Constraints

As each annotation scheme has different constraints on the allowed graph structures, we apply these constraints separately for each task. During training and parsing, the relevant constraint set rules out some of the transitions according to the parser state.

Some constraints are task-specific, others are generic. For example, in AMR, a node with an incoming NAME edge must have the NAME label. In UCCA, a node may have at most one outgoing edge with label $\in \{\text{PROCESS}, \text{STATE}\}$.

An example of a generic constraint is that stack nodes that have been swapped should not be swapped again, to avoid infinite loops in inference. To implement this constraint, we define a swap index for each node, assigned when the node is created. At initialization, only the root node and terminals exist. We assign the root a swap index of 0, and each terminal its position in the text (starting at 1). Whenever a node is created as a result of a NODE or CHILD transition, its swap index is the arithmetic mean of the swap indices of the stack top and buffer head. While this constraint may theoretically limit the ability to parse arbitrary graphs, in practice we find that all graphs in the shared task training set can still be reached without violating it.
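A sketch of the swap-index bookkeeping described above, with hypothetical node and token names:

```python
# Sketch of swap-index bookkeeping: root 0, terminals their positions,
# and new nodes the mean of the stack top's and buffer head's indices.
swap_index = {"root": 0.0}
for i, tok in enumerate(["w1", "w2", "w3"], start=1):
    swap_index[tok] = float(i)

def assign_new_node(name, stack_top, buffer_head):
    swap_index[name] = (swap_index[stack_top] + swap_index[buffer_head]) / 2.0

def swap_allowed(s1, s0):
    # SWAP requires the second-topmost stack node to precede the top one,
    # ruling out repeated swaps of the same pair.
    return swap_index[s1] < swap_index[s0]

assign_new_node("n1", "w1", "w2")
print(swap_index["n1"], swap_allowed("n1", "w2"))   # 1.5 True
```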
4 Multi-Task Learning

Whereas in the single-task setting TUPA is trained separately on each framework as described above, in the multi-task setting all frameworks share a BiLSTM for encoding the input. In addition, each framework has a framework-specific BiLSTM, private to it. Each framework has its own MLP on top of the concatenation of the shared and framework-specific BiLSTMs (see Figure 3).

\textsuperscript{5}We used the bert-large-cased model from https://github.com/huggingface/pytorch-transformers.
<table>
<thead>
<tr>
<th>Hyperparameter</th>
<th>Value</th>
</tr>
</thead>
<tbody>
<tr>
<td>Lemma dim.</td>
<td>200</td>
</tr>
<tr>
<td>UPOS dim.</td>
<td>20</td>
</tr>
<tr>
<td>XPOS dim.</td>
<td>20</td>
</tr>
<tr>
<td>Dep. rel. dim.</td>
<td>10</td>
</tr>
<tr>
<td>Punct. dim.</td>
<td>1</td>
</tr>
<tr>
<td>Action dim.</td>
<td>3</td>
</tr>
<tr>
<td>Node label dim.</td>
<td>20</td>
</tr>
<tr>
<td>Node prop. dim.</td>
<td>20</td>
</tr>
<tr>
<td>Edge label dim.</td>
<td>20</td>
</tr>
<tr>
<td>Edge attrib. dim.</td>
<td>1</td>
</tr>
<tr>
<td>MLP layers</td>
<td>2</td>
</tr>
<tr>
<td>MLP dim.</td>
<td>50</td>
</tr>
<tr>
<td>Shared BiLSTM layers</td>
<td>2</td>
</tr>
<tr>
<td>Shared BiLSTM dim.</td>
<td>500</td>
</tr>
<tr>
<td>Shared pre-BiLSTM MLP layers</td>
<td>1</td>
</tr>
<tr>
<td>Shared pre-BiLSTM MLP dim.</td>
<td>300</td>
</tr>
<tr>
<td>Private BiLSTM layers</td>
<td>2</td>
</tr>
<tr>
<td>Private BiLSTM dim.</td>
<td>500</td>
</tr>
<tr>
<td>Private pre-BiLSTM MLP layers</td>
<td>1</td>
</tr>
<tr>
<td>Private pre-BiLSTM MLP dim.</td>
<td>300</td>
</tr>
</tbody>
</table>
Table 1: Hyperparameter settings.
For node labels and properties and for edge attributes (when applicable), an additional “axis” (private BiLSTM and MLP) is added per framework (e.g., AMR node labels are predicted separately and with an identical architecture to AMR transitions, except that the output dimension is different). This is true for the single-task setting too, so in fact the single-task setting is multi-task over \{transitions, node labels, node properties, edge attributes\}.
5 Training details

The model is implemented using DyNet v2.1 (Neubig et al., 2017).\textsuperscript{6} Unless otherwise noted, we use the default values provided by the package. We use the same hyperparameters as in previous experiments on UCCA parsing (Hershcovich et al., 2018a), without any hyperparameter tuning on the CoNLL 2019 data.

5.1 Hyperparameters

We use dropout (Srivastava et al., 2014) between MLP layers, and recurrent dropout (Gal and Ghahramani, 2016) between BiLSTM layers, both with $p = 0.4$. We also use word, lemma, coarse- and fine-grained POS tag dropout with $\alpha = 0.2$

\textsuperscript{6}http://dynet.io
Table 2: Official test MRP F-scores (in %) for TUPA (single-task and multi-task). For comparison, the highest score achieved for each framework and evaluation set is shown.
(Kiperwasser and Goldberg, 2016): in training, the embedding for a feature value \( w \) is replaced with a zero vector with probability \( \frac{\alpha}{\#(w)+\alpha} \), where \( \#(w) \) is the number of observed occurrences of \( w \). In addition, we use node dropout (Hershcovich et al., 2018a): with a probability of 0.1 at each step, all features associated with a single node in the parser state are replaced with zero vectors. For optimization we use a minibatch size of 100, decaying all weights by \( 10^{-5} \) at each update, and train with stochastic gradient descent for 50 epochs with a learning rate of 0.1, followed by AMSGrad (Reddi et al., 2018) for 250 epochs with \( \alpha = 0.001, \beta_1 = 0.9 \) and \( \beta_2 = 0.999 \). Table 1 lists other hyperparameter settings.
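A sketch of the count-dependent dropout probability; the surrounding feature-value counting and embedding lookup are omitted.

```python
# Sketch of the count-dependent feature-value dropout probability.
import random

def dropped(count_w: int, alpha: float = 0.2) -> bool:
    """Zero the embedding of a value seen count_w times in training with
    probability alpha / (count_w + alpha), so rare values are dropped more
    often and the model learns to back off to other features."""
    return random.random() < alpha / (count_w + alpha)

# A value observed once is dropped with prob. 0.2/1.2 ~ 0.17;
# one observed 100 times with prob. 0.2/100.2 ~ 0.002.
```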
5.2 Official Evaluation
For the official evaluation, we did not use a development set, and trained on the full training set for as many epochs as the evaluation period allowed. The multi-task model completed just 3 epochs of training. The single-task models completed 12 epochs for DM, 22 epochs for PSD, 14 epochs for EDS, 100 epochs for UCCA (the maximum number we allowed) and 13 epochs for AMR.
Due to an oversight resulting from code re-use, in the official evaluation we used non-whitelisted resources. This is similar to the delexicalization employed by Buys and Blunsom (2017a) for AMR parsing.
5.3 Post-evaluation Training
After the evaluation period, we continued training for a longer period of time, using a slightly modified system: we used only resources whitelisted by the task organizers in the post-evaluation training, removing the constraints and placeholders based on PropBank and AMR lexicons.
In this setting, training is done over a shuffled mix of the training sets of all frameworks (no special sampling is done to balance the number of instances per framework). We select the epoch with the best average MRP F-score on a development set, created by sampling 500 random training instances from each framework (the development instances are excluded from the training set). The large multi-task model only completed 4 training epochs in the available time; the single-task models completed 24 epochs for DM, 31 epochs for PSD, 25 epochs for EDS, 69 epochs for UCCA and 23 epochs for AMR.
6 Results
Table 2 presents the averaged scores on the test sets in the official evaluation (§5.2), for TUPA and for the best-performing system in each framework and evaluation set. Since non-whitelisted resources were used, the TUPA scores cannot be taken as a baseline. Furthermore, due to insufficient training time, all models but the UCCA one are underfitting, while the UCCA model is overfitting due to excessive training without early stopping (no development set was used in this setting).
Table 3: Post-evaluation test scores (in %) for TUPA (single-task and multi-task), using the MRP F-score (left), and using Native Evaluation (middle): labeled SDP F-score for DM and PSD, EDM F-score for EDS, primary labeled F-score for UCCA, and Smatch for AMR. The rightmost column (Trans./Token Ratio) shows the mean ratio between length of oracle transition sequence and sentence length, over the training set.
<table>
<thead>
<tr>
<th rowspan="3">Post-evaluation</th>
<th colspan="3">MRP Test Scores</th>
</tr>
<tr>
<th colspan="2">TUPA (single-task)</th>
<th>TUPA (multi-task)</th>
</tr>
<tr>
<th>ALL</th>
<th>LPPS</th>
<th>ALL</th>
</tr>
</thead>
<tbody>
<tr>
<td>DM</td>
<td>75.57</td>
<td>80.46</td>
<td>62.16</td>
</tr>
<tr>
<td>PSD</td>
<td>70.86</td>
<td>70.62</td>
<td>65.95</td>
</tr>
<tr>
<td>EDS</td>
<td>84.85</td>
<td>85.36</td>
<td>79.39</td>
</tr>
<tr>
<td>UCCA</td>
<td>77.69</td>
<td>82.15</td>
<td>64.05</td>
</tr>
<tr>
<td>AMR</td>
<td>53.85</td>
<td>53.47</td>
<td>39.00</td>
</tr>
<tr>
<td>Overall</td>
<td>75.73</td>
<td>77.63</td>
<td>66.01</td>
</tr>
</tbody>
</table>
6.1 Post-evaluation Results
Table 3 presents the averaged scores on the test sets for the post-evaluation trained models (§5.3). Strikingly, the multi-task TUPA consistently falls behind the single-task one, both for each framework separately and in the overall score. This likely stems from several factors: the sharing strategy could be improved, but mainly the multi-task model is probably underfitting due to insufficient training. We conclude that better efficiency and faster training are crucial for the practical applicability of this approach. Perhaps a smaller multi-task model would have performed better by training on more data in the available time frame.
6.2 Diagnostic Evaluation
The rightmost column of Table 3 displays the mean ratio between the length of the oracle transition sequence and the sentence length by framework, over the shared task training set. Scores are clearly better for frameworks with longer oracle transition sequences, perhaps because many of these transitions are "easy", as they correspond to structural elements of the graphs or to properties copied from the input tokens.
6.3 Comparability with Previous Results
Previously published results of applying TUPA to UCCA parsing (Hershcovich et al., 2017, 2018a, 2019b,a) used a different version of the parser, without contextualized word representations from BERT.
For comparability with previous results, we train and test a model identical to the one presented in this paper on the SemEval 2019 Task 1 data (Hershcovich et al., 2019b), which is UCCA-only but contains tracks in English, German and French. For this experiment, we use bert-multilingual instead of bert-large-cased, and train a shared model over all three languages. A 50-dimensional learned language embedding vector is concatenated to the input. Word, lemma and XPOS features are not used. No multi-task learning with other frameworks is employed. The results are shown in Table 4. While improvements over the previous TUPA scores are achieved uniformly, even with BERT, TUPA is outperformed by the shared task winners (Jiang et al., 2019). Note that Jiang et al. (2019) also used bert-multilingual in the open tracks.
We also train and test TUPA with BERT embeddings on v1.0 of the UCCA English Web Treebank (EWT) reviews dataset (Hershcovich et al., 2019a). While the EWT reviews are included in the MRP shared task UCCA data, the different format and preprocessing make for slightly different scores, so we report the scores for comparability with previous work in Table 5. We again see pronounced improvements from incorporating pretrained contextualized embeddings into the model.
7 Related Work
Transition-based meaning representation parsing dates back to semantic dependency parsing work by Sagae and Tsujii (2008) and Tokgöz and Eryiğit (2015), who support DAG structures by allowing multiple parents to be created by EDGE transitions, and by Titov et al. (2009), who applied a SWAP transition (Nivre, 2008) for online reordering of nodes to support non-projectivity.
Transition-based parsing was applied to AMR
<table>
<thead>
<tr>
<th></th>
<th>All</th>
<th>Prim.</th>
<th>Rem.</th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>SemEval 2019</strong></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td><strong>English-Wiki (open)</strong></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td>TUPA (w/o BERT)</td>
<td>73.5</td>
<td>73.9</td>
<td>53.5</td>
</tr>
<tr>
<td>TUPA (w/ BERT)</td>
<td>77.8</td>
<td>78.3</td>
<td>57.4</td>
</tr>
<tr>
<td>Jiang et al. (2019)</td>
<td><strong>80.5</strong></td>
<td><strong>81.0</strong></td>
<td><strong>58.8</strong></td>
</tr>
<tr>
<td><strong>English-20K (open)</strong></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td>TUPA (w/o BERT)</td>
<td>68.4</td>
<td>69.4</td>
<td>25.9</td>
</tr>
<tr>
<td>TUPA (w/ BERT)</td>
<td>74.9</td>
<td>75.7</td>
<td><strong>44.0</strong></td>
</tr>
<tr>
<td>Jiang et al. (2019)</td>
<td><strong>76.7</strong></td>
<td><strong>77.7</strong></td>
<td>39.2</td>
</tr>
<tr>
<td><strong>German-20K (open)</strong></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td>TUPA (w/o BERT)</td>
<td>79.1</td>
<td>79.6</td>
<td>59.9</td>
</tr>
<tr>
<td>TUPA (w/ BERT)</td>
<td>81.3</td>
<td>81.6</td>
<td><strong>69.2</strong></td>
</tr>
<tr>
<td>Jiang et al. (2019)</td>
<td><strong>84.9</strong></td>
<td><strong>85.4</strong></td>
<td>64.1</td>
</tr>
<tr>
<td><strong>French-20K (open)</strong></td>
<td></td>
<td></td>
<td></td>
</tr>
<tr>
<td>TUPA (w/o BERT)</td>
<td>48.7</td>
<td>49.6</td>
<td>2.4</td>
</tr>
<tr>
<td>TUPA (w/ BERT)</td>
<td>72.0</td>
<td>72.8</td>
<td><strong>45.8</strong></td>
</tr>
<tr>
<td>Jiang et al. (2019)</td>
<td><strong>75.2</strong></td>
<td><strong>76.0</strong></td>
<td>43.3</td>
</tr>
</tbody>
</table>
Table 4: Test UCCA F-scores (in %) on all edges, primary edges and remote edges, on the SemEval 2019 Task 1 data. Previously published TUPA scores are shown (TUPA w/o BERT), as well as scores for TUPA with BERT contextualized embeddings (TUPA w/ BERT), averaged over three separately trained models in each setting, differing only by random seed (standard deviation < 0.03); and the scores for the best-scoring system from that shared task.
8 Conclusion
We have presented TUPA, a baseline system for the CoNLL 2019 shared task on Cross-Framework Meaning Representation Parsing. TUPA is a general transition-based DAG parser, trained with multi-task learning over multiple frameworks. Its input representation is augmented with BERT contextualized embeddings.
Acknowledgments
We are grateful for the valuable feedback from the anonymous reviewers. We would like to thank the other task organizers, Stephan Oepen, Omri Abend, Jan Hajič, Tim O’Gorman and Nianwen Xue, for valuable discussions and tips on developing the baseline systems, as well as for providing the data, evaluation metrics and information on the various frameworks.
References
Sashank J. Reddi, Satyen Kale, and Sanjiv Kumar. 2018. On the convergence of Adam and beyond. In ICLR.
|
{"Source-Url": "https://static-curis.ku.dk/portal/files/239516691/OA_TUPA.pdf", "len_cl100k_base": 8118, "olmocr-version": "0.1.53", "pdf-total-pages": 13, "total-fallback-pages": 0, "total-input-tokens": 45968, "total-output-tokens": 13708, "length": "2e12", "weborganizer": {"__label__adult": 0.0006585121154785156, "__label__art_design": 0.0015230178833007812, "__label__crime_law": 0.0006361007690429688, "__label__education_jobs": 0.005702972412109375, "__label__entertainment": 0.0006031990051269531, "__label__fashion_beauty": 0.0004346370697021485, "__label__finance_business": 0.0006074905395507812, "__label__food_dining": 0.0005397796630859375, "__label__games": 0.00139617919921875, "__label__hardware": 0.0009775161743164062, "__label__health": 0.0012607574462890625, "__label__history": 0.0007801055908203125, "__label__home_hobbies": 0.0001455545425415039, "__label__industrial": 0.0007219314575195312, "__label__literature": 0.0066070556640625, "__label__politics": 0.0006041526794433594, "__label__religion": 0.0012025833129882812, "__label__science_tech": 0.424560546875, "__label__social_life": 0.00036978721618652344, "__label__software": 0.03466796875, "__label__software_dev": 0.51416015625, "__label__sports_fitness": 0.0004887580871582031, "__label__transportation": 0.0008931159973144531, "__label__travel": 0.00031876564025878906}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 46251, 0.04871]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 46251, 0.16908]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 46251, 0.81706]], "google_gemma-3-12b-it_contains_pii": [[0, 591, false], [591, 4116, null], [4116, 6326, null], [6326, 9040, null], [9040, 12306, null], [12306, 16395, null], [16395, 20597, null], [20597, 23667, null], [23667, 28736, null], [28736, 32361, null], [32361, 37245, null], [37245, 41766, null], [41766, 46251, null]], "google_gemma-3-12b-it_is_public_document": [[0, 591, true], [591, 4116, null], [4116, 6326, null], [6326, 9040, null], [9040, 12306, null], [12306, 16395, null], [16395, 20597, null], [20597, 23667, null], [23667, 28736, null], [28736, 32361, null], [32361, 37245, null], [37245, 41766, null], [41766, 46251, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 46251, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 46251, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 46251, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 46251, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 46251, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 46251, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 46251, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 46251, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 46251, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 46251, null]], "pdf_page_numbers": [[0, 591, 1], [591, 4116, 2], [4116, 6326, 3], [6326, 9040, 4], [9040, 12306, 5], [12306, 16395, 6], [16395, 20597, 7], [20597, 23667, 8], [23667, 28736, 9], [28736, 32361, 10], [32361, 37245, 11], [37245, 41766, 12], [41766, 46251, 13]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 
46251, 0.18085]]}
|
olmocr_science_pdfs
|
2024-12-06
|
2024-12-06
|
f42cb58e2339a694088f4c5634003d66c3ea21a8
|
Tracing system-level communication in object-oriented distributed systems
Zoltán Ádám Mann
Budapest University of Technology and Economics
Department of Control Engineering and Information Technology
H-1117 Budapest, Magyar tudósok körútja 2, Hungary
zoltan.mann@cs.bme.hu
This paper won first prize at the 2001 IEEE Hungary Section Student Paper Contest.
Abstract—Standard tracing mechanisms were usually developed for use in a single-computer environment. Moreover, they are bound to a specific programming language. Today’s highly distributed and heterogeneous computing environments require new tracing methodologies. In this paper, the author collects the requirements that a tracing architecture is supposed to fulfill, and investigates how such a tracing architecture may be implemented in a distributed, heterogeneous and object-oriented environment. As a practical contribution, a system for tracing CORBA applications is presented, based on the interceptor mechanism.
I. INTRODUCTION
As computers become more and more interconnected, the target of software development is increasingly a distributed, heterogeneous system rather than a single computer. As a result, new techniques, methodologies and tools are required to facilitate the development of distributed software. Also, as components with different platforms, architectures and programming languages are interconnected, integration becomes a major challenge. Since the object-oriented paradigm has provided a very good integration scheme, object-oriented distributed systems are flourishing. This is why the paper focuses on the tracing of object-oriented distributed systems and particularly on CORBA (Common Object Request Broker Architecture [1]), the most widely used middleware system. (For tracing of other middleware systems, see e.g. [2], [3], [4].)
Besides these—rather technical—arguments, there are also some other factors to consider. The first is the wide spread of e-business, bringing along a boom for CORBA and similar middleware technologies as well. Another, not so widely recognized, but equally important point is the presence of embedded distributed systems. The idea of using standard middleware solutions in embedded environments is gaining popularity. Just consider the various intelligent electronic units that can be found on an airplane: a complex distributed system, where the individual components may communicate and co-operate using CORBA. Obviously, tracing is a vital and complicated task in such an environment.
A third issue is the ever-growing competition on the market. Software development companies are supposed to create complex and reliable distributed systems. Parallel to these requirements, the time-to-market pressure also keeps growing. This, too, results in the need for methods and tools to make distributed software development an easier and quicker task.
This paper addresses the problem of tracing distributed systems in the following way. First, in section II, an overview is given of tracing in general, and then (in section III) more specifically of tracing in a distributed environment. This is followed by a discussion of the role of the middleware in section III-D. As a concrete example, CORBA interceptors are investigated as a possible means for tracing in section IV. Sections V and VI present development details and an analysis of interceptor-based tracing. Section VII concludes the paper.
II. TRACING IN GENERAL
Since the term 'tracing' is rather overloaded (especially in the context of computer science), its meaning has to be clarified first (section II-A). Moreover, tracing is often used as a synonym for debugging. However, tracing is a lot more than that: section II-B presents other important aims of tracing. Section II-C covers the basics of standard tracing mechanisms.
A. The definition of tracing
As with every definition (especially those that try to define a term that has already been widely used in many different contexts), it is hard to find a perfect definition that is flexible enough to include all possible uses, and at the same time specific enough. The following definition is probably not perfect either, but it will do for the rest of this paper.
Definition: Tracing is a step-by-step execution of a program or software system, conducted in order to gain extra information—i.e. information that is not part of the output in a normal execution—or insight on how the program or software system works.
Note that it is not specified what a step in ‘step-by-step execution’ is. This is by intent so in order to make the definition scalable: steps may be very low-level (e.g. machine code instructions) but may also be high-level (e.g. communication events or messages in a distributed application).
B. Aims of tracing
As already mentioned, debugging is not the only purpose of tracing. Indeed, as a part of this work, the following possible use cases of tracing have been identified in discussions with programmers and software engineers:
1. Checking of correct behaviour. This is the most obvious usage: the programmer simply wants to make sure that the software does what it is supposed to do. Therefore, he or she runs the program step by step and looks at its output and inner state.
2. Locating bugs. If the software does not do what it is supposed to do, then again, tracing can help in identifying the nature of the error and in finding it.
3. Monitoring crucial applications. Even if the system seems to do correctly what it is supposed to, this will just not be enough in mission-critical applications. For instance, a power plant regulating software will have to be monitored constantly.
4. Better understanding of how the system works. Running the software step by step can also serve demonstration purposes and thus be used in e.g. university lectures or other courses. Also, if a programmer, developing a new module for a complex software system, would like to obtain an overall picture of how the system works (e.g. how existing modules co-operate and communicate), they can use tracing.
5. Extracting documentation. There are already tools that can extract static documentation from the source code of the software. On the other hand, tracing could be used to extract dynamic documentation, e.g. a communication diagram, while the system is running.
6. Performance analysis (also known as profiling). In this case, it can usually be assumed that the software behaves correctly, but slowly. Therefore, the programmer—or a performance specialist—runs the system and at the same time measures the elapsed time in certain functions of the program. Typically, the goal is not a precise measurement but rather to identify bottlenecks.
As shown later, all these aims can—and should—be provided for with essentially the same tracing architecture. Also, the usability of a tracing tool should be measured on how well it fulfills these requirements.
However, it has to be noted that the above requirements need slightly different usage of the underlying tracing architecture. Therefore, a tracing tool should support several modes of operation. For example, the first four use cases require some on-line user interaction, while the last two do not. It follows that tracing tools should provide at least an interactive and a non-interactive mode. Of course other distinctions are also possible.
C. Typical solutions
Tracing facilities are usually provided by programming environments and are bound to a particular programming language. This is quite natural because the programming language is exactly the level of abstraction that serves for the interaction of the programmer with the computer. So tracing, too, is best performed at that level.
Usually the compiler will add extra information ("debug information") to the machine-level code so that tracing steps correspond to instructions of the source code and not to machine-level instructions. When debugging is on, an interrupt will usually be called at these instruction boundaries, resulting in a step-by-step execution of the program.
Also, debuggers typically offer more sophisticated features as well (such as Run to cursor, Breakpoints, Conditional breakpoints etc.) and the whole debugging-profiling-tracing functionality is available through a special user interface, which is usually integrated into the programming environment.
III. TRACING IN A DISTRIBUTED ENVIRONMENT
A. Difficulties in a distributed environment
As is often the case, the distributedness of the system can cause several problems.
First of all, the place where trace information is created and the place where it is needed are probably not the same. Therefore, the following processes must be arranged separately, but not independently:
- Extraction of trace information
- Transport of trace information
- Processing and combining trace information
- Displaying trace information
This may also cause some anomalies. For example, since the communication delays in the distributed system may vary over space and time, it is possible that the information that object B obtained a message from object A, becomes available earlier than the information that object A sent a message to object B.
It is also possible that some components of the distributed system stop working correctly or stop working at all. Transitional network failures can also cause some trace information, that is just on its way, to be lost.
Time and time-related (such as performance) measurements are made extremely difficult by the usual lack of a global clock.
If the system is not only distributed but also heterogeneous, this poses an even bigger challenge. Namely, as explained in the previous section, traditional tracing tools are usually bound to a particular programming language. If the components of the system are implemented in different programming languages, this becomes infeasible.
B. Solution framework
In this work, the following framework was used for tracing distributed systems (see figure 1):
- In every component of the distributed system, a new process is installed which gathers trace information. More specifically, it intercepts incoming and outgoing calls.
- There is an additional component which is responsible for collecting trace information from the other components, as well as for displaying it appropriately (denoted as tracer).
- Collecting trace information may either be implemented in a push or in a pull model. In any case, communication may either be arranged using the common channels of the distributed system or through dedicated channels.
From a software engineering point of view, it is vital to have a central tracer component. It encapsulates all details concerning the tracing mode, the level of verbosity, the output format, output device specific information etc. Otherwise (that is, if trace information were also output in a distributed way) the output of trace information could easily become inconsistent.
Another vital aspect is the scope of tracing: it is logical to lay emphasis on large-scale communication. The reason is that standard tracing techniques may be used very well to trace execution inside a component. For that, it is still the particular programming language that was used to implement the component in question, that can provide the best tracing facility. The real challenge in tracing distributed applications is to trace the communication between the components.
C. Instrumentation
One of the key issues in building a tracing system such as the one depicted in figure 1 is, how to gather trace information. The process of adding this new feature to existing code is called instrumentation.
The most widely used solution is manual instrumentation. This means that the programmer has to add extra pieces of code in order to notify the tracer about what is going on. Typically, the tracer provides some functions for this. So the programmer will call these functions at every point in the software that is potentially critical. Usually this means that the tracer needs to be notified just before and just after every function call of the original code, and/or at the beginning and end of each function. The notification should include information such as the initiator and the target of the call, parameters, return value etc., since this information is an important part of the tracer output.
Assume, for instance, that the original code contains the following call:
```
result = server->do("Joe", 42);
```
After instrumentation, the code becomes something like this:
```
tracer->before(this, server, "do", "Joe", 42);
```
result = server->do("Joe", 42);
tracer->after(this, server, "do", "Joe", 42, result);
As can be seen, manual instrumentation is tedious and error-prone. Moreover, it is necessary for manual instrumentation to possess the source code. Therefore, the goal is automatic instrumentation, meaning that the programmer’s extra work should be minimized (ideally eliminated). In the next sections it is explored how this can be supported by the middleware.
D. The role of the middleware
As already mentioned, traditional tracing solutions are specific to particular programming languages, because the programming language is the very level of abstraction on which the programmer handles the computer, and so it is the programming environment itself that can provide the best tracing facilities.
When moving on to distributed systems, an additional, higher level of abstraction appears, namely that of system-level communication, supported by the middleware. (In some cases, this also results in the appearance of a higher-level language, e.g. Interface Definition Language (IDL) in the case of CORBA.) The same way that specific programming environments can provide the best traditional tracing solutions, it is the middleware itself that may—and should—provide the best solutions for system-level tracing.
If the whole distributed system is developed in a single programming environment, then this environment will be capable of providing tracing solutions for multiple abstraction levels, not only for system-level communication. An example for such a system is GRADE [5]. However, this kind of distributed software development is not typical, largely because middleware systems have to be able to integrate legacy applications as well.
Moreover, the problems of distributed tracing (mentioned in section III-A) are typical tasks of the middleware. So it is again the middleware itself that can provide the best support for message delivery, time stamping, event handling etc.
In the next section, this idea is illustrated on the example of CORBA: a mechanism is presented that can be used to provide automatic instrumentation for CORBA applications and thus achieving tracing of system-level communication.
IV. CORBA INTERCEPTORS
Interceptors are objects implementing the Interceptor interface [6]. There are two kinds of interceptors: ServerRequestInterceptors and ClientRequestInterceptors. Both interfaces define callback methods that are invoked by the Object Request Broker (ORB) at specific points of a CORBA call. See figure 2 for the flow of control.
Both kinds of interceptors must be registered with the ORB. That is, the interceptors are registered with a local ORB object, in a specific name space. After that, the ServerRequestInterceptor will intercept all incoming requests and outgoing replies, whereas the ClientRequestInterceptor will intercept all outgoing requests and incoming replies.
The interceptors obtain information concerning the current call in a RequestInfo object. This includes a reference to the target of the call, the name of the invoked operation, the list of parameters, the return value (if already available), a possibly empty list of thrown exceptions, and a list of so-called service contexts. Service contexts can be used for out-of-band communication between interceptors in different components: an interceptor may add extra information to a service context, which can in turn be accessed later by another interceptor.
Interceptors may also alter some of the information contained in the RequestInfo object. For instance, an interceptor might change the target of the call. This way, load balancing or fault tolerance schemes may be integrated into an existing application, without modifying its actual source code. Also, interceptors can be used for security purposes, e.g. for transparent authentication. These are probably the goals that interceptors were actually developed for.
Interceptors were first defined in CORBA 2.3 [1]. However, this 9-page definition was quite under-specified. (For a comparison: the current draft interceptor specification consists of 254 pages [7].) This resulted in a number of proprietary solutions from different vendors. The problem was recognized by the OMG, which issued a Request For Proposals [8] in September 1998. After some iterations of proposals and discussions, the leading vendors of the field came to an agreement, and handed in their Joint Submission in December 1999 [9]. The architecture described in this submission seems to be the de facto standard since then. It has also been incorporated into the CORBA 3.0 draft.
Since the Joint Submission is available, vendors are working on their interceptor implementation in order to make it conform with the specification. The ORB used in this work, TAO [10], was one of the first to introduce support for interceptors; however, also in a proprietary way. TAO version 1.1, which is at the time of writing still the latest commercially supported version of TAO, reflects that proprietary mechanism. On the other hand, there have been a number of changes of TAO since then, bringing also its interceptor support closer to the specification. At the time of writing, the latest version is 1.1.14.
V. AN INTERCEPTOR-BASED TRACING ARCHITECTURE
From the above it should be clear that although interceptors were not designed specifically for tracing purposes, they can indeed be used to trace CORBA applications. For this, only a subset of their functionality is needed, namely that they are informed of every CORBA call.
In order to trace every call, a ServerRequestInterceptor and a ClientRequestInterceptor must be registered in every component. Each interceptor sends the trace information to the central tracer object through the usual communication channels of the system, i.e. using CORBA calls. In other words, the tracer has to be implemented as a CORBA servant, its notification methods defined in IDL.
This way, every remote procedure call (RPC) generates four events: (i) when the client issues a request; (ii) when the request reaches the server; (iii) when the server sends its reply; and (iv) when the reply arrives back at the client. If needed, all this information may be displayed. On the other hand, the user interface of the tracer may be configured so that, say, only one event is shown for each RPC. Since this is controlled centrally, the consistency of the output is guaranteed.
Since emphasis is laid on tracing high-level communication between components of the distributed system, the components involved in a particular RPC have to be identified. In order to achieve this, every component registers itself at the tracer, whereupon it gets a unique ID. Later on, when a call is issued from this component, the request is intercepted by the corresponding ClientRequestInterceptor, which in turn packs the ID of the component into a service context and adds it to the call. When the request arrives at the server side and is intercepted by the ServerRequestInterceptor, the ID of the caller is extracted. At this point the ServerRequestInterceptor knows enough (namely the ID of the caller and its own ID, which is now the ID of the callee) to inform the tracer about the call. The first two events can be generated. Similarly, at the third event, i.e. when the ServerRequestInterceptor intercepts the outgoing reply, it adds its own ID in a service context to the call, so that this information is also known at the fourth event.
Communication events are also identified using IDs, so that the tracer can recognize events belonging to the same RPC. Therefore, when the tracer is first notified of an RPC (i.e. when the first two events are fired), it generates and returns a unique communication ID. This ID is then also added to the call in a service context and included in later notifications to the tracer (when the third and fourth events are fired). It is then the tracer's responsibility to group the events belonging to the same RPC and handle them appropriately, e.g. by sorting them using the CORBA Time Service.
When interceptors themselves issue calls, care must be taken to avoid infinite loops. Namely, the interceptor will also intercept the calls it issued itself, and if it makes a call again, this results in an infinite recursion. To avoid this, the interceptors must check whether the target of the intercepted call is the tracer, and if it is, they should do nothing.
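To make the event flow concrete, the following sketch models the ID propagation and the recursion guard in plain C++, independently of any real ORB API; the types and names here are hypothetical stand-ins for the CORBA machinery described above:

```
#include <cstdio>
#include <map>
#include <string>

// A service context modeled as a key-value map travelling with the call.
using ServiceContexts = std::map<std::string, std::string>;

struct Tracer {
  int next_id = 0;
  // A negative comm_id means "first notification": a fresh ID is generated.
  int notify(const char* event, const std::string& caller,
             const std::string& callee, int comm_id) {
    if (comm_id < 0) comm_id = next_id++;
    std::printf("[comm %d] %s: %s -> %s\n", comm_id, event,
                caller.c_str(), callee.c_str());
    return comm_id;
  }
};

// Client interceptor, outgoing request: pack the caller's component ID
// into a service context; do nothing if the target is the tracer itself.
void intercept_outgoing_request(const std::string& self, bool target_is_tracer,
                                ServiceContexts& sc) {
  if (target_is_tracer) return;  // guard against infinite recursion
  sc["caller_id"] = self;
}

// Server interceptor, incoming request: caller and callee IDs are now both
// known, so events (i) and (ii) are reported; the communication ID returned
// by the tracer is stored for reuse by events (iii) and (iv).
void intercept_incoming_request(Tracer& tracer, const std::string& self,
                                ServiceContexts& sc) {
  int id = tracer.notify("request sent", sc["caller_id"], self, -1);
  tracer.notify("request received", sc["caller_id"], self, id);
  sc["comm_id"] = std::to_string(id);
}
```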
What exactly the tracer will display, depends on the mode it is used in. It has already been stated that the tracer must have at least two working modes: interactive and non-interactive mode. In interactive mode, the tracer waits for user input inside the notification methods, thus blocking the whole system; in non-interactive mode the trace information is just displayed and the notification methods return immediately. Either way, the trace information can be directed to the display or to a file (textual or as a communication diagram in PostScript format). The output is configurable, e.g. it can be specified whether to display all events or just certain kinds of events, if timestamps should also be displayed or not etc. Another mode of operation (the so-called local mode) is presented in section VI, which is useful for performance measurements.
Lastly, let us examine to what extent instrumentation can be automated. Since the used version of TAO did not yet support the standard registration mechanism of interceptors, they had to be registered manually, both with the ORB and the tracer. For this purpose, a new class (InterceptorLauncher) was created, which encapsulates in its constructor the details of the registration mechanism. So there is a single line of code that has to be inserted into the source code of every component (at the startup code of the component, before the first CORBA call that the component is involved in), creating an InterceptorLauncher object, which in turn automatically creates and registers the necessary interceptors, and stores the ID obtained from the tracer. Also, a name can be specified as an argument to the constructor of InterceptorLauncher, which will be used by the tracer when displaying events related to the component in question.
After this initialization, all tracing is done automatically; no manual instrumentation is necessary. Only the insertion of one line of code is needed for each component. But this, too, can be a problem, especially if the source code is not available. The standard registration mechanism of interceptors, as defined by the Joint Submission, will probably remedy this problem. Until this gets integrated into TAO, a slightly modified version of the TAO dynamic link library was developed, which automatically loads the interceptors. Using this library instead of the normal one, no code modifications are needed, not even a re-compile or a re-link.
VI. EVALUATION OF INTERCEPTOR-BASED TRACING
A. Implementation
The software was implemented in Visual C++, using the previously mentioned ORB TAO, and tested on Windows NT workstations. The program currently supports three output modes: textual description of the communication events on screen and in file, as well as communication diagrams in PostScript format. Moreover, it provides three modes of operation:
1. Interactive mode. Tracing information is displayed on the graphical user interface of the tracer, and user input is needed in each step to continue execution. The extent of the displayed trace information can be fine-tuned using several options: the set of RPC events to be displayed can be specified for both synchronous and asynchronous calls. The essential use cases of this mode are: Checking of correct behaviour; Locating bugs; Better understanding of the operation of the system.
2. Non-interactive mode. The only difference from interactive mode is that the execution of the program being traced is not suspended. Essential use cases: Checking of correct behaviour; Monitoring crucial applications; Better understanding of the operation of the system; Extracting documentation.
3. Local mode. No central tracer is installed, but the interceptors write the locally collected information to a (local) file. This is a stripped-down variant of the tracing architecture with limited functionality, but since network communication is kept at a minimum, this is the fastest operation mode. The system automatically switches to this mode if the interceptors cannot find the tracer. No GUI is available, therefore the tracing options must be specified in a file named tracer.ini. The essential use case is: Performance analysis.
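The contents of tracer.ini are not listed in this paper; purely as an illustration of the kind of options mentioned above (event selection, timestamps, output targets), such a file might look as follows, with all key names invented:

```
; hypothetical tracer.ini -- option names are illustrative only
output = file        ; screen | file | postscript
show_timestamps = yes
events = all         ; or a subset of the four RPC events
```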
The implemented software performed very well in the tests, thus proving three important claims:
- The aims of tracing, as defined in section II-B, can all be provided for with essentially the same tracing architecture;
- The interceptor mechanism of CORBA provides a suitable framework for such a tracing architecture;
- In general, the middleware can provide powerful support for tracing with meta-objects.
The implementation also revealed some shortcomings of the interceptor mechanism of TAO, which have been fixed by the TAO team since then or are likely to be fixed in the near future.
B. Overhead analysis
Although the implemented software performed very well functionally, it is clear that the overhead generated by interceptors can be problematic in some applications, most notably if tracing is used for time-related measurements. First, a simple model for the estimation of the overhead is presented, followed by the corresponding empirical results.
Assuming that the participating computers are much faster than the network connecting them, the overhead can be roughly calculated as the additional time caused by additional network traffic. Let \( t_c \) denote the average time needed for a call through the network, and \( t_s \) the server-side processing time of a particular RPC. (Usually, it cannot be assumed—even under the above assumption—that \( t_s = 0 \), because the server-side processing may also involve calls to other servers.) It follows that the duration of the whole RPC is \( T = 2t_c + t_s \) (see figure 3).
Now consider the case in which interceptors are also present and they notify the tracer about every RPC event. That is, an overhead of \( 2t_{c} + t_{t} \) is induced at every RPC event—where \( t_{t} \) denotes the time consumed in the interceptors and the tracer—summing up to \( T' = T + 4(2t_{c} + t_{t}) = 10t_{c} + t_{s} + 4t_{t} \). Thus, the relative overhead is
\[ r = \frac{T' - T}{T} = \frac{8t_{c} + 4t_{t}}{2t_{c} + t_{s}} \] \hspace{1cm} (1)
The worst case is when \( t_{s} = 0 \), i.e., when server-side processing is not time-consuming compared to communication:
\[ r_{\max} = \frac{8t_{c} + 4t_{t}}{2t_{c}} = 4 + 2\frac{t_{t}}{t_{c}} \] \hspace{1cm} (2)
Consequently, the relative overhead can be even more than 4 in the worst case, yielding a more than 5 times slowdown. The amount by which the relative overhead exceeds 4 depends on the amount of time spent in the interceptors and in the tracer. Most notably, if the tracer has to refresh some complicated GUI, this might take considerable time. Thus, \( t_{t} \) can be relatively high even in the case of non-interactive operation.
Of course, the situation is much better if the time needed for server-side processing is not negligible. It can be seen from (1) that if \( t_{s} \) is high enough, the overhead can become arbitrarily small.
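A quick numeric check of equations (1) and (2), as a self-contained C++ snippet (the timing values are illustrative, not measured):

```
#include <cstdio>

// Relative overhead r = (8*tc + 4*tt) / (2*tc + ts), equation (1).
double relative_overhead(double tc, double ts, double tt) {
  return (8.0 * tc + 4.0 * tt) / (2.0 * tc + ts);
}

int main() {
  // Worst case ts = 0 (equation (2)): r = 4 + 2*tt/tc.
  std::printf("ts = 0, tt = tc: r = %.2f\n", relative_overhead(1.0, 0.0, 1.0));    // 6.00
  // When server-side processing dominates, the overhead becomes small.
  std::printf("ts = 100*tc:     r = %.2f\n", relative_overhead(1.0, 100.0, 1.0));  // ~0.12
  return 0;
}
```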
One possible solution to the problem of the potentially high overhead is to declare the services of the tracer as one-way. Messages towards the tracer then do not block the interceptors: the system does not have to wait until the notification reaches the tracer, the tracer does the necessary processing, and the call returns. Such a solution would minimize the overhead, but it is only possible if the methods of the tracer do not return any results.
Local mode represents another solution: if tracing is used for time-related measurements, the interceptors generate no additional network traffic by notifying the tracer about every RPC event, but instead write all collected information (including timestamps) to local files. Normally, it can be assumed that when it comes to performance analysis, the system is already behaving correctly. Thus, caching (usually provided by the operating system by default) can be used to further decrease the overhead. However, if the system is not reliable enough, caching can be switched off (so that no RPC events are lost in the case of a crash); this way, the trade-off between speed and reliability can be tuned. If, for some reason, the overall communication scenario of the system is needed, it can be assembled afterwards from the individual log files.
It has to be noted that the above model can also be used if the whole 'distributed' system is actually located on the same computer. In this case, too, inter-process communication is the most time-consuming factor since it involves context switches which are known to be very costly on modern processors. Care has to be taken though in local mode to make sure that every process writes to a separate file.
Finally, some empirical measurements were conducted on the implemented tracing tool. A detailed evaluation is beyond the scope of this paper; here just the worst-case figures are presented for each working mode. A test application was implemented, which included an empty function on the server side (specifically for testing the worst-case scenario, i.e., when \( t_s = 0 \)). The client called this function 10,000 times. Table I contains the average duration of this operation (averaged over 5 measurements each; all measurements were performed on a PII/300 PC, under Microsoft Windows NT 4.0).
<table>
<thead>
<tr>
<th></th>
<th>Duration [sec]</th>
<th>Relative overhead [%]</th>
</tr>
</thead>
<tbody>
<tr>
<td>Without interceptors</td>
<td>29.5</td>
<td>-</td>
</tr>
<tr>
<td>Non-interactive mode</td>
<td>191.5</td>
<td>549</td>
</tr>
<tr>
<td>Local mode, without caching</td>
<td>41.5</td>
<td>41</td>
</tr>
<tr>
<td>Local mode, with caching</td>
<td>33.8</td>
<td>15</td>
</tr>
</tbody>
</table>
The figures of table I clearly justify the above estimates and show that non-interactive mode (which in this case generated a more than 6 times slowdown) is not useful for time-related measurements. However, local mode with caching presented a worst-case overhead of 15%, which is low enough to enable measurements aiming at finding performance bottlenecks.
VII. CONCLUSION
This paper has addressed the increasingly important problem of tracing distributed, heterogeneous applications. The most important contributions are:
- An investigation of the possible use cases of tracing.
- It has been shown that interceptors can be used to trace CORBA applications.
- The resulting tracing architecture can provide for all of the identified use cases.
- A new tool for tracing CORBA applications, which was found useful in empirical evaluations.
- A simple mathematical model for the estimation of the overhead generated by interceptors, which was justified by practical measurements.
REFERENCES
|
{"Source-Url": "http://www.cs.bme.hu/~manusz/publications/IEEE-Contest-2001/Mann_IEEE_Contest_2001.pdf", "len_cl100k_base": 6436, "olmocr-version": "0.1.50", "pdf-total-pages": 7, "total-fallback-pages": 0, "total-input-tokens": 41637, "total-output-tokens": 7246, "length": "2e12", "weborganizer": {"__label__adult": 0.0002655982971191406, "__label__art_design": 0.00021827220916748047, "__label__crime_law": 0.0002758502960205078, "__label__education_jobs": 0.00046753883361816406, "__label__entertainment": 4.51207160949707e-05, "__label__fashion_beauty": 0.00010007619857788086, "__label__finance_business": 0.00013887882232666016, "__label__food_dining": 0.0002188682556152344, "__label__games": 0.0003421306610107422, "__label__hardware": 0.0010023117065429688, "__label__health": 0.0003306865692138672, "__label__history": 0.0001729726791381836, "__label__home_hobbies": 5.567073822021485e-05, "__label__industrial": 0.0002837181091308594, "__label__literature": 0.00017368793487548828, "__label__politics": 0.00016224384307861328, "__label__religion": 0.00035858154296875, "__label__science_tech": 0.01491546630859375, "__label__social_life": 6.210803985595703e-05, "__label__software": 0.007114410400390625, "__label__software_dev": 0.97265625, "__label__sports_fitness": 0.00021505355834960935, "__label__transportation": 0.00037598609924316406, "__label__travel": 0.00016880035400390625}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 33511, 0.01165]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 33511, 0.63896]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 33511, 0.92965]], "google_gemma-3-12b-it_contains_pii": [[0, 4770, false], [4770, 10579, null], [10579, 15513, null], [15513, 20878, null], [20878, 27113, null], [27113, 32493, null], [32493, 33511, null]], "google_gemma-3-12b-it_is_public_document": [[0, 4770, true], [4770, 10579, null], [10579, 15513, null], [15513, 20878, null], [20878, 27113, null], [27113, 32493, null], [32493, 33511, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 33511, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 33511, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 33511, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 33511, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 33511, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 33511, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 33511, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 33511, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 33511, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 33511, null]], "pdf_page_numbers": [[0, 4770, 1], [4770, 10579, 2], [10579, 15513, 3], [15513, 20878, 4], [20878, 27113, 5], [27113, 32493, 6], [32493, 33511, 7]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 33511, 0.05634]]}
|
olmocr_science_pdfs
|
2024-11-29
|
2024-11-29
|
b01974103dc398f74a27e3f3866ae6362def9e53
|
Dicer: A Framework for Controlled, Large-Scale Web Experiments
Sarah Chasins
University of California, Berkeley, USA
schasins@cs.berkeley.edu
Phitchaya Mangpo Phothilimthana
University of California, Berkeley, USA
mangpo@cs.berkeley.edu
ABSTRACT
As dynamic, complex, and non-deterministic webpages proliferate, running controlled web experiments on live webpages is becoming increasingly difficult. To compare algorithms that take webpages as inputs, an experimenter must worry about ever-changing webpages, and also about scalability. Because webpage contents are constantly changing, experimenters must intervene to hold webpages constant, in order to guarantee a fair comparison between algorithms. Because webpages are increasingly customized and diverse, experimenters must test web algorithms over thousands of webpages, and thus need to implement their experiments efficiently. Unfortunately, no existing testing frameworks have been designed for this type of experiment.
We introduce Dicer, a framework for running large-scale controlled experiments on live webpages. Dicer’s programming model allows experimenters to easily 1) control when to enforce a same-page guarantee and 2) parallelize test execution. The same-page guarantee ensures that all loads of a given URL produce the same response. The framework utilizes a specialized caching proxy server to enforce this guarantee. We evaluate Dicer on a dataset of 1,000 real webpages, and find it upholds the same-page guarantee with little overhead.
Categories and Subject Descriptors
D.2.5 [Software Engineering]: Testing and Debugging
Keywords
Web Algorithm Testing, Testing Framework, JavaScript
1. INTRODUCTION
Algorithms that take webpages as inputs are becoming increasingly ubiquitous. From search algorithms that find data on the web, to template extraction algorithms that identify data relations on the web, to scraping algorithms that collect data from the web, the number of programs that operate over web content is on the rise. As the amount of online data continues to increase, the need for web algorithms — that is, algorithms that take webpages as inputs — will only grow.
Unfortunately, support for the development of web algorithms has not kept pace. In particular, controlled testing remains extremely difficult. While there is a preponderance of tools targeted at developers testing their own pages, these tools are not easily applied to the more general problem of running arbitrary tests over real-world, constantly changing pages. If we are to fairly compare different algorithms on real-world pages, we cannot simply run the algorithms directly; input webpages can be updated at any time, perhaps altering an experiment’s outcome, making one algorithm or another appear more successful. In controlled testing, an experimenter must hold input webpages constant while comparing different algorithms. To our knowledge, no existing JavaScript testing framework offers this functionality.
As a motivating example, we discuss one such web algorithm, the DOM node addressing problem. The task is to load a URL at time $t_1$, describe a given node $n$ from the loaded webpage, then load the same URL at a later time $t_2$, and use the description of $n$ to identify the node at time $t_2$ that corresponds to $n$. Several node addressing algorithms have been proposed [1, 2, 6, 11, 18], but because testing them is so difficult, none has been evaluated empirically.
In response to the lack of testing tools for web algorithms, we introduce Dicer, the DOM-Interacting Controlled Experiment Runner. Dicer uses a custom caching proxy server to offer a same-page guarantee, a guarantee that all requests for a given URL return the same response, regardless of server state changes and JavaScript non-determinism. Further, Dicer automatically parallelizes experiments, facilitating large-scale evaluations. Our goal is to make web experiments more accessible, and thereby make thorough testing of web algorithms more widespread. Dicer’s programming model makes it easy for users to reason about input pages and hold them stable over time, and it ensures that automatic parallelization is always possible. This brings controlled web experiments within reach of a wider audience.
In summary, we make the following contributions:
- We develop a novel programming model for running DOM-interacting controlled web experiments.
- We design and implement a proxy server that offers a stricter same-page guarantee than existing proxy servers.
- We design and implement the first JavaScript testing tool that can run DOM-interacting controlled web experiments.
2. GOALS
We identify five core properties that are critical to making a framework capable of running large-scale controlled experiments on real-world webpages. A framework must:
1. parallelize test execution
2. allow DOM interaction
3. run arbitrary JavaScript code
4. run on live pages from URLs (not only local DOMs)
5. offer a same-page guarantee
We address the need for each in turn. First, because our focus is on large-scale experiments, parallelization is key to making our target experiments practical. Second, because we target algorithms that take webpages as inputs, it is crucial to allow algorithms to use the DOM API to access and interact with webpage content. Tools that only test JavaScript functions independently of webpages, such as Node.js testing frameworks, are not suitable for our target domain. Third, a framework should permit the user to test arbitrary JavaScript code. A framework that only allows pass/fail outputs restricts the class of algorithms that can be tested. Fourth, it is important that a framework allow users to run their tests on real pages, not only on locally constructed DOMs. While users can of course download pages from URLs in order to construct their DOMs locally, this process becomes cumbersome when a user wishes to test at multiple different points in time. Last, comparing different web algorithms demands a same-page guarantee. If a user wishes to test one DOM-interacting algorithm against another, it is crucial that both receive the same page as input.
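At its core, the same-page guarantee is fetch-once caching keyed by URL. The sketch below shows only that caching half of the idea and is not Dicer's actual proxy (which must also handle JavaScript non-determinism); http_fetch is a placeholder:

```
#include <string>
#include <unordered_map>

// Placeholder for a real HTTP fetch; in Dicer this happens inside the proxy.
std::string http_fetch(const std::string& url);

// Fetch-once cache: within a session, every load of a URL returns the
// response recorded on its first load, regardless of later server changes.
class SamePageCache {
 public:
  const std::string& load(const std::string& url) {
    auto it = cache_.find(url);
    if (it == cache_.end())
      it = cache_.emplace(url, http_fetch(url)).first;
    return it->second;
  }
  void end_session() { cache_.clear(); }  // a new session may see fresh pages
 private:
  std::unordered_map<std::string, std::string> cache_;
};
```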
2.1 Motivating Example
To offer robust record and replay for live webpages, a tool must be able to identify the same DOM node in a webpage over time, even if the DOM structure of the page changes between record and replay. We will call this the node addressing problem. Using an XPath from the DOM tree's root to the target node is too fragile; new wrapper nodes and sibling nodes will break the XPath. Ids would be sufficient if all elements had ids, if those ids stayed the same throughout page redesigns, and if all web designers adhered to the one-element-per-id rule. Unfortunately, none of these conditions is met across the web. Handling real webpages requires much more care, and node addressing algorithms are typically quite complex.
We formalize a node addressing algorithm as a function from a DOM tree $T$ and a node $n \in T$ to an expression $e$, such that $e(T) = n$. Let $T'$ be a different DOM tree, and let $e(T') = n'$. We consider a node addressing algorithm robust if $n'$ is the same node that a human would select if asked to find the original node $n$ in $T'$.
Node addressing algorithms are in use in tools like CoScripter [11], iMacros [3], and Selenium’s Record and Playback [18]. Unfortunately, testing node addressing algorithms is so difficult that to our knowledge, despite the fact that this problem is central to the success of their tools, none of these node addressing algorithms have been tested against other candidate algorithms on large data sets. Each project appears to have settled on an approach that works sufficiently well on its test cases, without empirical validation.
Our motivating example will be an experiment that tests multiple node addressing algorithms against each other. Since we will not ask a human to evaluate $n'$ in our large-scale experiment, we use a simple correctness condition. If algorithm A produces $e$ such that $e(T) = n$ and $e(T') = n'$, we will consider algorithm A correct on page $T'$ if and only if clicking on $n'$ directs the browser to the same URL as clicking on $n$. Since many nodes will not respond to clicking, we only test on nodes that direct the browser to a new URL. We term these reactive nodes.
<table>
<thead>
<tr>
<th>Method</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>startSession()</td>
<td>Starts a new session.</td>
</tr>
<tr>
<td>endSession()</td>
<td>Ends current session.</td>
</tr>
<tr>
<td>stage(String ip, String it, String ot)</td>
<td>Adds stage to the current session with input program from file ip, input table from file it, which will write to file ot.</td>
</tr>
</tbody>
</table>
Table 1: The Dicer API.
Note that an experiment like this relies on all five of the crucial features identified above.
1. Parallelism: A thorough test demands running this task on many nodes, in order to reveal enough naturally broken addresses to distinguish between approaches, which makes parallel execution highly desirable.
2. DOM interaction: The algorithms must click on nodes.
3. Arbitrary JavaScript: The algorithms cannot be limited to, for instance, pass/fail outputs.
4. Running on live pages: The algorithms should run on the real sites of interest. The pages should change between training and testing.
5. Same-page guarantee: To fairly compare different algorithms, their inputs must be the same.
3. PROGRAMMING MODEL
In this section we introduce the core abstractions that form Dicer’s programming model, describe how our motivating example is implemented with these abstractions, and discuss some design decisions.
3.1 Abstractions
Our programming model is built around a few key abstractions that make it easy to express complicated experiments.
**Session**: a sequence of stages.
**Stage**: a (input table, input program) pair, which produces an output table when passed to Dicer.
**Input Table**: a set of $n$-field rows.
**Input Program**: a sequence of one or more algorithms.
**Algorithm**: a set of one or more subalgorithms.
**Subalgorithm**: a JavaScript function, accepts $n$ arguments.
**Output Table**: a set of $m$-field rows.
A session is a sequence of stages during which Dicer offers a same-page guarantee. During a session, loading a given URL always loads the same DOM tree. Each stage is defined by its input table and its input program. The first column of the input table contains URLs, indicating the pages on which the input program should be run. All remaining columns contain additional arguments to the input program. An input program may contain multiple algorithms to run for each row in the input table. If an algorithm runs over multiple pages, it must be decomposed into subalgorithms, one to run on each page. Each subalgorithm is a function that accepts $n$ arguments (corresponding to the $n$ items in each input table row). Each row in the input table corresponds to one run of the input program. For each algorithm in the input program, Dicer first directs the browser to the URL in the first column of the row, then runs the algorithm on the loaded page, passing all row cells as arguments to the algorithm. Thus, for a given row, all algorithms are run with the same arguments on fresh, identical copies of the target page. For any given input row, the return values of the different algorithms are concatenated to produce full output rows, which are appended to the output table. Only the final subalgorithm of an algorithm may produce output.
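The stage semantics can be summarized in a few lines. The sketch below is our own C++ rendering of the model just described (Dicer itself is a JavaScript framework); it simplifies by producing exactly one output row per input row, whereas Dicer allows a row to produce several:

```
#include <functional>
#include <string>
#include <vector>

using Row = std::vector<std::string>;              // first field is the URL
using Algorithm = std::function<Row(const Row&)>;  // runs on a fresh page load

// One stage: for each input row, run every algorithm with the row's fields
// as arguments, each on a fresh, identical copy of the target page, and
// concatenate the algorithms' return values into one output row. Rows are
// independent of each other, which is what makes parallelization possible.
std::vector<Row> run_stage(const std::vector<Row>& input_table,
                           const std::vector<Algorithm>& algorithms) {
  std::vector<Row> output_table;
  for (const Row& row : input_table) {
    Row out;
    for (const Algorithm& alg : algorithms) {
      Row part = alg(row);  // same-page guarantee: identical page per load
      out.insert(out.end(), part.begin(), part.end());
    }
    output_table.push_back(out);
  }
  return output_table;
}
```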
Dicer offers a simple API for interacting with these abstractions, outlined in Table 1.
### 3.2 Motivating Example Experiment
To explain the Dicer programming model more concretely, we show how we can use the framework to implement the node addressing experiment described in Section 2.1. In Dicer, this experiment is split into the four stages depicted in Figure 1 and described below.
**Stage 1** traverses the DOM tree, recording an XPath for each node. The input table has only one column, the URL column. The table contains the list of URLs on which we want to test our node addressing algorithms. The input program is an algorithm that produces an XPath for each node in a document’s DOM tree, and emits an output row for each XPath.
**Stage 2** determines which nodes are reactive. Its input table is the output table from Stage 1. For each XPath in the input table, it clicks on the node at the XPath. (Since Stages 1 and 2 run in a single session, with the same-page guarantee ensuring the same T, even a fragile XPath will serve here.) The input program is one algorithm with two subalgorithms. The first subalgorithm clicks on the node identified by the XPath. If the click loads a new page, the second subalgorithm runs on the new page. If the click does not load a new page, the second subalgorithm runs on the original page. By retrieving the current URL in the second subalgorithm and comparing it with the pre-click URL, we can determine whether the clicked node is reactive. The subalgorithm only produces an output row if the URL has indeed changed. For this stage, the output table again contains one row per XPath, but now all rows that correspond to non-reactive nodes have been removed.
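To make the two-subalgorithm structure concrete, the following is a hedged JavaScript sketch of Stage 2. The function names and the convention of returning an output row (or nothing) are illustrative assumptions; the paper does not pin down the exact subalgorithm API.

```javascript
// Hedged sketch of Stage 2's algorithm as two subalgorithms.
// Each subalgorithm receives the row cells (url, xpath) as arguments.

// Subalgorithm 1: runs on the original page and clicks the node at the XPath.
function clickNode(url, xpath) {
  var node = document.evaluate(
    xpath, document, null,
    XPathResult.FIRST_ORDERED_NODE_TYPE, null
  ).singleNodeValue;
  node.click();
}

// Subalgorithm 2: runs on whichever page is loaded after the click, and
// emits an output row only if the URL actually changed (a reactive node).
function recordIfReactive(url, xpath) {
  if (window.location.href !== url) {
    return [url, xpath, window.location.href]; // one output row
  }
  // no return value: no output row for non-reactive nodes
}
```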
**Stage 3** takes the XPaths of all reactive nodes as input, and runs each node addressing algorithm on each reactive node, producing e expressions as output. The input table has one row for each reactive node. The first column contains the URLs at which those reactive nodes are found. The second column has the XPaths of the nodes. The input program is a set of algorithms, all the node addressing algorithms we want to test. Each of these algorithms has only one subalgorithm, a JavaScript function that takes all input columns, including the XPath, as its arguments. Each algorithm returns an expression e. All the algorithms’ outputs together form an output row. Thus the output table contains a column corresponding to each node addressing algorithm.
**Stage 4** tests the e expressions, taking the e expressions produced by Stage 3 as input. If we were to test the algorithms' outputs, the e expressions, in the same session, our results would be rather boring: if $T' = T$, all algorithms should succeed on $T'$. Thus, we test the algorithms' outputs in a new session, so that pages are allowed to change. This stage runs one algorithm per node addressing algorithm. Each algorithm uses an e expression to find a node on the new page, and then clicks on it. It produces the new post-click URL as output. The new post-click URL can be compared with the Stage 2 post-click URL to determine whether each algorithm successfully identifies the corresponding node.
To run this experiment with Dicer, a user only needs the JavaScript input programs described above, a list of URLs on which to run the experiment, and the code in Listing 1 which uses the Dicer API to set up the experiment.
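Listing 1 itself is not reproduced here; the following hedged Java sketch shows what such setup code could look like using only the API from Table 1. The Dicer class name, the file names, and the stage wiring are illustrative assumptions, not the paper's actual Listing 1.

```java
// Hedged sketch of Listing-1-style setup code for the four-stage experiment.
// Only startSession(), stage(ip, it, ot), and endSession() from Table 1 are
// assumed; everything else (class name, file names) is illustrative.
public class NodeAddressingExperiment {
  public static void main(String[] args) {
    Dicer dicer = new Dicer();

    // Session 1: Stages 1-3 run under a single same-page guarantee.
    dicer.startSession();
    dicer.stage("xpaths.js", "urls.table", "allNodes.table");        // Stage 1
    dicer.stage("reactive.js", "allNodes.table", "reactive.table");  // Stage 2
    dicer.stage("addressers.js", "reactive.table", "exprs.table");   // Stage 3
    dicer.endSession();

    // Session 2: Stage 4 runs in a fresh session so pages may change.
    dicer.startSession();
    dicer.stage("testExprs.js", "exprs.table", "results.table");     // Stage 4
    dicer.endSession();
  }
}
```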
### 3.3 The Design of Stage Output
Recall that a single input row may produce any number of output rows. Also recall that we allow subalgorithms. Together these features have the potential to complicate our programming model. We keep the design simple and usable by allowing only the final subalgorithm to produce output.
An alternative approach would allow all subalgorithms to produce output. This is useful for cases in which earlier subalgorithms can access data that later subalgorithms cannot, but it substantially complicates the programming model, because the outputs of different subalgorithms may vary in number, and must somehow be stitched together to produce complete output rows. Fortunately, algorithms that would benefit from the more complicated programming model can be refactored to adhere to our simpler one. For instance, recall that our motivating example needs to compare the URLs before and after clicking on a node. The pre-click URL is available during the first subalgorithm, but not during the later subalgorithm. To complete this task in the simple programming model, it is split into two stages. The first stage stores the original URL, while the second clicks the link and stores the second URL. Because this approach simplifies the user’s experiment design experience, and because splitting such tasks across stages is sufficient to make our approach general, this is the design we have adopted.
We considered allowing all subalgorithms to produce output, but requiring the user’s JavaScript subalgorithm code to associate each slice of an output row with a row ID. All cells in a row would share an ID. We believe that this requirement complicates the programming model, putting a burden even on users with simple tests. We rejected this approach, choosing to optimize for usability in the common case. Another alternative design would require that the number of output rows be the same across subalgorithms, allowing the framework to use ordering to align slices and produce the full rows. A final option would allow each algorithm to produce only one output row per input row. We decided against these last two designs because of how substantially they diminish the expressiveness of the model.
For experimenters who need the outputs of different algorithms to achieve a particular alignment with each other, this design will not be convenient. Experimenters with this use case would be best served by an ID alignment approach like the one described above for subalgorithms. Because of the burden it places on simple experiments, we did not choose an ID alignment model. However, advanced users can leverage order alignment to simulate ID alignment by associating each algorithm output with an ID, then emitting outputs in an ID-defined order. Thus our design optimizes for usability in the common case, but provides users enough flexibility to handle the uncommon alignment case.
---
¹ Redirects and JavaScript-controlled target URLs make clicking the only reliable way to collect post-click URLs.
4. IMPLEMENTATION
The basic architecture of Dicer, illustrated in Figure 3, revolves around directing a set of worker threads, each of which controls a browser instance. Their web traffic goes through our custom proxy server, which implements a caching scheme that upholds our same-page guarantee.
Source code for Dicer is available at https://github.com/schasins/dicer
4.1 The Dicer Library
We have implemented our framework as a Java library. Users interact with the simple abstractions described in Section 3.1 as in Listing 1. Because Dicer is implemented as a Java library, users can interleave Dicer processing with other Java processing as necessary — for instance to combine the outputs of two stages into an input for another stage.
Dicer uses Selenium [3] to control browser instances. Headless WebKits were not sufficiently robust or reliable for our domain. Prototype implementations of Dicer built on top of PhantomJS [16] and Ghost.py [9] had respectively 0.6% and 12.4% rates of incorrect answers on a simple title extraction benchmark. In contrast, the Selenium implementation yields 0 incorrect answers on the same benchmark.
Dicer automatically parallelizes experiments. It runs a given input program across multiple input table rows in parallel. Also, Dicer can run multiple algorithms from a given input program across each input row in parallel. The Dicer programming model ensures that it is always correct to parallelize these components of an experiment. Dicer uses a shared queue to distribute tasks across worker threads.
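As a concrete illustration of this scheme, here is a minimal, hedged Java sketch of workers pulling row tasks from a shared queue; the class names and the poison-pill shutdown convention are illustrative assumptions, not Dicer's actual source.

```java
import java.util.concurrent.BlockingQueue;

// Hedged sketch of shared-queue task distribution across worker threads.
class RowTask {
  final String[] row; // one input table row (URL plus extra arguments)
  RowTask(String[] row) { this.row = row; }
}

class Worker implements Runnable {
  static final RowTask POISON = new RowTask(null); // shutdown marker
  private final BlockingQueue<RowTask> queue;
  Worker(BlockingQueue<RowTask> queue) { this.queue = queue; }

  @Override
  public void run() {
    try {
      for (RowTask t = queue.take(); t != POISON; t = queue.take()) {
        // Each worker drives its own browser instance: load the row's URL,
        // then run the input program with the row cells as arguments.
        process(t.row);
      }
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
    }
  }

  private void process(String[] row) { /* browser work elided */ }
}
```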
4.2 The Dicer Proxy Server
Enforcing our same-page guarantee requires controlling for both non-determinism in the server and non-determinism in the pages’ own JavaScript code.
4.2.1 Server-Level Non-Determinism
Dicer uses a caching proxy server to enforce the same-page guarantee. It makes a new cache whenever a new session is created and serves all recurring URLs from that cache until the end of the session. All URLs, including URLs loaded directly by Dicer, URLs loaded within iframes, and URLs loaded via AJAX go through the proxy server.
Despite the preponderance of existing caching proxy servers, none proved sufficiently configurable to meet Dicer’s needs. Squid cache [19] can be configured to ignore some web cache policy parameters (e.g. ‘no-cache,’ ‘must-revalidate,’ and ‘expiration’), but not others (e.g. ‘Vary’). Thus, Squid will never cache any page with “Vary: *” in the header. Apache Traffic Server [5] provides even less cache configuration control. Other proxy servers can be configured to ignore all caching policy parameters, but are too fragile to be useful for large-scale experiments. Some can only handle HTTPS traffic with restrictions. For instance, Polipo [7] can be configured appropriately, but it is not very stable, and cannot decrypt or modify HTTPS traffic.
To meet our framework’s demands, we implemented a custom caching proxy server. Dicer directs all request-response traffic through our caching proxy server, and the caching proxy server in turn directs all request-response traffic through an SSL-stripping proxy server [13], as illustrated in Figure 3. The SSL-stripping layer allows our proxy server to handle HTTPS traffic. It decrypts responses from the original servers and forwards the plain text to the caching proxy server, as if they were HTTP responses. Thus, the proxy server can freely modify the content of any response, even an HTTPS response. This is critical to mitigating JavaScript non-determinism, as discussed in Section 4.2.2.
Our custom proxy server stores every response into its cache, ignoring all web cache policy parameters in the header. When a framework browser instance requests a given URL, it always elicits the same response from the server. The only exception to the permanent caching rule addresses cyclic redirects. Some pages use a cyclic redirect process, redirecting a request for URL X to URL Y (with a ‘no-cache’ policy), and redirecting a request for Y to X, until eventually the originally requested X is ready, and the X response is no longer a redirect. At this point, the response for X contains the final page content that our cache should associate with URL X. In this scenario, if neither X’s nor Y’s associated response can be altered, our system will loop forever. Our server addresses this issue by maintaining a redirect table and running a cycle check before recording a redirect response. If the addition of the redirect response would cause such a cycle, the server removes the pre-existing cached response that produces the cycle.
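A hedged sketch of this cycle check follows; the data structures and the choice of which entry to evict are illustrative, since the paper does not give the exact bookkeeping.

```java
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Hedged sketch of the redirect table and cycle check described above.
class RedirectTable {
  private final Map<String, String> redirects = new HashMap<>();

  // Would caching the redirect "from -> to" close a redirect cycle?
  boolean wouldCycle(String from, String to) {
    Set<String> seen = new HashSet<>();
    seen.add(from);
    for (String cur = to; cur != null; cur = redirects.get(cur)) {
      if (!seen.add(cur)) return true; // revisited a URL: cycle detected
    }
    return false;
  }

  // Record a redirect, evicting the pre-existing cached response that
  // would otherwise complete the cycle (illustrative eviction choice).
  void record(String from, String to, Map<String, byte[]> cache) {
    if (wouldCycle(from, to)) {
      cache.remove(to);
      redirects.remove(to);
    }
    redirects.put(from, to);
  }
}
```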
4.2.2 JavaScript-Level Non-Determinism
Ensuring server-level determinism is not sufficient to offer a same-page guarantee. Even if a given URL always retrieves the same response, the page’s own JavaScript is a source of non-determinism. Non-deterministic JavaScript may itself modify the page, or may make new requests to the server.
The Mugshot project [14] identifies `Math.random` and the JavaScript `Date` class as the two JavaScript functions that introduce non-determinism. Since their work, Navigation Timing has been introduced and become prevalent, and is now another substantial source of JavaScript non-determinism. To prevent these sources of non-determinism from undermining our same-page guarantee, our proxy server inserts a script into all HTML responses. This script replaces `Math.random` with a deterministic, explicitly seeded algorithm. During a given session, all pages receive the same seed. The script also replaces the `Date` constructor, which returns the current date, with a constructor that returns the date at the beginning of the current session. Finally, the script replaces `window.performance` with an empty dictionary to handle variations in Navigation Timing.
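The inserted script might look roughly like the following hedged sketch. The LCG shown is an arbitrary choice of seeded PRNG, and the literal seed and timestamp stand in for per-session values the proxy would inject; Dicer's actual algorithm is not specified here.

```javascript
// Hedged sketch of the determinism-enforcing script described above.
(function (seed, sessionStart) {
  // Replace Math.random with a deterministic, explicitly seeded LCG.
  var state = seed >>> 0;
  Math.random = function () {
    state = (state * 1664525 + 1013904223) >>> 0;
    return state / 4294967296;
  };

  // Replace the Date constructor so "now" is frozen at session start.
  var NativeDate = Date;
  window.Date = function () {
    return new NativeDate(sessionStart);
  };
  window.Date.now = function () { return sessionStart; };
  window.Date.prototype = NativeDate.prototype;

  // Neutralize Navigation Timing.
  window.performance = {};
})(12345, 1430000000000); // placeholder seed and session-start time
```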
Cookies are also a potential source of JavaScript-level non-determinism, since they can change during an experiment. Thus Dicer turns off cookie storage for its browser instances.
Note that these are not the only sources of non-determinism in the browser. For instance, the browser’s scheduler is non-deterministic. If multiple requests are outstanding at once, or if a page’s JavaScript uses the `setTimeout` function, scheduler non-determinism may cause the page to diverge. However we find these ordering-based sources of non-determinism have little effect in practice. (See Section 5.1 for evaluation details.)
5. EVALUATION
In this section we evaluate how well Dicer upholds the same-page guarantee, as well as the effect of the same-page guarantee on framework execution time.
5.1 Same-Page Guarantee
To assess our framework’s same-page guarantee, we use a data set of more than 200,000 DOM nodes: specifically, all nodes in the Alexa top 100 webpages. We load each webpage twice in a single Dicer session and compare DOMs. We observe the percentage of unmatched nodes with no non-determinism control in place, with only server-level non-determinism control in place, and with both server- and JavaScript-level non-determinism control in place. We consider a node matched if both its position in the DOM tree and its text content are the same across loads.
Table 2 displays our results. Note that the failure to sufficiently control for non-determinism may affect our results in two ways. It may mean that a page times out in one load but not another, or it may mean that a page is loaded both times, but with different content. In Table 2, the “Pre-Filtering” row reflects the percentage of unmatched nodes, including nodes that are considered to be unmatched because a page times out during only one of its loads. The “Post-Filtering” row reflects the percentage of unmatched nodes after the removal of nodes from pages that timed out in one of the loads. That is, the set of nodes is first restricted to the nodes from pages that successfully loaded both times. We then identify unmatched nodes in that restricted set.
When we control for both server- and JavaScript-level non-determinism, as described in Section 4.2, only 0.1% of nodes were unmatched. Also note that when both server- and JavaScript-level non-determinism are controlled, non-deterministic timeouts are eliminated. We conclude that our approach to non-determinism control is sufficient for our target domain. While we could obtain stronger determinism guarantees by building a custom browser with a deterministic scheduler, this approach would sacrifice the performance and external validity advantages of using a real production browser. We leave heavyweight techniques with stronger determinism guarantees for future work.
5.2 Performance Impact
To assess the overhead associated with upholding the same-page guarantee, we compare loading time with and without our caching proxy server in place. We consider both the first loading time, at which point the proxy server must retrieve and store all responses, and the second loading time, at which point it serves responses from the cache. We compare the execution times of a simple title extraction benchmark on Alexa’s top 1,000 webpages [4].
Figure 4 reveals that the first load with the same-page guarantee exhibits a 31.1% slowdown over loads without the same-page guarantee. This is the overhead associated with modifying and saving the retrieved pages, a cost incurred for the first load of any URL. In contrast, the second load with the same-page guarantee exhibits a 19.4% speedup over loads without the same-page guarantee. This reflects the performance benefits of caching.
Since the performance impact on first loads is manageable, and the performance of all future loads is better with the same-page guarantee, we conclude that our framework’s
execution times are well within the acceptable range. In light of the fact that our proxy server has not yet been optimized for performance, we are satisfied that the cost of upholding the same-page guarantee is low.
5.3 Motivating Example
We used Dicer to test five node addressing algorithms on a dataset of 25,000 nodes. The results determined the node addressing approach of a record and replay tool, Ringer [2].
6. RELATED WORK
Of the many existing tools for running JavaScript tests, almost all are targeted towards web developers who want to test their own pages, or even just their JavaScript. We discuss the main subcategories of this class of tools.
We start with tools targeted towards web developers running unit tests. Jasmine [8] is one of the most prevalent tools. While its ease of use makes it an excellent tool for small-scale experiments, it lacks many of the characteristics we desire for large-scale, general purpose web experiments. First, its parallelization mechanism is quite limited. Second, it uses a restrictive programming model, tailored to offer pass/fail responses for each test. Third, it only runs on locally constructed DOMs. The same restrictions make projects such as QUnit [17], Mocha [15], and YUI Test [12] unsuitable for large-scale experiments. In fact, QUnit, Mocha, and YUI Test do not offer even the limited parallelization that Jasmine provides.
Some tools, like Vows [10], offer parallelization, but are aimed only at testing JavaScript. These typically run on Node.js, which eliminates any DOM-interactive code from their domains, and naturally any URL-loaded pages.
Finally we consider web automation tools, program-controllable browsers like Ghost.py [9], PhantomJS [16], and Selenium [3]. None of these is explicitly a testing framework, so they do not offer convenient programming models for large-scale experiments. Also, Ghost.py and PhantomJS have no built-in parallelization. There is a variation on Selenium, Selenium Grid [20], that does offer parallelization. However, it is tailored for users who want to run the same small tests on multiple browsers and on multiple operating systems, usually to test a site’s browser compatibility, rather than for users with many distinct results to collect as part of a large-scale experiment. Ghost.py, PhantomJS, Selenium, and other web automation tools like them, do all offer DOM interaction, the ability to run arbitrary JavaScript code, and the ability to load pages from URLs. However, in the case of headless WebKits, we found that their limited robustness reduced the ability to run arbitrary code.
All of the tools above lack a same-page guarantee. Ultimately most tools, being targeted towards developers, are intended for users who know their test pages will stay the same, or know how they will change. This makes them generally unsuitable for broader web experiments.
7. CONCLUSION
To this point, no JavaScript testing framework has been targeted at helping users test their web algorithms in controlled experiments. This paper has presented a testing framework for running large-scale, DOM-interacting controlled web experiments. It handles test parallelization, and permits users to run arbitrary DOM-interacting code on real webpages. Most importantly, it is the first framework to offer a same-page guarantee. This guarantee allows users to control for the fact that webpages change over time, in order to conduct fair experiments. Further, our framework’s caching proxy server offers a better same-page guarantee than any existing proxy server. We believe that our framework represents an important step in web algorithm testing, bringing controlled web experiments within reach of a wider population. As the need for web-processing algorithms grows steadily greater, tools like this framework will help programmers handle the rapid and constant evolution of their webpage inputs, enabling them to build robust, empirically validated web algorithms.
8. ACKNOWLEDGEMENTS
This work is supported in part by NSF Grants CCF-1018729, CCF-1139138, CCF-1337415, and CCF-0916351, NSF Graduate Research Fellowship DGE-1106400, a grant from DOE FOA-0000619, a grant from DARPA FA8750-14-C-0011, and gifts from Mozilla, Nokia, Intel and Google. Additional thanks to John Kubiatowicz and Anthony Joseph.
9. REFERENCES
[16] PhantomJS. http://phantomjs.org/.
Notes on the BLCOP Package
Francisco Gochez, Mango Solutions
February 4, 2015
1 Introduction
The BLCOP package is an implementation of the Black-Litterman and copula opinion pooling frameworks. This vignette gives an overview of these two opinion-blending methods, briefly shows how they are implemented in this package, and closes with a short discussion of how the package may evolve in the future (any feedback would be greatly appreciated).
2 Overview of the Black-Litterman model
The Black-Litterman model was devised in 1992 by Fischer Black and Robert Litterman. Their goal was to create a systematic method of specifying and then incorporating analyst/portfolio manager views into the estimation of market parameters. Let \( A = \{a_1, a_2, \ldots, a_n\} \) be a set of random variables representing the returns of \( n \) assets. In the BL approach, the joint distribution of \( A \) is taken to be multivariate normal, i.e. \( A \sim N(\mu, \Sigma) \). The problem they then addressed was that of incorporating an analyst’s views into the estimation of the market mean \( \mu \)\(^1\). Suppose that we take \( \mu \) itself to be a random variable which is normally distributed, and moreover that its dispersion is proportional to that of the market. Then
\[
\mu \sim N(\pi, \tau \Sigma),
\]
where \( \pi \) is some underlying parameter which can be determined by the analyst using some established procedure. Black and Litterman argued from equilibrium considerations that this should be obtained from the intercepts of the capital-asset pricing model.
Next, the analyst forms subjective views on the actual mean of the returns for the holding period. This is the part of the model that allows the analyst/portfolio manager to include his or her views. BL proposed that views should be made on linear combinations (i.e. portfolios) of the asset return variable means \( \mu \). Each view would take the form of a “mean plus error”. Thus for example, a typical view would look as follows:
\[
p_{i1}\mu_1 + p_{i2}\mu_2 + \cdots + p_{in}\mu_n = q_i + \epsilon_i,
\]
where \( \epsilon_i \sim N(0, \sigma_i^2) \). The standard deviations \( \sigma_i^2 \) of each view could be taken as controlling the confidence in each view. Collecting these views into a matrix we will call the “pick” matrix, we obtain the “general” view specification
\[
P\mu \sim N(q, \Omega).
\]
where \( \Omega \) is the diagonal matrix \( \text{diag}(\sigma_1^2, \sigma_2^2, \ldots, \sigma_n^2) \). It can be shown (cf. [Me08], p. 5 and appendix), based on Bayes’ Law, that the posterior distribution of the market mean conditional on these views is
\[
\mu_{|q,\Omega} \sim N(\mu_{BL}, \Sigma_{BL})
\]
where
\[
\mu_{BL} = ((\tau \Sigma)^{-1} + P^T \Omega^{-1} P)^{-1} \left((\tau \Sigma)^{-1} \pi + P^T \Omega^{-1} q\right)
\]
\[
\Sigma_{BL} = ((\tau \Sigma)^{-1} + P^T \Omega^{-1} P)^{-1}
\]
We can then obtain the posterior distribution of the market by taking \( A_{|q,\Omega} = \mu_{|q,\Omega} + Z \), where \( Z \sim N(0, \Sigma) \) is independent of \( \mu \). One then obtains that \( E[A_{|q,\Omega}] = \mu_{BL} \) and \( \mathrm{Var}[A_{|q,\Omega}] = \Sigma + \Sigma_{BL} \) ([Me08], p. 5).
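For concreteness, these expressions can be evaluated directly in R. The following is a hedged sketch with toy numerical inputs; it does not use the package's own code.

```r
# Direct computation of the Black-Litterman posterior from the formulas
# above; all numerical inputs are toy values chosen for illustration.
tau   <- 1/2
sigma <- matrix(c(0.04, 0.01, 0.01, 0.09), 2, 2) # prior market covariance
piEq  <- c(0.02, 0.03)                           # equilibrium means (pi)
P     <- matrix(c(1, -1), nrow = 1)              # one relative view
q     <- 0.05                                    # view mean
omega <- matrix(0.01)                            # view variance (Omega)

iTauSigma <- solve(tau * sigma)
sigmaBL   <- solve(iTauSigma + t(P) %*% solve(omega) %*% P)
muBL      <- sigmaBL %*% (iTauSigma %*% piEq + t(P) %*% solve(omega) %*% q)
```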
Let us now see how these ideas are implemented in the BLCOP package.
\(^1\)A. Meucci has reformulated the model in terms of forming views directly on market realization rather than the mean, and in my opinion this formulation is considerably clearer. See [Me08]
3 Using the Black-Litterman model in BLCOP
The implementation of the Black-Litterman model in BLCOP is based on objects that represent views on the market and objects that represent the posterior distribution of the market after blending the views. We will illustrate this with a simple example. Suppose that an analyst wishes to form views on 6 stocks, 2 of which are technology stocks and the other 4 of which are from the financial sector. Initially, she believes that the average of the 2 tech stocks will outperform one of the financial stocks, say $\frac{1}{2}(\text{DELL} + \text{IBM}) - \text{MS} \sim N(0.06, 0.01)$. We will create a BLViews class object with the BLViews constructor function. Its arguments are the “pick” matrix, a vector of confidences, the vector “q”, and the names of the assets in one’s “universe”. Please note that the following examples may require the suggested fPortfolio and mnormt packages.
```r
> pickMatrix <- matrix(c(1/2, -1, 1/2, rep(0, 3)), nrow = 1, ncol = 6)
> views <- BLViews(P = pickMatrix, q = 0.06, confidences = 100,
+ assetNames = colnames(monthlyReturns))
> views
```
1 : 0.5*IBM+-1*MS+0.5*DELL=0.06 + eps. Confidence: 100
Next, we need to determine the “prior” distribution of these assets. The analyst may for instance decide to set these means to 0, and then calculate the variance-covariance matrix of these through some standard estimation procedure (e.g. exponentially weighted moving average). Here we use cov.mve from the MASS package.
```r
> priorMeans <- rep(0, 6)
> priorVarcov <- cov.mve(monthlyReturns)$cov
```
We can now calculate the posterior market distribution using the posteriorEst function. This takes as parameters the view object, the prior covariance and mean, and “tau”\(^2\). The procedure for setting \(\tau\) is the subject of some controversy in the literature, but here we shall set it to 1/2.
```r
> marketPosterior <- posteriorEst(views = views, sigma = priorVarcov,
+ mu = priorMeans, tau = 1/2)
```
Prior means:
<table>
<thead>
<tr>
<th>IBM</th>
<th>MS</th>
<th>DELL</th>
<th>C</th>
<th>JPM</th>
<th>BAC</th>
</tr>
</thead>
<tbody>
<tr>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
</tbody>
</table>
Posterior means:
<table>
<thead>
<tr>
<th>IBM</th>
<th>MS</th>
<th>DELL</th>
<th>C</th>
<th>JPM</th>
<th>BAC</th>
</tr>
</thead>
<tbody>
<tr>
<td>0.0010355484</td>
<td>-0.0100833606</td>
<td>0.0123780212</td>
<td>-0.0033628579</td>
<td>-0.0041899317</td>
<td>-0.0007841554</td>
</tr>
</tbody>
</table>
Posterior covariance:
<table>
<thead>
<tr>
<th></th>
<th>IBM</th>
<th>MS</th>
<th>DELL</th>
<th>C</th>
<th>JPM</th>
<th>BAC</th>
</tr>
</thead>
<tbody>
<tr>
<td>IBM</td>
<td>0.012526216</td>
<td>0.010726825</td>
<td>0.010231239</td>
<td>0.007619611</td>
<td>0.008808647</td>
<td>0.002739933</td>
</tr>
<tr>
<td>MS</td>
<td>0.010726825</td>
<td>0.016247958</td>
<td>0.009073655</td>
<td>0.008678542</td>
<td>0.011413792</td>
<td>0.002525494</td>
</tr>
<tr>
<td>DELL</td>
<td>0.010231239</td>
<td>0.009073655</td>
<td>0.023500595</td>
<td>0.005503472</td>
<td>0.008743611</td>
<td>0.001327366</td>
</tr>
<tr>
<td>C</td>
<td>0.007619611</td>
<td>0.008678542</td>
<td>0.005503472</td>
<td>0.00844480</td>
<td>0.008360002</td>
<td>0.003770900</td>
</tr>
<tr>
<td>JPM</td>
<td>0.008808647</td>
<td>0.011413792</td>
<td>0.008743611</td>
<td>0.008360002</td>
<td>0.016860110</td>
<td>0.005260369</td>
</tr>
<tr>
<td>BAC</td>
<td>0.002739933</td>
<td>0.002525494</td>
<td>0.001327366</td>
<td>0.003770900</td>
<td>0.005260369</td>
<td></td>
</tr>
</tbody>
</table>
Now suppose that we wish to add another view, this time on the average of the four financial stocks. This can be done conveniently with addBLViews as in the following example:
\(^2\)An additional parameter called kappa will be discussed shortly
```r
> finViews <- matrix(ncol = 4, nrow = 1, dimnames = list(NULL, c("C", "JPM", "BAC", "MS")))
> finViews[, 1:4] <- rep(1/4, 4)
> views <- addBLViews(finViews, 0.15, 90, views)
> views
```

1 : 0.5*IBM+-1*MS+0.5*DELL=0.06 + eps. Confidence: 100
2 : 0.25*MS+0.25*C+0.25*JPM+0.25*BAC=0.15 + eps. Confidence: 90
We will now recompute the posterior, but this time using the capital asset pricing model to compute the “prior” means. Rather than manually computing these, it is convenient to use the BLPosterior wrapper function. It will compute these “alphas”, as well as the variance-covariance matrix of a returns series, and will then call posteriorEst automatically.
```r
> marketPosterior <- BLPosterior(as.matrix(monthlyReturns), views, tau = 1/2,
+   marketIndex = as.matrix(sp500Returns), riskFree = as.matrix(US13wTB))
```
Prior means:
<table>
<thead>
<tr>
<th>IBM</th>
<th>MS</th>
<th>DELL</th>
<th>C</th>
<th>JPM</th>
<th>BAC</th>
</tr>
</thead>
<tbody>
<tr>
<td>0.0208</td>
<td>0.0595</td>
<td>0.0170</td>
<td>0.0144</td>
<td>0.0273</td>
<td>0.0028</td>
</tr>
</tbody>
</table>
Posterior means:
<table>
<thead>
<tr>
<th>IBM</th>
<th>MS</th>
<th>DELL</th>
<th>C</th>
<th>JPM</th>
<th>BAC</th>
</tr>
</thead>
<tbody>
<tr>
<td>0.0634</td>
<td>0.0719</td>
<td>0.0778</td>
<td>0.0403</td>
<td>0.0688</td>
<td>0.0259</td>
</tr>
</tbody>
</table>
Posterior covariance:
<table>
<thead>
<tr>
<th>IBM</th>
<th>MS</th>
<th>DELL</th>
<th>C</th>
<th>JPM</th>
<th>BAC</th>
</tr>
</thead>
<tbody>
<tr>
<td>0.0213</td>
<td>0.0105</td>
<td>0.0125</td>
<td>0.0085</td>
<td>0.0052</td>
<td>0.0053</td>
</tr>
</tbody>
</table>
Both BLPosterior and posteriorEst have a kappa parameter which may be used to replace the matrix \(\Omega\) of confidences in the posterior calculation. If it is greater than 0, then \(\Omega\) is set to \(\kappa P \Sigma P^T\) rather than \(\text{diag}(\sigma_1^2, \sigma_2^2, \ldots, \sigma_n^2)\). This choice of \(\Omega\) is suggested by several authors, and it leads to the confidences being determined by the volatilities of the asset returns.
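For example, with the pick matrix and prior covariance constructed in the previous section, this alternative \(\Omega\) can be computed directly (a hedged sketch; the kappa value is arbitrary):

```r
# Omega implied by kappa > 0, per the description above
# (pickMatrix and priorVarcov as constructed earlier)
kappa <- 0.5
omega <- kappa * (pickMatrix %*% priorVarcov %*% t(pickMatrix))
```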
A user may also be interested in comparing allocations that are optimal under the prior and posterior distributions. The fPortfolio package of the Rmetrics project ([RmCTWu09]), for example, has a rich set of functionality available for portfolio optimization. The helper function optimalPortfolios.fPort was created to wrap these functions for exploratory purposes.
```r
> optPorts <- optimalPortfolios.fPort(marketPosterior, optimizer = "tangencyPortfolio")
> optPorts
```

$priorOptimPortfolio
Title:
MV Tangency Portfolio
Estimator: getPriorEstim
Solver: solveRquadprog
Optimize: minRisk
Constraints: LongOnly
Portfolio Weights:
<table>
<thead>
<tr>
<th>IBM</th>
<th>MS</th>
<th>DELL</th>
<th>C</th>
<th>JPM</th>
<th>BAC</th>
</tr>
</thead>
<tbody>
<tr>
<td>0.0765</td>
<td>0.9235</td>
<td>0.0000</td>
<td>0.0000</td>
<td>0.0000</td>
<td>0.0000</td>
</tr>
</tbody>
</table>
Covariance Risk Budgets:
IBM MS DELL C JPM BAC
Target Returns and Risks:
mean mu Cov Sigma CVaR VaR
0.0000 0.0566 0.1460 0.0000 0.0000
Description:
Wed Feb 04 08:20:55 2015 by user: Administrator
$posteriorOptimPortfolio
Title:
MV Tangency Portfolio
Estimator: getPosteriorEstim
Solver: solveRquadprog
Optimize: minRisk
Constraints: LongOnly
Portfolio Weights:
IBM MS DELL C JPM BAC
0.3633 0.1966 0.1622 0.0000 0.2779 0.0000
Covariance Risk Budgets:
IBM MS DELL C JPM BAC
Target Returns and Risks:
mean mu Cov Sigma CVaR VaR
0.0000 0.0689 0.1268 0.0000 0.0000
Description:
Wed Feb 04 08:20:55 2015 by user: Administrator
attr(,"class")
[1] "BLOptimPortfolios"
> par(mfcol = c(2, 1))
$mfcol
[1] 1 1
> weightsPie(optPorts$priorOptimPortfolio)
IBM MS
0.0764997 0.9235003
> weightsPie(optPorts$posteriorOptimPortfolio)
IBM MS DELL JPM
0.3632847 0.1966260 0.1622156 0.2778737
Additional parameters may be passed into function to control the optimization process. Users are referred to the `fPortfolio` package documentation for details.
```r
> optPorts2 <- optimalPortfolios.fPort(marketPosterior,
+ constraints = "minW[1:6]=0.1", optimizer = "minriskPortfolio")
> optPorts2
```
$priorOptimPortfolio
Title:
MV Minimum Risk Portfolio
Estimator: getPriorEstim
Solver: solveRquadprog
Optimize: minRisk
Constraints: minW
Portfolio Weights:
<table>
<thead>
<tr>
<th>IBM</th>
<th>MS</th>
<th>DELL</th>
<th>C</th>
<th>JPM</th>
<th>BAC</th>
</tr>
</thead>
<tbody>
<tr>
<td>0.1137</td>
<td>0.1000</td>
<td>0.1000</td>
<td>0.1098</td>
<td>0.1000</td>
<td>0.4764</td>
</tr>
</tbody>
</table>
Covariance Risk Budgets:
<table>
<thead>
<tr>
<th>IBM</th>
<th>MS</th>
<th>DELL</th>
<th>C</th>
<th>JPM</th>
<th>BAC</th>
</tr>
</thead>
<tbody>
<tr>
<td>+7.6 %</td>
<td>+92.4 %</td>
<td>+36.3 %</td>
<td>+19.7 %</td>
<td>+16.2 %</td>
<td>+27.8 %</td>
</tr>
</tbody>
</table>
Target Returns and Risks:
<table>
<thead>
<tr>
<th>mean</th>
<th>mu</th>
<th>Cov</th>
<th>Sigma</th>
<th>CVaR</th>
<th>VaR</th>
</tr>
</thead>
<tbody>
<tr>
<td>0.0000</td>
<td>0.0157</td>
<td>0.0864</td>
<td>0.0000</td>
<td>0.0000</td>
<td>0.0000</td>
</tr>
</tbody>
</table>
Description:
Wed Feb 04 08:20:55 2015 by user: Administrator
$posteriorOptimPortfolio
Title:
MV Minimum Risk Portfolio
Estimator: getPosteriorEstim
Solver: solveRquadprog
Optimize: minRisk
Constraints: minW
Portfolio Weights:
<table>
<thead>
<tr>
<th>IBM</th>
<th>MS</th>
<th>DELL</th>
<th>C</th>
<th>JPM</th>
<th>BAC</th>
</tr>
</thead>
<tbody>
<tr>
<td>0.1000</td>
<td>0.1000</td>
<td>0.1000</td>
<td>0.1326</td>
<td>0.1000</td>
<td>0.4674</td>
</tr>
</tbody>
</table>
Covariance Risk Budgets:
<table>
<thead>
<tr>
<th>IBM</th>
<th>MS</th>
<th>DELL</th>
<th>C</th>
<th>JPM</th>
<th>BAC</th>
</tr>
</thead>
</table>
Target Returns and Risks:
<table>
<thead>
<tr>
<th>mean</th>
<th>mu</th>
<th>Cov</th>
<th>Sigma</th>
<th>CVaR</th>
<th>VaR</th>
</tr>
</thead>
<tbody>
<tr>
<td>0.0000</td>
<td>0.0457</td>
<td>0.1008</td>
<td>0.0000</td>
<td>0.0000</td>
<td>0.0000</td>
</tr>
</tbody>
</table>
Description:
Wed Feb 04 08:20:55 2015 by user: Administrator
attr(\"class\")
[1] "BLOptimPortfolios"
Finally, density plots of marginal prior and posterior distributions can be generated with `densityPlots`. As we will see in the next section, this gives more interesting results when used with copula opinion pooling.
```r
> densityPlots(marketPosterior, assetsSel = "JPM")
```
4 Overview of Copula Opinion Pooling
Copula opinion pooling is an alternative way to blend analyst views on market distributions that was developed by Attilio Meucci towards the end of 2005. It is similar to the Black-Litterman model in that it also uses a “pick” matrix to formulate views. However it has several advantages including the following:
- Views are made on realizations of the market, not on market parameters as in the original formulation of BL
- The joint distribution of the market can be any multivariate distribution
- Views are not restricted to the normal distribution
- The parameters in the model have clearer meanings
- The model can easily be generalized to incorporate the views of multiple analysts
Nevertheless, all of this comes at a price. We can no longer use closed-form expressions for calculating the posterior distribution of the market and hence must rely on simulation instead. Before proceeding to the implementation however let us look at the theory. Readers are referred to [Me05] for a more detailed discussion.
As before, suppose that we have a set of $n$ assets whose returns are represented by a set of random variables $A = \{a_1, a_2, ..., a_n\}$. As in Black-Litterman, we suppose that $A$ has some prior joint distribution
whose c.d.f we will denote by $\Phi_A$. Denote the marginals of this distribution by $\phi_i$. An analyst forms his views on linear combinations of future realizations of the values of $A$ by assigning subjective probability distributions to these linear combinations. That is we form views of the form $p_1a_1 + p_2a_2 + \ldots + p_na_n \sim \theta_i$, where $\theta_i$ is some distribution. Denote the pick matrix formed by all of these views by $P$ once again.
Now, since we have assigned some prior distribution $\Phi_A$ to these assets, it follows that the product $V = PA$ inherits a distribution as well, say $v_i = p_1a_1 + p_2a_2 + \ldots + p_na_n \sim \theta_i'$. In general $\theta_i \neq \theta_i'$ unless one’s views are identical to the market prior. Thus we must somehow resolve this contradiction. A straightforward way of doing this is to take the weighted sum of the two marginal c.d.f.s, i.e. $\hat{\theta}_i = \tau_i\theta_i + (1 - \tau_i)\theta_i'$, where $\tau_i \in [0,1]$ is a parameter representing our confidence in our subjective views. This is the actual marginal distribution that will be used to determine the market posterior.
The market posterior is actually determined by setting the marginal distributions of $V$ to $\hat{\theta}_i$, while using a copula to keep the dependence structure of $V$ intact. Let $V = (v_1, v_2, \ldots, v_k)$, where $k$ is the number of views that the analyst has formed. Then $v_i \sim \theta_i'$. Let $C$ be the copula of $V$, so that $C$ is the joint distribution of $(\theta_1'(v_1), \theta_2'(v_2), \ldots, \theta_k'(v_k)) = (C_1, C_2, \ldots, C_k)$ if we now take the $\theta_i'$ to be cumulative density functions. Next set $\hat{V}$ as the random variable with the joint distribution $(\hat{\theta}_1^{-1}(C_1), \hat{\theta}_2^{-1}(C_2), \ldots, \hat{\theta}_k^{-1}(C_k))$. The posterior market distribution is obtained by rotating $\hat{V}$ back into market coordinates using the orthogonal complement of $P$. See [Me05], p. 5 for details.
5 COP in BLCOP
Let us now work through a brief example to see how these ideas are implemented in the BLCOP package. First, one again works with objects that hold the view specification, which in the COP case are of class COPViews. These can again be created with a constructor function of the same name. However a significant difference is the use of mvdistribution and distribution class objects to specify the prior distribution and view distributions respectively. We will show the use of these in the following example, which is based on the example used in [Me05], p.9. Suppose that we wish to invest in 4 market indices (S&P500, FTSE, CAC and DAX). Meucci suggests a multivariate Student-t distribution with $\nu = 5$ degrees of freedom and dispersion matrix given by:
$$
\begin{pmatrix}
0.376 & 0.253 & 0.333 & 0.397 \\
0.253 & 0.360 & 0.360 & 0.396 \\
0.333 & 0.360 & 0.600 & 0.578 \\
0.397 & 0.396 & 0.578 & 0.775
\end{pmatrix} \times 10^{-3}
$$
He then sets $\mu = \delta\Sigma w_{eq}$ where $w_{eq}$ is the relative capitalization of the 4 indices and $\delta = 2.5$. For simplicity we will simply take $w_{eq} = (1/4, 1/4, 1/4, 1/4)$.
```r
> dispersion <- c(.376, .253, .360, .333, .360, .600, .397, .396, .578, .775) / 1000
> sigma <- BLCOP::symmetricMatrix(dispersion, dim = 4)
> caps <- rep(1/4, 4)
> mu <- 2.5 * sigma %*% caps
> dim(mu) <- NULL
> marketDistribution <- mvdistribution("mt", mean = mu, S = sigma, df = 5)
> class(marketDistribution)
```
[1] "mvdistribution"
attr(,"package")
[1] "BLCOP"
The class `mvdistribution` works with R multivariate probability distribution “suffixes”. `mt` is the R “name”/“suffix” of the multivariate Student-t as found in the package `mnormt`. That is, the sampling function is given by `rmt`, the density by `dmt`, and so on. The other parameters are those required by these functions to fully parametrize the multivariate Student-t. The `distribution` class works with univariate distributions in a similar way and is used to create the view distributions. We continue with the above example by creating a single view on the DAX.
```r
> pick <- matrix(0, ncol = 4, nrow = 1,
+ dimnames = list(NULL, c("SP", "FTSE", "CAC", "DAX")))
> pick[1,"DAX"] <- 1
> viewDist <- list(distribution("unif", min = -0.02, max = 0))
> views <- COPViews(pick, viewDist = viewDist, confidences = 0.2,
+ assetNames = c("SP", "FTSE", "CAC", "DAX"))
```
As can be seen, the view distributions are given as a list of `distribution` class objects, and the confidences set the \(\tau\)'s described previously. Here we have assigned a \(U(-0.02, 0)\) distribution to our view with confidence 0.2. Additional views can be added with `addCOPViews`.
```r
> newPick <- matrix(0, 1, 2)
> dimnames(newPick) <- list(NULL, c("SP", "FTSE"))
> newPick[1,] <- c(1, -1) # add a relative view
> views <- addCOPViews(newPick,
+ list(distribution("norm", mean = 0.05, sd = 0.02)), 0.5, views)
```
The posterior is calculated with `COPPosterior`, and the updated marginal distributions can be visualized with `densityPlots` once again. The calculation is performed by simulation, based on the ideas described in [Me06]. The simulations of the posterior distribution are stored in the `posteriorSims` slot of the class `COPResult` that is returned by `COPPosterior`.
```r
> marketPosterior <- COPPosterior(marketDistribution, views, numSimulations = 50000)
> densityPlots(marketPosterior, assetsSel = 4)
```
6 Future developments
While mostly stable, the code is currently in need of some minor cleanup work and refactoring (e.g. pick matrices are referred to as \( P \) in some places and \( \text{pick} \) in others) as well as improvements in the documentation and examples. Attilio Meucci has also very recently proposed an even more general view-blending method which he calls Entropy Pooling and its inclusion would be another obvious extension of this package’s functionality in the longer term.
References
[Me05] Meucci, A. Beyond Black-Litterman: Views on Non-Normal Markets. 2005, Available at ssrn.com

[Me06] Meucci, A. Beyond Black-Litterman in Practice: A Five-Step Recipe to Input Views on Non-Normal Markets. 2006, Available at ssrn.com

[Me08] Meucci, A. The Black-Litterman Approach: Original Model and Extensions. 2008, Available at ssrn.com

[RmCTWu09] Rmetrics Core Team, Wuertz, D. The \texttt{fPortfolio} package. September 2009, Available at cran.r-project.org
Package ‘RSQLite’
January 21, 2024
Title SQLite Interface for R
Version 2.3.5
Date 2024-01-20
Description Embeds the SQLite database engine in R and provides an interface compliant with the DBI package. The source for the SQLite engine and for various extensions in a recent version is included. System libraries will never be consulted because this package relies on static linking for the plugins it includes; this also ensures a consistent experience across all installations.
License LGPL (>= 2.1)
BugReports https://github.com/r-dbi/RSQLite/issues
Depends R (>= 3.1.0)
Imports bit64, blob (>= 1.2.0), DBI (>= 1.2.0), memoise, methods, pkgconfig, rlang
Suggests callr, DBItest (>= 1.8.0), gert, gh, hms, knitr, magrittr, markdown, rvest, testthat (>= 3.0.0), withr, xml2
LinkingTo plogr (>= 0.2.0), cpp11 (>= 0.4.0)
VignetteBuilder knitr
Config/autostyle/scope line_breaks
Config/autostyle/strict false
Config/testthat/edition 3
Encoding UTF-8
RoxygenNote 7.3.0
Collate 'SQLiteConnection.R' 'SQLKeywords_SQLiteConnection.R'
'SQLiteDriver.R' 'SQLite.R' 'SQLiteResult.R' 'coerce.R'
'compatRowNames.R' 'copy.R' 'cpp11.R' 'datasetsDb.R'
'dbAppendTable_SQLiteConnection.R' 'dbBeginTransaction.R'
'dbBegin_SQLiteConnection.R' 'dbBind_SQLiteResult.R'
'dbClearResult_SQLiteResult.R' 'dbColumnInfo_SQLiteResult.R'
Author Kirill Müller [aut, cre] (<https://orcid.org/0000-0002-1416-3412>),
Hadley Wickham [aut],
David A. James [aut],
Seth Falcon [aut],
D. Richard Hipp [ctb] (for the included SQLite sources),
Dan Kennedy [ctb] (for the included SQLite sources),
Joe Mistachkin [ctb] (for the included SQLite sources),
SQLite Authors [ctb] (for the included SQLite sources),
Liam Healy [ctb] (for the included SQLite sources),
R Consortium [fnd],
RStudio [cph]
Maintainer Kirill Müller <kirill@cynkra.com>
Repository CRAN
datasetsDb
A sample sqlite database
Description
This database is bundled with the package, and contains all data frames in the datasets package.
Usage
datasetsDb()
Examples
```r
library(DBI)
db <- RSQLite::datasetsDb()
dbListTables(db)
dbReadTable(db, "CO2")
dbGetQuery(db, "SELECT * FROM CO2 WHERE conc < 100")
dbDisconnect(db)
```
---
### dbBegin_SQLiteConnection
**Begin/commit/rollback SQLite transactions**
### Description
By default, SQLite is in auto-commit mode. `dbBegin()` starts a SQLite transaction and turns auto-commit off. `dbCommit()` and `dbRollback()` commit and roll back the transaction, respectively, and turn auto-commit back on. `DBI::dbWithTransaction()` is a convenient wrapper that makes sure that `dbCommit()` or `dbRollback()` is called. A helper function `sqliteIsTransacting()` is available to check the current transaction status of the connection.
### Usage
```r
## S4 method for signature 'SQLiteConnection'
dbBegin(conn, .name = NULL, ..., name = NULL)
## S4 method for signature 'SQLiteConnection'
dbCommit(conn, .name = NULL, ..., name = NULL)
## S4 method for signature 'SQLiteConnection'
dbRollback(conn, .name = NULL, ..., name = NULL)
sqliteIsTransacting(conn)
```
### Arguments
- **conn**: a `SQLiteConnection` object, produced by `DBI::dbConnect()`.
- **.name**: For backward compatibility, do not use.
- **...**: Needed for compatibility with generic. Otherwise ignored.
- **name**: Supply a name to use a named savepoint. This allows you to nest multiple transactions.
### See Also
The corresponding generic functions `DBI::dbBegin()`, `DBI::dbCommit()`, and `DBI::dbRollback()`.
### Examples
```r
library(DBI)
con <- dbConnect(SQLite(), ":memory:")
dbWriteTable(con, "arrests", datasets::USArrests)
dbGetQuery(con, "select count(*) from arrests")
dbBegin(con)
rs <- dbSendStatement(con, "DELETE from arrests WHERE Murder > 1")
dbGetRowsAffected(rs)
dbClearResult(rs)
dbGetQuery(con, "select count(*) from arrests")
dbRollback(con)
dbGetQuery(con, "select count(*) from arrests")[1, ]

dbBegin(con)
rs <- dbSendStatement(con, "DELETE FROM arrests WHERE Murder > 5")
dbClearResult(rs)
dbCommit(con)
dbGetQuery(con, "SELECT count(*) FROM arrests")[1, ]

# Named savepoints can be nested -----------------------------------------
dbBegin(con, name = "a")
dbBegin(con, name = "b")
sqliteIsTransacting(con)
dbRollback(con, name = "b")
dbCommit(con, name = "a")

dbDisconnect(con)
```
---
### dbReadTable_SQLiteConnection_character
**Read a database table**
#### Description
Returns the contents of a database table given by name as a data frame.
#### Usage
```r
## S4 method for signature 'SQLiteConnection,character'
dbReadTable(
conn,
name,
...,
row.names = pkgconfig::get_config("RSQLite::row.names.table", FALSE),
check.names = TRUE,
select.cols = NULL
)
```
#### Arguments
- `conn` a `SQLiteConnection` object, produced by `DBI::dbConnect()`
- `name` a character string specifying a table name. SQLite table names are *not* case sensitive, e.g., table names `ABC` and `abc` are considered equal.
- `...` Needed for compatibility with generic. Otherwise ignored.
- `row.names` Either `TRUE`, `FALSE`, `NA` or a string. If `TRUE`, always translate row names to a column called "row_names". If `FALSE`, never translate row names. If `NA`, translate row names only if they're a character vector. A string is equivalent to `TRUE`, but allows you to override the default name. For backward compatibility, `NULL` is equivalent to `FALSE`.
- `check.names` If `TRUE`, the default, column names will be converted to valid R identifiers.
- `select.cols` Deprecated, do not use.
Details
Note that the data frame returned by dbReadTable() only has primitive data, e.g., it does not coerce character data to factors.
Value
A data frame.
See Also
The corresponding generic function DBI::dbReadTable().
Examples
```r
library(DBI)
db <- RSQLite::datasetsDb()
dbReadTable(db, "mtcars")
dbReadTable(db, "mtcars", row.names = FALSE)
dbDisconnect(db)
```
---
### dbWriteTable_SQLiteConnection
**Write a data frame or file to a database table**
Description
Functions for writing data frames or delimiter-separated files to database tables.
Usage
```r
## S4 method for signature 'SQLiteConnection,character,character'
dbWriteTable(
  conn,
  name,
  value,
  ...,
  field.types = NULL,
  overwrite = FALSE,
  append = FALSE,
  header = TRUE,
  colClasses = NA,
  row.names = FALSE,
  nrows = 50,
  sep = ",",
  eol = "\n",
  skip = 0,
  temporary = FALSE
)

## S4 method for signature 'SQLiteConnection,character,data.frame'
dbWriteTable(
conn,
name,
value,
...,
row.names = pkgconfig::get_config("RSQLite::row.names.table", FALSE),
overwrite = FALSE,
append = FALSE,
field.types = NULL,
temporary = FALSE
)
```
Arguments
- **conn**: a `SQLiteConnection` object, produced by `DBI::dbConnect()`
- **name**: a character string specifying a table name. SQLite table names are *not* case sensitive, e.g., table names ABC and abc are considered equal.
- **value**: a data.frame (or coercible to data.frame) object or a file name (character). In the first case, the data.frame is written to a temporary file and then imported to SQLite; when `value` is a character, it is interpreted as a file name and its contents imported to SQLite.
- **...**: Needed for compatibility with generic. Otherwise ignored.
- **field.types**: character vector of named SQL field types where the names are the names of the new table’s columns. If missing, types are inferred with `DBI::dbDataType()`.
- **overwrite**: a logical specifying whether to overwrite an existing table or not. Its default is `FALSE`.
- **append**: a logical specifying whether to append to an existing table in the DBMS. Its default is `FALSE`.
- **header**: a logical indicating whether the first data line (but see `skip`) has a header or not. If missing, its value is determined following `read.table()` convention, namely, it is set to `TRUE` if and only if the first row has one fewer field than the number of columns.
- **colClasses**: character vector of R type names, used to override defaults when imputing classes from an on-disk file.
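A short hedged usage sketch of the data-frame method follows; the table and column names are illustrative.

```r
library(DBI)
con <- dbConnect(RSQLite::SQLite(), ":memory:")

df <- data.frame(x = 1:3, y = c("a", "b", "c"))
# Override the inferred SQL types explicitly via field.types
dbWriteTable(con, "tbl", df,
  field.types = c(x = "INTEGER", y = "TEXT"),
  overwrite = TRUE
)
dbReadTable(con, "tbl")
dbDisconnect(con)
```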
initExtension
Description
Several extension functions are included in the RSQLite package. When enabled via initExtension(), these extension functions can be used in SQL queries. Extensions must be enabled separately for each connection.
Usage
initExtension(db, extension = c("math", "regexp", "series", "csv"))
Arguments
- **db**: A `SQLiteConnection` object to load these extensions into.
- **extension**: The extension to load.
Details
The "math" extension functions are written by Liam Healy and made available through the SQLite website (https://www.sqlite.org/contrib). This package contains a slightly modified version of the original code. See the section "Available functions in the math extension" for details.
The "regexp" extension provides a regular-expression matcher for POSIX extended regular expressions, as available through the SQLite source code repository (https://sqlite.org/src/file?filename=ext/misc/regexp.c). SQLite will then implement the A regexp B operator, where A is the string to be matched and B is the regular expression.
The "series" extension loads the table-valued function `generate_series()`, as available through the SQLite source code repository (https://sqlite.org/src/file?filename=ext/misc/series.c).
The "csv" extension loads the function `csv()` that can be used to create virtual tables, as available through the SQLite source code repository (https://sqlite.org/src/file?filename=ext/misc/csv.c).
Available functions in the math extension
- **Math functions**: acos, acosh, asin, asinh, atan, atanh, atn2, ceil, cos, cosh, cot, coth, degrees, difference, exp, floor, log, log10, pi, power, radians, sign, sin, sinh, sqrt, square, tan, tanh
- **String functions**: charindex, leftstr, ltrim, padc, padl, padr, proper, replace, replicate, reverse, rightstr, rtrim, strfilter, trim
- **Aggregate functions**: stdev, variance, mode, median, lower_quartile, upper_quartile
Examples
```r
library(DBI)
db <- RSQLite::datasetsDb()
# math
RSQLite::initExtension(db)
dbGetQuery(db, "SELECT stdev(mpg) FROM mtcars")
sd(mtcars$mpg)
# regexp
RSQLite::initExtension(db, "regexp")
dbGetQuery(db, "SELECT * FROM mtcars WHERE carb REGEXP '^[12]'")
# series
RSQLite::initExtension(db, "series")
dbGetQuery(db, "SELECT value FROM generate_series(0, 20, 5);")
dbDisconnect(db)
# csv
db <- dbConnect(RSQLite::SQLite())
RSQLite::initExtension(db, "csv")
# use the filename argument to mount CSV files from disk
sql <- paste0("CREATE VIRTUAL TABLE tbl USING ",
  "csv(data='1,2', schema='CREATE TABLE x(a INT, b INT)')"
)
dbExecute(db, sql)
dbGetQuery(db, "SELECT * FROM tbl")
dbDisconnect(db)
```
---
rsqliteVersion | RSQLite version
Description
Return the version of RSQLite.
Usage
rsqliteVersion()
Value
A character vector containing header and library versions of RSQLite.
Examples
RSQLite::rsqliteVersion()
---
SQLite | Connect to an SQLite database
Description
Together, SQLite() and dbConnect() allow you to connect to a SQLite database file. See DBI::dbSendQuery() for how to issue queries and receive results.
Usage
SQLite(...)
## S4 method for signature 'SQLiteDriver'
dbConnect(
drv,
dbname = "",
...,
loadable.extensions = TRUE,
default.extensions = loadable.extensions,
cache_size = NULL,
synchronous = "off",
flags = SQLITE_RWC,
vfs = NULL,
bigint = c("integer64", "integer", "numeric", "character"),
extended_types = FALSE
)
## S4 method for signature 'SQLiteConnection'
dbDisconnect(conn, ...)
Arguments
- **...**
- In previous versions, `SQLite()` took arguments. These have now all been moved to `dbConnect()`, and any arguments passed here will be ignored with a warning.
- **drv, conn**
- An object generated by `SQLite()`, or an existing `SQLiteConnection`. If a connection is passed, the connection will be cloned.
- **dbname**
- The path to the database file. SQLite keeps each database instance in one single file. The name of the database is the file name, so database names should be legal file names on the running platform. There are two exceptions: `""` will create a temporary on-disk database (the file will be deleted when the connection is closed), and `":memory:"` or `"file::memory:"` will create a temporary in-memory database.
- **loadable.extensions**
- When `TRUE` (default), SQLite3 loadable extensions are enabled. Setting this value to `FALSE` prevents extensions from being loaded.
- **default.extensions**
- When `TRUE` (default), the `initExtension()` function will be called on the new connection. Setting this value to `FALSE` requires calling `initExtension()` manually.
- **cache_size**
- Advanced option. A positive integer to change the maximum number of disk pages that SQLite holds in memory (SQLite's default is 2000 pages). See https://www.sqlite.org/pragma.html#pragma_cache_size for details.
- **synchronous**
- Advanced option. Possible values for `synchronous` are `"off"` (the default), `"normal"`, or `"full"`. Users have reported significant speed-ups using `synchronous = "off"`, and the SQLite documentation itself implies considerably improved performance at the very modest risk of database corruption in the unlikely case of the operating system (not the R application) crashing. See https://www.sqlite.org/pragma.html#pragma_synchronous for details.
- **flags**
- `SQLITE_RWC`: open the database in read/write mode and create the database file if it does not already exist; `SQLITE_RW`: open the database in read/write mode and raise an error if the file does not already exist; `SQLITE_RO`: open the database in read-only mode and raise an error if the file does not already exist.
- **vfs**
- Select the SQLite3 OS interface. See https://www.sqlite.org/vfs.html for details. Allowed values are `"unix-posix"`, `"unix-afp"`, `"unix-flock"`, `"unix-dotfile"`, and `"unix-none"`.
- **bigint**
- The R type that 64-bit integer types should be mapped to. The default is `bit64::integer64`, which allows the full range of 64-bit integers.
- **extended_types**
- When `TRUE`, columns of type `DATE`, `DATETIME` / `TIMESTAMP`, and `TIME` are mapped to corresponding R classes; see the "Extended Types" section below for details. Defaults to `FALSE`.
**Details**
Connections are automatically cleaned-up after they’re deleted and reclaimed by the GC. You can use DBI::dbDisconnect() to terminate the connection early, but it will not actually close until all open result sets have been closed (and you’ll get a warning message to this effect).
**Value**
SQLite() returns an object of class SQLiteDriver.
dbConnect() returns an object of class SQLiteConnection.
**Extended Types**
When parameter extended_types = TRUE, date and time columns are directly mapped to corresponding R types. How exactly depends on whether the actual value is a number or a string:
<table>
<thead>
<tr>
<th>Column type</th>
<th>Value is numeric</th>
<th>Value is Text</th>
</tr>
</thead>
<tbody>
<tr>
<td>DATE</td>
<td>Count of days since 1970-01-01</td>
<td>YMD formatted string (e.g. 2020-01-23)</td>
</tr>
<tr>
<td>TIME</td>
<td>Count of (fractional) seconds</td>
<td>HMS formatted string (e.g. 12:34:56)</td>
</tr>
<tr>
<td>DATETIME / TIMESTAMP</td>
<td>Count of (fractional) seconds since midnight 1970-01-01 UTC</td>
<td>DATE and TIME as above separated by a space</td>
</tr>
</tbody>
</table>
If a value cannot be mapped an NA is returned in its place with a warning.
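As an illustration (a sketch, not taken from the manual's examples), the mapping can be observed like this:

```r
library(DBI)
con <- dbConnect(RSQLite::SQLite(), ":memory:", extended_types = TRUE)
dbExecute(con, "CREATE TABLE t (d DATE, ts DATETIME)")
dbExecute(con, "INSERT INTO t VALUES ('2020-01-23', '2020-01-23 12:34:56')")
str(dbGetQuery(con, "SELECT * FROM t"))  # d comes back as Date, ts as POSIXct
dbDisconnect(con)
```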
**See Also**
The corresponding generic functions DBI::dbConnect() and DBI::dbDisconnect().
**Examples**
```r
library(DBI)
# Initialize a temporary in memory database and copy a data.frame into it
con <- dbConnect(RSQLite::SQLite(), "::memory:"
data(USArrests)
dbWriteTable(con, "USArrests", USArrests)
dbListTables(con)
# Fetch all query results into a data frame:
dbGetQuery(con, "SELECT * FROM USArrests")
# Or do it in batches
rs <- dbSendQuery(con, "SELECT * FROM USArrests")
d1 <- dbFetch(rs, n = 10) # extract data in chunks of 10 rows
dbHasCompleted(rs)
d2 <- dbFetch(rs, n = -1) # extract all remaining data
dbHasCompleted(rs)
dbClearResult(rs)
# clean up
dbDisconnect(con)
```
---
sqliteCopyDatabase | Copy a SQLite database
Description
Copies a database connection to a file or to another database connection. It can be used to save an in-memory database (created using dbname = ":memory:" or dbname = "file::memory:") to a file, or to create an in-memory database that is a copy of another database.
Usage
sqliteCopyDatabase(from, to)
Arguments
- **from**
- A `SQLiteConnection` object. The main database in `from` will be copied to `to`.
- **to**
- A `SQLiteConnection` object pointing to an empty database.
Author(s)
Seth Falcon
References
https://www.sqlite.org/backup.html
Examples
```r
library(DBI)
# Copy the built-in datasetsDb() to an in-memory database
con <- dbConnect(RSQLite::SQLite(), ":memory:")
dbListTables(con)
db <- RSQLite::datasetsDb()
RSQLite::sqliteCopyDatabase(db, con)
dbDisconnect(db)
dbListTables(con)
dbDisconnect(con)
```
---
sqliteSetBusyHandler | Configure what SQLite should do when the database is locked
Description
When a transaction cannot lock the database, because it is already locked by another one, SQLite by default throws an error: database is locked. This behavior is usually not appropriate when concurrent access is needed, typically when multiple processes write to the same database.
sqliteSetBusyHandler() lets you set a timeout or a handler for these events. When setting a timeout, SQLite will try the transaction multiple times within this timeout. To set a timeout, pass an integer scalar to sqliteSetBusyHandler().
Another way to set a timeout is to use a PRAGMA; e.g., the SQL query `PRAGMA busy_timeout=3000` sets the busy timeout to three seconds.
Usage
sqliteSetBusyHandler(dbObj, handler)
Arguments
<table>
<thead>
<tr>
<th>Argument</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>dbObj</td>
<td>A SQLiteConnection object.</td>
</tr>
<tr>
<td>handler</td>
<td>Specifies what to do when the database is locked by another transaction. It can be:</td>
</tr>
<tr>
<td></td>
<td>• NULL: fail immediately,</td>
</tr>
<tr>
<td></td>
<td>• an integer scalar: this is a timeout in milliseconds that corresponds to PRAGMA busy_timeout,</td>
</tr>
<tr>
<td></td>
<td>• an R function: this function is called with one argument, see details below.</td>
</tr>
</tbody>
</table>
Details
Note that SQLite currently does not schedule concurrent transactions fairly. If multiple transactions are waiting on the same database, any one of them can be granted access next. Moreover, SQLite does not currently ensure that access is granted as soon as the database is available. Make sure that you set the busy timeout to a high enough value for applications with high concurrency and many writes.
If the handler argument is a function, then it is used as a callback function. When the database is locked, this callback is invoked with a single integer: the number of calls made for the same locking event. The callback function must return an integer scalar. If it returns 0L, then no additional attempts are made to access the database, and an error is thrown. Otherwise another attempt is made to access the database and the cycle repeats.
Handler callbacks are useful for debugging concurrent behavior, or to implement a more sophisticated busy algorithm. The latter is currently considered experimental in RSQLite. If the callback function fails, then RSQLite will print a warning, and the transaction is aborted with a "database is locked" error.
Note that every database connection has its own busy timeout or handler function.
Calling sqliteSetBusyHandler() on a connection that is not connected is an error.
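A short sketch of both forms described above (the database file name is hypothetical; the callback follows the contract from the Details section):

```r
library(DBI)
con <- dbConnect(RSQLite::SQLite(), "busy-example.sqlite")

# Timeout form: equivalent to PRAGMA busy_timeout=3000
RSQLite::sqliteSetBusyHandler(con, 3000L)

# Callback form: retry up to 10 times, sleeping 100 ms between attempts
RSQLite::sqliteSetBusyHandler(con, function(n) {
  if (n > 10) return(0L)  # returning 0L gives up -> "database is locked" error
  Sys.sleep(0.1)
  1L                      # any other integer scalar triggers another attempt
})

dbDisconnect(con)
```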
Value
Invisible NULL.
See Also
https://www.sqlite.org/c3ref/busy_handler.html
Normative Multi-Agent Programs
Mehdi Dastani
Nick Tinnemeier
John-Jules Meyer
Background and Motivation
- MAS consist of individual agents that are:
- autonomous: pursues its own objectives.
- heterogeneous: internal state and operations not known to external entities.
- Need for coordination of external behavior of agents to achieve MAS’s overall objectives.
Existing coordination mechanisms:
- Coordination artifacts and languages defined in terms of low-level coordination concepts such as synchronization, shared-space, and channels, e.g., REO and Linda.
- Organizational models, normative systems, and electronic institutions defined in terms of social and organizational concepts, e.g., Moise+ and ISLANDER/AMELI.
- Design and develop a **programming language** to support the implementation of coordination mechanisms in terms of normative concepts.
Simple railway simulation:
- simple railway station
- passengers travelling by train
- rules of conduct (e.g. having a ticket while on the train)
Passengers are agents that can:
- embark the train
- enter the platform
- buy a ticket
General idea of Normative Multi-Agent Organization
Organization is developed as separate entity
Agents:
- specified by 2APL programming language
- perform external actions
- internal architecture unknown to organization
Organization:
- determines effect of external actions (no autonomy)
- normatively assesses effect of agents’ actions (monitoring)
- sanctions agents’ wrongdoings (enforcement)
- prevents ending up in really bad states (regimentation)
Programming a normative multi-agent organization is to specify:
- references to *2APL agent programs*, e.g.,
passenger PassProg 1
- the initial state of organization by *brute facts*, e.g.,
{-at_platform, -in_train, -ticket}
- the *effects of actions*, e.g.,
{-ticket} buy_ticket {ticket}
{at_platform, -in_train} embark {-at_platform, in_train}
- the norms through *counts-as rules*, e.g.,
{at_platform, -ticket} ⇒ {viol_ticket}
{in_train, -ticket} ⇒ {viol_⊥}
- possible sanctions for agents through *sanction rules*, e.g.,
{viol_ticket} ⇒ {fined_10}
Agents: passenger PassProg 1
Facts: {-at_platform, -in_train, -ticket}
Effects: {-at_platform} enter {at_platform},
{-ticket} buy_ticket {ticket},
{at_platform, -in_train} embark {-at_platform, in_train}
Counts-as rules: {at_platform, -ticket} ⇒ {viol_ticket},
{in_train, -ticket} ⇒ {viol_⊥}
Sanction rules: {viol_ticket} ⇒ {fined_10}
Normative Programming Language (Semantics)
\[ \langle \{A_1, \ldots, A_n\}, \quad \sigma_b \subseteq P_b, \quad \sigma_n \subseteq P_n \rangle \]
- $P_b$ and $P_n$ are disjoint sets of literals
- Brute facts describe current state of the environment
- Normative facts describe normative assessment of organisation
When an agent performs an external action, the organization:
- determines new brute state based on effect rules
- by using function $up(\alpha(i), \sigma_b)$, i.e. $\sigma'_b = up(\alpha(i), \sigma_b)$
- normatively judges this state by applying counts-as rules
- by taking the closure of $\sigma'_b$ under rules $R_c$, i.e. $\sigma'_n = \text{Cl}^{R_c}(\sigma'_b) \setminus \sigma'_b$
Based on this normative judgment, the organization either:
- effectuates action and applies all sanction rules accordingly
- by taking closure of $\sigma'_n$ under rules $R_s$, i.e. $S = \text{Cl}^{R_s}(\sigma'_n) \setminus \sigma'_n$
- new brute state becomes $\sigma'_b \cup S$
- blocks the action if it would lead to a state marked by $\text{viol}_\bot$
Applicable rules given set of literals $X$ and rules $R$, $\text{Appl}^R(X)$:
$$\text{Appl}^R(X) = \{ \Phi \Rightarrow \Psi \in R \mid X \models \Phi \}$$
Closure of set of literals $X$ under rules $R$, $\text{Cl}^R(X)$:
$$\text{Cl}^R(X) = \text{Cl}^R_{m+1}(X) \text{ iff } \text{Cl}^R_{m+1}(X) = \text{Cl}^R_m(X), \text{ where}$$
- **B:** $\text{Cl}^R_0(X) = X \cup \bigcup_{r \in \text{Appl}^R(X)} \text{cons}_r$
- **S:** $\text{Cl}^R_{n+1}(X) = \text{Cl}^R_n(X) \cup \bigcup_{r \in \text{Appl}^R(\text{Cl}^R_n(X))} \text{cons}_r$
Effect of action $\alpha(i)$ specified by brute effect rule ($\Phi \alpha(i) \Phi'$):
$$\text{up}(\alpha(i), \sigma_b) = (\sigma_b \cup \Phi') \setminus (\{p \mid -p \in \Phi'\} \cup \{-p \mid p \in \Phi'\})$$
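To make these definitions concrete, here is an illustrative sketch (ours, not from the paper) in R. It assumes literals are strings with a "-" prefix for negation, and a rule $\Phi \Rightarrow \Psi$ is a `cond`/`cons` pair; `up` takes the postcondition directly instead of the action name:

```r
# Sketch only: literals are strings ("-p" is the negation of "p");
# a rule Phi => Psi is list(cond = Phi, cons = Psi).
neg <- function(ls) ifelse(startsWith(ls, "-"), substring(ls, 2), paste0("-", ls))

# Appl^R(X): the rules whose condition holds in X
applicable <- function(R, X) Filter(function(r) all(r$cond %in% X), R)

# Cl^R(X): add consequences of applicable rules until a fixpoint is reached
closure <- function(R, X) {
  repeat {
    X2 <- unique(c(X, unlist(lapply(applicable(R, X), `[[`, "cons"))))
    if (setequal(X2, X)) return(X)
    X <- X2
  }
}

# up: apply a postcondition, removing the literals it overwrites
up <- function(sigma_b, post) unique(c(setdiff(sigma_b, neg(post)), post))

# Railway example: enter the platform without a ticket
sigma_b   <- up(c("-at_platform", "-in_train", "-ticket"), "at_platform")
counts_as <- list(list(cond = c("at_platform", "-ticket"), cons = "viol_ticket"))
setdiff(closure(counts_as, sigma_b), sigma_b)  # -> "viol_ticket"
```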
Let action $\alpha(i)$ be specified as: $(\Phi \alpha(i) \Phi')$.
Transition rule for individual agent’s external actions:
$A_i \xrightarrow{\alpha(i)} A'_i :$ agent $i$ can perform external action $\alpha$.
Transition rule for normative multi-agent organization:
\[
\frac{A_i \xrightarrow{\alpha(i)} A'_i \quad \sigma'_b = \text{up}(\alpha(i), \sigma_b) \quad \sigma'_n = \text{Cl}^{R_c}(\sigma'_b) \setminus \sigma'_b \quad \sigma'_n \not\models \text{viol}_\bot \quad S = \text{Cl}^{R_s}(\sigma'_n) \setminus \sigma'_n \quad \sigma'_b \cup S \not\models \bot}
{\langle A, \sigma_b, \sigma_n \rangle \longrightarrow \langle A', \sigma'_b \cup S, \sigma'_n \rangle}
\]
where $A' = (A \setminus \{A_i\}) \cup \{A'_i\}$ and $viol_{\bot}$ is the designated literal for regimentation.
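Building on the sketch above, the organization's part of this transition can be read as a single step function (again only an illustration; `viol_bot` stands for the regimentation literal $\text{viol}_\bot$):

```r
# One organization step: update, judge, regiment, sanction (sketch only;
# uses up() and closure() from the previous sketch)
org_step <- function(sigma_b, post, Rc, Rs) {
  b2 <- up(sigma_b, post)              # sigma'_b = up(alpha(i), sigma_b)
  n2 <- setdiff(closure(Rc, b2), b2)   # sigma'_n = Cl^Rc(sigma'_b) \ sigma'_b
  if ("viol_bot" %in% n2) return(NULL) # regimentation: block the action
  S  <- setdiff(closure(Rs, n2), n2)   # S = Cl^Rs(sigma'_n) \ sigma'_n
  list(brute = unique(c(b2, S)), normative = n2)
}
```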
Conclusion and Future Works
- A programming language to implement multi-agent system organizations.
- Logic to verify properties of normative multi-agent program.
- Adding more social constructs such as roles, relations between roles, and contracts.
- More expressive language for norms and sanctions, e.g., use temporal, deontic operators.
- Facilitating tools for norm aware agents.
Modularity in BDI-based Agent Programming
- Modularity is an essential principle in structured programming.
- Modularization is a mechanism to structure a computer program in separate modules.
- Modularization can be used for information hiding and reusability.
- Modularization in existing BDI-based Agent programming languages is to structure an individual agent’s program in separate modules, each encapsulating cognitive components such as beliefs, goals, events, and plans that together can be used to handle specific situations.
In JACK and Jadex, modules (capabilities) encapsulate cognitive components that implement a specific capability or functionality of the agent.
The interpreter searches the modules in order to determine, e.g., how an event can be processed.
- In GOAL, modules are used as a mechanism to focus the program execution. They realize a specific policy or mechanism in order to control nondeterminism in agent execution.
- In 3APL a module is associated with a specific goal, indicating which planning rules should be applied, and how, to achieve that specific goal.
- Belief or goal conditions are assigned to modules. The agent's interpreter uses the modules when the respective conditions hold.
:main:deliveryAgent
{
:beliefs{ home(a).
loc(p1,a). loc(p2,a). loc(p3,a). loc(p4,a). loc(truck,a).
loc(c1,b). loc(c2,c). order(c1,[p1,p2]). order(c2,[p3,p4]).
}
:goals{ delivered_order(c1). delivered_order(c2). ... }
:program{ ... }
:action-spec{ ... }
:module: deliverOrder{
:context{ bel(order(C,O), in(O,a)), goal(delivered_order(C)) }
:beliefs{
ordered(C,P) :- order(C,Y), member(P,Y).
...
}
:goals{ }
:program{
if bel(ordered(C, P)), ~bel(in(P, truck)) then load(P).
if bel(loc(truck, X), loaded_order(C), loc(C, Y)) then goto(Y).
if bel(loc(truck, X), in(P,truck), loaded(C,P)) then unload(P).
if bel(loc(C, X), empty, home(Y)) then goto(Y).
}
:action-spec{ ... }
}
:module: stockMgt{
:context{ bel(ordered(C,P), empty), ~bel(in(P,a)) }
:goals{ in(P,a) }
:program{ ... }
:action-spec{ ... }
}
...
}
Modularity: Our Vision
- Provides agent programmer **more control** over how and when modules are used.
- In AOSE **roles** are considered as functionalities to handle specific situations. In BDI approach, roles are specified in terms of beliefs, goals, events, and plans.
- Agent may want to construct and maintain **profiles** of users or other agents. A user or other agents can be specified in terms of beliefs, goals, events, and plans.
- Using modules for **information hiding** and **reusability**.
- Providing a set of **generic programming constructs** that can be used by an agent programmer to perform a variety of (role and profile related) operations on modules.
Extending 2APL with Modules
- A 2APL multi-agent program is implemented in terms of a set of module specifications. Each module is specified by beliefs, goals, plans, and practical reasoning rules.
- A subset of these modules is identified as the specification of the individual agents constituting the multi-agent system. The execution of a 2APL multi-agent program is the instantiation and execution of these modules.
- Several operations/actions can be performed on modules:
  - Create/Release module
  - Execute module
  - Update module
  - Test module
Multi-Agent Program
⟨MAS_Prog⟩ ::= "Modules:" ⟨module⟩+
               "Agents:" (⟨agentname⟩ ":" ⟨moduleIdent⟩ [⟨int⟩])+
⟨module⟩ ::= ⟨moduleIdent⟩ ".2apl" [⟨environments⟩]
⟨agentname⟩ ::= ⟨ident⟩
⟨moduleIdent⟩ ::= ⟨ident⟩
⟨environments⟩ ::= "@" ⟨ident⟩+
Modules:
manager.2apl @clientdatabase
admin.2apl
userCreator.2apl @userdatabase
Agents:
richard: manager.2apl
administrator: admin.2apl
A 2APL Module
⟨2APL_Module⟩ ::= ("private" | "public") "singleton"?
                  ( "Include:" ⟨ident⟩
                  | "BeliefUpdates:" ⟨BelUpSpec⟩
                  | "Beliefs:" ⟨belief⟩
                  | "Goals:" ⟨goals⟩
                  | "Plans:" ⟨plans⟩
                  | "PG-rules:" ⟨pgrules⟩
                  | "PC-rules:" ⟨pcrules⟩
                  | "PR-rules:" ⟨prrules⟩ )*
⟨baction⟩ ::= ... | ⟨createaction⟩ | ⟨releaseaction⟩ | ⟨return⟩ | ⟨moduleaction⟩
⟨createaction⟩ ::= "create(" ⟨ident⟩ "," ⟨ident⟩ ")"
⟨releaseaction⟩ ::= "release(" ⟨ident⟩ ")"
⟨return⟩ ::= "return"
⟨moduleaction⟩ ::= ⟨ident⟩ "." ⟨maction⟩
⟨maction⟩ ::= "execute(" ⟨test⟩ ")" | "executeasync(" ⟨test⟩? ")"
            | "stop" | ⟨test⟩ | ⟨adoptgoal⟩ | ⟨dropgoal⟩ | ⟨updBB⟩
⟨updBB⟩ ::= "updateBB(" ⟨literals⟩ ")"
Creating a Module Instance
- One module instance can create several instances of one and the same module specification.
- The creating module instance assigns a unique name to the module instance.
- A creating module instance becomes the owner of the created module instance. The creating module instance is the only module instance that can operate on the created module instance until the created module is released.
A module instance can execute another one and wait until the execution of the module instance is halted.
- A condition should be given to indicate when the execution of a module instance must halt.
A 2APL module instance can execute another one in parallel.
- The executed module instance can be halted either by means of a condition evaluated on the internals of the executed module instance, or
- Explicitly by means of a stop action performed by its owner.
Update and Test Module
- A module instance can test and update the beliefs and goals of a module instance that it owns.
- In order to control access to the internals of a module instance, two types of module instances are introduced:
- A **private** module instance does not allow its owner to access its internals. The owner can only execute it.
- The internals of a **public** module instance are accessible to its owner module.
Singleton Modules
- One and the same module instance can be used by two different module instances. For this purpose, the **singleton module** is introduced.
- The ownership of a singleton module instance can be changed through **create** and **release** operations.
- The state of the singleton module instance is invariant with respect to these operations, i.e., the state of a singleton module instance is maintained after it is released and subsequently owned by another module instance.
Creating/Release Module: Syntax
create(mod-name, mod-ident)
release(m)
mod-ident . operation
- If the module is not a singleton, then its instance will be removed/lost.
- If the module is a singleton, then its instance will be maintained (in the multi-agent system) such that it can be used by another module instance using the create action.
- A singleton module can only have one instance at a time such that it can always be accessed by means of the module name `mod-name`.
- The subsequent creation of a singleton module instance (by another module), which may be assigned a different name, will refer to the same instance of the module as when it was released by its last owner.
m.execute(⟨test⟩)
return
- The execution of a module instance starts the deliberation process based on the internals of the module instance. The execution of the owning module instance halts until the execution of the owned module instance halts.
- A module instance is notified to stop its execution by a `stop!` event. The multi-agent system interpreter evaluates the test condition and sends the `stop!` event.
- The module instance that receives a `stop!` event starts a cleaning operation and sends a `return!` event back when it is ready, by performing the `return` action.
- After the reception of this event, the owning module's deliberation process continues, after which it may decide to release the owned module instance.
Execute in Parallel Operation: Syntax
m.executeasync(⟨test⟩?)
m.stop
Identical to the `execute` action, except that the owner instance does not have to wait until the execution of the module instance halts.
- A parallel executing module instance can be halted through the `test` argument as before, or
- by performing the `stop` action on the module instance by the owning module instance. This action will send a `stop!` event to the owned module instance.
Updating and Testing Module: Syntax
m.⟨test⟩
m.updateBB(φ)
m.adopta(φ)
m.adoptz(φ)
m.dropgoal(φ)
m.dropsubgoals(φ)
m.dropsupergoals(φ)
- A module instance can test whether certain beliefs and goals are entailed by a public owned module instance m through the action m.B(φ) & G(ψ).
- The beliefs of a module instance m can be updated by the m.updateBB(φ) action.
- A goal can be added to the goals of a module instance m by means of the m.adopta(φ) and m.adoptz(φ) actions.
- The goals of a module instance m can be dropped by means of the m.dropgoal(φ), m.dropsubgoals(φ) and m.dropsupergoals(φ) actions.
Example
------------- a plan in a module -------------
{
create(userCreator, u);
u.updateBB(user(dave, hopkins));
u.adopta(registered(dave));
u.execute(B(registered(dave)));
release(u)
}
------------- userCreator module -------------
public BeliefUpdates:
{ true } AddUser(FirstName) { registered(FirstName) }
PG-Rules:
registered(FirstName) <- user(FirstName, LastName) |
{
@userdatabase(adduser(FirstName, LastName));
AddUser(FirstName)
}
PC-Rules:
event(stop) <- true | return
• Multi-Agent Configuration: $\langle A, \chi \rangle$, where
– $A$ be a set of module configurations
– $\chi$ be a set of external shared environments
• Module Configuration: $(A_i, p, r, e, \varphi)$, where
– $A_i$ is a module instance with the unique name $i$,
– $p$ is the name of the owner of the module instance,
– $r$ is an identifier referring to the module specification,
– $e$ is the execution flag, and
– $\varphi$ is the execution stop condition
• Initial Configuration: $\langle A, \chi \rangle$, where
– $A = \{(A_{i_1}, \text{mas}, m, t, \perp), \ldots, (A_{i_N}, \text{mas}, m, t, \perp) \mid (i : m@env_1 \ldots env_k, N) \in \text{MAS_Prog}\}$
– $\chi = \{env_i \mid env_i \in \text{MAS_Prog}\}$
A non-singleton module instance can be created by another module instance $A_i$ if $A_i$ is in the execution mode (the execution flag equals $t$ and $A_i \not\models \varphi$) and there is no module instance with the same name already created by the same module ($\neg \exists r'', e, \varphi' : (A_{i.n}, i, r'', e, \varphi') \in A$).
$$(A_i, p, r', t, \varphi) \in A \& A_i \not\models \varphi \& A_i \xrightarrow{\text{create}(r, n)!} A'_i \& \neg \text{singleton}(r) \& \neg \exists r'', e, \varphi' : (A_{i.n}, i, r'', e, \varphi') \in A$$
$$\langle A, \chi \rangle \rightarrow \langle A', \chi \rangle$$
where $A' = (A \setminus \{(A_i, p, r', t, \varphi)\}) \cup \{(A'_i, p, r', t, \varphi), (A_{i.n}, i, r, f, \bot)\}$.
Conclusion
- 2APL is extended with Modules.
- Agents can be added dynamically during the multi-agent system execution.
- Modules can be used to implement roles and profiles.
- `execute` and `executeasync` may not be appropriate for profile execution, as profile execution should not have consequences for the environment and other agents. We may introduce `dryrun` and `dryrunasync`.
- The notion of singleton can be generalized to allow a minimum and maximum number of instances of a module to be active at one time.
- New actions `add` and `remove` that accept plans or rules as arguments.
CHAPTER FOUR - RESEARCH METHODOLOGY
4.1 RESEARCH
4.1.1 RESEARCH METHODOLOGY AND DESIGN
Research is a process of organized and meticulous investigation in search of new facts. Research methodology is the method utilized to gather information and data for the purpose of making business-related decisions (Kothari, 2004). This research predominantly focuses on the collection of primary data and on data analysis and interpretation through statistical tools. Prior to starting the actual research, the focus on research methodology and research design provides a blueprint for better planning of the research, its execution, and the attainment of the intended results.
4.1.2 RESEARCH OBJECTIVES
The gaps identified in the previous chapters have led to the formulation of the research objectives. This study focuses on the identification and evaluation of a framework for software development teams and on the measurement of work outcomes; these challenges are the overarching factors governing the overall success of a software project. The global workplace poses many challenges that need to be overcome, along with the opportunities it offers. Identifying the important characteristics of agile SW teams is a significant step in creating a framework for SW teams that leads to successful work outcomes. In this context, the study aims to identify and understand the key characteristics of agile software development teams that lead to success in project delivery. The study also establishes the importance of viewing the agile team as a complex adaptive system (CAS). The research objectives mentioned below give the detailed line of study -
1. To identify the various characteristics of agile software development teams that could lead to successful project delivery and work outcomes.
2. To study the impact of these characteristics on the agile software development team's performance, measured in terms of successful work outcomes (conformance to customer requirements and business value delivered, adherence to time and budget), and thereby project success.
3. To study the consideration of the agile SW team as a CAS that leads to successful work outcomes and greater probability of project success.
The first research objective to identify the key characteristics of agile software development teams through the creation of an appropriate framework has been achieved through rigorous literature review. The key characteristics are defined as follows and the corresponding citations have been given in the previous chapters/sections –
A. Agile methodologies are more suitable for emergent requirements and specifications that are based on capability than conventional top-down approaches. Agile methods provide rapid business value to the customer, often delivering capability while conventional methods are still focused on plans. Agile methods empower the teams who might be burdened by heavy process constraints. Agile practices have been proven over a period of time as per various agile tool vendors (Rally Software, Version One, Atlassian) and research agencies (Gartner, Forrester) and they generally work as well as or better than some of the currently accepted practices (Boehm & Turner, 2005; McGeachy, Robert, 2010).
B. It is important to understand the relevance and origin of the key components that are needed in a successful agile team in order to comprehend the key characteristics of agile teams. Agile SW teams need to exhibit characteristics and traits that emphasize the ability to respond to change quickly, which is a basic characteristic of agility, apart from other factors. These traits enable the agile teams to be successful at the work place and meet the requirements of the fluctuating market place. In order for this to occur, key component categories that need to be focused are related to people, the interaction of the people with the environment and the innovative work techniques used to arrive at the solution to a problem or business need quickly and comprehensively (indicated below).
C. Generally, as per the industry or problem domain and the nature of work, people usually adopt generally practiced and accepted techniques for solving problems. However, in order to respond quickly to change and to arrive at solutions for complex problems (which is generally the case for problems in the software product development domain), innovative work techniques are generally a pre-requisite to resolve these complex problems. We may still obtain the solution using the same generally practiced and accepted repetitive techniques, but the response to obtain the specific solution may be slow. Additionally, there are still chances that the issue may recur in the future if it has not been solved thoroughly. Hence, only a combination of these factors will enable the team to deliver the product successfully in the market place and also facilitate them to respond quickly to any change that may happen in the market place.
D. The relevant component/dimensional categories may or may not be inter-dependent among each other. Further, within a component category, the components may or may not be inter-related. While responding to a stimulus from the environment, an agile team viewed as a CAS will give an emergent output that is more than the simple addition of all the constituent components within the component categories. This is generally the case when people operating as agents within the context of CAS respond or interact with the environment.
E. The people related factors component is made up of various people related factors. Behavioral factors include traits of people like maturity, empathy, creativity and related factors. Apart from this, other key people related factors are Leadership, Reward and Motivation, and team member skills (diversity). The team comprises people who come together to work in a common place called the workplace. Hence, other key aspects under focus are collaboration and communication, the physical work environment, and the impact of the organizational culture prevalent at the workplace. Additionally, due to advances in technology, many teams are no longer able to be co-located due to space, cost and other constraints; a team comprising multiple team members may be geographically distributed across the world. In these cases, an additional key factor, the virtual work environment, also has to be considered. However, in the context of agile SW teams, co-location of teams is preferred wherever possible, to maximize the benefits and outcomes for agile teams.
F. For teams to be viewed as high performing agile teams, the team members should also know and focus on innovative techniques to arrive at solutions for complex problems. This is a breakthrough and key component that facilitates an agile team to respond quickly to the changes that may need to be undertaken on account of market factors. This will also enable the agile team to maximize its ability to respond quickly and appropriately to market place changes. The nomenclature of agile teams also implies that the teams exhibit agile behavior effectively by imbibing the agile values and agile principles. These various component categories, when combined, lead to the formation of a structure and framework within which we can identify, focus and channelize the key characteristics of agile teams appropriately to achieve successful work outcomes and project delivery. It is important to note that the outcomes should also meet the fitness-for-purpose requirement apart from other requirements.
In the context of this entire study, agile teams imply agile software development teams only (as agile teams can also be formed in other areas). Software development implies development, maintenance, research and development and related activities performed by the agile teams.
The above objectives lead to the formation of the research hypotheses and the hypothetical framework.
4.1.3 HYPOTHESES
In research studies, hypotheses are a technique to forecast or predict certain assumptions which can be further tested subsequently. This study aims at understanding the relationship of nine independent variables (IV) with the dependent variable (DV).
4.1.3.1 Hypothesis for Independent Variables
The earlier work undertaken in the areas of SPM (Software Project Management), agile software development teams and complex adaptive systems (CAS) has led to the formulation of various hypotheses (Appendix 1 and Appendix 2 give detailed information on the derivation of the hypotheses and the linkage to the literature). The attributes/characteristics of agile teams are measured through a common framework that identifies the key attributes and components of an agile team that lead to improved work outcomes and hence software project success. The effect of the three high-level dimensional categories identified through the literature review on these attributes is measured through the following hypotheses –
4.1.3.2 Focus on Agile SW Teams as CAS
The consideration of agile software development teams as CAS is an important focus area that changes the way we view agile teams when they are executing work to meet the customer requirements.
H9a - There is a significant and positive relationship between the improved work outcomes for the software project and the application, understanding and consideration of the agile team as a complex adaptive system (CAS).
Software project success can be viewed differently by different customers. Hence, improved work outcome measures which are generally linked to software project success in the context of agile SW teams are considered as an appropriate focus area. Thus, all the research objectives are converted to nine hypotheses which will be further tested empirically.
4.1.4 HYPOTHETICAL RESEARCH MODEL
![Diagram of Hypothetical Research Model]
- People Related Factors
- Selection of Team and Skills (H1)
- Behavioral Factors (H2)
- Leadership (H3)
- Reward and Motivation (H4)
- Interaction of the People with the Environment
- Organizational Culture (H5)
- Collaboration and Communication (H6)
- Virtual and Physical Work Environment (H7)
- Innovative Work Techniques for Problem Solving
- Disruptive Innovation (H8)
- Complex Adaptive System (CAS) (H9)
- Work Outcomes
- Conformance to Customer Requirements and Business Value Delivered
- Adherence to estimated time
- Adherence to estimated budget
Figure 4.3: Hypothetical Research Model
The hypothetical research model proposed in this research aims to bridge the gaps discovered in the preceding work. The empirical testing of the proposed model is elaborated in chapter five.
4.2 SAMPLING DESIGN
In most research studies, it may not be possible to consider the entire population for collecting the data needed to carry out the research. Thus, a sample from the population is taken for the study. The various methods of determining the sample and the sample size are labeled as the sample design (Zikmund, 2010).
4.2.1 POPULATION
All the IT companies operating and registered in India, together with all the Indian branches of Multi-National Corporations (MNCs) working in the field of software development, constituted the total population for this research.
Polit and Hungler (1999) define population as a cumulative or the total group of all the objects, members or subjects that meet a set of criteria.
4.2.2 SURVEY AREA
The research aims at identifying the key attributes and components of agile software development teams working on software projects for Indian IT companies and MNCs having branches in India; hence, the entire country was considered as the sample area for the study. All cities may not have an IT setup, so the NASSCOM report has been taken as the basis for selecting the cities for the survey. This is further elaborated in the population and sampling section. The Appendix gives a map of the Indian cities having major IT hubs.
4.2.3 DESIGN OF THE RESEARCH STUDY
Design of the research acts as a blueprint for all the systematic steps that are taken during the research to obtain answers in accordance with the goals of the study (Zikmund, 2010). There are broadly two approaches that can be considered - qualitative and quantitative. This research is based on a quantitative approach, formulating hypotheses and then testing them empirically. The research is explanatory and causal in nature, since the study focuses on identifying the key attributes and components of the framework of an agile team that lead to improved work outcomes through the use of three dimensional categories.
4.2.4 COLLECTION OF DATA
The information and data were gathered through direct interactions with the respondents at various industrial locations, and the questions measured the respondents' agreement. A seven-point Likert scale was used as the answer set in the questionnaire (Boone, Harry and Boone, Deborah, 2012). A Google form was designed to create a web-based questionnaire, and emails were sent to various software professionals in organizations as per the sampling details.
4.2.5 SAMPLING AND SAMPLE SIZE
As part of the research and data gathering exercise, the sampling technique used is simple random sampling, a type of probability sampling. Each unit in the population under study has an equal chance of being selected into the sample, since simple random sampling is an unbiased surveying technique.
When cases are selected for inclusion in the sample, there is a possibility of human bias; this is reduced when the simple random sampling technique is used. Thus, this technique gives us a sample that is highly representative of the population under study. It is also assumed that there is limited missing data. This technique also helps us to make statistical inferences (i.e. generalizations) from the sample to the population, because probabilistic methods are used for identifying the units that will be included in the sample. This is a key benefit, as such generalizations are more likely to have external validity. The administration of the sample is focused on -
a. Frame: IT organizations
b. Elements: Managers, Team Leads, Agile Coaches, Designers, Architects, Database administrators and software developers, Testers, Business Analysts, Product Owners, Unit Heads, ScrumMasters
The data was collected from cities in India termed as IT hubs by NASSCOM. As ninety percent of the software development work is concentrated in Delhi–NCR, Bangalore, Hyderabad, Pune, Kolkata, Mumbai, Coimbatore, Kochi, Thiruvananthapuram, Bhubaneswar, Chennai and Indore, these cities were considered as the prime target areas for the study. The respondents were from the organizations – Societe Generale GSC, TESCO, Target, General Electric, ABB, Honeywell, Tata Consultancy Services, IBM, Cognizant, Wipro, Infosys, Capgemini, HCL, Valtech India, Nokia, Tech Mahindra, L and T Infotech, Accenture, Mphasis and R1 RCM. The following paragraphs give brief details of these organizations --
1. Societe Generale GSC – It is a subsidiary of Societe Generale (SG), the European banking and financial services organization, and is 100% owned by SG. It came into being in 2000, founded as an offshore development center (ODC) in Bangalore, and it also has an office in Chennai. SG GSC has more than 15 years of expertise in sustainable delivery to its name and has developed global best practices to promote the strategic ideas of the group.
2. TESCO – TESCO PLC, one of the largest retailers in the world, is a British multinational general merchandise and grocery retailer headquartered in Welwyn Garden City, Hertfordshire, England, United Kingdom. Tesco Bengaluru, the global services arm for Tesco worldwide, provides important business-related services for global Tesco operations. The Tesco team in Bengaluru is currently taking part in creating and executing strategic initiatives focused on Commercial, Financial, IT and Property.
3. Target - Target Corporation (NYSE: TGT) is a discount retailer serving the upscale market segment, providing high-quality and trendy merchandise at reasonable prices in friendly and clean environments. It is the second-largest discount store retailer in the United States, behind Walmart, and forms a component of the S&P 500 Index. Currently, key business functions at the Target headquarters in Minneapolis are supported by team members in India, who provide additional knowledge and capacity. It started operations in Bangalore in 2005, and the technology unit supported the retail domain in the US.
4. General Electric - The GE India Technology Center in Bangalore is focused on providing technical support across various industrial domains. General Electric (GE) is a US MNC headquartered in Boston, Massachusetts, USA. Currently, the organization has a presence in various market segments - Transportation, Renewable Energy, Aviation, Healthcare, Global Research, Oil and Gas, Lighting, Power and Capital - which meet the needs of the Automotive, Engineering, Life Sciences, Financial Services, Pharmaceutical, Medical Devices and Software Development domains.
5. ABB - ABB is an MNC focused on the engineering domain. In Bangalore, it is focused on providing technical support across various industrial domains. ABB is headquartered in Zürich, Switzerland. It operates predominantly in the power, automation and robotics domains, with operations in about one hundred countries and about 130,000 employees (December 2016).
6. Honeywell – They have five global centers of excellence and seven engineering and manufacturing centers focused on innovation and technology development in India. It is a $40 billion software-industrial company with about 130,000 employees across the world. They help to solve difficult issues focusing on productivity, energy, security and urbanization (global).
7. TCS – Tata Consultancy Services is one of the biggest IT organizations in India and Asia, and one of the main companies of the Tata Group. The organization was established in 1968 and is spread across 47 countries around the globe.
8. IBM – IBM has branches and main centers in many of the key cities in India. It returned to India in 1992 and is focused on the IT domain and related areas.
9. Cognizant – Cognizant is an American multinational corporation headquartered in Teaneck, New Jersey, United States, providing consulting, technology, digital and operations services. It has branches in many of the major Indian IT hubs – Mumbai, Chennai, Gurgaon, Kochi, Kolkata, Bangalore, Noida, Hyderabad, Coimbatore, Mangalore and Pune.
10. Wipro – Wipro is an IT organization focused on application development, consulting and outsourcing services, with branches in most of the IT hubs in India.
11. Infosys – Infosys is an Indian multinational corporation headquartered in Bengaluru, India, providing information technology, business consulting and outsourcing services. The organization has branches in most of the IT hubs in India and helps customers build and deliver their digital transformation strategies.
12. Capgemini – Capgemini has more than 85,000 people working in India, with branches in the important cities of Gurgaon, Gandhinagar, Noida, Bangalore, Mumbai, Hyderabad, Tiruchirappalli, Chennai, Pune, Kolkata and Salem. Capgemini India was established in 2001 with its first office in Mumbai.
13. HCL Technologies – Headquartered in Noida, Uttar Pradesh, HCL Technologies was started in 1991 and has branches in Hyderabad, Gurgaon, Bangalore, Mumbai, Chennai and Kolkata.
14. Valtech India – Valtech is a global digital agency that provides digital and advisory services across the entire value chain, with a mission to challenge the OTT business beyond merely technology. It has offices in Bangalore and Gurugram, as well as in Singapore and many cities in Europe.
15. Nokia – Nokia is an innovative leader in technology, with software development centers in Noida and Bangalore in India.
16. Tech Mahindra – Tech Mahindra is the fifth-largest software exporter in India and has seen good growth since its beginning in 1986, when the Mahindra Group and BT Group plc, UK, established a joint venture to deliver IT services and related activities.
17. L&T Infotech (LTI) – Larsen & Toubro Infotech, established in 1997, is a subsidiary of Larsen & Toubro and a global IT solutions and services organization headquartered in Mumbai, India. It is known for its Business-to-IT-connect solutions and has offices in Mumbai, Pune, Bangalore and Chennai.
18. Accenture – Accenture India, incorporated in 1991, operates as a management consulting, outsourcing and technology services organization. It is headquartered in Mumbai, India, with branches in eight cities – Chennai, Bangalore, Hyderabad, Pune, Kolkata, Noida, Gurgaon and Delhi.
19. Mphasis – Mphasis, founded in India in 2000, is an IT services organization headquartered in Bangalore, India, with a presence across 14 countries. It focuses on IT services, outsourcing services and related areas, and has offices in the major cities in India.
20. R1 RCM – Headquartered in Chicago, Illinois, USA, with offices in Gurgaon and Noida in India, R1 RCM is one of the largest hospital revenue cycle management organizations in the United States. It sells finance-related services to the healthcare industry, providing end-to-end revenue cycle management solutions through shared service operations, operational processes and technology solutions.
**Sample Size Calculation**
The sample size is based on the confidence interval (Naing & Rusli, 2006). The study uses a 95% confidence interval, and the sample size is calculated as follows –
\[ n = Z^2 \times R \times (1 - R) / d^2 \]
Where \( n \) = sample size,
\( Z \) = Z statistic for a level of confidence,
\( R \) = expected prevalence or proportion
(In proportion of one; if 20%, \( R = 0.2 \)), and
\( d \) = precision
(In proportion of one; if 5%, \( d = 0.05 \)).
\( Z \) statistic (\( Z \)): For the level of confidence of 95%, which is conventional, \( Z \) value is 1.96.
Figure 4.4: Sample Size Calculation (Formula)
1. **Calculation of Sample Size:**
\[ \text{Sample Size} = \frac{p(1-p)}{(\text{margin of error} / \text{confidence level score})^2} \]
where \( p \) is the assumed distribution (50%, so \( p(1-p) = 0.25 \)).
2. **Correction for a Finite Population:**
\[ \text{True Sample} = \frac{n \times N}{n + N - 1} \]
where \( n \) is the sample size from step 1 and \( N \) is the population size.
Figure 4.5: Sample Size Calculation (Finite Population Correction)
The confidence level score and the distribution are explained as follows. The confidence level score is the Z-value corresponding to the chosen confidence level: at a 95% confidence level it is taken as 1.96. The distribution indicates how responses on a topic are expected to be skewed; it is appropriate to work at a 50% distribution level, as this is the most conservative assumption.
\[ n = \frac{(1.96)^2 \times 0.5 \times 0.5}{(0.05)^2} \]
Or
\[ \text{Necessary Sample Size} = \frac{(Z\text{-score})^2 \times \text{StdDev} \times (1-\text{StdDev})}{(\text{margin of error})^2} \]
at a 95% confidence level, a standard deviation of 0.5 and a margin of error (confidence interval) of ±5%.
The calculation works out as follows –
\[ n = \frac{(1.96)^2 \times 0.5 \times 0.5}{(0.05)^2} = \frac{3.8416 \times 0.25}{0.0025} = \frac{0.9604}{0.0025} = 384.16 \]
Approximately 385 respondents are therefore needed: the sample size for a research study based on a 95% confidence interval has to be around three hundred and eighty-five.
For this study, the sample size covered is 400.
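To illustrate the arithmetic, the formula above and the finite population correction of Figure 4.5 can be reproduced in a few lines of Python. This sketch is not part of the original study, and the population figure in the example call is hypothetical:

```python
import math

def sample_size(z: float = 1.96, p: float = 0.5, e: float = 0.05) -> float:
    """Cochran's formula: n = Z^2 * p * (1 - p) / e^2."""
    return (z ** 2) * p * (1 - p) / (e ** 2)

def finite_population_correction(n: float, population: int) -> float:
    """True sample = (n * N) / (n + N - 1), as in Figure 4.5."""
    return (n * population) / (n + population - 1)

n = sample_size()                       # 384.16 at Z=1.96, p=0.5, e=0.05
print(math.ceil(n))                     # -> 385 respondents
# Hypothetical finite population of 10,000 software professionals:
print(round(finite_population_correction(n, 10_000)))  # -> 370
```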
4.2.6 INSTRUMENT DESIGN AND DATA COLLECTION
The extensive literature review provided the three dimensions of the framework identifying the key characteristics of an agile team that lead to improved work outcomes. The questionnaire was created on the foundation of the definitions of these dimensions in order to conduct the survey and collect the data. The following model describes the measurement of the data in detail –
<table>
<thead>
<tr>
<th>Dimensions</th>
<th>Variables</th>
<th>Citations</th>
</tr>
</thead>
<tbody>
<tr>
<td rowspan="9">Agile Software Development Team Performance – Improved Outcomes (Software Project Success)</td>
<td>Selection of Team and Skills</td>
<td>McGeachy, Robert (2010); Strode, Diane (2015)</td>
</tr>
<tr>
<td>Behavioral Factors</td>
<td>Lalsing, Kishnah and Pudaruth (2012); McGeachy, Robert (2010); Moe, Nils Brede, Dingsøyr, Torgeir & Dybå, Tore (2009)</td>
</tr>
<tr>
<td>People Related Factors</td>
<td>Xu, Peng & Shen, Yide (2015); Whitworth (2006)</td>
</tr>
<tr>
<td>Organizational Culture</td>
<td>Zannier and Maurer (2007); Zannier and others (2006); Zannier, Chiasson and Maurer (2007)</td>
</tr>
<tr>
<td>Collaboration and Communication</td>
<td>Whitworth (2006); Tselikovska, Ganna (2013)</td>
</tr>
<tr>
<td>Interaction of the People with the Environment</td>
<td>Mishra, Deepti; Mishra, Alok and Ostrovska, Sofiya (2012)</td>
</tr>
<tr>
<td>Virtual and Physical Work Environment</td>
<td>Dwivedi, Shubhra (2015); Ashmore, Sondra (2012)</td>
</tr>
<tr>
<td>Innovative Work Techniques for Problem Solving</td>
<td>McCandless, Keith & Lipmanowicz, Henri (2014)</td>
</tr>
<tr>
<td>Disruptive Innovation</td>
<td>Wördenweber, Burkard & Weissflog, Uwe (2006); Jain, Radhika and Meso, Peter (2004)</td>
</tr>
</tbody>
</table>
Table 4.1: Research Model
The three dimensions, measured through the nine variables described in the table above, are –
a. People Related Factors
b. Interaction of the People with the Environment
c. Innovative Work Techniques for Problem Solving
These dimensions were identified through an extensive literature review, and the table above maps each dimension to its measurement, with the appropriate references to the literature. The identification of the key characteristics of an agile software team, in the form of a framework that enables the team to exhibit optimal and high performance leading to improved work outcomes, is measured through the success of the software project. A survey questionnaire was developed on the basis of this table and revised five times with inputs from academic and industry experts. The questionnaire is given in Appendix 2.
The three dimensions, evaluated through nine items, are measured on a seven-point Likert scale. Demographic data is captured as nominal data through multiple-choice or open-ended questions.
Figure 4.6: Agile Software Development Team Performance
4.2.7 ANALYSIS OF DATA
The data in this study was analysed through multiple regression using SPSS version 21. Interpretation was based on the tables and graphs generated after running the multiple regressions.
4.2.8 STATISTICAL PROCEDURE
In this research study, multiple regression was performed in SPSS to identify the impact of the independent variables (the key attributes and components of the framework of characteristics of an agile software team) on the dependent variables (the performance of the agile software development team leading to improved work outcomes and thereby successful project delivery).
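For readers reproducing the analysis outside SPSS, a minimal sketch of an equivalent multiple regression in Python using statsmodels is given below; the file name and column names are hypothetical placeholders for the questionnaire items, not the study's actual data:

```python
import pandas as pd
import statsmodels.api as sm

# Hypothetical columns standing in for the framework variables (predictors)
# and the work-outcome score (dependent variable).
predictors = ["team_selection", "behavioral_factors", "organizational_culture"]

df = pd.read_csv("survey_responses.csv")   # 400 Likert-scored responses
X = sm.add_constant(df[predictors])        # add the intercept term
y = df["project_success"]

model = sm.OLS(y, X).fit()                 # ordinary least squares regression
print(model.summary())                     # coefficients, R-squared, p-values
```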
4.2.9 ETHICAL CONSIDERATIONS
Any research contributes a great deal to the existing corporate and industry body of knowledge; it is therefore imperative that an ethical approach be followed while conducting it. Confidentiality of the data shared by respondents is essential, and the questionnaire accordingly contained a statement to this effect (refer to Appendix 2).
LiveCode 8.1.9-rc-2 Release Notes
- Overview
- Known issues
- Platform support
- Windows
- Linux
- Mac
- iOS
- Android
- HTML5
- Setup
- Installation
- Uninstallation
- Reporting installer issues
- Activating LiveCode Indy or Business edition
- Command-line installation
- Command-line uninstallation
- Command-line activation for LiveCode Indy or Business edition
- Engine changes
- Throw error when changing behavior from behavior script (8.1.9-rc-1)
- Specific engine bug fixes (8.1.9-rc-2)
- Specific engine bug fixes (8.1.9-rc-1)
- IDE changes
- Specific IDE bug fixes (8.1.9-rc-1)
- Previous release notes
Overview
LiveCode 8.1 provides important improvements for delivering high-quality cross-platform applications!
- LiveCode Indy and Business editions now come with the tsNet external, which supercharges LiveCode's Internet features and performance. LiveCode 8.1 also introduces mergHealthKit, for accessing activity, sport and health data on iOS devices.
- The standalone builder now has a greatly-improved user experience for including externals, script libraries and LiveCode Builder extensions in your cross-platform application. Usually, it'll now do the right thing automatically, but you can still select the specific inclusions you need.
- The IDE has lots of other upgrades, too: a keyboard-navigable Project Browser that highlights any scripts that failed to compile, an improved dictionary user interface, and access to the message box just by starting to type.
- The player control can be used in Windows applications without users needing to install any additional libraries or dependencies, thanks to a brand new player implementation based on DirectShow. For most apps, it should now be unnecessary to install or use QuickTime at all.
- The LiveCode Builder programming language has had some enhancements as part of the Infinite LiveCode project. Variables now get initialised by default, unsafe blocks and handlers can be used to flag sections of code that do dangerous things, and you can even include raw bytecode if necessary.
### Known issues
- The installer will currently fail if you run it from a network share on Windows. Please copy the installer to a local disk before launching on this platform.
- The browser widget does not work on 32-bit Linux.
- 64-bit standalones for Mac OS X do not have support for audio recording.
### Platform support
The engine supports a variety of operating systems and versions. This section describes the platforms that we ensure the engine runs on without issue (although in some cases with reduced functionality).
#### Windows
LiveCode supports the following versions of Windows:
- Windows XP SP2 and above
- Windows Server 2003
- Windows Vista SP1 and above (both 32-bit and 64-bit)
- Windows 7 (both 32-bit and 64-bit)
- Windows Server 2008
- Windows 8.x (Desktop)
- Windows 10
**Note:** On 64-bit Windows installations, LiveCode runs as a 32-bit application through the WoW layer.
Linux
LiveCode supports the following Linux distributions, on 32-bit or 64-bit Intel/AMD or compatible processors:
- Ubuntu 14.04 and 16.04
- Fedora 23 & 24
- Debian 7 (Wheezy) and 8 (Jessie) [server]
- CentOS 7 [server]
LiveCode may also run on Linux installations which meet the following requirements:
- Required dependencies for core functionality:
- glibc 2.13 or later
- glib 2.0 or later
- Optional requirements for GUI functionality:
- GTK/GDK 2.24 or later
- Pango with Xft support
- esd (optional, needed for audio output)
- mplayer (optional, needed for media player functionality)
- lcms (optional, required for color profile support in images)
- gksu (optional, required for privilege elevation support)
Note: If the optional requirements are not present then LiveCode will still run but the specified features will be disabled.
Note: The requirements for GUI functionality are also required by Firefox and Chrome, so if your Linux distribution runs one of those, it will run LiveCode.
Note: It may be possible to compile and run LiveCode Community for Linux on other architectures but this is not officially supported.
Mac
The Mac engine supports:
- 10.6.x (Snow Leopard) on Intel
- 10.7.x (Lion) on Intel
- 10.8.x (Mountain Lion) on Intel
- 10.9.x (Mavericks) on Intel
- 10.10.x (Yosemite) on Intel
- 10.11.x (El Capitan) on Intel
- 10.12.x (Sierra) on Intel
- 10.13.x (High Sierra) on Intel
iOS
iOS deployment is possible when running LiveCode IDE on a Mac, and provided Xcode is installed and has been set in LiveCode Preferences (in the Mobile Support pane).
Currently, the supported versions of Xcode are:
- Xcode 4.6 on MacOS X 10.7
- Xcode 5.1 on MacOS X 10.8
- Xcode 6.2 on MacOS X 10.9
- Xcode 6.2 and 7.2 on Mac OS X 10.10
- Xcode 8.2 on MacOS X 10.11
- Xcode 9.2 on MacOS 10.12 (Note: You need to upgrade to 10.12.6)
- Xcode 9.2 on MacOS 10.13
It is also possible to set other versions of Xcode, to allow testing on a wider range of iOS simulators. For instance, on OS X 10.10 (Yosemite), you can add Xcode 5.1 in the Mobile Support preferences, to let you test your stack on the iOS Simulator 7.1.
We currently support deployment for the following versions of iOS:
- 6.1 [simulator]
- 7.1 [simulator]
- 8.2 [simulator]
- 9.2
- 10.2
- 11.2
Android
LiveCode allows you to save your stack as an Android application, and also to deploy it on an Android device or simulator from the IDE.
Android deployment is possible from Windows, Linux and Mac OSX.
The Android engine supports devices using ARMv6, ARMv7 or ARMv8 processors. It will run on the following versions of Android:
- 2.3.3-2.3.7 (Gingerbread)
- 4.0 (Ice Cream Sandwich)
- 4.1-4.3 (Jelly Bean)
- 4.4 (KitKat)
- 5.0-5.1 (Lollipop)
- 6.0 (Marshmallow)
- 7.0 (Nougat)
- 8.0 (Oreo)
To enable deployment to Android devices, you need to download the Android SDK, and then use the 'Android SDK Manager' to install:
- the latest "Android SDK Tools"
- the latest "Android SDK Platform Tools"
You also need to install the Java Development Kit (JDK). On Linux, this is usually packaged as "openjdk". LiveCode requires JDK version 1.6 or later.
Once you have set the path of your Android SDK in the "Mobile Support" section of the LiveCode IDE’s preferences, you can deploy your stack to Android devices.
Some users have reported successful Android Watch deployment, but it is not officially supported.
**HTML5**
LiveCode applications can be deployed to run in a web browser, by running the LiveCode engine in JavaScript and using modern HTML5 JavaScript APIs.
HTML5 deployment does not require any additional development tools to be installed.
LiveCode HTML5 standalone applications are currently supported for running in recent versions of Mozilla Firefox, Google Chrome or Safari. For more information, please see the "HTML5 Deployment" guide in the LiveCode IDE.
### Setup
### Installation
Each version of LiveCode can be installed to its own, separate folder, which allows multiple versions of LiveCode to be installed side-by-side. On Windows (and Linux), each version of LiveCode has its own Start Menu (or application menu) entry. On Mac OS X, each version has its own app bundle.
On Mac OS X, install LiveCode by mounting the `.dmg` file and dragging the app bundle to the Applications folder (or any other suitable location).
For Windows and Linux, the default installation locations when installing for "All Users" are:
<table>
<thead>
<tr>
<th>Platform</th>
<th>Path</th>
</tr>
</thead>
<tbody>
<tr>
<td>Windows</td>
<td><code><x86 program files folder>/RunRev/LiveCode <version></code></td>
</tr>
<tr>
<td>Linux</td>
<td><code>/opt/livecode/livecode-<version></code></td>
</tr>
</tbody>
</table>
The installations when installing for "This User" are:
<table>
<thead>
<tr>
<th>Platform</th>
<th>Path</th>
</tr>
</thead>
<tbody>
<tr>
<td>Windows</td>
<td><code><user roaming app data folder>/RunRev/Components/LiveCode <version></code></td>
</tr>
<tr>
<td>Linux</td>
<td><code>~/.runrev/components/livecode-<version></code></td>
</tr>
</tbody>
</table>
**Note:** If installing for "All Users" on Linux, either the `gksu` tool must be available, or you must manually run the LiveCode installer executable as root (e.g. using `sudo` or `su`).
### Uninstallation
On Windows, the installer hooks into the standard Windows uninstall mechanism. This is accessible from the "Add or Remove Programs" applet in the Windows Control Panel.
On Mac OS X, drag the app bundle to the Trash.
On Linux, LiveCode can be removed using the `setup.x86` or `setup.x86_64` program located in LiveCode's installation directory.
**Reporting installer issues**
If you find that the installer fails to work for you then please report it using the LiveCode Quality Control Centre or by emailing support@livecode.com.
Please include the following information in your report:
- Your platform and operating system version
- The location of your home or user folder
- The type of user account you are using (guest, restricted, admin etc.)
- The installer log file.
The installer log file can be located as follows:
<table>
<thead>
<tr>
<th>Platform</th>
<th>Path</th>
</tr>
</thead>
<tbody>
<tr>
<td>Windows 2000/XP</td>
<td><code><documents and settings folder>/<user>/Local Settings/</code></td>
</tr>
<tr>
<td>Windows Vista/7</td>
<td><code><users folder>/<user>/AppData/Local/RunRev/Logs</code></td>
</tr>
<tr>
<td>Linux</td>
<td><code><home>/.runrev/logs</code></td>
</tr>
</tbody>
</table>
**Activating LiveCode Indy or Business edition**
The licensing system ties your product licenses to a customer account system, meaning that you no longer have to worry about finding a license key after installing a new copy of LiveCode. Instead, you simply have to enter your email address and password that has been registered with our customer account system and your license key will be retrieved automatically.
Alternatively it is possible to activate the product via the use of a specially encrypted license file. These will be available for download from the customer center after logging into your account. This method will allow the product to be installed on machines that do not have access to the internet.
**Command-line installation**
It is possible to invoke the installer from the command-line on Linux and Windows. When doing command-line installation, no GUI will be displayed. The installation process is controlled by arguments passed to the installer.
Run the installer using a command in the form:
```bash
<installer> install noui [OPTION ...]
```
where `<installer>` should be replaced with the path of the installer executable or app (inside the DMG) that has been downloaded. The result of the installation operation will be written to the console.
The installer understands any of the following OPTIONS:
<table>
<thead>
<tr>
<th>Option</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>-allusers</td>
<td>Install the IDE for "All Users". If not specified, LiveCode will be installed for the current user only.</td>
</tr>
<tr>
<td>-desktopshortcut</td>
<td>Place a shortcut on the Desktop (Windows-only)</td>
</tr>
<tr>
<td>-startmenu</td>
<td>Place shortcuts in the Start Menu (Windows-only)</td>
</tr>
<tr>
<td>-location LOCATION</td>
<td>The folder to install into. If not specified, the LOCATION defaults to those described in the "Installation" section above.</td>
</tr>
<tr>
<td>-log LOGFILE</td>
<td>The file to which to log installation actions. If not specified, no log is generated.</td>
</tr>
</tbody>
</table>
**Note:** the command-line installer does not do any authentication. When installing for "All Users", you will need to run the installer command as an administrator.
As the installer is actually a GUI application, it needs to be run slightly differently from other command-line programs.
On Windows, the command is:
```
start /wait <installer> install noui [OPTION ...]
```
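For example, a silent all-users installation on Windows that adds Start Menu shortcuts and writes a log might look like this (the installer filename here is illustrative):

```
start /wait LiveCode-8_1_9_rc_2-Windows.exe install noui -allusers -startmenu -log C:\install.log
```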
**Command-line uninstallation**
It is possible to uninstall LiveCode from the command-line on Windows and Linux. When doing command-line uninstallation, no GUI will be displayed.
Run the uninstaller using a command of the form:
```
<uninstaller> uninstall noui
```
where `<uninstaller>` is `.setup.exe` on Windows and `.setup.x86` on Linux. This executable, for both platforms, is located in the folder where LiveCode is installed.
The result of the uninstallation operation will be written to the console.
**Note:** the command-line uninstaller does not do any authentication. When removing a version of LiveCode installed for "All Users", you will need to run the uninstaller command as an administrator.
**Command-line activation for LiveCode Indy or Business edition**
It is possible to activate an installation of LiveCode for all users by using the command-line. When performing command-line activation, no GUI is displayed. Activation is controlled by passing command-line arguments to LiveCode.
Activate LiveCode using a command of the form:
```
<livecode> activate -file LICENSEFILE -passphrase SECRET
```
where `<livecode>` should be replaced with the path to the LiveCode executable or app that has been previously installed.
This loads license information from the manual activation file `LICENSEFILE`, decrypts it using the given `SECRET` passphrase, and installs a license file for all users of the computer. Manual activation files can be downloaded from the My Products page in the LiveCode account management site.
It is also possible to deactivate LiveCode with:
```
<livecode> deactivate
```
Since LiveCode is actually a GUI application, it needs to be run slightly differently from other command-line programs.
On Windows, the command is:
```
start /wait <livecode> activate -file LICENSE -passphrase SECRET
start /wait <livecode> deactivate
```
On Mac OS X, you need to do:
```
<livecode>/Contents/MacOS/LiveCode activate -file LICENSE -passphrase SECRET
<livecode>/Contents/MacOS/LiveCode deactivate
```
---
**Engine changes**
**Throw error when changing behavior from behavior script (8.1.9-rc-1)**
Previously it was theoretically possible to change the behavior of an object from that object's existing behavior script. This will now result in an execution error:
```
parentScript: can't change parent while parent script is executing
```
This change was necessary as the engine would occasionally crash when changing a behavior this way, and would be guaranteed to crash if stepping over the behavior script line that changes the behavior.
**Specific engine bug fixes (8.1.9-rc-2)**
20582 Ensure the iOS device plist has correct values for the version of Xcode and SDKs used to build the standalone
20884 Fixed AppStore submission error when minimum deployment target is iOS 11.0 or more
**Specific engine bug fixes (8.1.9-rc-1)**
11039 Throw error when changing behavior from behavior script
18243 Ensure horizontal two finger scrolling on Linux respects the system settings
20256 Ensure iOS picker subview width scales correctly
20419 Fix accelRender issues on Android
20627 Set default timeout in tsNet to prevent app hangs when Internet connection drops
20633 Ensure vtab doesn't interfere with styling
20641 Added support for splash screens and icon for iPhone X
20642 Fix crash when undoing a group deletion
20742 Add tsNet builds for iOS SDK 11.2
20755 Fix crash when calling iPhoneSetRemoteControlDisplay
20758 Fix performance regression in replaceText
20760 Fixed documentation for tsNetSetTimeouts to specify correct units for all parameters
20763 Fix crash when deleting datagrid then undoing
**IDE changes**
**Specific IDE bug fixes (8.1.9-rc-1)**
17819 Enable cmd+c in dictionary
20713 Enable lock/unlock text from popUp for fields
**Previous release notes**
- LiveCode 8.1.8 Release Notes
- LiveCode 8.1.7 Release Notes
- LiveCode 8.1.6 Release Notes
- LiveCode 8.1.5 Release Notes
- LiveCode 8.1.4 Release Notes
- LiveCode 8.1.3 Release Notes
- LiveCode 8.1.2 Release Notes
- LiveCode 8.1.1 Release Notes
- LiveCode 8.1.0 Release Notes
- LiveCode 8.0.2 Release Notes
- LiveCode 8.0.1 Release Notes
- LiveCode 8.0.0 Release Notes
- LiveCode 7.1.4 Release Notes
- LiveCode 7.1.3 Release Notes
- LiveCode 7.1.2 Release Notes
- LiveCode 7.1.1 Release Notes
- LiveCode 7.1.0 Release Notes
- LiveCode 7.0.6 Release Notes
- LiveCode 7.0.4 Release Notes
- LiveCode 7.0.3 Release Notes
- LiveCode 7.0.1 Release Notes
- LiveCode 7.0.0 Release Notes
- LiveCode 6.7.9 Release Notes
- LiveCode 6.7.8 Release Notes
- LiveCode 6.7.7 Release Notes
- LiveCode 6.7.6 Release Notes
- LiveCode 6.7.4 Release Notes
- LiveCode 6.7.2 Release Notes
- LiveCode 6.7.11 Release Notes
- LiveCode 6.7.10 Release Notes
- LiveCode 6.7.1 Release Notes
- LiveCode 6.7.0 Release Notes
- LiveCode 6.6.2 Release Notes
- LiveCode 6.6.1 Release Notes
- LiveCode 6.6.0 Release Notes
- LiveCode 6.5.2 Release Notes
- LiveCode 6.5.1 Release Notes
- LiveCode 6.5.0 Release Notes
- LiveCode 6.1.3 Release Notes
- LiveCode 6.1.2 Release Notes
- LiveCode 6.1.1 Release Notes
- LiveCode 6.1.0 Release Notes
- LiveCode 6.0.2 Release Notes
- LiveCode 6.0.1 Release Notes
- LiveCode 6.0.0 Release Notes
Domain Specific Modelling for Clinical Research
Jim Davies, Jeremy Gibbons, Adam Milward, David Milward, Seyyed Shah, Monika Solanki, and James Welch
Department of Computer Science, University of Oxford, UK
firstname.lastname@cs.ox.ac.uk
Abstract
The value of integrated data relies upon common data points having an accessible, consistent interpretation; to achieve this at scale requires appropriate informatics support. This paper explains how a model-driven approach to software engineering and data management, in which software artefacts are generated automatically from data models, and models are used as metadata, can achieve this. It introduces a simple data modelling language, consistent with standard object modelling notations, together with a set of tools for model creation, maintenance, and deployment. It reports upon the application of this approach in the provision of informatics support for two large-scale clinical research initiatives.
Categories and Subject Descriptors D.2.12 [Interoperability]: D.3.3 Programming Languages: Specialized application languages
Keywords domain-specific modeling languages, interoperability, mapping, code generation, model transformation, meta-modeling
1. Introduction
To obtain the evidence required to support the development and introduction of a new treatment, or a new diagnostic tool, we need to consider the results of detailed, clinical observations of a large number of individuals. These observations need to be made, and the results recorded, in a consistent fashion.
The usual way of achieving this is through prior agreement upon a study protocol: a detailed specification of the information required, and an account of the proposed analysis. Clinical staff receive training and support to ensure that data collection proceeds according to the protocol, and data is recorded using a single set of ‘case report forms’.
There are two problems with this. The first is the cost of manual data collection, of additional training, and of bespoke systems development. The second is a lack of any guarantee of consistency across studies: even where two studies require what is essentially the same information, differences in specifications may mean that the data collected is incompatible.
For example, in the investigation of a patient with breast cancer, ‘histological type of tumour’ is a commonly collected piece of information. However, one study might record this against an enumeration such as
| in-situ ductal only | tubular/cribriform | ductal grade unknown | mixed |
whereas another might offer a choice of
| invasive ductal or no specific type | tubular | mucinous | invasive cribriform |
Even if we assume that the clinical staff have the same interpretation of these technical terms, the resulting data cannot be combined without additional effort and some loss of information.
We can reduce the cost of new studies by re-using data already collected: in earlier studies, or in the clinical information systems used to support patient care. We can reduce the cost of bespoke systems development by generating case report forms, queries, databases, and workflows from the detailed specifications in the protocol. We can increase consistency across studies, and facilitate re-use of the data, by coordinating the design of specifications.
To do this in practice, and at scale, requires effective, domain-specific modelling. We need models that describe study data: the detailed data specifications mentioned above. We need models that describe the relevant contents of clinical information systems. We need models that describe forms, queries, databases, and workflows used for data collection, transmission, and integration.
We need also a mechanism for relating the declarations of individual data items in different models. We need to be able to record the fact that two items, declared in different models, represent the same information: that any value assigned would have the same interpretation in both contexts.
This is precisely what is needed if we are to re-use data from different systems, or combine data from different studies.
In this paper, we describe the notion of a model catalogue: an application that stores and presents models, links data declarations, and supports the generation of artifacts such as case report forms and data schemas. We introduce the domain-specific language that describes the catalogue contents. We then report upon the experience of deploying the catalogue, and the domain-specific modelling language, in the development of national infrastructure for clinical research.
2. Data Models
2.1 Data Sets and Data Standards
A data set definition for clinical research study will consist in a number of different parts, each of which declares a set of related data items. Typically, this will be a set of data items that would be collected together: the results of a particular kind of observation, or the account of a particular kind of intervention. These parts may be ‘repeating’: the same kind of observation may be made many times of a single study participant.
A data item declaration should explain not only the name under which values are to be stored, but also the type of those values. If the type is numeric, then the unit of measurement should be given. If the type is an enumeration, then the intended interpretation of each value should be explained. Finally, the parts of the dataset may be connected or related to one another, and these relationships may have constrained multiplicities.
It should be clear that a dataset definition can be represented as a class diagram or object model. Data items can be introduced as attributes, parts of the model as classes, and relationships between classes as associations—complete with multiplicities. Data types and enumerations can be used to support attribute declarations.
A data set definition may apply to more than one study. It may also be used as a data standard for communication between information systems used in healthcare. The UK National Laboratory Medicines Catalogue, for example, provides a set of standard definitions for pathology reports, to facilitate safe, effective data transfer across different systems.
2.2 Studies, Forms, Schemas, and Databases
A data set definition will not contain enough information to completely characterise a study. The study protocol document will contain precise information about study timetables, workflows, and procedures, as well as a considerable amount of free text explanation. A domain-specific modelling language for studies would be more expressive than a domain-specific language of data set definitions.
Similarly, a domain-specific modelling language for case report forms will support the description of form structures, sections, and ‘skip logic’: for example, ‘if yes, then go to Question 5’. A modelling language for XML schemas will support the description of schema structures, choices, and complex types, and a modelling language for databases will include information about queries and constraints.
A model of a case report form will contain a number of data item declarations, each with the same information content as a data item definition in an abstract data model. We might be forgiven, then, for thinking that we might not need an abstract data modelling language: we could simply consider, relate, and re-use data definitions from models of ‘real’ artefacts: studies, forms, schemas, and databases.
However, the fact that a model corresponds to a particular artefact, or even a particular kind of artefact, provides additional context for the data definitions it contains. Whether or not there are additional, explicit constraints upon a data item, the fact that any data collected will have been entered into an implementation of the form tells us strictly more about it—narrowing the interpretation of the data definition.
2.3 Models as Metadata
A domain-specific model used in the generation or documentation of an artefact represents valuable metadata about that artefact, and also about any data that the artefact is used to collect or produce. A domain-specific data model—or a data component of any model, for that matter—can be used as metadata about other models. In this way, we can relate artefacts described by different models, and hence the data collected by different studies, forms, or schemas.
To use a model as metadata for an artefact, we have only to create a link between the artefact and a published instance of the model, held in a repository or model catalogue. This link may be created automatically if the artefact is generated from the model, or if the model is generated from the artefact: for example, we may generate a more abstract data model from the schema of a relational database.
To use a model as metadata for another model, we create links between the two models. Typically, these will be links between individual data items: for example, an attribute in a model of a form, labelled height, could be linked to an attribute in a model of a data set or data standard, labelled patient’s height in cm, measured without shoes, to indicate that form attribute has all of the properties described in the data set definition.
As we argued above, the form attribute will be further constrained by the remainder of the form model, so the relationship between the two is asymmetric. For this reason, we refer to such a link as an ‘implements’ relationship. If a pair of attributes are ‘implementations’ of each other, then we refer to the pair of links as a ‘same as’ relationship.
In general, we do not expect to find 'same as' relationships between attributes. Different models will add different constraints to the definition of a data item. Instead, consistency of data definitions between studies, forms, or schemas will be represented by 'implements' links to the same data set or data standard. The data definitions are not identical, but are consistent as far as the constraints of the data standard are concerned.
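To make the asymmetry concrete, here is a minimal sketch of these link semantics in Python; the class and attribute names are hypothetical, and this is an illustration rather than the catalogue's actual API:

```python
from dataclasses import dataclass, field

@dataclass
class DataItem:
    """A data item declaration in some model (hypothetical structure)."""
    name: str
    implements: set = field(default_factory=set)  # names of items this one refines

def same_as(a: DataItem, b: DataItem) -> bool:
    """A 'same as' relationship holds only when each item implements the other."""
    return b.name in a.implements and a.name in b.implements

form_height = DataItem("height")  # attribute on a form model
standard = DataItem("patient's height in cm, measured without shoes")

form_height.implements.add(standard.name)  # directed 'implements' link
assert not same_as(form_height, standard)  # asymmetric: the form item is more constrained
```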
3. Implementation
3.1 Generic Data Modelling Language
A model for a generic data modelling language is shown in Figure 1. All data classes, data elements, and data types are declared and managed within Models. A Class may contain many Elements, and may have other classes as components—corresponding to the UML concept of composition. An element has a unique Type, which may be reference-valued, a Primitive type, or an Enumeration. Reference types correspond to class names within this or some other model. Enumeration values (EnumValue) are managed as separate, identified items.
A model may be declared as a new version of an existing model. Any of the items within a model may be declared as a refinement of an existing item. This indicates that its interpretation or semantics should be seen as an extension of those associated with that other item. Typically, this will correspond to the author of the model recording that a particular data class or data element is intended to conform to some existing, published standard.
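The following sketch gives a concrete, simplified reading of this structure in Python; the type names follow Figure 1, but the code itself is an illustration under those assumptions, not the actual catalogue implementation:

```python
from dataclasses import dataclass, field
from typing import List, Optional, Union

@dataclass
class Primitive:
    name: str                     # e.g. "integer", "string"

@dataclass
class Enumeration:
    name: str
    values: List[str]             # EnumValues, managed as identified items

@dataclass
class Reference:
    class_name: str               # names a Class in this or some other model

@dataclass
class Element:
    name: str
    type: Union[Primitive, Enumeration, Reference]  # each element has a unique type

@dataclass
class DataClass:
    name: str
    elements: List[Element] = field(default_factory=list)
    components: List["DataClass"] = field(default_factory=list)  # UML composition

@dataclass
class Model:
    name: str
    classes: List[DataClass] = field(default_factory=list)
    version_of: Optional["Model"] = None  # a model may be a new version of another
```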
3.2 Domain-specific Data Models
The current model catalogue implementation supports the creation, storage, and management of data models in the language of Figure 1. Domain-specific models of studies, forms, and schemas are generated from these models, using different sets of heuristics, but are not themselves managed as catalogue items. A more comprehensive approach would involve persisting, managing, and editing these models alongside the generic data models that they correspond to; the generation process could then work in both directions.
Figure 2 shows the relationship between domain-specific data models—in particular, models of form designs—and generic data models. It shows also the implementations derived from the form designs using a model-driven approach.
In the terminology of MDA [18], we may see the form model as a platform-independent entity (at the M1 metamodeling level) and the form implementation as a platform-specific entity (also at M1).
The data model language and the form model language are both entities at the M2 level. The catalogue would support both of these as instances of a data metamodeling language (or model metalanguage) at the M3 level. The relationship between the generic data model and the corresponding domain-specific model would be one of data refinement, in the sense of [19]: in the case of a form model, this would be a simple correspondence between classes, elements, and datatypes; for a workflow or process model, the ways in which data is exposed through transactions and events would need to be considered.
The advantage of this more comprehensive approach is that aspects of form design and implementation can be introduced and managed directly, through editing of Form Models, rather than being encoded as options in a generation pipeline. The corresponding generic data model may be abstracted automatically from the form model; alternatively, the form model may be partially (re-)generated from the generic data model, in the sense of [6].
An alternative approach is shown in Figure 3, in which the generic data model is used as a metamodel for domain-specific data modelling languages. Existing language and tool support for the use of models as metamodels in this context does not allow for the definition and maintenance of the ‘instance of’ relationships in the diagram, and the catalogue implementation under development follows the approach of Figure 2. However, this alternative approach would remove the need to maintain separate, generic data models, and it remains the subject of active investigation.
3.3 Catalogue Implementation
In designing the catalogue, we paid considerable attention to the ISO/IEC 11179 standard for metadata registration, which sets out a design for metadata catalogues. Some difficulties have been encountered in the practical application of ISO/IEC 11179 at scale: see, for example [13] and [14].
The principal complaint is that there is no structuring mechanism for data definitions: data items can be associated only at the conceptual level. As a result, each item has to be defined separately: there is no opportunity to add the same information to several data item definitions at once, whether this is within the model, or as a link to another model.
The approach taken in our implementation of the model catalogue is more general: the use of tagging supports multiple classification schemes, and allows the representation of relationships as well as simple taxonomies. However, it should be clear that our catalogue could be used, under suitable constraints, as an effective implementation of the ISO/IEC 11179 standard.
This applies also to the processes of registration, versioning, and publication. Every object stored in the catalogue is managed as an administered item, in the language of the standard. The notion of linking in the catalogue implementation allows us to exploit this administrative information in the automatic creation and maintenance of semantic links, and the administrative processes are generalised to provide support for collaborative development.
The existing model catalogue is built using the Groovy/Grails framework, which takes a model-view-controller approach to data management and presentation. The key advantage of this platform has been the ability to revisit the underlying data representation—the domain model—without needing to re-implement the presentation layer, and vice versa. As the software was developed in the course of application, this was particularly important.
A ‘discourse’ plugin provides support for collaborative development of models and data definitions, with users able to contribute to a comment history for each administered item, prompting responses from other users as necessary. This proved particularly important given that many of the clinical scientists were contributing to the dataset development in their spare time.
3.4 Generation Pipelines
The existing implementation has been used to generate several different types of artefact, including:
**Case report form models** for consumption by the OpenClinica clinical trials management system. These take the form of Excel spreadsheets with columns specifying form structure, question text, response types, logical constraints (including skip logic), and presentation controls. These models are generated from form models in the data modelling language by way of a complex transformation: the hierarchical structure of the data model is flattened to produce lists of sections, repeating groups, and questions. Default values and implementations are included as part of the transformation: for example, we provide custom validation for textual fields that are tagged with constraints in the form of regular expressions.
**Database triggers** To support the automatic processing of data received from the clinical trials system, we require a collection of triggers for the underlying database. These ensure that the combination of existing and newly-received data is properly normalised. This is particularly important where data is being collected against different versions of the same form.
**XML schemas for electronic document submission** This is a more straightforward transformation, as the structure of the XML schemas is closer to that of the data models. However, additional processing is required to produce normalised, readable schemas. For example, if there are several data elements sharing the same datatype, we would wish to include that datatype only once within the schema.
**Tools for creating and validating .csv files** For some of the systems that we are working with, the easiest way to import or export information is in comma-separated value format. In this case, we are not generating a specification of the data format in some implementation language; instead, we are generating tools that will ensure that the values presented in a file comply with the model constraints (see the sketch after this list).
**Data manuals** Datasets and data standards in health informatics are communicated through documents in which each data point is listed along with its intended interpretation. These manuals are automatically generated, ensuring consistency between the information that they present and the tools used for data acquisition and processing.
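To give a flavour of these pipelines, the sketch below (referenced from the .csv item above) validates a field value against a regular-expression constraint recorded in the model, in the spirit of the generated validation tools. It uses POSIX regular expressions; the element names, patterns, and table-based model lookup are illustrative assumptions, not the project's actual definitions:

```c
#include <regex.h>
#include <stdio.h>
#include <string.h>

/* A data element as it might appear in a model: a name plus a
 * regular-expression constraint tagged onto its datatype. */
typedef struct {
    const char *element;
    const char *pattern;  /* constraint from the model, as a POSIX ERE */
} ElementConstraint;

static const ElementConstraint model[] = {
    { "nhs_number",  "^[0-9]{10}$" },
    { "sample_date", "^[0-9]{4}-[0-9]{2}-[0-9]{2}$" },
};

/* Validate one field value against the constraint recorded for its
 * element; returns 1 if the value conforms, 0 otherwise. */
static int validate(const char *element, const char *value) {
    for (size_t i = 0; i < sizeof model / sizeof model[0]; i++) {
        if (strcmp(model[i].element, element) != 0) continue;
        regex_t re;
        if (regcomp(&re, model[i].pattern, REG_EXTENDED | REG_NOSUB) != 0)
            return 0;                     /* malformed pattern in the model */
        int ok = (regexec(&re, value, 0, NULL, 0) == 0);
        regfree(&re);
        return ok;
    }
    return 0;                             /* element not in the model */
}

int main(void) {
    printf("%d\n", validate("nhs_number", "4857773456"));  /* 1 */
    printf("%d\n", validate("nhs_number", "48577"));       /* 0 */
    printf("%d\n", validate("sample_date", "2015-06-30")); /* 1 */
    return 0;
}
```

The same walk-the-model-and-emit pattern underlies the spreadsheet and XML schema generators, with validation replaced by flattening or schema emission.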
4. Experience
4.1 Re-use of Data from Clinical Information Systems
The UK National Institute for Health Research (NIHR) is funding an £11m programme of work across five large university-hospital partnerships: at Oxford, Cambridge, Imperial College London, University College London, and Guy’s and St. Thomas’. The aim of the programme is to create the infrastructure needed to support data re-use and translational research across these five institutions.
The programme, the NIHR Health Informatics Collaborative (HIC), was initiated in 2013, with a focus upon five therapeutic areas: acute coronary syndromes, renal transplantation, ovarian cancer, hepatitis, and intensive care. The scope was increased in 2015 to include other cancers—breast, colorectal, lung, and prostate—and other infectious diseases, including tuberculosis.
The key component of the infrastructure is a set of repositories of patient data within each of the five institutions. The intention is that these repositories should hold a core set of data for each therapeutic area, populated automatically from clinical systems, together with detailed documentation on the provenance and interpretation of each data point.
Researchers can use the documentation to determine the availability and suitability of data for a particular study. They can use it also to determine comparability across institutions: whether there are any local differences in processes or equipment that would have a bearing upon the combination and re-use of the corresponding data. Once a study is approved, the repositories act as a single source of data, avoiding the need for data flows from individual clinical systems.
The development of the infrastructure required the development of a ‘candidate data set’ for each therapeutic area, as a core list of data points collected in the course of routine care that would have value also in translational research. Each institution then set out to determine which information systems, within their organisation, could be used to populate each of the candidate data sets: this was termed the ‘data exploration exercise’.
The results of the exercise informed further development of the data sets, and data flows were established. To demonstrate and evaluate the new capability, ‘exemplar research studies’ were initiated in each therapeutic area, using data from all five institutions.
Each institution had a different combination of existing systems, a different approach to data integration, and a different strategy for informatics development. It was not feasible or appropriate to develop a common ‘data repository’ product for installation. Instead, a set of data models was distributed, and each institution worked to implement these using their own messaging, business intelligence, or data warehousing technologies.
None of the institutions had the capability to provide documentation on the provenance and interpretation of their data in any standard, computable format; the model or metadata aspect of the infrastructure was entirely new. It was this that drove—and continues to drive—the development of a comprehensive model catalogue application.
At the start of the project, teams of clinical researchers and leading scientists were given the responsibility of creating the candidate data sets for each therapeutic area. They did this by exchanging spreadsheets of data definitions by email. This proved to be a slow process, and face-to-face meetings were needed before any real progress could be made.
It proved difficult to properly represent repeating sections of the dataset—corresponding to investigations or interventions that may happen more than once for the same patient. Researchers resorted to Visio diagrams to try to explain how observations fitted into clinical pathways or workflows—and discovered that there were significant differences between pathways for the same disease at different institutions.
In one therapeutic area, these differences had a profound effect upon the interpretation of certain observations, and the candidate dataset was extended to include additional information on the pathway. Due to the complexity of the pathways involved, this was a time-consuming and error-prone process. Furthermore, the spreadsheets quickly became inconsistent with the Visio diagrams.
The candidate datasets were distributed to the informatics teams at the five institutions in the form of XML schemas. At first, these were created from scratch, rather than being generated. There were many requests for changes to the schemas; these proved difficult to track and coordinate.
The exploration exercise was reported by adding columns to the distributed versions of the candidate dataset spreadsheets, listing the information systems containing the data points in question, or suggested alternatives where there were significant differences due to local systems and processes.
This was despite the availability of an initial version of the model catalogue. Researchers and local informatics teams preferred to work with spreadsheets, having little or no knowledge of modelling languages such as UML and no automatic support for model creation and maintenance. It fell to the software engineering team at the coordinating centre to record the datasets and variations in the catalogue.
While it was disappointing to have the researchers still working in spreadsheets, the ability to generate XML schemas from models, and to manage relationships between data items in different models and different versions, proved invaluable. In the second phase of the project, researchers are starting to abandon the spreadsheet mode of working, and are instead maintaining the datasets as data models, in the catalogue.
4.2 Coordination of Clinical Data Acquisition
The UK Department of Health, through the NIHR and the National Health Service (NHS), is providing funding for the whole genome sequencing of blood and tissue samples from patients with cancer, rare disorders, and infectious disease. A network of regional centres is being established to collect samples and data, and to provide access to genomic medicine across the whole of the country. The funding committed to date is approximately £300m.
The results of the whole genome sequencing will be linked to detailed information on each participant: clinical and laboratory information drawn from health records, ontological statements regarding abnormal features or conditions, and further information obtained from the participant or their representatives. The information required will depend upon the nature of the disease that the patient is suffering from. For example, information on breast density is required in the case of breast cancer, but not for other diseases.
To date, 131 different diseases have been included in the sequencing programme. Each disease corresponds to a different combination of clinical and laboratory data points, a different set of ontological statements, and a different set of questions for the participant. There are, however, significant overlaps between diseases: for example, many different rare diseases will require the same information on kidney or heart function.
The modelling task is at least an order of magnitude greater than that required for the NIHR HIC, and yet candidate datasets have already been created for more than half of the diseases included. This is due partly to the availability of the model catalogue application from the start of the project, and partly to the availability, within the catalogue, of the full complement of HIC-defined data models and related data sets—including the national NHS data dictionary and the national cancer reporting datasets.
Two routes are available for the provision of data from the network of contributing centres: direct data entry into electronic case report forms, in an on-line clinical trials management system; and electronic submission of data in XML format. The intended interpretation of the data required is explained in a regularly-updated set of data manuals.
It is important that the forms used for direct data entry, the schemas used for XML submission, and the data manuals are properly synchronised. An initial approach to this, in which a single model was used as the basis for the generation of all three kinds of artefact, proved inconvenient in practice. Although the same data points were to be collected in each case, the distribution of these data points across classes and sections was different.
Accordingly, the model catalogue is used to store three different data models for each dataset: one for the generation of the forms, another for the generation of the XML schemas, and a third for the generation of the data manual. These models are semantically linked: if one is updated, then the fact that the others may now be inconsistent will be flagged to the user (see the sketch below).
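A minimal sketch of this consistency flagging, assuming a simple per-model version counter; the names are illustrative, and the catalogue's actual mechanism is richer:

```c
#include <stdio.h>

typedef struct {
    const char *name;
    int version;               /* bumped on every edit */
} Model;

/* A semantic link records which version of its target it was last
 * reviewed against; a newer target version flags possible inconsistency. */
typedef struct {
    Model *source;
    Model *target;
    int    reviewed_against;   /* target->version at the last review */
} SemanticLink;

static int needs_review(const SemanticLink *l) {
    return l->target->version > l->reviewed_against;
}

int main(void) {
    Model forms   = { "ovarian-forms",   3 };
    Model schemas = { "ovarian-schemas", 5 };
    SemanticLink link = { &forms, &schemas, 5 };

    schemas.version++;         /* the schema model is edited */
    if (needs_review(&link))
        printf("%s may now be inconsistent with %s\n",
               link.source->name, link.target->name);
    return 0;
}
```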
The same linkage is made with regard to existing reporting datasets and clinical audits. To avoid duplication of effort, the reporting datasets for the genomic medicine programme have been aligned with these activities. The existing datasets have been modelled, and updates to them will be tracked in the catalogue: again, potential inconsistencies can be flagged.
5. Related Work
The work described in this paper has evolved from the CancerGrid project [8], where an ISO/IEC 11179-compliant metadata registry was developed for curation of semantic metadata and model-driven generation of trial-specific software [5, 7]. The approach to generating forms in the CancerGrid project has been generalised significantly with the introduction of a data modelling language and a broader notion of semantic linking.
Another effort to develop an implementation of ISO/IEC 11179 is found in the US caBIG initiative [12]; however, their caCORE software development kit [11] applies model-driven development only to generate web service stubs, requiring developers to create application logic by hand, whereas our technique integrates with existing clinical Electronic Data Capture tools and workflows, such as OpenClinica [4]. Several efforts have addressed ontological representations for enabling data integration across metadata registries (MDRs). Sinaci and Erturkmen [16] describe a federated semantic metadata registry framework where Common Data Elements (CDEs) are exposed as Linked Open Data resources. Jeong et al. [10] present the Clinical Data Element Ontology (CDEO) for unified indexing and retrieval of elements across MDRs: they organise and represent CDEO concepts using SKOS. Tao et al. [17] present case studies in representing HL7 Detailed Clinical Models (DCMs) and the ISO/IEC 11179 model in the Web Ontology Language (OWL), but do not present any systematic metamodelling or language definition framework.
Ontology repositories are closely analogous to model catalogues: they provide the infrastructure for storing, interlinking, querying, versioning, and visualising ontologies. Relationships capturing the alignments and mappings between ontologies are also recorded, allowing easy navigation. Linked Open Vocabularies [2] provides a service for discovering vocabularies and ontologies published following the principles of linked data.
In the Model Driven Health Tools (MDHT) [3] project, the HL7 Clinical Document Architecture (CDA) standard [9] for managing patient records is implemented using Eclipse UML tools [1]. In principle, this is similar to our Model Catalogue approach, where the CDA metadata can be represented and implementations derived. However, MDHT supports only the CDA standard, whereas the Model Catalogue can interoperate with any metadata standard. The CDA standards are large and complex: Scott and Worden [15] advocate a model-driven approach to simplify the HL7 CDA.
6. Conclusion
The experience of applying the data model language, the model catalogue, and the associated generation tools in the context of clinical research informatics has led to the following suggestions.
A data dictionary is not enough. A simple, flat list of data definitions does not support re-use at scale: it requires the user to place all of the contextual information into the definition of each data item, and militates against the automatic generation and application of definitions. Instead, a compositional approach is required, in which data elements are defined in explicit context.
A catalogue is not enough. The models in the catalogue must be linked to implementations, and to each other, with a considerable degree of automatic support. If the models are out of sync with the implementations, and with the data, then their value is sharply diminished. If you are going to manage data at scale, you need a data model-driven approach.
The tools must be usable by domain experts. To have the processes of model creation and maintenance mediated by software engineers is problematic: there may be misunderstandings regarding interpretation, but—more importantly—there are not enough software engineers to go around. An appropriate user interface, that closely matches the intuition and expectations of domain experts, is essential.
There will be more models than you think. Different models will be required for different types of implementation, and—in any research domain, at least—data models will be constantly evolving, with data being collected against different versions.
Intelligent, automatic support is essential. The information content of precise data models is considerable, and there may be complex dependencies between data concepts and constraints. A considerable degree of automation is required if users are to cope with this complexity.
The model catalogue and the associated toolset should, as far as possible, automatically: create or propose links, including classifications; manage model versioning, and the consequences for linked data concepts; and manage dependencies, including those between different models for the same dataset, targeted at different implementation platforms.
This should come as no surprise. If, as Warmer and Kleppe [18] suggest, the model-driven approach is about “using modelling languages as programming languages rather than merely as design languages” then we should aim to provide modellers with the same kind of support that programmers have come to expect from modern integrated development environments.
Acknowledgments
We would like to acknowledge the support of the National Institute for Health Research (NIHR), and in particular that of the NIHR Health Informatics Collaborative, and the financial support of the Medical Research Council. We would like to acknowledge also the support of the European Union Horizon 2020 Project ALIGNED, Project Number 644055. We are very grateful for the contributions made by colleagues at the University of Oxford: in particular, Steve Harris and Charles Crichton.
References
Model Checking the Time to Reach Agreement*
MARTIJN HENDRIKS
Institute for Computing and Information Sciences,
Radboud University Nijmegen, The Netherlands
M.Hendriks@cs.ru.nl
Abstract
The timed automaton framework of Alur and Dill is a natural choice for the specification of partially synchronous distributed systems. The past has shown, however, that verification of these systems by model checking usually is very difficult. Therefore, model checking techniques have thus far not really been used for their design, even though these techniques are widely used in other areas, e.g., hardware verification. The present paper demonstrates that the revolutionary development of both the usability and the efficiency of model checking tools may change this. It is shown that a complex partially synchronous distributed algorithm can easily be modeled with the UPPAAL model checker, and that it is possible to analyze some interesting and non-trivial instances with reasonable computational resources. Clearly, such analysis results can greatly support the design of these systems: model checking tools may provide valuable early feedback on subtle design errors and hint at system invariants that can subsequently be used in the general correctness proof.
Keywords: Distributed systems, agreement algorithm, partially synchronous model, model checking, timed automata.
1 Introduction
Distributed systems are in general hard to understand and to reason about due to their complexity and inherent non-determinism. That is why formal models play an important role in the design of these systems: one can specify the system and its properties in an unambiguous and precise way, and it enables a formal correctness proof. The I/O-automata of Lynch and Tuttle provide a general formal modeling framework for distributed systems [21, 20, 19]. Although the models and proofs in this framework can be very general (e.g., parameterized by the number of processes or the network topology), the proofs require – as usual – a lot of human effort.
Model checking provides a more automated, albeit less general way of proving the correctness of systems [13]. The approach requires the construction of a
model of the system and the specification of its correctness properties. A model checker then automatically computes whether the model satisfies the properties or not. The power of model checkers is that they are relatively easy to use compared to manual verification techniques or theorem provers, but they also have some clear drawbacks. In general, only instances of the system can be verified (e.g., the algorithm can be verified for 5 processes, but not for arbitrary $n$). Furthermore, model checking suffers from the state space explosion problem: the number of states grows exponentially in the number of system components. This often renders the verification of realistic systems impossible.
Model checkers still can be useful for the design of distributed systems. Consider the following approach. First, one specifies the system in the language of the model checker. This can reveal inconsistencies and incompletenesses. Second, the model can be simulated using the model checker. This also may reveal design errors in an early stage of the design phase. When one is satisfied with the model, then, as a third step, one can try to verify some interesting properties for small instances of the system. Finally, if one has enough faith in the correctness of the system, then one can start with the construction of a general proof (either by hand or with a theorem prover), which in general is a very time-consuming task. The intuitions that one has gotten during the work with the model checker (and the invariants that were possibly obtained for some instances in the third step) can, however, make the construction of the proof less cumbersome. The work of [18] and [22] demonstrates the feasibility and effectiveness of the first three steps of this approach. In both papers, the SPIN model checker is used to give feedback on IEEE standards that at that time were still under development.
A class of distributed systems for which model checking has yielded no apparent successes is the subclass of partially synchronous systems in which (i) message delay is bounded by some constant, and (ii) many messages can be in transit simultaneously. In the partially synchronous model, system components have some information about timing, although the information might not be exact. It lies between the extremes of the synchronous model (the processes take steps simultaneously) on one end and the asynchronous model (the processes take steps in an arbitrary order and at arbitrary relative speeds) on the other end [19]. The timed automata framework of Alur and Dill [2] is a natural choice for the specification of such systems (as is the Timed I/O-automaton framework [17], which, however, does not support model checking). Verification of these systems by model checking is often very difficult since every message needs its own clock to model the bounds on message delivery time. This is disastrous since the state space of a timed automaton grows exponentially in the number of clocks. Moreover, if messages may get lost or message delivery is unordered, then on top of that also the discrete part of the model explodes rapidly.
Many realistic algorithms and protocols fall into the class of “difficult” partially synchronous systems. Examples include the sliding window protocol for the reliable transmission of data over unreliable channels [23, 11], a protocol to monitor the presence of network nodes [9, 16], and the ZeroConf protocol whose purpose is to dynamically configure IPv4 link-local addresses [10, 24]. Furthermore, the agreement algorithm described in [3] (see also Chapter 25 of [19]) is also a partially synchronous system that is difficult from the perspective of model checking. The analysis of this algorithm with the UPPAAL model checker is the subject of the present paper. It is shown that some non-trivial instances can be formally verified (which has not been done before to the author’s knowledge). Our results provide evidence that the class of partially synchronous distributed systems, which is an important class since many realistic algorithms and protocols fall into it, is within reach of the current state-of-the-art model checking tools.
The remainder of this paper is structured as follows. The timed automaton framework and the UPPAAL model checker are briefly introduced in Section 2. Section 3 then presents an informal description of the distributed algorithm of [3], which consists of two parts: a timeout task and a main task. Section 4 describes the UPPAAL model that is used to verify the timeout task. A model for the parallel composition of the timeout task and the main task is proposed in Section 5. Two properties of the timeout task that have been verified in Section 4 are used to reduce the complexity of this latter model. Finally, Section 6 discusses the present work. The UPPAAL models from this paper are available at http://www.cs.ru.nl/ita/publications/papers/martijnh/.
2 Timed Automata
This section provides a very brief overview of timed automata and their core semantics, and of the UPPAAL tool, which is a model checker for timed automata. The reader is referred to [6] and [8] for more details.
Timed automata are finite automata that are extended with real valued clock variables [2]. Let $X$ be a set of clock variables, then the set $\Phi(X)$ of clock constraints $\phi$ is defined by the grammar
$\phi ::= x \sim c | \phi_1 \land \phi_2$, where $x \in X$, $c \in \mathbb{N}$, and $\sim \in \{<, \leq, =, \geq, >\}$. A clock interpretation $\nu$ for a set $X$ is a mapping from $X$ to $\mathbb{R}^+$, where $\mathbb{R}^+$ denotes the set of non-negative real numbers. A clock interpretation $\nu$ for $X$ satisfies a clock constraint $\phi$ over $X$, denoted by $\nu \models \phi$, if and only if $\phi$ evaluates to true with the values for the clocks given by $\nu$. For $\delta \in \mathbb{R}^+$, $\nu + \delta$ denotes the clock interpretation which maps every clock $x$ to the value $\nu(x) + \delta$. For a set $Y \subseteq X$, $\nu[Y := 0]$ denotes the clock interpretation for $X$ which assigns 0 to each $x \in Y$ and agrees with $\nu$ over the rest of the clocks. We let $\Gamma(X)$ denote the set of all clock interpretations for $X$.
A timed automaton then is defined by a tuple $(L, l^0, \Sigma, X, I, E)$, where $L$ is a finite set of locations, $l^0 \in L$ is the initial location, $\Sigma$ is a finite set of labels, $X$ is a finite set of clocks, $I$ is a mapping that labels each location $l \in L$ with some clock constraint in $\Phi(X)$ (the location invariant) and $E \subseteq L \times \Sigma \times \Phi(X) \times 2^X \times L$ is a set of edges. An edge $(l, a, \phi, \lambda, l')$ represents a transition from location $l$ to location $l'$ on the symbol $a$. The clock constraint $\phi$ specifies when the edge is enabled and the set $\lambda \subseteq X$ gives the clocks to be reset with this edge. The semantics of a timed automaton $(L, l^0, \Sigma, X, I, E)$ is defined by associating a transition system with it. A state is a pair $(l, \nu)$, where $l \in L$, and $\nu \in \Gamma(X)$ such that $\nu \models I(l)$. The initial state is $(l^0, \nu^0)$, where $\nu^0(x) = 0$ for all $x \in X$. There are two types of transitions (let $\delta \in \mathbb{R}^+$ and let $a \in \Sigma$). First, $((l, \nu), (l, \nu + \delta))$ is a $\delta$-delay transition iff
\[ \nu + \delta' \models I(l) \text{ for all } 0 \leq \delta' \leq \delta. \] Second, \(((l, \nu), (l', \nu'))\) is an \textit{a-action transition} iff an edge \((l, a, \phi, \lambda, l')\) exists such that \(\nu \models \phi, \nu' = \nu[\lambda := 0]\) and \(\nu' \models I(l')\). Note that location invariants can be used to specify progress, and that they can cause time deadlocks.
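To make the operations $\nu + \delta$ and $\nu[Y := 0]$ concrete, here is a small C sketch over an array-based clock interpretation. This is a didactic illustration of the semantics only, not how UPPAAL represents states (it uses symbolic zones):

```c
#include <stdio.h>

#define NUM_CLOCKS 2

/* A clock interpretation nu: one non-negative real per clock. */
typedef struct { double v[NUM_CLOCKS]; } ClockVal;

/* nu + delta: every clock advances by the same amount. */
static ClockVal delay(ClockVal nu, double delta) {
    for (int x = 0; x < NUM_CLOCKS; x++) nu.v[x] += delta;
    return nu;
}

/* nu[Y := 0]: reset the clocks in Y (given as a bitmask), keep the rest. */
static ClockVal reset(ClockVal nu, unsigned y_mask) {
    for (int x = 0; x < NUM_CLOCKS; x++)
        if (y_mask & (1u << x)) nu.v[x] = 0.0;
    return nu;
}

/* A clock constraint such as x0 <= 3 && x1 > 1, evaluated against nu. */
static int satisfies(ClockVal nu) {
    return nu.v[0] <= 3.0 && nu.v[1] > 1.0;
}

int main(void) {
    ClockVal nu = { { 0.0, 0.0 } };
    nu = delay(nu, 2.5);                   /* both clocks are now 2.5 */
    printf("guard holds: %d\n", satisfies(nu));       /* 1 */
    nu = reset(nu, 1u << 0);               /* reset clock x0 only */
    printf("x0=%.1f x1=%.1f\n", nu.v[0], nu.v[1]);    /* x0=0.0 x1=2.5 */
    return 0;
}
```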
The transition system of a timed automaton is infinite due to the real valued clocks. The region and zone constructions, however, are finite abstractions that preserve Timed Computation Tree Logic (TCTL) formulas and a subset of TCTL formulas (most notably reachability) respectively [1, 14]. This enables the application of finite state model checking techniques as implemented by UPPAAL for instance.
The UPPAAL modeling language extends the basic timed automata as defined above with bounded integer variables and binary blocking (CCS style) synchronization. Systems are modeled as a set of communicating timed automata. The UPPAAL tool supports simulation of the model and the verification of reachability and invariant properties. The question whether a state satisfying \(\phi\) is reachable can be formalized as \(\text{EF}(\phi)\). The question whether \(\phi\) holds for all reachable states is formalized as \(\text{AG}(\phi)\). If such a property is not satisfied, then UPPAAL can give a run that proves this. This run can be replayed in the simulator, which is very useful for debugging purposes.
### 3 Description of the Algorithm
This section presents an informal description of an algorithm that solves the problem of \textit{fault-tolerant distributed agreement} in a partially synchronous setting [3] (see also Chapter 25 of [19]). A system of $n$ processes, denoted by $p_1, ..., p_n$, is considered, where each process is given an input value and at most $f$ processes may fail. Each process that does not fail must eventually (termination) choose a decision value such that no two processes decide differently (agreement), and if any process decides for $v$, then this has been the input value of some process (validity). The process’s computation steps are atomic and take no time, and two consecutive computation steps of a non-faulty process are separated by $c_1$ to $c_2$ time units. The processes can communicate by sending messages to each other. The message delay is bounded by $d$ time units, and message delivery is unordered. Furthermore, messages can neither get lost nor be duplicated. The constant $D$ is defined as $d + c_2$. As mentioned above, $f$ out of the $n$ processes may fail. A failure may occur at any time, and if a process fails at some point, then an arbitrary subset of the messages that would have been sent in the next computation step is sent. No further messages are sent by a failed process. It is convenient to regard the algorithm, which is run by every process, as the merge of a \textit{timeout task} and a \textit{main task}, such that a process’s computation step consists of a step of the timeout task followed by a step of the main task.
The goal of the timeout task is to maintain knowledge of the running state of all other processes. This is achieved by broadcasting an $(\text{alive}, i)$ message every computation step. If process $p_i$ has run for sufficiently many computation steps without receiving an $(\text{alive}, j)$ message (from process $p_j$), then it assumes that $p_j$ halted either by decision or by failure. Figure 1 contains the description of a computation step of the timeout task of process $p_i$ in precondition-effect style. The boolean variable $blocked$ is used by the main task to stop the timeout task. Initially, this boolean is $false$. It is set to $true$ if the process decides. The other state components are a set $halted \subseteq \{1, ..., n\}$, initially $\emptyset$, and for every $j \in \{1, ..., n\}$ a counter $counter(j)$, initially set to $-1$. Additionally, every process has a message buffer $buff$ (a set), initially $\emptyset$.
**Precondition:**
$\neg blocked$
**Effect:**
```plaintext
broadcast((alive,i))
for j := 1 to n do
    counter(j) := counter(j) + 1
    if (alive,j) in buff then
        remove (alive,j) from buff
        counter(j) := 0
    else if counter(j) >= floor(D/c1) + 1 then
        add j to halted
od
```
Figure 1: The timeout task for process $p_i$.
Two properties of the timeout task have been proven in [3].
**A1** If any $p_i$ adds $j$ to $halted$ at time $t$, then $p_j$ halts, and every message sent from $p_j$ to $p_i$ is delivered strictly before time $t$.
**A2** If $p_j$ halts at time $t$, then every $p_i$ either halts or adds $j$ to $halted$ by time $t + T$, where $T = D + c_2 \cdot (\lfloor \frac{D}{c_1} \rfloor + 1)$.
Figure 2 contains the description of a computation step of the main task of process $p_i$ in precondition-effect style. Apart from the input value $v_i$ and the state components used by the timeout task, there is one additional state component, namely the rounds counter $r$, initially zero. The input values are assumed to be either zero or one for simplicity.
Three main results that are obtained in [3] are the following.
**M1** (Agreement, Lemma 5.9 of [3]). No two processes decide on different values.
**M2** (Validity, Lemma 5.10 of [3]). If process $p_i$ decides on $v$, then $v = v_j$ for some process $p_j$.
**M3** (Termination, Theorem 5.1 of [3]). The upper bound on the time to reach agreement equals $(2f - 1)D + \max\{T, 3D\}$.
1 The message complexity of this algorithm is quite high. Recently, an alternative with an adjustable “probing load” for each node has been proposed in [9] and analyzed in [16].
2 An extension to an arbitrary input domain is discussed in [3].
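For concreteness, the derived constants can be computed directly from the parameters. The following C fragment evaluates the timeout threshold, the detection bound $T$ of A2, and the agreement bound of M3 for one of the instances verified later ($c_1 = 1$, $c_2 = 2$, $d = 3$, $f = 1$):

```c
#include <stdio.h>

int main(void) {
    int c1 = 1, c2 = 2, d = 3, f = 1;  /* one verified instance */

    int D = d + c2;                    /* D = d + c2 */
    int threshold = D / c1 + 1;        /* floor(D/c1) + 1, the timeout bound */
    int T = D + c2 * threshold;        /* A2: time to detect a halted process */
    int U = (2 * f - 1) * D            /* M3: upper bound on agreement time */
            + (T > 3 * D ? T : 3 * D);

    printf("D=%d threshold=%d T=%d U=%d\n", D, threshold, T, U);
    /* prints: D=5 threshold=6 T=17 U=22 */
    return 0;
}
```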
Precondition:
\( r = 0 \land v_i = 1 \)
Effect:
\( \text{broadcast}((0, i)) \)
\( r := 1 \)
Precondition:
\( r = 0 \land v_i = 0 \)
Effect:
\( \text{broadcast}((1, i)) \)
\( \text{decide}(0) \)
Precondition:
\( r \geq 1 \land \exists j (r, j) \in \text{buff} \)
Effect:
\( \text{broadcast}((r, i)) \)
\( r := r + 1 \)
Precondition:
\( r \geq 1 \land \forall j \notin \text{halted} \; (r - 1, j) \in \text{buff} \land \neg \exists j \, (r, j) \in \text{buff} \)
Effect:
\( \text{broadcast}((r + 1, i)) \)
\( \text{decide}(r \mod 2) \)
Figure 2: The main task for process \( p_i \).
4 Verification of the Timeout Task
4.1 Modeling the Timeout Task
Note that every process runs the same algorithm, and that the timeout parts of different processes do not interfere with each other. Therefore, only two processes are considered, say \( p_i \) and \( p_j \). By the same argument, only one direction of the timeout task is considered: \( p_i \) (Observer) keeps track of the running state of \( p_j \) (Process). As required by the algorithm, Process broadcasts an alive message at each computation step. This action is modeled by a \( b \)-synchronization, which activates an instance of the broadcast template, shown in Figure 3. This template is parameterized with a constant \( id \) in order to give each instance a unique identifier. Clearly, the UPPAAL model must ensure output enabledness of Process: it must be able to broadcast the alive message when it wants to. Since the maximal number of simultaneous broadcasts equals \( \left\lfloor \frac{d}{c_1} \right\rfloor + 2 \), this many instances of the broadcast template must be present in the model. The guard \( \text{turn}() \) and the assignments to \( \text{active}[id] \) implement a trick to reduce the reachable state space by exploiting the symmetry among the broadcast instances. After a \( b \)-synchronization, a broadcast automaton may spend at most \( d \) time units in location sending, which is modeled using the local clock \( x \). The actual message delivery is modeled by the assignment alive=true on the transition back to idle. The reset of the global clock \( t \) is used for the verification of property \( A_1 \).
Figure 3: The broadcast template.
Figure 4: The Process automaton.
\[^3\] The next release of UPPAAL will hopefully support symmetry reduction, which can automatically exploit the symmetry among broadcast automata [13].
Figure 4 shows the UPPAAL automaton of the merge of the timeout task and abstract main task of Process (the only functionality of the main task is to halt). It has one local clock $x$ to keep track of the time between two consecutive computation steps. The Process automaton must spend exactly $c_2$ time units in the initial location $init$ before it takes the transition to location $comp$ (the reason for this is explained below). It then immediately either fails or does a computation step. Failure of Process is modeled by the pair of edges to halted, which models the non-deterministic choice of the subset of messages to send. The computation step is modeled by the self-loop and by the upper transition to halted (a decision transition that blocks the timeout task). Note that $x$ is reset on every edge to halted for verification purposes.
```c
/* One receiving step of the timeout task (UPPAAL's C-like syntax):
   count computation steps since the last alive message, and conclude
   that the observed process has halted once the counter reaches the
   timeout threshold floor(D/c1) + 1 from Figure 1. */
void update ()
{
    cnt++;
    if (alive)
    {
        alive = false;   /* consume the pending alive message */
        cnt = 0;
    }
    has_halted = cnt >= (D/c1)+1;
}
```
Figure 6: The update function of the Observer automaton.
Figure 5 shows the Observer automaton, which is the composition of an abstract main task (whose only purpose, again, is to halt) and the “receiving part” of the timeout task. It has a local integer variable $cnt$, initialized to $-1$, and a local clock $x$. Furthermore, the boolean $has\_halted$ models whether Process is in Observer's $halted$ set.
The Observer automaton must first spend $c_2$ time units in the initial location before taking the edge to location $comp$. Then, it must immediately either do a computation step or fail. The computation step is modeled by the self-loop and by the upper transition to halted. The assignment $update()$ updates the variables $cnt$, $has\_halted$ and $alive$ as specified in Figure 6. Failure is modeled by the lower edge to halted.
Both the Observer automaton and the Process automaton must first spend $c_2$ time units in their initial location. This is a modeling trick to fulfill the requirement from [3] that “every process has a computation or failure event at time 0”. I.e., our model starts at time $-c_2$. (If UPPAAL allowed the initialization of a clock to an arbitrary natural number, then both initial locations could be removed.)
---
A straightforward model contains a third edge to halted with the guard $x \geq c_1$, the synchronization $!b$, and the reset $x = 0$. Such an edge is, however, “covered” by the present upper edge to halted and can therefore be left out.
4.2 Verifying the Timeout Task
Property $A_1$ is translated to the following invariant property of the UPPAAL model (a broadcast automaton with identifier $i$ is denoted by $b_i$):
\[
AG \left( \text{has\_halted} \rightarrow (\text{Process.halted} \land \forall_i b_i.idle \land t > 0) \right)
\] (1)
The state property $\forall_i b_i.idle \land t > 0$ ensures that all messages from Process to Observer are delivered strictly before the conclusion of Observer that Process halted. Property $A_2$ is translated as follows:
\[
AG \left( (\text{Process.halted} \land \text{Process.x} > T) \rightarrow (\text{Observer.halted} \lor \text{has\_halted}) \right)
\] (2)
The branching-time nature of $A_2$ is captured by this invariance property due to the structure of our model: $\text{Process.x}$ measures the time that has elapsed since Process arrived in the location halted.
Properties (1) and (2) have been verified for the following parameter values:
- $c_1 = 1$, $c_2 = 1$ and $d \in \{0, 1, 2, 3, 5, 10\}$.
- $c_1 = 1$, $c_2 = 2$ and $d \in \{0, 1, 2, 3, 4, 5, 6\}$, and
- $c_1 = 9$, $c_2 = 10$ and $d \in \{5, 9, 10, 11, 15, 20, 50\}$.
5 Verification of the Algorithm
The UPPAAL model of the parallel composition of the main task and the timeout task, which is used to verify properties $M_1$–$M_3$, is presented in this section. It is assumed that every process receives an input by time zero (synchronous start), since otherwise the state space becomes too big to handle interesting instances. If the timeout task is modeled explicitly, then many alive messages must be sent every computation step, which results in an overly complex model. Using properties $A_1$ and $A_2$, however, the explicit sending of alive messages can be abstracted away.
5.1 Modeling the Algorithm
Figure 7 shows the UPPAAL template of the behavior of the algorithm. This template is parameterized with two constants, namely its unique identifier $id$, and a boolean $may\_Fail$ which indicates whether this process may fail.\(^5\)
Similar to the model of the timeout task, a process first waits $c_2$ time units in its initial location. Then, it non-deterministically chooses an input value on an edge to $wait$. The global clock $t$ is used to measure the running time of the
\(^5\)Again, this is a trick that exploits the symmetry of processes to reduce the reachable state space.
algorithm. Then it either starts a computation step or fails. A computation step first activates the timeout automaton of the process, which is described below, on the edge to \texttt{timeout}. When the timeout automaton finishes (it may have updated the \textit{halted} set), the transition to \texttt{main} is taken. Then there are five possibilities: one of the four preconditions of the main task transitions is satisfied (note that they are all mutually exclusive), or none of them is. In the first case, the specified actions are taken; in the second case, nothing is done. The committed locations (those with a “C” inside) specify that a computation step is atomic and that it takes no time (if a committed location is active, then no delay is allowed and the next action transition must involve a committed component). Note that broadcasting the message \((m, i)\) is achieved by assigning \(m\) to \(\text{bv}[id]\) on an edge that synchronizes over \(\text{b}[id]\). Figure 8 shows the functions that implement the preconditions of the four transitions of the main task (see also Figure 2).
Figure 7: The process template.
```c
/* r = 0 and input value 1: broadcast (0,i) and enter round 1 */
bool pre1 ()
{
    return r==0 && v[id]==1;
}

/* r = 0 and input value 0: broadcast (1,i) and decide 0 */
bool pre2 ()
{
    return r==0 && v[id]==0;
}

/* some message of the current round r has been received */
bool pre3 ()
{
    int j;
    if (r<=0)
        return false;
    for (j=0; j<N; j++)
        if (buff[id][r][j])
            return true;
    return false;
}

/* every non-halted process reported round r-1, and none reported round r */
bool pre4 ()
{
    int j;
    if (r<=0 || pre3())
        return false;
    for (j=0; j<N; j++)
        if (!halted[id][j] && !buff[id][r-1][j])
            return false;
    return true;
}
```
Figure 8: The preconditions for the four transitions of the main task.
A failure is modeled by the edge from `wait` to `update`. This edge is only enabled if less than \( f \) failures already have occurred. The \( \text{failValue()} \) function computes the value that would have been broadcast during the next computation step.
In location `update` the process has halted either by decision or by failure. It can stay there for a maximum of \( T \) time units and it provides a `stop[id]`-synchronization. This is used for the abstraction of the timeout task, which is explained below. When all other processes have been informed that this process has halted (`allInformed()` returns `true`), then the transition to location `finished` is enabled.
Similar to the model of the timeout task, the broadcasts are modeled by instances of the broadcast template which is shown in Figure 9.

The template is parameterized with two constants, namely `id`, the identifier of the process automaton this broadcast automaton belongs to, and `bid`, an identifier that is unique among the other broadcast automata of process automaton `id`. Note that this template is tailored to a model with \( n = 3 \) (there are three self-loops from location `sending`) for reasons of efficiency.
The broadcast automaton is started with a `b[id]`-synchronization. If the value of `bv[id]` is smaller than zero, then nothing is done (this is convenient for modeling in the process template). If the value is larger than or equal to zero and \( \text{turn()} \) returns `true`\(^6\), then this broadcast automaton can start delivering the message that has been passed to it in `bv[id]`. The `shouldDeliver()` and `allDelivered()` functions ensure that it delivers all messages on time, but only if necessary. I.e., it is not useful to deliver a message to a process that already has halted, since that message is never used; it only increases the reachable state space.
Each process automaton has a separate timeout automaton that has two functions. First, it is activated at the beginning of each computation step of the process it belongs to in order to update the `halted` set of the process. Second, it serves as a test automaton to ensure that the process it belongs to is output enabled\(^7\). The timeout template is shown in Figure 10. It has one parameter, namely the constant `id`, which refers to the process it belongs to.
\(^6\)Similarly as in the model of the timeout task in the previous section, the guard \( \text{turn()} \) exploits the symmetry between the broadcast automata of a single process to reduce the reachable state space.
\(^7\)In this model, the number of necessary broadcast automata is no longer easy to determine. Therefore, an explicit check is useful.
The timeout template is tailored to \( n = 3 \) for reasons of efficiency. When it is activated, it checks for each process \( j \) whether it may add it to the halted set, and if so, it non-deterministically chooses whether to add it or not. Here properties \( A_1 \) and \( A_2 \) of the timeout task come in. The function \( \text{mayAdd()} \) checks for a given process \( j \) whether all messages from \( j \) to this process have been delivered. If not, then it may not add \( j \) to halted (property \( A_1 \)). Furthermore, the synchronization over the channel \( \text{stop}[j] \) must be enabled. Figure 7 shows that this is the case only for the \( T \) time units after \( j \) has halted (property \( A_2 \)). But if this process has not added \( j \) to halted by that time, then \( j \) cannot proceed to location finished (in that case allInformed() returns false), resulting in a time deadlock. This is exactly the case when \( T - p_i.x < c_1 - p_j.x \) for processes \( i \) and \( j \).
The second function of the timeout template is implemented by the edge to the error location. This location is reachable if the process wants to broadcast and all its broadcast automata are active already. In a correct model, the error location therefore is not reachable.
### 5.2 Verifying the Algorithm
Properties \( M_1-M_3 \) are translated as follows (where \( U \) is the upper bound on the running time of the protocol as specified before).
\[
\begin{align*}
\text{AG} \left( \forall_{i,j} \text{dec}_i \geq 0 \land \text{dec}_j \geq 0 \rightarrow \text{dec}_i = \text{dec}_j \right) \quad (3) \\
\text{AG} \left( \forall_i \text{dec}_i \geq 0 \rightarrow \exists_j \text{dec}_i = v_j \right) \quad (4) \\
\text{AG} \left( \exists_i p_i.\text{wait} \rightarrow t \leq U \right) \quad (5)
\end{align*}
\]
The following properties are health checks to ensure that (i) the processes are output enabled, and (ii) the only deadlocks in the model are those that are expected.
\[
\begin{align*}
\text{AG} \left( \neg \exists_i T_i.\text{error} \right) \quad (6) \\
\text{AG} \left( \text{deadlock} \rightarrow \left( \forall_i p_i.\text{finished} \lor \exists_{i,j} p_j.x - p_i.x > T - c_1 \right) \right) \quad (7)
\end{align*}
\]
The properties (3)–(6) have been verified (using the convex-hull approximation of UPPAAL with a breadth-first search order) for the following parameter values:
- \( n = 3, f \in \{0, 1\}, c_1 = 1, c_2 = 1, \) and \( d \in \{0, 1, 2, 3, 5, 10\} \),
- \( n = 3, f \in \{0, 1\}, c_1 = 1, c_2 = 2, \) and \( d \in \{0, 1, 2, 3, 5, 10\} \), and
- \( n = 3, f \in \{0, 1\}, c_1 = 9, c_2 = 10, \) and \( d \in \{5, 9, 10, 11, 15, 20, 50\} \).
The verification of any instance needs at most 1.5 GB of memory and at most 30 minutes of time on a regular desktop computer. Property (7) has been verified for a subset of the above parameter values, namely for the models with the three smallest values for \( d \) in each item. This property is more difficult to model check since it involves the deadlock state property, which disables UPPAAL’s LU-abstraction algorithm [5] (a less efficient one is used instead), and which is computationally quite complex due to the symbolic representation of states.
6 Conclusions
Despite the fact that model checkers are in general quite easy to use (in the sense that their learning curve is not as steep as, for instance, that of theorem provers), making a good model still is difficult. The algorithm that has been analyzed in this paper, for instance, can quite easily be modeled “literally”. The message complexity then is huge due to the many broadcasts of alive messages, with the result that model checking interesting instances becomes impossible. This has been solved by a non-trivial abstraction of the timeout task. Ideally, of course, model checkers could handle even such “naive” models. Fortunately, much research still is aimed at improving these tools. For instance, the UPPAAL model checker is getting more and more mature, with respect to both usability and efficiency. An example of the former is the recent addition of a C-like language. This makes the modeling of the agreement protocol much easier, and makes the model more efficient. A loop over an array, as for instance used in the \( \text{pre3}() \) and \( \text{pre4}() \) functions shown in Figure 8, can now be encoded with a C-like function instead of using a cycle of committed locations and/or an auxiliary variable. This saves the allocation and deallocation of intermediate states and possibly a state variable. Other examples of efficiency improvements of UPPAAL are enhancements like symmetry reduction [15] and the sweep line method [12], which are planned to be added to UPPAAL soon. Especially symmetry reduction would greatly benefit distributed systems, which often exhibit full symmetry. Furthermore, current research also focuses on distributing UPPAAL, which may even give a super-linear speed-up [7, 4].
Summarizing, model checking tools are becoming more and more applicable to the design of distributed systems due to the steady increase in their usability and efficiency. They may provide valuable early feedback on subtle design errors and hint at system invariants that can subsequently be used in the general correctness proof that is constructed either by hand or with a theorem prover. The present paper has demonstrated that model checking is now feasible even for some small yet interesting instances of an agreement algorithm which thus far was considered
out of reach for model checking technology. This result shows that the class of partially synchronous systems is within reach of the current state-of-the-art model checking tools.
Acknowledgements. The author thanks Frits Vaandrager and Jozef Hooman for valuable discussions and comments on earlier versions of the present paper.
References
Lecture 10: Program Development versus Execution Environment
CSE 30: Computer Organization and Systems Programming
Winter 2010
Rajesh Gupta / Ryan Kastner
Dept. of Computer Science and Engineering
University of California, San Diego
C Memory Management
- C has 3 pools of memory
- **Static storage**: global variable storage, basically permanent, entire program run
- **The Stack**: local variable storage, parameters, return address
("stack frame" in C)
- **The Heap** (dynamic storage): data lives until deallocated by programmer
- C requires the programmer to know where objects live in memory; otherwise programs don't work as expected
Normal C Memory Management
- A program’s *address space* contains 4 regions:
- **stack**: local variables, grows downward
- **heap**: space requested for pointers via `malloc()`; resizes dynamically, grows upward
- **static data**: variables declared outside main, does not grow or shrink
- **code**: loaded when program starts, does not change
For now, assume the OS somehow prevents accesses to the region between stack and heap; virtual memory (covered later) makes this precise
Intel 80x86 C Memory Management
- A C program’s 80x86 *address space*:
- **heap**: space requested for pointers via `malloc()`; resizes dynamically, grows upward
- **static data**: variables declared outside main, does not grow or shrink
- **code**: loaded when program starts, does not change
- **stack**: local variables, grows downward
(address-space diagram: segments laid out starting near 0x08000000)
Memory Management
- How do we manage memory?
- **Code, Static storage are easy:**
they never grow or shrink
- **Stack space is also easy:**
stack frames are created and destroyed in last-in, first-out (LIFO) order
- **Managing the heap is tricky:**
memory can be allocated / deallocated at any time
Where allocated?
```c
int myGlobal;                      /* static storage */
int main(void) { int temp; }       /* temp: on the stack, freed at return */
```
- Structure declaration **does not** allocate memory
- Variable declaration **does** allocate memory
- If declare **outside** a procedure, allocated in static storage
- If declare **inside** procedure, allocated on the stack and **freed when procedure returns**
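A minimal sketch (not from the slides) showing all three pools in one program:

```c
#include <stdlib.h>

int myGlobal;                      /* static storage: whole program run */

int main(void) {
    int temp = 42;                 /* stack: freed when main returns */
    int *p = malloc(sizeof *p);    /* heap: lives until free() */
    if (p != NULL) {
        *p = temp + myGlobal;
        free(p);                   /* heap memory must be freed explicitly */
    }
    return 0;
}
```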
The Stack
- Stack frame includes:
- Return address
- Parameters
- Space for other local variables
- Stack frames are contiguous blocks of memory; the stack pointer tells where the top stack frame is
- When a procedure ends, its stack frame is tossed off the stack, freeing memory for future stack frames
Stack
- Last In, First Out (LIFO) memory usage
```c
void a(int m); void b(int n); void c(int o); void d(int p);

int main() { a(0); return 0; }
void a(int m) { b(1); }   /* each call pushes a new stack frame */
void b(int n) { c(2); }
void c(int o) { d(3); }
void d(int p) { }         /* deepest point: four frames on the stack */
```
(figure: the stack pointer moves down one frame at each call, from main through d)
Who cares about stack management?
- Pointers in C allow access to deallocated memory, leading to hard-to-find bugs!
```c
int *ptr() {
    int y;
    y = 3;
    return &y;   /* bug: address of a local that dies when ptr returns */
}
main() {
    int *stackAddr, content;
    stackAddr = ptr();
    content = *stackAddr;
    printf("%d", content); /* 3: the dead frame happens to be intact */
    content = *stackAddr;
    printf("%d", content); /* 13451514: the printf call reused that memory */
}
```
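One hedged fix for the bug above is to hand back heap memory instead of a stack address; a sketch:

```c
#include <stdio.h>
#include <stdlib.h>

/* Heap memory outlives the call; the caller must free it. */
int *ptr(void) {
    int *y = malloc(sizeof *y);
    *y = 3;
    return y;                /* safe: heap, not stack */
}

int main(void) {
    int *addr = ptr();
    printf("%d\n", *addr);   /* 3 */
    printf("%d\n", *addr);   /* still 3 */
    free(addr);
    return 0;
}
```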
The Heap (Dynamic memory)
- Large pool of memory, **not** allocated in contiguous order
- back-to-back requests for heap memory could return blocks that are very far apart
- where C++/Java `new` command allocates memory
- In C, specify number of **bytes** of memory explicitly to allocate item
```c
int *ptr;
ptr = (int *) malloc(4);
/* malloc returns type (void *),
so need to cast to right type */
```
- `malloc()`: Allocates raw, uninitialized memory from heap
Dynamic Memory Allocation
- C has operator `sizeof()` which gives size in bytes (of type or variable)
- Assuming the size of objects is misleading and bad style, so use `sizeof(type)`
- Many years ago an `int` was 16 bits, and programs that assumed it was 2 bytes broke when `int` grew
Dynamic Memory Allocation
- To allocate room for something new to point to, use `malloc()` (with the help of a typecast and `sizeof`):
```c
ptr = (int *) malloc (sizeof(int));
```
- Now, `ptr` points to a space somewhere in memory of `sizeof(int)` bytes.
- `(int *)` simply tells the compiler what will go into that space (called a typecast).
- `malloc` is almost never used for 1 variable
```c
ptr = (int *) malloc (n*sizeof(int));
```
- This allocates an array of `n` integers.
Dynamic Memory Allocation
- Once `malloc()` is called, the memory location might contain anything, so don’t use it until you’ve set its value.
- After dynamically allocating space, we must dynamically free it:
```c
free(ptr);
```
- Use this command to clean up.
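Putting `malloc()`, `sizeof`, and `free()` together, a minimal sketch (the error check is good practice the slides omit):

```c
#include <stdio.h>
#include <stdlib.h>

int main(void) {
    int n = 10;
    int *a = (int *) malloc(n * sizeof(int)); /* array of n ints */
    if (a == NULL) return 1;      /* malloc can fail: always check */
    for (int i = 0; i < n; i++)
        a[i] = i * i;             /* set values before using them */
    printf("%d\n", a[n - 1]);     /* 81 */
    free(a);                      /* return the block to the heap */
    return 0;
}
```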
In general, we interpret a high-level language when efficiency is not critical, or translate it to a lower-level language to improve performance.
Interpretation vs. Translation
- How do we run a program written in a source language?
- **Interpreter**: Directly executes a program in the source language
- **Translator**: Converts a program from the source language to an equivalent program in another language
Interpretation
- MIPS program `foo.m` is executed directly by MARS (an interpreter).
Translation
- C program `foo.c` is translated by the C compiler (gcc) into an executable (machine language program) `a.out`, which runs on the hardware.
- The C compiler is a translator from C to machine language.
Interpretation
- Any good reason to interpret machine language in software?
- MARS – useful for learning / debugging
- Apple Macintosh conversion
- Switched from Motorola 680x0 instruction architecture to PowerPC.
- Could require all programs to be re-translated from high level language
- Instead, let executables contain old and/or new machine code, interpret old code in software if necessary
Interpretation vs. Translation
- Easier to write interpreter
- Interpreter closer to high-level, so gives better error messages (e.g., MARS)
- Translator reaction: add extra information to help debugging (line numbers, names)
- Interpreter is slower (10x?) but its code is smaller (1.5x to 2x?)
- Interpreter provides instruction set independence: run on any machine
- When HP switched to a VLIW processor, instead of retranslating all software, executables could contain old and/or new machine code, with old code interpreted in software if necessary
Steps to Running a Program
C program `foo.c` ⇒ Compiler ⇒ Assembly program `foo.s` ⇒ Assembler ⇒ Object (machine language module) `foo.o` ⇒ Linker (with `lib.o`) ⇒ Executable (machine language program) `a.out` ⇒ Loader ⇒ Memory
Compiler
❑ Input: High-Level Language Code (e.g., C, Java such as foo.c, foo.java)
❑ Output: Assembly Language Code (e.g., foo.s for MIPS)
❑ Note: Output *may* contain pseudoinstructions
❑ **Pseudoinstructions**: instructions that the assembler understands but that are not in the machine language. For example:
❑ `move $s1, $s2` = `or $s1, $s2, $zero`
Where Are We Now?
C program `foo.c` ⇒ Compiler ⇒ Assembly program `foo.s` ⇒ **Assembler** ⇒ Object (machine language module) `foo.o` ⇒ Linker (with `lib.o`) ⇒ Executable (machine language program) `a.out` ⇒ Loader ⇒ Memory
Assembler
- Input: Assembly Language Code (e.g., foo.s for MIPS)
- Output: Object Code, information tables (e.g., foo.o for MIPS)
- Reads and Uses Directives
- Replace Pseudoinstructions
- Produce Machine Language
- Creates Object File
Assembler Directives (p. A–51 to A–53)
- Give directions to assembler, but do not produce machine instructions
- `.text`: Subsequent items put in user text segment
- `.data`: Subsequent items put in user data segment
- `.globl sym`: declares `sym` global and can be referenced from other files
- `.asciiz str`: Store the string `str` in memory and null-terminate it
- `.word w1...wn`: Store the `n` 32-bit quantities in successive memory words
### Pseudoinstruction Replacement
- Assembler treats convenient variations of machine language instructions as if they were real instructions.
<table>
<thead>
<tr>
<th>Pseudo:</th>
<th>Real:</th>
</tr>
</thead>
<tbody>
<tr>
<td>subu $sp,$sp,32</td>
<td>addiu $sp,$sp,-32</td>
</tr>
<tr>
<td>sd $a0, 32($sp)</td>
<td>sw $a0, 32($sp)<br>sw $a1, 36($sp)</td>
</tr>
<tr>
<td>mul $t7,$t6,$t5</td>
<td>mult $t6,$t5<br>mflo $t7</td>
</tr>
<tr>
<td>addu $t0,$t6,1</td>
<td>addiu $t0,$t6,1</td>
</tr>
<tr>
<td>ble $t0,100,loop</td>
<td>slti $at,$t0,101<br>bne $at,$0,loop</td>
</tr>
<tr>
<td>la $a0, str</td>
<td>lui $at,left(str)<br>ori $a0,$at,right(str)</td>
</tr>
</tbody>
</table>
Producing Machine Language
- Simple Case
- Arithmetic, Logical, Shifts, and so on
- All necessary info is within the instruction already
- What about Branches?
- PC-Relative
- So once pseudoinstructions are replaced by real ones, we know by how many instructions to branch
- So these can be handled easily
What about jumps (j and jal)?
- Jumps require an absolute address
What about references to data?
- la gets broken up into lui and ori
- These will require the full 32-bit address of the data
- These can’t be determined yet, so we create two tables...
Symbol Table
- List of “items” in this file that may be used by other files
- What are they?
- Labels: function calling
- Data: anything in the `.data` section; variables which may be accessed across files
- First Pass: record label-address pairs
- Second Pass: produce machine code
- Result: can jump to a later label without first declaring it
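A toy sketch of the two-pass idea in C (all names and sizes here are invented for illustration):

```c
#include <stdio.h>
#include <string.h>

/* Toy symbol table. */
struct sym { char name[16]; int addr; };
static struct sym table[64];
static int nsyms = 0;

static void define(const char *name, int addr) {
    strncpy(table[nsyms].name, name, 15);
    table[nsyms].addr = addr;
    nsyms++;
}

static int lookup(const char *name) {
    for (int i = 0; i < nsyms; i++)
        if (strcmp(table[i].name, name) == 0)
            return table[i].addr;
    return -1;   /* unknown: left for the relocation table */
}

int main(void) {
    /* Pass 1: scan the program, recording a label-address pair at
       each label definition (labels may be used before defined). */
    define("main", 0x00);
    define("loop", 0x18);
    /* Pass 2: emit machine code, resolving branches via lookup(). */
    printf("branch target loop = 0x%02x\n", lookup("loop"));
    return 0;
}
```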
Relocation Table
- List of “items” for which this file needs the address
- What are they?
- Any label jumped to: j or jal
- internal
- external (including lib files)
- Any piece of data
- such as the la instruction
Object File Format
- **object file header**: size and position of the other pieces of the object file
- **text segment**: the machine code
- **data segment**: binary representation of the data in the source file
- **relocation information**: identifies lines of code that need to be “handled”
- **symbol table**: list of this file’s labels and data that can be referenced
- **debugging information**
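A hypothetical C rendering of those pieces as a header structure (field names invented; real object formats such as ELF or COFF differ in detail):

```c
#include <stdint.h>

/* Hypothetical header mirroring the pieces listed above. */
struct objfile_header {
    uint32_t text_size;      /* bytes of machine code */
    uint32_t data_size;      /* bytes of initialized data */
    uint32_t reloc_entries;  /* lines of code needing "handling" */
    uint32_t symbol_entries; /* labels/data others may reference */
    uint32_t debug_size;     /* debugging information */
};
```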
Where Are We Now?
C program `foo.c` ⇒ Compiler ⇒ Assembly program `foo.s` ⇒ Assembler ⇒ Object (machine language module) `foo.o` ⇒ **Linker** ⇒ Executable (machine language program) `a.out` ⇒ Loader ⇒ Memory
Link Editor/Linker
- **Input:** Object Code, information tables (e.g., `foo.o` for MIPS)
- **Output:** Executable Code (e.g., `a.out` for MIPS)
- Combines several object (.o) files into a single executable ("linking")
- Enable Separate Compilation of files
- Changes to one file do not require recompilation of whole program
- Red Hat Linux 7.1 source is ~30 M lines of code!
- The name "Link Editor" comes from editing the "links" in jump and link instructions
Link Editor/Linker
- Inputs: `.o` file 1 (text 1, data 1, info 1) and `.o` file 2 (text 2, data 2, info 2)
- Output `a.out`: relocated text 1, relocated text 2, relocated data 1, relocated data 2
Link Editor/Linker
- Step 1: Take text segment from each .o file and put them together.
- Step 2: Take data segment from each .o file, put them together, and concatenate this onto end of text segments.
- Step 3: Resolve References
- Go through Relocation Table and handle each entry
- That is, fill in all absolute addresses
Four Types of Addresses
- PC-Relative Addressing (beq, bne): never relocate
- Absolute Address (j, jal): always relocate
- External Reference (usually jal): always relocate
- Data Reference (often lui and ori): always (??) relocate
Resolving References
- Linker *assumes* first word of first text segment is at address 0x00000000
- Linker knows:
- length of each text and data segment
- ordering of text and data segments
- Linker calculates:
- absolute address of each label to be jumped to (internal or external) and each piece of data being referenced
Resolving References
- To resolve references:
- search for reference (data or label) in all symbol tables
- if not found, search library files (for example, for `printf`)
- once absolute address is determined, fill in the machine code appropriately
- Output of linker: executable file containing text and data (plus header)
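A toy C sketch of the relocation step (the offsets, base address, and table layout are invented; real linkers also patch instruction-specific bit fields such as jal's 26-bit target):

```c
#include <stdint.h>
#include <stdio.h>

/* Toy relocation pass: each flagged word holds an offset that the
   linker turns into an absolute address by adding the load base. */
struct reloc { uint32_t offset; };        /* byte offset of the word */

static void relocate(uint32_t *seg, const struct reloc *tab, int n,
                     uint32_t base) {
    for (int i = 0; i < n; i++)
        seg[tab[i].offset / 4] += base;   /* fill in absolute address */
}

int main(void) {
    uint32_t text[2] = { 0x18, 0 };       /* word 0 references "loop" */
    struct reloc tab[1] = { { 0 } };
    relocate(text, tab, 1, 0x00400000);   /* assumed load address */
    printf("0x%08x\n", (unsigned) text[0]);  /* 0x00400018 */
    return 0;
}
```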
Where Are We Now?
C program `foo.c` ⇒ Compiler ⇒ Assembly program `foo.s` ⇒ Assembler ⇒ Object (machine language module) `foo.o` ⇒ Linker ⇒ Executable (machine language program) `a.out` ⇒ **Loader** ⇒ Memory
Loader
- Input: Executable Code (e.g., `a.out` for MIPS)
- Output: (program is run)
- Executable files are stored on disk
- When one is run, loader’s job is to load it into memory and start it running
- In reality, loader is the operating system (OS)
- loading is one of the OS tasks
So what does a loader do?
- Reads the executable file's header to determine the size of the text and data segments
- Creates a new address space for the program, large enough to hold the text and data segments, along with a stack segment
- Copies instructions and data from the executable file into the new address space (this may be anywhere in memory)
Loader
- Copies arguments passed to the program onto the stack
- Initializes machine registers
- Most registers cleared, but stack pointer assigned address of 1st free stack location
- Jumps to start-up routine that copies program’s arguments from stack to registers and sets the PC
- If main routine returns, start-up routine terminates program with the exit system call
Example: C ⇒ Asm ⇒ Obj ⇒ Exe ⇒ Run
```c
#include <stdio.h>
int main (int argc, char *argv[]) {
int i;
int sum = 0;
for (i = 0; i <= 100; i = i + 1)
sum = sum + i * i;
printf ("The sum of squares from 0 .. 100 is %d\n", sum);
}
```
Example: C ⇒ Asm ⇒ Obj ⇒ Exe ⇒ Run
```assembly
.text
.align 2
.globl main
main:
subu $sp,$sp,32
sw $ra, 20($sp)
sd $a0, 32($sp)
sw $0, 24($sp)
sw $0, 28($sp)
loop:
lw $t6, 28($sp)
mul $t7, $t6, $t6
lw $t8, 24($sp)
addu $t9,$t8,$t7
sw $t9, 24($sp)
addu $t0, $t6, 1
sw $t0, 28($sp)
ble $t0, 100, loop
la $a0, str
lw $a1, 24($sp)
jal printf
move $v0, $0
lw $ra, 20($sp)
addiu $sp,$sp,32
jr $ra
.data
.align 0
str:
.asciiz "The sum from 0 .. 100 is %d\n"
```
Where are 7 pseudo-instructions?
Example: C ⇒ Asm ⇒ Obj ⇒ Exe ⇒ Run
The 7 pseudo-instructions in the listing above are `subu`, `sd`, `mul`, `addu` (with an immediate operand), `ble`, `la`, and `move`; the assembler replaces each with real machine instructions.
## Symbol Table Entries
- **Symbol Table** (Label → Address, all still unknown at this point):
  - main: ?
  - loop: ?
  - str: ?
  - printf: ?
- **Relocation Table** (Address, Instr. Type, Dependency): empty so far
Example: C ⇒ Asm ⇒ Obj ⇒ Exe ⇒ Run
- Remove pseudoinstructions, assign addresses
<table>
<thead>
<tr>
<th>Address</th>
<th>Instruction</th>
<th>Address</th>
<th>Instruction</th>
</tr>
</thead>
<tbody>
<tr><td>00</td><td>addiu $29, $29, -32</td><td>30</td><td>addiu $8, $14, 1</td></tr>
<tr><td>04</td><td>sw $31, 20($29)</td><td>34</td><td>sw $8, 28($29)</td></tr>
<tr><td>08</td><td>sw $4, 32($29)</td><td>38</td><td>slti $1, $8, 101</td></tr>
<tr><td>0c</td><td>sw $5, 36($29)</td><td>3c</td><td>bne $1, $0, loop</td></tr>
<tr><td>10</td><td>sw $0, 24($29)</td><td>40</td><td>lui $4, l.str</td></tr>
<tr><td>14</td><td>sw $0, 28($29)</td><td>44</td><td>ori $4, $4, r.str</td></tr>
<tr><td>18</td><td>lw $14, 28($29)</td><td>48</td><td>lw $5, 24($29)</td></tr>
<tr><td>1c</td><td>multu $14, $14</td><td>4c</td><td>jal printf</td></tr>
<tr><td>20</td><td>mflo $15</td><td>50</td><td>add $2, $0, $0</td></tr>
<tr><td>24</td><td>lw $24, 24($29)</td><td>54</td><td>lw $31, 20($29)</td></tr>
<tr><td>28</td><td>addu $25, $24, $15</td><td>58</td><td>addiu $29, $29, 32</td></tr>
<tr><td>2c</td><td>sw $25, 24($29)</td><td>5c</td><td>jr $31</td></tr>
</tbody>
</table>
Symbol Table Entries
- **Symbol Table** (Label → Address):
  - main: 0x00000000
  - loop: 0x00000018
  - str: 0x10000430
  - printf: 0x000003b0
- **Relocation Information** (Address, Instruction, Dependency):
  - 0x00000040, lui, l.str
  - 0x00000044, ori, r.str
  - 0x0000004c, jal, printf
### Addresses start at 0x00000000
<table>
<thead>
<tr>
<th>Address</th>
<th>Instruction</th>
<th>Address</th>
<th>Instruction</th>
</tr>
</thead>
<tbody>
<tr>
<td>00</td>
<td>addiu $29, $29, -32</td>
<td>30</td>
<td>addiu $8, $14, 1</td>
</tr>
<tr>
<td>04</td>
<td>sw $31, 20($29)</td>
<td>34</td>
<td>sw $8, 28($29)</td>
</tr>
<tr>
<td>08</td>
<td>sw $4, 32($29)</td>
<td>38</td>
<td>slti $1, $8, 101</td>
</tr>
<tr>
<td>0c</td>
<td>sw $5, 36($29)</td>
<td>3c</td>
<td>bne $1, $0, -10</td>
</tr>
<tr>
<td>10</td>
<td>sw $0, 24($29)</td>
<td>40</td>
<td>lui $4, 4096</td>
</tr>
<tr>
<td>14</td>
<td>sw $0, 28($29)</td>
<td>44</td>
<td>ori $4, $4, 1072</td>
</tr>
<tr>
<td>18</td>
<td>lw $14, 28($29)</td>
<td>48</td>
<td>lw $5, 24($29)</td>
</tr>
<tr>
<td>1c</td>
<td>multu $14, $14</td>
<td>4c</td>
<td>jal 236</td>
</tr>
<tr>
<td>20</td>
<td>mflo $15</td>
<td>50</td>
<td>add $2, $0, $0</td>
</tr>
<tr>
<td>24</td>
<td>lw $24, 24($29)</td>
<td>54</td>
<td>lw $31, 20($29)</td>
</tr>
<tr>
<td>28</td>
<td>addu $25, $24, $15</td>
<td>58</td>
<td>addiu $29, $29, 32</td>
</tr>
<tr>
<td>2c</td>
<td>sw $25, 24($29)</td>
<td>5c</td>
<td>jr $31</td>
</tr>
</tbody>
</table>
Things to Remember
C program `foo.c` ⇒ Compiler ⇒ Assembly program `foo.s` ⇒ Assembler ⇒ Object (machine language module) `foo.o` ⇒ Linker (with `lib.o`) ⇒ Executable (machine language program) `a.out` ⇒ Loader ⇒ Memory
Things to Remember
- Compiler converts a single HLL file into a single assembly language file.
- Assembler removes pseudoinstructions, converts what it can to machine language, and creates a checklist for the linker (relocation/symbol table). This changes each .s file into a .o file.
- Linker combines several .o files and resolves absolute addresses.
- Loader loads executable into memory and begins execution.
Things to Remember
- The Stored Program concept means instructions are just like data, so we can take a program from storage and keep transforming it until we load the registers and jump to a routine to begin execution
- Compiler $\Rightarrow$ Assembler $\Rightarrow$ Linker ($\Rightarrow$ Loader)
- Assembler does 2 passes to resolve addresses, handling internal forward references
- Linker enables separate compilation, libraries that need not be compiled, and resolves remaining addresses
Conclusion
- C has 3 pools of memory
- **Static storage**: global variable storage, basically permanent, entire program run
- **The Stack**: local variable storage, parameters, return address
- **The Heap** (dynamic storage): `malloc()` grabs space from here, `free()` returns it.
- `malloc()` handles free space with a free list. Three different ways to find free space when given a request:
- **First fit** (find first one that’s free)
- **Next fit** (same as first, but remembers where left off)
- **Best fit** (finds most “snug” free space)
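A minimal first-fit sketch in C over a toy free list (not how any particular `malloc()` is implemented):

```c
#include <stddef.h>
#include <stdio.h>

/* Toy free list; sizes and layout invented for illustration. */
struct block { size_t size; int is_free; struct block *next; };

static struct block *first_fit(struct block *head, size_t want) {
    for (struct block *b = head; b != NULL; b = b->next)
        if (b->is_free && b->size >= want)  /* first one that's free */
            return b;
    return NULL;                            /* nothing big enough */
}

int main(void) {
    struct block c = { 64, 1, NULL };
    struct block b = { 16, 0, &c };
    struct block a = { 32, 1, &b };  /* list: 32 free, 16 used, 64 free */
    struct block *hit = first_fit(&a, 48);
    printf("first fit found a %zu-byte block\n",
           hit ? hit->size : (size_t) 0);   /* 64 */
    return 0;
}
```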
|
{"Source-Url": "http://mesl.ucsd.edu/gupta/cse30/lecture10.pdf", "len_cl100k_base": 5640, "olmocr-version": "0.1.50", "pdf-total-pages": 53, "total-fallback-pages": 0, "total-input-tokens": 77078, "total-output-tokens": 7535, "length": "2e12", "weborganizer": {"__label__adult": 0.0003113746643066406, "__label__art_design": 0.00030112266540527344, "__label__crime_law": 0.0002167224884033203, "__label__education_jobs": 0.00152587890625, "__label__entertainment": 5.751848220825195e-05, "__label__fashion_beauty": 0.00013446807861328125, "__label__finance_business": 0.00016188621520996094, "__label__food_dining": 0.0003712177276611328, "__label__games": 0.0006246566772460938, "__label__hardware": 0.0015869140625, "__label__health": 0.0002925395965576172, "__label__history": 0.00021541118621826172, "__label__home_hobbies": 0.00013625621795654297, "__label__industrial": 0.00048470497131347656, "__label__literature": 0.00020897388458251953, "__label__politics": 0.00021088123321533203, "__label__religion": 0.0004405975341796875, "__label__science_tech": 0.01070404052734375, "__label__social_life": 7.95125961303711e-05, "__label__software": 0.003704071044921875, "__label__software_dev": 0.97705078125, "__label__sports_fitness": 0.0003483295440673828, "__label__transportation": 0.0006842613220214844, "__label__travel": 0.0002148151397705078}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 19108, 0.03418]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 19108, 0.50588]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 19108, 0.73134]], "google_gemma-3-12b-it_contains_pii": [[0, 235, false], [235, 629, null], [629, 1088, null], [1088, 1468, null], [1468, 1774, null], [1774, 2103, null], [2103, 2396, null], [2396, 2636, null], [2636, 3012, null], [3012, 3475, null], [3475, 3736, null], [3736, 4250, null], [4250, 4519, null], [4519, 4660, null], [4660, 4929, null], [4929, 4971, null], [4971, 5126, null], [5126, 5529, null], [5529, 6057, null], [6057, 6248, null], [6248, 6576, null], [6576, 6758, null], [6758, 6995, null], [6995, 7449, null], [7449, 8103, null], [8103, 8420, null], [8420, 8669, null], [8669, 9022, null], [9022, 9258, null], [9258, 9659, null], [9659, 9847, null], [9847, 10306, null], [10306, 10526, null], [10526, 10856, null], [10856, 11089, null], [11089, 11421, null], [11421, 11753, null], [11753, 11928, null], [11928, 12215, null], [12215, 12539, null], [12539, 12916, null], [12916, 13176, null], [13176, 13751, null], [13751, 14326, null], [14326, 14566, null], [14566, 15976, null], [15976, 16322, null], [16322, 17450, null], [17450, 17485, null], [17485, 17668, null], [17668, 18082, null], [18082, 18553, null], [18553, 19108, null]], "google_gemma-3-12b-it_is_public_document": [[0, 235, true], [235, 629, null], [629, 1088, null], [1088, 1468, null], [1468, 1774, null], [1774, 2103, null], [2103, 2396, null], [2396, 2636, null], [2636, 3012, null], [3012, 3475, null], [3475, 3736, null], [3736, 4250, null], [4250, 4519, null], [4519, 4660, null], [4660, 4929, null], [4929, 4971, null], [4971, 5126, null], [5126, 5529, null], [5529, 6057, null], [6057, 6248, null], [6248, 6576, null], [6576, 6758, null], [6758, 6995, null], [6995, 7449, null], [7449, 8103, null], [8103, 8420, null], [8420, 8669, null], [8669, 9022, null], [9022, 9258, null], [9258, 9659, null], [9659, 9847, null], [9847, 10306, null], [10306, 10526, null], [10526, 10856, null], [10856, 
11089, null], [11089, 11421, null], [11421, 11753, null], [11753, 11928, null], [11928, 12215, null], [12215, 12539, null], [12539, 12916, null], [12916, 13176, null], [13176, 13751, null], [13751, 14326, null], [14326, 14566, null], [14566, 15976, null], [15976, 16322, null], [16322, 17450, null], [17450, 17485, null], [17485, 17668, null], [17668, 18082, null], [18082, 18553, null], [18553, 19108, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, false], [5000, 19108, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 19108, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 19108, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 19108, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, true], [5000, 19108, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 19108, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 19108, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 19108, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 19108, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 19108, null]], "pdf_page_numbers": [[0, 235, 1], [235, 629, 2], [629, 1088, 3], [1088, 1468, 4], [1468, 1774, 5], [1774, 2103, 6], [2103, 2396, 7], [2396, 2636, 8], [2636, 3012, 9], [3012, 3475, 10], [3475, 3736, 11], [3736, 4250, 12], [4250, 4519, 13], [4519, 4660, 14], [4660, 4929, 15], [4929, 4971, 16], [4971, 5126, 17], [5126, 5529, 18], [5529, 6057, 19], [6057, 6248, 20], [6248, 6576, 21], [6576, 6758, 22], [6758, 6995, 23], [6995, 7449, 24], [7449, 8103, 25], [8103, 8420, 26], [8420, 8669, 27], [8669, 9022, 28], [9022, 9258, 29], [9258, 9659, 30], [9659, 9847, 31], [9847, 10306, 32], [10306, 10526, 33], [10526, 10856, 34], [10856, 11089, 35], [11089, 11421, 36], [11421, 11753, 37], [11753, 11928, 38], [11928, 12215, 39], [12215, 12539, 40], [12539, 12916, 41], [12916, 13176, 42], [13176, 13751, 43], [13751, 14326, 44], [14326, 14566, 45], [14566, 15976, 46], [15976, 16322, 47], [16322, 17450, 48], [17450, 17485, 49], [17485, 17668, 50], [17668, 18082, 51], [18082, 18553, 52], [18553, 19108, 53]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 19108, 0.07629]]}
|
olmocr_science_pdfs
|
2024-11-27
|
2024-11-27
|
8b8f871984fc243451cee09aa9547364b2f7f1ae
|
<table>
<thead>
<tr>
<th><strong>Title</strong></th>
<th>A technique for process preemption in the transputer</th>
</tr>
</thead>
<tbody>
<tr>
<td><strong>Other Contributor(s)</strong></td>
<td>University of Hong Kong. Dept. of Computer Science.</td>
</tr>
<tr>
<td><strong>Author(s)</strong></td>
<td>Cheung, M. H.; Lau, Francis C. M.; Shea, K. M.</td>
</tr>
<tr>
<td><strong>Citation</strong></td>
<td></td>
</tr>
<tr>
<td><strong>Issued Date</strong></td>
<td>1994</td>
</tr>
<tr>
<td><strong>URL</strong></td>
<td><a href="http://hdl.handle.net/10722/54867">http://hdl.handle.net/10722/54867</a></td>
</tr>
<tr>
<td><strong>Rights</strong></td>
<td>This work is licensed under a Creative Commons Attribution-NonCommercial-NoDerivatives 4.0 International License.</td>
</tr>
</tbody>
</table>
A TECHNIQUE FOR PROCESS PREEMPTION IN THE TRANSPUTER
M.H. Cheung, K.M. Shea, and Francis C.M. Lau
Technical Report TR-94-11
August 1994
DEPARTMENT OF COMPUTER SCIENCE
FACULTY OF ENGINEERING
UNIVERSITY OF HONG KONG
POKFULAM ROAD
HONG KONG
A Technique for Process Preemption in the Transputer
M.H. Cheung, K.M. Shea, and Francis C.M. Lau*
Department of Computer Science
The University of Hong Kong
August 1994
Abstract
The transputer hardware (the T8 series) allows a process to be interrupted momentarily but not preempted and saved away for later execution. The latter implies that the context of the preempted process must be completely extracted from the system. There is difficulty in doing so in the T8 transputer because parts of the context of a preempted process are not so accessible. We present a technique, which we have successfully implemented in several versions of a scheduler, that can get around the problem by forcing a process to save the context by itself before giving up the CPU. Although the technique takes five context switches, the time (referred to as the scheduler overhead) turns out to be rather small—less than 50 µs in a 25 MHz transputer. We also present a method for adding a process control block (PCB) to a transputer process, which can be used to hold the saved context of a preempted process. This requires solving the "floating workspace pointer" problem.
Keywords: operating system, process preemption, real-time computing, process scheduling, transputer.
*Correspondence: Dr F.C.M. Lau, Department of Computer Science, The University of Hong Kong, Hong Kong / Email femau@csd.hku.hk / Fax (+852) 559 8447
1 Introduction
The transputer [8, 9] is a rare kind among existing microprocessor designs: it has communication capabilities and process scheduling built into the chip. The former makes convenient the construction of parallel systems out of multiple transputer chips, and the latter is the basis for highly efficient execution of concurrent processes in a single chip. In fact, the notion of concurrent processes is well represented in the instruction set, and because of the transputer's high efficiency in handling processes, high level languages (e.g., Occam [14]) for the transputer could afford to provide concurrency as a language primitive. Much has been said about the communication capabilities of the transputer [13, 15]. This paper concentrates on aspects related to processes, in particular, preemption of processes. The software solutions we present here apply to the T8 series of the transputer which, at the time of writing, is the de facto representative (in terms of market quantities and its wide acceptance) of the transputer family. The latest series, T9000 [11], has just begun to emerge and has retained most of the design elements found in the T8 series. We comment at the end on what changes are necessary for porting our solutions to the T9000 transputer.
The efficiency of concurrent processes execution can be attributed to the simplistic design of the hardware scheduling mechanism. The major design elements that concern us here are as follows.
- There are two priorities, denoted HIGH and LOW, for processes; and there are two queues of ready processes, one for each priority.
- A HIGH priority process executes continuously until it gives up the CPU voluntarily (e.g., exit, wait for communication, or wait for timer).
- A LOW priority process executes only when there is no HIGH priority ready process; when a HIGH priority process becomes ready, it preempts (interrupts) the LOW priority process.
- LOW priority processes execute in a round-robin fashion through a timeslicing mechanism of the hardware.
- When a LOW priority process is descheduled by a timeslice, its general registers are not saved.
While such a design is adequate for general applications involving multiple processes, it is too simple for time-critical applications in which multiple levels of priorities might be necessary. In these applications, low priority processes must give way to high priority processes without undue delay. In fact, the provision of two hardware priorities, HIGH and LOW, was never meant to support this type of applications; processes belonging to the application are expected to run strictly in LOW priority, with the HIGH priority reserved for special system tasks. Therefore, in order to create a multi-priority environment in which to execute time-critical applications, software-defined priorities must be introduced. To distinguish between hardware priorities and software-defined priorities, we use the following terminology.
<table>
<thead>
<tr>
<th>Term</th>
<th>Meaning</th>
</tr>
</thead>
<tbody>
<tr>
<td>HIGH</td>
<td>transputer's high priority</td>
</tr>
<tr>
<td>LOW</td>
<td>transputer's low priority</td>
</tr>
<tr>
<td>high/higher/highest</td>
<td>software-defined priorities</td>
</tr>
<tr>
<td>low/lower/lowest</td>
<td>software-defined priorities</td>
</tr>
</tbody>
</table>
What needs to be done is similar to process scheduling in traditional operating systems: to add a priority level to every application process (which is a LOW priority process in this case), and to implement a software scheduler (a HIGH priority process) to schedule the application processes according to their priority levels. Within the transputer community, quite a few researchers have worked on this subject of multi-priority scheduling for the transputer (e.g., [2, 3, 18, 1]). In our work, we have concentrated on the problem of optimizing the overhead incurred by our scheduler's intrusion into the operation of the hardware scheduling [16, 17, 6]. To make multiple priorities work, we must implement preemptions among the LOW priority processes, the success of which is measured in terms of the preemption latency and the scheduler overhead (see Figure 1). In the final version of our scheduler, we successfully achieved a preemption latency of less than 100 µs, of which only a small percentage is overhead due to the scheduler [6]. In this paper, we discuss in detail the technique we used in our scheduler to do preemptions among the LOW priority processes which have been accorded a software-defined priority. We also present the way we attach a process control block (PCB) to a transputer process, in which the process' priority, its interrupted state, and perhaps other information are to be stored. Both the preemption technique and the attachment of the PCB turned out to be somewhat tricky to come up with, which is due to the rather peculiar design of the transputer hardware.
2 Preemption of a transputer process
There are four possible situations in which the execution of a LOW priority process is temporarily halted:
1. It executes a "wait" (for communication or timer).
2. Its timeslice expires; it is inserted into the ready process queue.
3. It is interrupted by a HIGH priority process and returns to execution as soon as there is no more ready HIGH priority process.
4. It is interrupted by a HIGH priority process (e.g., our scheduler) and is inserted into some queue.
(1)-(3) are normal operations of the transputer hardware. (4) is the situation of a process preemption, which is to be handled by our software scheduler. In this case, the interrupted (preempted) process is not necessarily the next LOW priority process to return to execution; it is inserted into some queue maintained by the scheduler, and the scheduler will then pick the one with the highest priority to run. This is similar to (2), but in (2), the descheduling, by design, will not take place until the next j (jump) or lend (loop end) instruction. These instructions do not leave any result in the registers and so saving of register contents is not necessary when the process is swapped out to the ready queue. For (4), however, register contents must be saved in order for the process to resume execution later on when its turn comes. This is where the difficulty lies. To understand the difficulty, we have to have the picture in mind of an executing
process as it is found inside the transputer hardware. Figure 2 shows such a picture of an active process. Its data is contained in a per-process workspace and is being pointed at
**Figure 2: Makeup of a transputer process**
by the workspace pointer, \( Wptr \). The special locations below the \( Wptr \) are for holding information related to the process when the process is in a waiting or ready state. If the process is involved with floating point operations, the registers in the floating point unit might contain valid data. The evaluation registers in the main (integer) processor are for non-floating point operations and integer arithmetics.
When the currently executing process is preempted (situation (4)), its state is saved in some temporary areas by the hardware. The contents of the main processor's registers, including the workspace pointer, the instruction pointer, and the evaluation registers, are saved in some locations near the bottom of the transputer's memory map. As these locations are addressable, the saved values can be easily retrieved, for example by our scheduler. However, this is not the case with the floating point registers, whose contents are copied into some "save registers" inside the floating point unit when an interrupt occurs. These saved values are not retrievable by other processes, as the save registers are, for reasons of efficiency, hidden deep inside the floating point unit (see Sections 7.10 and 9.5 of [10]). The challenge then is how to switch an interrupted process out completely
(including the contents of the floating point registers) and save it in our scheduler's data structure.
The solution is to let the process that is being preempted, due to some event (call it a *preemption event*) that might cause a higher priority process to become ready, switch itself out voluntarily. The process, before it gives up the CPU, can certainly access and save its floating point register values in some safe place (its PCB). To force the process to carry out the necessary context saving and then relinquish the CPU, we replace the next instruction (the one following the interrupt) with an instruction that invokes a context saving subroutine (Figure 3). This idea is borrowed from breakpoint implementation in debugging. Figure 4 outlines the steps involved, from the moment the preemption event occurs till the moment the highest priority process begins execution.
A preemption event (Step 1) could be a timer interrupt, as in the second version of our scheduler [17]. There we used a timer to wake up the scheduler periodically. When the scheduler wakes up, it executes a queue manipulation algorithm (details of which can be found in [16]) to select the highest priority ready process to run. As the timer is a built-in device of the transputer, the period of waking up the scheduler can be easily adjusted, thus tuning the preemption latency. The other type of preemption event can be found in our third scheduler [6], in which we wrapped all potential preemptive actions, such as process creation, communication, timer expiry, and change of priority, with special code including a switch to HIGH priority, so that when they occur they preempt the executing process immediately, without having to rely on a timer.
**Figure 4**: The preemption technique
**Legend**:
- L = low priority process
- H = high priority process
- LPS = LOW priority part of scheduler
- HPS = HIGH priority part of scheduler
This approach can result in very short preemption latency (less than 100 μs).
We denote the process to be preempted L, and the highest priority process that is chosen to run at the end H. The scheduler is actually divided into two parts and run as two processes: a HIGH priority part (HPS) and a LOW priority part (LPS). LPS is a dummy whose only function is to wake up HPS, as will be explained below. When the preemption event occurs, HPS wakes up, thus interrupting L. In order to let L save its own context, HPS will let L return from interrupt; but before it does so, HPS has to first save L's registers (Step 2), which have been pushed to low memory by the hardware. These values might be ruined upon return from interrupt, and so the saving must be done at this point. The next action of HPS is to modify the next instruction of L (see Figure 3) before it gives up the CPU (by waiting for a communication with LPS) and lets L return from interrupt. Now process L returns from interrupt (Step 3). Should the last instruction executed by L before the interrupt be an interruptible instruction (e.g., a block move), L would finish it first, which increases the preemption latency. It is almost impossible to save the state of the process at this point without letting it finish the interrupted instruction, as state information regarding the unfinished instruction is not so accessible. It is also possible that the instruction in question is an instruction whose action would deschedule L anyway (e.g., a send message instruction). If L finds this to be the case, it simply restores the replaced instruction and proceeds on, as there is no need to deschedule itself a second time. If this is not the case, which is what the figure shows, L executes the next instruction, which is the modified instruction. The modified instruction invokes a save and restore routine. This routine is divided into two portions. The first portion restores the replaced instruction, saves the process state, and relinquishes the processor. When L resumes execution later on, the second portion of this routine is executed, which restores the saved state of L. As soon as L finishes the first portion and relinquishes the CPU, LPS, which has been arranged to be at the front of the LOW priority ready queue, gains control and wakes up HPS (Step 4). HPS then executes the queue manipulation algorithm to switch in the highest priority ready process (Step 5). HPS also puts LPS back at the front of the LOW priority ready queue; as such, no two application processes may run consecutively without an invocation of our scheduler in between.
This implementation is “safe” in the sense that it obeys the rules in [10] regarding the saving of registers. It is easy to prove that the implementation is correct in the sense that it always schedules the highest priority ready process to run. Outlines of the actual
code (in Occam) used in our implementation can be found in [5].
The time it takes to execute Steps 1 to 4 is about 30 μs.\textsuperscript{1} The queue manipulation algorithm takes about 10 μs to execute in normal circumstances [16]. Hence, the scheduler overhead is about 50 μs. However, if the last instruction before the interrupt is a block move instruction, which must run to completion before the steps of the preemption procedure above can be completed, then the time could be much longer. Specifically, the preemption latency is calculated as
\[ T_{\text{respond}} + T_{\text{sch}} + T_{\text{instr}} \]
where \(T_{\text{respond}}\) is the time between the moment a higher priority process becomes ready and when the corresponding preemption event actually occurs. For the second version of the scheduler, which uses a timer, this is equal to the timer's period, which is adjustable. For the third version of the scheduler, this is the time for the execution of several instructions of the wrapping code. \(T_{\text{sch}}\) is the overhead of the scheduler, which is bounded by 50 μs, and \(T_{\text{instr}}\) is the time to complete the interrupted (interruptible) instruction. In general, the probability of interrupting a block move instruction that has a very long piece of data can be assumed to be very small. To really make sure that the problem due to \(T_{\text{instr}}\) never surfaces, such as when dealing with a time-critical application, the programmer will have to skillfully break up long messages or data into small pieces before they are sent or moved around.
3 Crafting the PCB
Transputer processes are very primitive, and they do not even have an identity, and so in order to associate some extra data (in our case, the software-defined priority and the saved context) with a particular process, we have to somehow link the data with the process through a pointer. The data is to be placed in a block of memory known as the process control block (PCB). A transputer process is represented entirely by its workspace which is pointed at by the Wptr register if the process is executing, or linked within one of the two ready queues if it is ready (Figure 5), or linked to some special locations if it is waiting for communication or timer. Therefore, to add a PCB to a process and connect the PCB with the process, we have to establish a link between the PCB and the process' workspace.
\textsuperscript{1}Running in a 25 MHz T800 transputer.
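As a rough illustration only (not from the paper), the PCB described below might hold fields along these lines; the names are invented, and the paper itself specifies only the software-defined priority and the saved context:

```c
#include <stdint.h>

/* Illustrative PCB sketch; field names are assumptions. */
struct pcb {
    int      priority;            /* software-defined priority level */
    uint32_t wptr, iptr;          /* saved workspace/instruction pointers */
    uint32_t areg, breg, creg;    /* integer evaluation stack */
    uint64_t fareg, fbreg, fcreg; /* floating point register stack */
};
```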
Referring to Figure 2 again, we note that the workspace behaves like a stack with the workspace pointer pointing at the top of the stack. When the process calls a procedure, a new stack frame is pushed on top and the workspace pointer moves downward. All local variables are addressed as positive offsets from the workspace pointer. Similarly, a return from a procedure would cause the stack to shrink, and the workspace pointer would move up. It is this change of the workspace pointer that makes establishing a link between the process and its PCB difficult.
One possible solution to this "floating" workspace pointer problem is to have a fixed workspace pointer for the entire life of a process. The PCB for the process can then be pointed at by a pointer which is at a fixed offset from the workspace pointer. As the workspace pointer is fixed, the workspace cannot grow or shrink, and therefore cannot have any local variables. All local variables must then be placed elsewhere, such as in the PCB. As a result, all accesses to local variables must be done indirectly through the PCB pointer. This method obviously would decrease the performance of a program as all local variables become non-local.
A more efficient method is to let the PCB pointer (Pptr) float with the workspace pointer, as shown in Figure 6. The workspace remains unchanged as before, but then whenever the workspace pointer moves, the PCB pointer must also move with it so that it is at a fixed offset from the workspace pointer and is therefore always retrievable. This offset, \( Pptr_{offset} \), can be defined at system initialization and within the compiler. In the T8 transputer, the special locations Wptr−1 to Wptr−5 are reserved for such
uses as keeping the saved Iptr and a pointer to the next process in the queue, etc. Therefore, \( Pptr_{offset} \) can be chosen to be just below \( Wptr-5 \). The moving of the

Figure 6: Adding and linking a PCB to a process
\( Pptr \) must be done carefully as the register stack might contain useful data when the moving is performed. The following piece of code can guarantee safe moving of the \( Pptr \).
1. STL \( Pptr_{offset} - 1 \) -- save Areg
2. LDL \( Pptr_{offset} \) -- get Pptr
3. STL \( Pptr_{offset} + \text{change} \) -- move it
4. LDL \( Pptr_{offset} - 1 \) -- restore Areg
This segment of code must be executed before the instruction that causes the workspace pointer to move. First of all, the contents of Areg, which is the top register of the evaluation stack, are saved to the location below the \( Pptr \). This leaves the stack at depth 2 (i.e., two used registers), and therefore we can use Areg to carry out the moving of the \( Pptr \). The distance in bytes (change) to move is pre-calculated by the compiler based on the instruction that is about to cause the
workspace pointer to move. Finally, Areg is restored. After this, the change of the workspace pointer can take place.
The second method we just described is used in our implementation. We use the PCB to hold the software-defined priority of a process and the saved context of the process when it is preempted. We had to modify the compiler\(^2\) so that all instructions that will change the workspace pointer are prepended the code presented above. Some of the library routines had to be changed as well: for example, those dealing with priorities now use the software-defined priority in the PCB instead of the hardware HIGH and LOW priorities. Details of these changes can be found in [5].
4 The T9000 transputer
The main difficulty we had with switching out a process was the inaccessibility of some of the process’ context. In the T9000 transputer [12], this problem does not exist as the context of an interrupted process is readily available in a set of special registers, the shadow registers. There are two special instructions for manipulating these registers:
\[
\begin{align*}
\textit{stshadow} &: \text{store shadow registers} \\
\textit{ldshadow} &: \text{load shadow registers}
\end{align*}
\]
For our purposes, the \textit{stshadow} instruction can be used to store the entire state of the preempted process in memory, and the \textit{ldshadow} instruction to resume a process.
Other instructions that might be useful for a version of our scheduler for the T9000 transputer include \textit{swapqueue}, \textit{insertqueue}, \textit{intdis/intenb}, and \textit{settimeslice}. The \textit{swapqueue} instruction can swap a queue of workspaces of ready processes prepared somewhere in memory with one of the two ready process queues. The \textit{insertqueue} instruction can be used to insert a process, such as our LPS, at the front of a ready process queue safely. The \textit{intdis/intenb} instructions are for disabling and enabling interrupt respectively: they can allow the scheduler to manipulate the process queue safely without having to worry about any unpredictable changes to the queue. Similarly, the \textit{settimeslice}, which is for turning on and off the hardware timeslicing, can be used by our LPS to ensure that it will never be descheduled by a timeslice when it is executing. In the T8 transputer, the only way for a LOW priority process to avoid being timesliced is to do away with
\(^2\)Logical Systems C compiler, version 99 1 [7]
using timesliceable instructions completely, such as the $j$ (jump) and the $lend$ (loop end) instructions.
The "floating" workspace problem still exists in the T9000 transputer, and so our method of attaching a PCB to a process can still apply.
5 Concluding remarks
The preemption technique we presented is rather unusual in terms of the number of context switches. Referring again to Figure 4, there are a total of five context switches (from Step 4 to Step 5, the HPS takes over to manipulate the queue) between the occurrence of the preemption event and the execution of the selected process. If it were not for the fast context switching capability of the transputer hardware, this number of context switches would be unacceptable. Even with five context switches, the time for the procedure is below 50 $\mu$s. This fast context switching can be attributed to the extremely light weight of transputer processes. In the T9000 transputer, processes become a little more sophisticated and heavyweight, but preemption can now be done in a much simpler way, and two context switches would be sufficient (from the preempted process to the scheduler and then to the selected process).
The design and implementation of the techniques we presented were called for in our effort to build a real-time multi-priority scheduler in the transputer. Similar work on multi-priority schedulers has been performed by other researchers and research groups, and can be classified into cooperative and non-cooperative methods. In the cooperative method, a user process is modified so that it communicates (cooperates) with a high-level scheduler every now and then to allow the scheduler to decide whether the process should be allowed to continue right away or not. This method obviously does not work for time-critical processes, as the delay incurred due to the round-robin mode of the underlying scheduling queue could be large and unpredictable. Our schedulers are examples of non-cooperative methods, and we achieved the desired low overhead and short preemption latency by manipulating the (LOW priority) ready queue directly and using the context switching method described in this paper. Our method is safe, as we have followed the rules prescribed in [10] and left the saved context of the interrupted process untouched during the interrupt. In contrast, the scheduler presented in [4] had to modify the saved registers of the interrupted process during the interrupt. Moreover, their scheduler cannot handle the case in which the interrupted process is
involved in floating point operations. In [19], a multi-priority real-time kernel for the transputer, called TRANS-RTXc, is described, but the detail of how they implemented multi-priority scheduling is not presented.
Acknowledgements
We acknowledge the early contributions to this project made by S.W. Lau.
References
Cheung, M. H. A technique for process preemption in the transputer. Hong Kong: Department of Computer Science, Faculty of Engineering, University of Hong Kong.
|
{"Source-Url": "http://hub.hku.hk/bitstream/10722/54867/2/31800865.pdf", "len_cl100k_base": 5722, "olmocr-version": "0.1.50", "pdf-total-pages": 20, "total-fallback-pages": 0, "total-input-tokens": 21131, "total-output-tokens": 7395, "length": "2e12", "weborganizer": {"__label__adult": 0.0006022453308105469, "__label__art_design": 0.00080108642578125, "__label__crime_law": 0.0004930496215820312, "__label__education_jobs": 0.000751495361328125, "__label__entertainment": 0.00016486644744873047, "__label__fashion_beauty": 0.0003097057342529297, "__label__finance_business": 0.0004336833953857422, "__label__food_dining": 0.0005960464477539062, "__label__games": 0.00106048583984375, "__label__hardware": 0.049163818359375, "__label__health": 0.0008111000061035156, "__label__history": 0.0004968643188476562, "__label__home_hobbies": 0.00029277801513671875, "__label__industrial": 0.0017299652099609375, "__label__literature": 0.0003592967987060547, "__label__politics": 0.0003466606140136719, "__label__religion": 0.00084686279296875, "__label__science_tech": 0.42724609375, "__label__social_life": 8.45789909362793e-05, "__label__software": 0.01357269287109375, "__label__software_dev": 0.49755859375, "__label__sports_fitness": 0.0004172325134277344, "__label__transportation": 0.0014781951904296875, "__label__travel": 0.00026154518127441406}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 28824, 0.02862]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 28824, 0.41234]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 28824, 0.92126]], "google_gemma-3-12b-it_contains_pii": [[0, 517, false], [517, 759, null], [759, 839, null], [839, 2252, null], [2252, 4386, null], [4386, 7031, null], [7031, 8498, null], [8498, 10069, null], [10069, 11838, null], [11838, 12024, null], [12024, 14948, null], [14948, 17427, null], [17427, 19152, null], [19152, 20308, null], [20308, 22792, null], [22792, 25360, null], [25360, 27026, null], [27026, 28676, null], [28676, 28824, null], [28824, 28824, null]], "google_gemma-3-12b-it_is_public_document": [[0, 517, true], [517, 759, null], [759, 839, null], [839, 2252, null], [2252, 4386, null], [4386, 7031, null], [7031, 8498, null], [8498, 10069, null], [10069, 11838, null], [11838, 12024, null], [12024, 14948, null], [14948, 17427, null], [17427, 19152, null], [19152, 20308, null], [20308, 22792, null], [22792, 25360, null], [25360, 27026, null], [27026, 28676, null], [28676, 28824, null], [28824, 28824, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 28824, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 28824, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 28824, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 28824, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 28824, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 28824, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 28824, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 28824, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 28824, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 28824, null]], "pdf_page_numbers": [[0, 517, 1], [517, 759, 2], [759, 839, 3], 
[839, 2252, 4], [2252, 4386, 5], [4386, 7031, 6], [7031, 8498, 7], [8498, 10069, 8], [10069, 11838, 9], [11838, 12024, 10], [12024, 14948, 11], [14948, 17427, 12], [17427, 19152, 13], [19152, 20308, 14], [20308, 22792, 15], [22792, 25360, 16], [25360, 27026, 17], [27026, 28676, 18], [28676, 28824, 19], [28824, 28824, 20]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 28824, 0.10526]]}
|
olmocr_science_pdfs
|
2024-12-01
|
2024-12-01
|
f9dc63f0cd8f6f33bfd3f10dd18f3175194e98be
|
8. Intermediate Code
- Intermediate code is closer to the target machine than the source language, and hence easier to generate code from.
- Unlike machine language, intermediate code is (more or less) machine independent. This makes it easier to retarget the compiler.
- It allows a variety of optimizations to be performed in a machine-independent way.
- Typically, intermediate code generation can be implemented via syntax-directed translation, and thus can be folded into parsing by augmenting the code for the parser.
8.1.1. Low-Level Intermediate Representations
**Examples**: Three Address Code
- This is a sequence of instructions of the form
\[ x := y \text{ op } z \]
where \( x, y, \) and \( z \) are *variable names, constants, or compiler generated variables* ("temporaries").
- Only one operator is permitted on the RHS, so there are no "built-up" expressions. Instead, expressions are computed using temporaries. E.g. the source language construct
\[ x := y + z*w \]
might translate to
\[
\begin{align*}
t1 &:= z * w \\
x &:= y + t1
\end{align*}
\]
Different Kinds of Three-Address Statements
**Assignment**:
\[ x := y \text{ op } z \quad (\text{op binary}) \]
\[ x := \text{op } y \quad (\text{op unary}) \]
\[ x := y \]
**Jumps**:
\[ \text{goto } L \]
\[ \text{jumpt } t\ L \quad (\text{jump to } L \text{ if } t \text{ is true}) \]
\[ \text{jumpf } t\ L \quad (\text{jump to } L \text{ if } t \text{ is false}) \]
\[ \text{if } x \text{ relop } y \text{ goto } L \quad (L \text{ a label}) \]
Procedure Call/Return:
- param \( x \) (\( x \) an actual parameter)
- call \( p, n \) (\( n \) = no. of params to \( p \))
- enter: initialization (if any)
- exit: cleanup actions (if any)
- return / return \( x \)
- retrieve \( x \) (save returned value in \( x \))
Indexed Assignment:
\[ x := y[i] \]
\[ x[i] := y \]
Address and Pointer Assignments:
\[ x := \&y \]
\[ x := *y \]
\[ *x := y \]
Miscellaneous:
\[ \text{label } L \]
8.1.2. Implementing Three-Address Instructions
Each instruction is implemented as a structure called a *quadruple*:
- contains (up to) 4 fields: operation, (up to) two operands, and destination;
- for operands: use a bit to indicate whether it’s a constant or a pointer into the symbol table.
**Example**: the instructions \( x := y + z \) and \( \text{if } t1 \geq t2 \text{ goto } L \) become the quadruples

<table>
<tr><th>Op</th><td>PLUS</td></tr>
<tr><th>Src1</th><td>y</td></tr>
<tr><th>Src2</th><td>z</td></tr>
<tr><th>Dest</th><td>x</td></tr>
</table>

<table>
<tr><th>Op</th><td>JMP_GE</td></tr>
<tr><th>Src1</th><td>t1</td></tr>
<tr><th>Src2</th><td>t2</td></tr>
<tr><th>Dest</th><td>L</td></tr>
</table>

where the `Dest` field of the jump quadruple points to the *instruction labelled L*.
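To make the representation concrete, here is a minimal Python sketch of a quadruple structure; the field names follow the tables above, and the string-based operand encoding (standing in for a tagged constant/symbol-table pointer) is our simplification:

```python
from dataclasses import dataclass
from typing import Optional, Union

# An operand is either an integer constant or a symbol-table reference;
# a real implementation would use the tag bit described above instead
# of a Python union.
Operand = Optional[Union[int, str]]

@dataclass
class Quadruple:
    op: str                  # e.g. "PLUS", "JMP_GE"
    src1: Operand = None
    src2: Operand = None
    dest: Operand = None     # result location, or a jump target

# x := y + z
q1 = Quadruple(op="PLUS", src1="y", src2="z", dest="x")
# if t1 >= t2 goto L
q2 = Quadruple(op="JMP_GE", src1="t1", src2="t2", dest="L")
```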
8.2. Intermediate Code Generation
- Source language constructs are decomposed to simpler constructs at the intermediate code level.
- When generating code to evaluate expressions, temporary names must be made up for internal nodes in the syntax tree for the expression.
*Example:*
**Source:** if \( x + 2 > 3*(y - 1) + 4 \) then \( z := 0; \)
**Intermediate Code:**
\[
\begin{align*}
t1 &:= x + 2 \\
t2 &:= y - 1 \\
t3 &:= 3 * t2 \\
t4 &:= t3 + 4 \\
&\text{if } t1 <= t4 \text{ goto } L \\
z &:= 0 \\
&\text{label } L
\end{align*}
\]
**Syntax-Directed Translation**:
- Intermediate code represented as a list of instructions. Instruction sequences are concatenated using the operator `||`.
(In practice, we might choose to write the intermediate code instructions out into a file.)
- **Attributes for Expressions** $E$:
- $E.place$ : denotes the location that holds the value of $E$.
- $E.code$ : denotes the instruction sequence that evaluates $E$.
- **Attributes for Statements** $S$:
- $S$.begin: denotes the first instruction in the code for $S$.
- $S$.after: denotes the first instruction after the code for $S$.
- $S$.code: denotes the instruction sequence that represents $S$.
- **Auxiliary Functions**:
  - `newtemp()`: returns a *new* temporary each time it is called;
    - returns a pointer to the ST entry of a temp;
    - may take a parameter specifying the type of the temp (useful if reusing temps).
  - `newlabel()`: returns a new label name each time it is called.
- **Notation**: we write
\[ \text{gen}(x\ \text{':='}\ y\ \text{'+'}\ z) \]
to represent the instruction \( x := y + z \).
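A minimal sketch of these helpers in Python, assuming temporaries and labels are plain strings and an instruction sequence is a Python list (so `||` becomes list concatenation):

```python
_temp_count = 0
_label_count = 0

def newtemp() -> str:
    """Return a fresh temporary; a real compiler would also enter it
    into the symbol table (and could reuse freed temps, see later)."""
    global _temp_count
    _temp_count += 1
    return f"t{_temp_count}"

def newlabel() -> str:
    """Return a fresh label name."""
    global _label_count
    _label_count += 1
    return f"L{_label_count}"

def gen(*parts) -> list:
    """Build a one-instruction sequence from its parts; sequences are
    concatenated with +, playing the role of ||."""
    return [" ".join(str(p) for p in parts)]

print(gen("x", ":=", "y", "+", "z"))   # ['x := y + z']
```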
## Intermediate Code Generation: Simple Expressions
| Production | Semantic Rule |
|---|---|
| $E \rightarrow \text{id}$ | $E.place := \text{id}.place$; <br> $E.code := \text{""}$ (the empty sequence) |
| $E \rightarrow (E_1)$ | $E.place := E_1.place$; <br> $E.code := E_1.code$ |
| $E \rightarrow E_1 + E_2$ | $E.place := \text{newtemp}()$; <br> $E.code := E_1.code \parallel E_2.code \parallel \text{gen}(E.place\ \text{':='}\ E_1.place\ \text{'+'}\ E_2.place)$ |
| $E \rightarrow -E_1$ | $E.place := \text{newtemp}()$; <br> $E.code := E_1.code \parallel \text{gen}(E.place\ \text{':='}\ \text{'-'}\ E_1.place)$ |
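Using the `newtemp()` and `gen()` helpers sketched above, the semantic action for $E \rightarrow E_1 + E_2$ might be coded as follows (the `Expr` record is our own, not from the notes):

```python
from dataclasses import dataclass, field

@dataclass
class Expr:
    place: str                                  # location holding E's value
    code: list = field(default_factory=list)    # instructions evaluating E

def reduce_plus(e1: Expr, e2: Expr) -> Expr:
    """Semantic action for E -> E1 + E2 (uses newtemp()/gen() above)."""
    e = Expr(place=newtemp())
    # E.code := E1.code || E2.code || gen(E.place ':=' E1.place '+' E2.place)
    e.code = e1.code + e2.code + gen(e.place, ":=", e1.place, "+", e2.place)
    return e

y, z = Expr(place="y"), Expr(place="z")
print(reduce_plus(y, z).code)   # e.g. ['t1 := y + z']
```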
8.3.2. Accessing Array Elements I
- Array elements can be accessed quickly if the elements are stored in a block of consecutive locations.
- Assume:
- we want the $i^{th}$ element of an array $A$ whose subscript ranges from $lo$ to $hi$;
- the address of the first element of the array is $base$.
- We can avoid address computations in the intermediate code if we have indexed “addressing modes” at the intermediate code level.
In this case, $A[i]$ is the $(i - lo)^{th}$ element of the array located at $base$ (starting at element 0). So a reference $A[i]$ translates to the code
\[
t1 := i - lo \\
t2 := A[t1]
\]
8.3.2. Accessing Array Elements II
- Address computations can’t be avoided in general, because of pointer and `struct` types.
- The size of an array element can be greater than 1, say $w$.
- Assume:
- we want the $i^{th}$ element of an array $A$ whose subscript ranges from $lo$ to $hi$;
- the address of the first element of the array is $base$;
- each element of $A$ has width $w$.
Then, the address of \( A[i] \) is
\[
\text{base} + (i - \text{lo}) \times w \\
= (\text{base} - \text{lo} \times w) + i \times w \\
= C_A + i \times w
\]
where \( C_A \) depends on the array \( A \) and is known at compile time.
**Note**: \( C_A \) is a memory address if \( A \) is a global, and is a stack displacement if \( A \) is a local.
- The idea extends to multidimensional arrays in the obvious way: need to know whether the elements are stored in row-major or column-major order.
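A sketch of how this translation might look in code, again reusing the `newtemp()`/`gen()` helpers sketched earlier; the name `C_A` for the precomputed constant part ($base - lo \times w$) follows the derivation above, and the indexed addressing mode is assumed to exist at the intermediate-code level:

```python
def gen_array_ref(c_a: str, i_place: str, w: int):
    """Emit three-address code for a reference A[i] with element width w.
    c_a names the compile-time constant C_A = base - lo*w.
    Returns (place, code); the exact code shape is illustrative."""
    t1 = newtemp()
    t2 = newtemp()
    code = gen(t1, ":=", i_place, "*", w)     # t1 := i * w
    code += gen(t2, ":=", f"{c_a}[{t1}]")     # t2 := C_A[t1]  (indexed mode)
    return t2, code

place, code = gen_array_ref("C_A", "i", w=4)
print(code)   # e.g. ['t1 := i * 4', 't2 := C_A[t1]']
```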
8.4. Logical Expressions
\[ BExp \rightarrow E_1 \text{ relop } E_2 \]
8.4.3. Naive but Simple Approach:
Intermediate Code \((\text{TRUE} == 1, \text{FALSE} == 0)\):
\[
\begin{align*}
&t1 \leftarrow \text{value of } E_1 \\
&t2 \leftarrow \text{value of } E_2 \\
&t3 := \text{TRUE} \\
&\text{if } t1 \text{ relop } t2 \text{ goto } L \\
&t3 := \text{FALSE} \\
&\text{label } L
\end{align*}
\]
**Disadvantage**: lots of (usually unnecessary) memory traffic.
8.4.1. Code Generation for Conditionals
Production: \( S \rightarrow \text{if } E \text{ then } S_1 \text{ else } S_2 \)
Semantic Rule (using an extra label for the else branch, matching the structure shown below):
\[
\begin{align*}
S.\text{begin} &:= \text{newlabel}(); \\
S.\text{else} &:= \text{newlabel}(); \\
S.\text{after} &:= \text{newlabel}(); \\
S.\text{code} &:= \text{gen}(\text{'label'}\ S.\text{begin})\ || \\
& \quad E.\text{code}\ || \\
& \quad \text{gen}(\text{'if'}\ E.\text{place}\ \text{'=='}\ \text{'0'}\ \text{'goto'}\ S.\text{else})\ || \\
& \quad S_1.\text{code}\ || \\
& \quad \text{gen}(\text{'goto'}\ S.\text{after})\ || \\
& \quad \text{gen}(\text{'label'}\ S.\text{else})\ || \\
& \quad S_2.\text{code}\ || \\
& \quad \text{gen}(\text{'label'}\ S.\text{after})
\end{align*}
\]
Structure of Generated Code (here L1 = S.begin, L3 = S.else, L2 = S.after):
```
L1: code for E
    if E == FALSE goto L3
    code for S1
    goto L2
L3: code for S2
L2: ...
```
8.4.1. Code Generation for Loops
Production: $S \rightarrow \text{while } E \text{ do } S_1$
Structure of Generated Code:
```
L1: code for E
    if E == FALSE goto L2
    code for S1
    goto L1
L2: ...
```
Semantic Rule:
```plaintext
{ S.begin := newlabel();
  S.after := newlabel();
  S.code  := gen('label' S.begin) ||
             E.code ||
             gen('if' E.place '==' '0' 'goto' S.after) ||
             S_1.code ||
             gen('goto' S.begin) ||
             gen('label' S.after)
}
```
Intermediate Code Generation: Assignment
- Grammar production:
\[ S \rightarrow Lhs := Rhs \]
- Semantic Rule:
\[
S.code := Lhs.code\ ||\ Rhs.code\ ||\ \text{gen}(Lhs.place\ \text{':='}\ Rhs.place)
\]
Relational Expressions: Better Approach
- Often, relational expressions occur in the context of boolean conditions of control statements.
- Instead of creating temporaries which are set to true or false, based upon the outcome of evaluating a boolean condition, generate direct branches to true and false targets.
- Short circuit evaluation of boolean expressions can also be handled effectively by this approach.
Relational Expressions: Example
\[ E = a < b \text{ or } c < d \text{ and } e < f \]
```
100: if a < b goto __
101: goto 102
102: if c < d goto 104
103: goto __
104: if e < f goto __
105: goto __
```
\[ E.\text{truelist} = \{100, 104\} \]
\[ E.\text{falselist} = \{103, 105\} \]
\[ E \rightarrow E_1 \text{ or } M \ E_2 \]
\[
\{
\text{backpatch}(E_1.falselist, M.quad);
E.truelist = \text{merge}(E_1.truelist, E_2.truelist);
E.falselist = E_2.falselist;
\}
\]
\[ M \rightarrow \epsilon \]
\[
\{ M.quad = \text{nextquad} \}
\]
\[ E \rightarrow E_1 \text{ and } M \ E_2 \]
\[
\{
\text{backpatch}(E_1.truelist, M.quad);
E.truelist = E_2.truelist;
E.falselist = \text{merge}(E_1.falselist, E_2.falselist);
\}
\]
Relational Expressions: cont’d.
\[ E \rightarrow \text{not } E_1 \]
\[
\{ \\
E.\text{truelist} = E_1.\text{falselist}; \\
E.\text{falselist} = E_1.\text{truelist}; \\
\}
\]
\[ E \rightarrow (E_1) \]
\[
\{ \\
E.\text{truelist} = E_1.\text{truelist}; \\
E.\text{falselist} = E_1.\text{falselist}; \\
\}
\]
\[ E \rightarrow id_1 \ \text{relop} \ id_2 \]
\[
\{ \\
E.\text{truelist} = \text{makelist}(\text{nextquad}); \\
E.\text{falselist} = \text{makelist}(\text{nextquad} + 1); \\
\text{generate}(\text{'if'}\ id_1.\text{addr}\ \text{relop}\ id_2.\text{addr}\ \text{'goto \_\_'}); \\
\text{generate}(\text{'goto \_\_'}) \\
\}
\]
\[ E \rightarrow \text{true} \]
\[
\{ \\
E.\text{truelist} = \text{makelist}(\text{nextquad}); \\
\text{generate}(\text{'goto \_\_'}) \\
\}
\]
\[ E \rightarrow \text{false} \]
\[
\{ \\
E.\text{falselist} = \text{makelist}(\text{nextquad}); \\
\text{generate}(\text{'goto \_\_'}) \\
\}
\]
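A minimal sketch of the backpatching machinery assumed by these rules; here the instruction list holds `[text, target]` pairs and an unfilled jump target is `None`:

```python
instructions = []               # each entry: [text, target]; None = unfilled

def nextquad() -> int:
    """Index of the next instruction to be generated."""
    return len(instructions)

def generate(text: str, target=None) -> None:
    instructions.append([text, target])

def makelist(i: int) -> list:
    """A fresh list containing only instruction index i."""
    return [i]

def merge(l1: list, l2: list) -> list:
    return l1 + l2

def backpatch(lst: list, target: int) -> None:
    """Fill in the jump target of every instruction on lst."""
    for i in lst:
        instructions[i][1] = target

# E -> id1 relop id2, e.g. a < b:
truelist = makelist(nextquad())
falselist = makelist(nextquad() + 1)
generate("if a < b goto")       # target still unknown
generate("goto")                # target still unknown
backpatch(truelist, 102)        # filled in once the target quad is known
```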
Code Generation for Loops and Conditionals
- The straightforward approach can introduce branch instructions whose targets are unconditional jumps.
```
while a < b do
if x < y then S endif
endwhile
```
```
100: if a < b goto 102
101: goto 106
102: if x < y goto 104
103: goto 105    /* branch to a branch; could go directly to 100 */
104: S.code
105: goto 100
106: ...
```
- We can avoid this by maintaining an additional attribute for statements called the nextlist. This attribute tracks branches in the statements whose target should be set to code that follows them in the execution sequence.
Loops and Conditionals: cont’d.
\[ S \rightarrow \text{if } E \text{ then } M_1 \ S_1 \ N \text{ else } M_2 \ S_2 \]
\[
\{
\text{backpatch}(E.\text{truelist}, M_1.\text{quad});
\text{backpatch}(E.\text{falselist}, M_2.\text{quad});
S.\text{nextlist} = \text{merge}(S_1.\text{nextlist}, \text{merge}(N.\text{nextlist}, S_2.\text{nextlist}))
\}
\]
\[ N \rightarrow \epsilon \]
\[
\{
N.\text{nextlist} = \text{makelist}(\text{nextquad});
\text{generate}(\text{'goto \_\_'})
\}
\]
\[ M \rightarrow \epsilon \]
\[
\{M.\text{quad} = \text{nextquad}\}
\]
\[ S \rightarrow \text{if } E \text{ then } M \ S_1 \]
\[
\{
\text{backpatch}(E.\text{truelist}, M.\text{quad});
S.\text{nextlist} = \text{merge}(E.\text{falselist}, S_1.\text{nextlist})
\}
\]
Loops and Conditionals: cont’d.
\[ S \rightarrow \text{while} \ M_1 \ E \ \text{do} \ M_2 \ S_1 \]
\[
\begin{align*}
&\{ \text{backpatch}(S_1.\text{nextlist}, M_1.\text{quad}); \\
&\quad \text{backpatch}(E.\text{truelist}, M_2.\text{quad}); \\
&\quad S.\text{nextlist} = E.\text{falselist}; \\
&\quad \text{generate}(\text{goto} M_1.\text{quad}); \\
&\} \\
\end{align*}
\]
\[ S \rightarrow \text{begin} \ L \ \text{end} \]
\[
\{ S.\text{nextlist} = L.\text{nextlist} \}
\]
\[ S \rightarrow A \]
\[
\{ S.\text{nextlist} = \text{nil} \}
\]
\[ L \rightarrow L_1 \ ; \ M \ S \]
\[
\begin{align*}
&\{ \text{backpatch}(L_1.\text{nextlist}, M.\text{quad}); \\
&\quad L.\text{nextlist} = S.\text{nextlist}; \\
&\} \\
\end{align*}
\]
\[ L \rightarrow S \]
\[
\{ L.\text{nextlist} = S.\text{nextlist} \}
\]
Example: optimization takes place here (the branch at 103 chains through 106 to 100).
```
while a < b do
  while x < y do
    S
  endwhile
endwhile
```
```
100: if a < b goto 102
101: goto 107
102: if x < y goto 104
103: goto 106    /* could be optimized to goto 100 */
104: S.code
105: goto 102
106: goto 100
107: ...
```
Intermediate Code Generation: `case` Statements
Implementation issue: Need to generate code so that we can (efficiently) choose one of a set of different alternatives, depending on the value of an expression.
Implementation choices:
1. linear search
2. binary search
3. jump table
**Implementation considerations:**
1. **Execution cost**: linear or binary search may be cheaper if the number of cases is small; for a large number of cases, a jump table may be cheaper.
2. **Space cost**: a jump table may take too much space if the case values are not clustered closely together, e.g.:
```
switch (x) {
case 1 : ...
case 1000 : ...
case 1000000 : ...
}
```
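A sketch of how a compiler might weigh these trade-offs when picking a dispatch strategy; the size and density thresholds are illustrative assumptions, not from the notes:

```python
def choose_case_dispatch(case_values, max_table_size=4096, min_density=0.5):
    """Pick an implementation for a case statement over integer values.
    The thresholds here are illustrative assumptions."""
    n = len(case_values)
    if n <= 4:                       # very few cases: linear search wins
        return "linear search"
    span = max(case_values) - min(case_values) + 1
    if span <= max_table_size and n / span >= min_density:
        return "jump table"          # dense, bounded range of case values
    return "binary search"           # many, widely scattered case values

print(choose_case_dispatch(list(range(1, 51))))           # jump table
print(choose_case_dispatch([1, 1000, 1000000]))           # linear search
print(choose_case_dispatch(list(range(0, 100000, 997))))  # binary search
```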
8.5. Code Generation for Function Calls
Calling Sequence: Caller:
- Evaluate actual parameters; place actuals where the callee wants them.
*Instruction*: `param t`
- Save machine state (current stack and/or frame pointers, return address) and transfer control to callee.
*Instruction*: `call p, n` \( (n = \text{no. of actuals}) \)
Calling Sequence: Callee:
- Save registers (if necessary); update stack and frame pointers to accommodate \( m \) bytes of local storage.
*Instruction*: `enter m`.
Return Sequence: Callee:
- Place return value $x$ (if any) where the caller wants it; adjust stack/frame pointers (maybe); jump to return address.
*Instruction*: return $x$ or return.
Return Sequence: Caller:
- Save the value returned by the callee (if any) into $x$.
*Instruction*: retrieve $x$.
Intermediate Code for Function Calls: An Example
Source Code:
\[ x = f(0,y+1)-1; \]
Intermediate Code Generated:
\[
\begin{align*}
&t1 := y + 1 \\
&\text{param } t1 \qquad /*\ \text{arg 2}\ */ \\
&\text{param } 0 \qquad /*\ \text{arg 1}\ */ \\
&\text{call } f, 2 \\
&\text{retrieve } t2 \qquad /*\ t2 := f(0, t1)\ */ \\
&t3 := t2 - 1 \\
&x := t3
\end{align*}
\]
Suppose function \( f \) needs 24 bytes of space for its locals and temporaries. Its code has the form
\[
\begin{align*}
& \text{enter 24} \\
& \quad \ldots \\
& \quad \text{return } t17 \\
& \quad /* \text{suppose return value is in } t17 */
\end{align*}
\]
Code Generation for Functions: Storage Allocation
Problem: The first instruction in a function is
```c
enter n /* n = space for locals, temps */
```
but \( n \) is not known until the whole function has been processed.
Solution 1: generate final code into a list, “back-patch” the appropriate instructions after processing the function body.
Advantage: Can also do machine-dependent optimizations (e.g., instruction scheduling).
Disadvantage: slower, requires more memory.
Solution 2: Generate code of the form
```
        goto L1      /* code for function foo starts here */
L2:     code for body of foo
L1:     enter n      /* n is known by now */
        goto L2
```
Reusing Temporaries
Storage requirements can be reduced considerably if we reuse temporaries:
- Maintain a free list of temporaries:
- When a temporary is no longer necessary, it is returned to the free list.
- The function `newtemp()` is modified to first search the free list, and to allocate a new temporary only if there is nothing in the free list.
- To handle objects of different sizes, we can maintain a free list for each type (or size).
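A sketch of `newtemp()`/`freetemp()` with per-size free lists, as described above (the names and the default size are ours):

```python
from collections import defaultdict

_free_temps = defaultdict(list)   # size (or type) -> free temporary names
_next_temp = 0

def newtemp(size: int = 4) -> str:
    """Reuse a freed temporary of the requested size if one exists;
    otherwise allocate a fresh one."""
    global _next_temp
    if _free_temps[size]:
        return _free_temps[size].pop()
    _next_temp += 1
    return f"t{_next_temp}"

def freetemp(name: str, size: int = 4) -> None:
    """Return a temporary to the free list once it is no longer live."""
    _free_temps[size].append(name)

a = newtemp()        # t1
freetemp(a)
print(newtemp())     # t1 again, reused from the free list
```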
Performance of Java in Function-as-a-Service Computing
A. Dowd
Department of Electrical and Computer Engineering
University of Texas at Austin
Austin, USA
dowda@utexas.edu
Qinzhe Wu, Lizy K. John
Department of Electrical and Computer Engineering
University of Texas at Austin
Austin, USA
qw2699@utexas.edu,ljohn@ece.utexas.edu
0000-0002-7988-1431,0000-0002-8747-5214
Abstract—One of the newest forms of serverless computing is Function-as-a-Service (FaaS). FaaS provides a framework to execute modular pieces of code in response to events (e.g., clicking a link in a web application). The FaaS platform takes care of provisioning and managing servers, allowing the developers to focus on their business logic. Additionally, all resource management is event-driven, and developers are only charged for the execution time of their functions. Despite so many apparent benefits, there are some concerns regarding the performance of FaaS. Past work has shown that cold starts typically have a negative effect on response latency (e.g., the initialization could add more than 10× execution time to short Python FaaS functions). However, the magnitude of the slowdown is subject to varying from language to language. This paper investigates how containerization and cold starts impact the performance of Java FaaS functions, and compares with the findings from the prior Python study.
We find that containerization overhead slows Java FaaS functions down relative to native execution by 4.42× on average (geometric mean), ranging from 1.69× up to 15.43×. Compared with Python in warm containers, Java has more overhead on three of the functions, but is faster on the other functions (up to 27.08× faster). The container initialization time for Java is consistently less than half that of Python. However, Java has additional overhead due to Java Virtual Machine (JVM) warmup, which contributes a varying amount of latency to the execution depending on the Java function's properties. Overall, Java achieves about a 2.60× (2.65×) speedup over Python across seven FaaS functions in cold (warm) start scenarios, respectively.
Index Terms—Serverless Computing, Function-as-a-Service, JVM, OpenWhisk
I. INTRODUCTION
Over the past several years, interest in serverless computing has rapidly increased due to its flexibility and low cost. Despite the name, serverless computing does not actually remove the servers; instead, it adds a layer of abstraction that isolates the servers from the developers to some extent. Eyk et al. [1] define serverless computing as “a form of cloud computing which allows users to run event-driven and granularly billed applications, without having to address the operational logic”. In this case, the user of the serverless platform might be the developer of a web application, and the operational logic could refer to bringing up the server, spinning up virtual machines, managing memory, and so on. In recent years, there have been many services such as Platform-as-a-Service (PaaS [2]–[4]), Function-as-a-Service (FaaS [5]–[8]), and Software-as-a-Service (SaaS [9], [10]), and they all partially fit the aforementioned definition. The primary factor that differentiates the three is the level of developer control. PaaS allows a developer to rent hardware resources and is billed by the hour. In this case, the developer has full control over the allocated resources and is responsible for managing them effectively. If servers sit idle due to a workload decrease, the developer will still be charged. On the other end of the spectrum is SaaS. SaaS is event-driven and granularly billed but does not accept customized code. Instead, the developer can run service code selected from a preset. FaaS sits somewhere between PaaS and SaaS (as shown in Figure 1). The infrastructure is shared, but the application code is customizable. The developer is able to register modular pieces of code with the FaaS provider and set up triggers which will execute the code in response to an event, such as a user clicking a button in a web application. The term serverless is most commonly used to refer to FaaS [11], which is the model we will focus on in this paper.
Fig. 1: Level of developer control in serverless computing [11].
FaaS has a couple of advantages over the traditional application development model. Resources are allocated by the service provider as needed, and developers are only charged for the execution time of their functions making it a highly scalable and cost-effective solution for enterprise application development [12]. This model is especially attractive due to the recent shift of application architectures to containers and microservices [11]. While Villamizar et al. [13] have shown that there are clear financial benefits to using FaaS, it should be noted that there are several limitations of FaaS. For example, functions must be stateless and short. Most providers limit the execution time to 10-15 minutes [4], [5], [14].
A few studies [15]–[18] have been conducted attempting to understand the primary factors affecting FaaS performance. The earlier research [15], [16] focuses more on the platform level. For instance, Manner et al. [16] and Jackson et al. [15] reveal that different FaaS providers optimize their platforms/runtimes for different programming languages. In contrast, Shahrad et al. [17] deployed an open-source FaaS framework, Apache OpenWhisk [14], to a self-owned machine and performed server-level profiling. Among the hardware implications that Shahrad et al. found, the containerization slowdown of Python FaaS functions could go up to 20×, and a cold start could add initialization time of more than 10× the actual function execution time. Those findings are based on experiments with FaaS functions written in Python (and some NodeJS), so naturally this raises questions about Java, another very popular language in cloud computing. As pointed out by the prior studies (more details in Section II), the programming language might change FaaS performance significantly. Here are the primary questions this paper aims to answer:
1) What is the containerization overhead for Java FaaS functions?
2) What is the impact of a container cold start for functions written in Java, and does the Java Virtual Machine (JVM) play a role in this scenario?
3) How does Java performance on OpenWhisk compare with Python?
In order to find the answers for those questions, we design a set of experiments using the same facilities from Shahrad et al. [17], further investigating the overhead due to containerization and cold starts for functions written in Java. Specifically, we measure and break down the response latency of various Java functions into container initialization plus function execution time, expecting to see a significant slowdown over the native execution, especially for cold start scenarios; we repeat the same experiments for Python implementation for comparison, anticipating a better performance from Java due to the pre-compiled bytecode; we scale the Java FaaS function data size in both cold and warm start scenarios, looking for a constant overhead from JVM warmup [19].
This paper makes the following contributions:
1) We measure the response latency of Java functions running on an open-source FaaS framework. For those measured Java FaaS functions, we determine that the containerization overhead (over native execution) causes 4.42× and 12.44× slowdowns on average for warm and cold starts, respectively.
2) From the analysis and comparison with Python implementations of the same FaaS functions, we show that Java has a shorter container initialization time than Python (about 1/3 as long) and that its overall performance is better on most functions (on average 2.60× faster).
3) We also reveal that cold starts incur an additional overhead, JVM warmup, for Java FaaS functions.
For the rest of the paper, we give a brief introduction to the related work (§ II), describe our experimental methodology in detail (§ III), present the results collected from each test (§ IV), then discuss future work (§ V) and conclude with the implications of the results (§ VI).
II. RELATED WORK
As shown in several studies, the primary factors which affect FaaS performance are cold start overhead, programming language and FaaS provider, and containerization overhead.
FaaS Cold Start: For security reasons, a new container must be started for each new function run on a particular machine. Containers may be reused, but only for exactly the same function registered by the same developer. Idle containers are shut down after a short grace period for better resource utilization. This means that if functions are triggered infrequently, the containers will have to be unpaused or restarted every time the function is run. Past work [15], [16], [18] has shown that cold starts typically have a negative effect on response latency, but the magnitude of the slowdown varies depending on provider, language, and container image versions [20]–[22]. All providers and languages exhibit a cold start latency of at least 300ms, but in some cases the latency can be as long as 24s [16]. This could be up to 10× the execution time of extremely short functions [17].
Programming Language: Several studies [15], [16] have shown that the effect programming languages have on FaaS functions varies widely. Manner et al. [16] tested functions written in Java and JavaScript on AWS Lambda and Microsoft Azure. They found that in both cases Java incurred a larger cold start overhead, but performed much better than JavaScript in a warm container. They suggested that the larger cold start overhead was due to the virtual machine warmup time required by Java. On the other hand, they attributed Java outperforming JavaScript in a warm container to the fact that JavaScript is an interpreted language while Java uses pre-compiled bytecode. In principle, bytecode saves runtime since some of the work of translating high-level code is done prior to execution.
Platform: It is also clear that different platforms provide runtimes that are tuned for different languages. Jackson et al. [15] also tested the effect of language runtime on AWS Lambda and Microsoft Azure. They found that on AWS Lambda Python outperformed all other languages, including Java, on warm starts. Python is usually regarded as an interpreted language (i.e., high-level code is not translated into machine-readable instructions until the line is executed), so this result is an exception to the principle mentioned above. They also found that C# .NET performed best on Microsoft Azure, but performed badly, particularly in cold start scenarios, on AWS. NodeJS exhibited the exact opposite behavior, performing well on AWS and poorly on Azure. These differences are not surprising, as providers are motivated to improve the performance of the most popular languages for their platforms. However, it is extremely important for developers to understand how their choice of language and platform may impact function performance.
**Containerization Overhead:** On commercial FaaS platforms, it is difficult to figure out the total slowdown caused by FaaS when compared to native execution. Shahrad et al. [17] developed a methodology that uses the open-source FaaS framework OpenWhisk [14] and a tightly-integrated profiling tool, FaaSProfiler, to study the containerization overhead (more details on OpenWhisk and FaaSProfiler in Section III-A). Shahrad et al. found that for functions written in Python, there is a significant slowdown (up to 12×), and predicted that functions in other programming languages should suffer containerization overhead of the same order of magnitude.
### III. Methodology
**A. OpenWhisk and FaaSProfiler**
Figure 2 illustrates the architecture of OpenWhisk [14] (the dashed box on the right) and FaaSProfiler [17] (the left dashed box). Just like all other FaaS designs, OpenWhisk [14] executes function code provided by a developer in an isolated environment, a docker container [23] in this case. Because different FaaS functions are registered as the actions for different events, a database, CouchDB [24], is used to store that mapping information. Whenever the frontend, NGINX [25] for OpenWhisk, receives a request, the OpenWhisk controller queries CouchDB to find the corresponding FaaS function; the pair of request and FaaS function is then queued into Kafka [26], which manages the resources and schedules the actual execution of the FaaS function. Finally, a new docker container is started, if necessary, and the invoker runs the function. The result is returned to the database and the requester application. Most academic experiments to date have had to reverse engineer commercial FaaS systems, which makes it difficult to take reliable measurements due to the lack of control of the entire system. FaaSProfiler was developed to study the server-level behaviors of FaaS functions [17] with the open-source FaaS framework OpenWhisk. As shown in Figure 2, there are a few components in FaaSProfiler: a Synthetic Workload Invoker takes a configuration written in JSON in order to create the requests in a desired way (e.g., a specific query-per-second rate, request length, etc.); and a Workload Analyzer module is hooked into the CouchDB in the OpenWhisk system to retrieve first-hand execution data (e.g., request queuing time, container initialization time, function execution time, and so on).
Based on the OpenWhisk and FaaSProfiler publicly available on GitHub [27], [28], we set up the OpenWhisk-FaaSProfiler facility as described above, and follow the instructions on the GitHub pages to adjust the configurations properly (listed in Table I). The blade server where we deploy this setup has fairly modern specifications (Table II). As the system has only a single worker node, the range of request arrival rates that the system is capable of handling is relatively narrow, so we adjust the configuration carefully to ensure the system is in a balanced state [17]. This allows us to focus on the containerization overhead and cold start effect, without worrying about other latency factors (e.g., Kafka queuing latency).
**B. Benchmarks and Experiments**
We use four microbenchmarks (base64, http, json, primes) from the FaaSProfiler repository [28], and five more benchmarks from the Java Microbenchmark Harness (JMH) repository [29]. The four functions that come with FaaSProfiler are written in Python, so we implement the corresponding Java versions ourselves, and we also write corresponding Python implementations for the five JMH benchmarks. Descriptions of each of the functions are given in Table III along with a data size definition. One of our experiments scales the data size, as we will describe later.
We run each function on OpenWhisk in both cold and warm start scenarios, achieved by shutting down the services beforehand and by running right after a warmup invocation of the same function, respectively. FaaSProfiler records the container initialization time and function execution time separately, so we can double-check that the warm start runs have zero initialization time while the cold start runs do not. In order to understand the containerization overhead, we also measure the same functions running natively using the Java Microbenchmark Harness (JMH) [29]. JMH ensures that the benchmarks are run in a warm environment and that no dead code is optimized out. We chose to collect the native execution data in a warm environment because this is more realistic for the traditional programming model, which constructs and executes one monolithic application. In contrast, FaaS never guarantees a warm environment, as containers are spun up and shut down frequently.
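For illustration, a minimal sketch of how end-to-end response latency could be timed against a deployed action over HTTP; the URL, function name, and payload are placeholders, and unlike FaaSProfiler this does not recover the initialization/execution breakdown from CouchDB:

```python
import statistics
import time

import requests  # third-party HTTP client (pip install requests)

# Placeholder web-action URL; a real deployment would expose its own.
ACTION_URL = "https://openwhisk.example.com/api/v1/web/guest/default/primes"

def measure_latency(n_invocations: int, payload: dict) -> list:
    """Time n blocking invocations; returns per-request latencies in seconds.
    The first call after a shutdown would include the cold start."""
    latencies = []
    for _ in range(n_invocations):
        start = time.perf_counter()
        requests.post(ACTION_URL, json=payload, timeout=60)
        latencies.append(time.perf_counter() - start)
    return latencies

lats = measure_latency(10, {"n": 10000})
print(f"median latency: {statistics.median(lats) * 1000:.1f} ms")
```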
**Fig. 2: OpenWhisk-FaaSProfiler architecture [17]**
TABLE I: Configurations of OpenWhisk-FaaSProfiler.
<table>
<thead>
<tr>
<th>Parameter</th>
<th>Description</th>
<th>Value Set</th>
</tr>
</thead>
<tbody>
<tr>
<td>invocationPerMinute</td>
<td>The maximum number of action invocations allowed per minute</td>
<td>60000</td>
</tr>
<tr>
<td>concurrentInvocations</td>
<td>The maximum number of invokers allowed to run concurrently</td>
<td>30000</td>
</tr>
<tr>
<td>firesPerMinute</td>
<td>The allowed trigger firings per minute</td>
<td>60000</td>
</tr>
<tr>
<td>sequenceMaxLength</td>
<td>The maximum length of a sequence action</td>
<td>50000</td>
</tr>
</tbody>
</table>
<table>
<thead>
<tr>
<th>Name</th>
<th>Description</th>
<th>Value Set</th>
</tr>
</thead>
<tbody>
<tr>
<td>test_name</td>
<td>Name of tests to be run (can choose anything)</td>
<td></td>
</tr>
<tr>
<td>random_seed</td>
<td>Ensures run to run consistency</td>
<td>100</td>
</tr>
<tr>
<td>blocking_cli</td>
<td>True/false determines whether blocking CLI calls are used</td>
<td>false</td>
</tr>
<tr>
<td>test_duration_in_seconds</td>
<td>Total duration of test in seconds, measurement stop after this time</td>
<td>15-90</td>
</tr>
<tr>
<td>instances</td>
<td>Set of functions to run during measurement period</td>
<td></td>
</tr>
<tr>
<td>application</td>
<td>Name of function (should be the exact name registered)</td>
<td></td>
</tr>
<tr>
<td>distribution</td>
<td>Distribution of function invocations</td>
<td>Uniform</td>
</tr>
<tr>
<td>rate</td>
<td>Number of function invocations per second</td>
<td>1-30</td>
</tr>
<tr>
<td>activity_window</td>
<td>Range of time in seconds during which function invocations should occur</td>
<td>[5,5]</td>
</tr>
<tr>
<td>perf_monitoring</td>
<td>Set of scripts to be run during or after test</td>
<td>default</td>
</tr>
<tr>
<td>runtime_script</td>
<td>Monitoring script run during test</td>
<td></td>
</tr>
<tr>
<td>post_script</td>
<td>Optional post processing script</td>
<td>null</td>
</tr>
</tbody>
</table>
TABLE II: System Specifications
<table>
<thead>
<tr>
<th>Name</th>
<th>Description</th>
<th>Value Set</th>
</tr>
</thead>
<tbody>
<tr>
<td>Processors</td>
<td>12×X86, 64 cores @ 2.2 GHz, 2 hyperthreads/core</td>
<td></td>
</tr>
<tr>
<td>Cache</td>
<td>32KB private I-Cache, 32KB private D-Cache, 256KB private L2, 30 MB shared L3</td>
<td></td>
</tr>
<tr>
<td>Memory</td>
<td>56GB DDR4-2400</td>
<td></td>
</tr>
<tr>
<td>NIC</td>
<td>PCIe 2.1 5GT/s GbE NIC</td>
<td></td>
</tr>
<tr>
<td>kernel version</td>
<td>Linux 4.4.0-138-generic</td>
<td></td>
</tr>
</tbody>
</table>
TABLE III: Microbenchmarks
<table>
<thead>
<tr>
<th>Name</th>
<th>Description</th>
<th>Data Size (n)</th>
</tr>
</thead>
<tbody>
<tr>
<td>base64</td>
<td>Encodes and decodes a string</td>
<td>Length of the string</td>
</tr>
<tr>
<td>http</td>
<td>Performs API call to retrieve current time</td>
<td>Number of API invocations</td>
</tr>
<tr>
<td>json</td>
<td>Reads a JSON object from a file, averages values in common fields</td>
<td>Length of the JSON object</td>
</tr>
<tr>
<td>primes</td>
<td>Finds the number of primes between 1 and n</td>
<td>Upper bound on range of numbers for prime search</td>
</tr>
<tr>
<td>bigDec</td>
<td>Creates array of BigDecimals and compares all elements to element 0</td>
<td>Number of compare operations performed</td>
</tr>
<tr>
<td>bigInt</td>
<td>Creates array of BigIntegers and multiplies each element together</td>
<td>Number of elements in array</td>
</tr>
<tr>
<td>arrCpy</td>
<td>Copies an array of bytes to an empty array (deep copy)</td>
<td>Length of byte array</td>
</tr>
<tr>
<td>intMax</td>
<td>Finds maximum value in an array of integers</td>
<td>Length of integer array</td>
</tr>
<tr>
<td>fileRW</td>
<td>Writes to tmp file and then reads data back</td>
<td>Number of bytes written to file</td>
</tr>
</tbody>
</table>
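For concreteness, the Python version of the primes microbenchmark might look like the following OpenWhisk action; OpenWhisk Python actions expose a `main(params)` entry point returning a dict, and the parameter name `n` is our assumption:

```python
def main(params):
    """OpenWhisk action: count the primes between 1 and n (trial division)."""
    n = int(params.get("n", 10000))
    count = 0
    for candidate in range(2, n + 1):
        is_prime = True
        d = 2
        while d * d <= candidate:
            if candidate % d == 0:
                is_prime = False
                break
            d += 1
        if is_prime:
            count += 1
    return {"primes": count}
```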
IV. Results
A. Native vs. OpenWhisk Execution
Fig. 3: OpenWhisk response latency normalized by native execution time for Java functions. Benchmarks are sorted by the native execution time (the numbers above the black solid bars are the absolute execution time in milliseconds).
Function execution on OpenWhisk is consistently slower than native execution in both cold and warm start scenarios. Figure 3 shows the response latency of each function on OpenWhisk normalized by its latency in the native environment. The end-to-end latency for functions in warm containers is 1.6× to 15× longer than the native execution. Most functions experience a 3× to 24× longer response latency (over native latency) during cold starts. However, extremely short functions such as bigInt and json suffer much larger slowdowns (up to 57.64×). This is partially because the overhead caused by container startup and initialization is relatively constant (as we will show in Section IV-B). Across all 9 benchmarks, the geometric average (GEOMEAN in Figure 3) of the slowdown is 4.42× for warm starts and 12.44× for cold starts.
B. Cold Start
As shown in Figure 4a, container initialization time (the cyan solid bars) remains about the same (266 ms) across all Java FaaS functions. This is expected because the time to start a container should not depend on which function it will execute. Surprisingly, Figure 4a also indicates that container initialization time is not the only factor contributing to the longer cold start response latency (compared with the warm start latency). The differences (the hatched bars) between the latency on cold and warm starts are consistently somewhat (but still statistically significantly) greater than the container initialization time.

For a FaaS function, it is clear that one of the cold start overheads comes from container initialization. But Java FaaS functions running in a cold start scenario have additional overhead added to the execution time. One possible source of the extra overhead is Java Virtual Machine (JVM) warmup, so we also performed a data scaling experiment by varying the data size as defined in Table III. If the difference in the execution times (cold vs. warm start) is due to the JVM warming up, it should be consistent for the same function across data sizes [19].

To compare Python and Java, we measure the container initialization time as well as the total response latency (warm and cold starts) for the Python version of all the aforementioned FaaS functions (§ III-B) except fileRW, which cannot reach a balanced state on a single worker server.
We can see in Figure 4b that Python containers have a very stable initialization time (about 778 ms), nearly triple the Java container initialization time. Python containers take longer than the Java ones to initialize because OpenWhisk launches Java and Python containers from different docker images, and the Python image has many more layers (preparing commands/actions) than the Java image. Unlike the Java FaaS functions, the Python versions have a much simpler answer for the cold start overhead: the difference in response latency between cold start and warm start for Python FaaS functions matches the container initialization time almost perfectly, meaning container initialization is the only cold start overhead for Python FaaS.
Based on the observations from Figure 4, we know there must be some overhead, in addition to container initialization, in the cold start scenario that Java suffers but Python avoids. It is likely Java Virtual Machine (JVM) warmup that contributes the extra latency to Java FaaS function execution after the container is up.
C. Data Scaling
Prior work [19] revealed that loading Java classes and interpreting bytecode the JVM has not seen before are the main sources of JVM warmup overhead. Therefore, JVM warmup overhead should not scale with the input size. Based on this theory, we scale the data size and monitor how the difference between cold starts and warm starts changes. Figure 5 visualizes the results. For base64, http, and primes, the cold start and warm start difference remains almost constant as the overall response latency rises steeply. It is a bit different for json (Figure 5c), where the gap between cold start and warm start enlarges gradually. This is because the json function is relatively short and the function invocation rate is fairly high.
Fig. 4: (a) Java function cold start container initialization time, which explains only part of the overhead cold start has over warm start. (b) Python function container initialization time is approximately the latency difference between cold start and warm start.
Fig. 5: Java function cold start and warm start execution times scale as the data size increases; the difference between cold start and warm start execution times remains relatively stable.

A small increase in the response latency of json can lead to a chained effect in which more requests are queued up and requests arriving later accumulate latency. In other words, the server drifts from the balanced state to an over-invoked state, similar to what was observed by Shahrad et al. [17].
D. Java vs. Python OpenWhisk Performance
Figure 6 shows a comparison of Java vs. Python functions in cold (Figure 6a) and warm (Figure 6b) execution environments. Both plots show total response latency (i.e., the sum of function execution time and initialization time if cold start). Due to the longer initialization time that Python has (§ IV-B), the Java function is faster in all except base64 for the cold start scenario. For the warm start scenario, Java is still faster on average, but there is more variation between functions. On average (geometric mean), Java function implementations
are 2.60× faster than Python in cold start scenarios and 2.65× faster than Python in warm start scenarios.
V. FUTURE WORK
We have shown that function length affects containerization overhead, and that typically longer functions perform better (§ IV-A). Further study could try to determine the optimal function length for any language or FaaS architecture. Similar optimization on memory consumption and invocation rate should also be done. Most FaaS research to date has been focused on the performance of very short functions. Understanding the FaaS overhead for larger applications would be especially helpful in determining the range of use cases where FaaS can provide a cost or performance benefit.
VI. CONCLUSIONS
This study provides three key takeaways for FaaS developers. First, function execution time should be significantly longer than the container initialization time in order to keep FaaS overheads low. In general, short functions have a higher containerization overhead than long ones, especially in cold start scenarios. Writing functions with longer execution time (breaking application code into larger pieces) can amortize the constant container initialization time.
Second, pay special attention to cold start overheads for functions which are not invoked frequently; the cold start overheads of these functions will not be amortized. The cost of these cold starts varies depending on language. The results presented here show that the container initialization time for Python functions is higher than that of Java. However, Java functions typically incur an additional slowdown (from the JVM) in the function execution phase on cold starts, while Python functions have consistent execution times regardless of cold or warm containers. Pre-warming and other strategies could be explored to reduce these overheads.
Third, the same FaaS function written in different programming languages could have quite different performance. Pre-compiled languages (e.g., Java) tend to be faster than interpreted languages (unless the platform has special optimizations). This is extremely important for developers to understand as choosing the appropriate platform/language pair can have a major impact on performance. In the case of OpenWhisk, Java performs better, even considering the additional overhead caused by JVM warmup during cold starts.
Comparing Feature Engineering Approaches to Predict Complex Programming Behaviors
Wengran Wang, Yudong Rao, Yang Shi, Alexandra Milliken, Chris Martens, Tiffany Barnes, Thomas W. Price
North Carolina State University
{wwang33, yrao3, yshi26, aamilllik, crmartens, tmbarnes, twprice}@ncsu.edu
ABSTRACT
Using machine learning to classify student code has many applications in computer science education, such as autograding, identifying struggling students from their code, and propagating feedback to address particular misconceptions. However, a fundamental challenge of using machine learning for code classification is how to represent program code as a vector to be processed by modern learning algorithms. A piece of programming code is structurally represented by an abstract syntax tree (AST), and a variety of approaches have been proposed to extract features from these ASTs to use in learning algorithms, but no work has directly compared their effectiveness. In this paper, we do so by comparing three different feature engineering approaches for classifying the behavior of novices’ open-ended programming projects according to expert labels. In order to evaluate the effectiveness of these feature engineering approaches, we hand-labeled a dataset of novice programs from the Scratch repository to indicate the presence of five complex, game-related programming behaviors. We compared these feature engineering approaches by evaluating their classification effectiveness. Our results show that the three approaches perform similarly across different target labels. However, we also find evidence that all approaches led to overfitting, suggesting the need for future research to select and reduce code features, which may reveal advantages in more complex feature engineering approaches.
1. INTRODUCTION
Automatically classifying student code using machine learning has many applications in computer science education, such as to automatically grade students’ code [8], to predict when students are unlikely to succeed at a task and may benefit from feedback [13], and to propagate feedback on particular misconceptions to students who need it [10]. However, a fundamental challenge in applying machine learning to source code is how to represent that code in a way the learner can understand. The structure of programming code is traditionally represented as an abstract syntax tree (AST), where nodes and their children correspond to specific code elements (e.g., an if statement), and the tree can be arbitrarily large. However, the vast majority of machine learning models take fixed-length vectors as input.
In many domains, researchers have addressed this challenge of code representation by extracting a set of features from source code, which can then represent the code in the model. For example, a simple Bag-of-Words (BoW) approach represents code as a binary vector, where each element indicates the presence or absence of a specific AST node anywhere in the code (e.g., [6, 18, 11]). However, simple feature extraction approaches like BoW do not capture the complex structural relationships among AST nodes in student code, which may be important for many classification tasks. A number of other feature extraction approaches have been proposed (e.g., [2, 28, 4]), but no work has directly compared their effectiveness for classifying student code. Further, feature extraction can be especially difficult in the domain of computer science education, where students’ code may cover a large and sparse solution space, with little overlap among solutions paths [21, 27, 13]. This suggests the need to develop new feature extraction approaches that address this challenge.
In this paper, we compared the effectiveness of three code feature extraction approaches on a challenging and generalizable classification task: classifying programming game design projects to identify the presence or absence of complex game behaviors. We found that the three code feature extraction approaches had similar performance across all behaviors, and that the performance of a specific feature extraction approach depends on factors such as the properties of the target label, the size of the training data, and the prevalence of positive labels. Our work contributes to educational data mining for CS education by comparing the affordances of different feature engineering approaches and evaluating their effectiveness in predicting the presence of complex game behaviors.
2. RELATED WORK
In this section, first, we discuss the relevance and importance of automatic code classification for improving computing education through personalization and scalability. We then summarize state-of-the-art feature engineering approaches that related work has used for various code analysis purposes.
2.1 Applications of Classifying Student Code
Manual labeling of student code is a frequent practice in computing education. It is often done by instructors and researchers, for example, to grade student program submissions, identify misconceptions [15, 23], or to profile a programming dataset to identify when particular code features are used [12]. However, labeling tasks are quite time-consuming and hard to scale, leading many researchers to investigate methods for automatically labeling code.
Researchers have used different approaches to automatically classify and analyze students’ code, such as using correct program submissions to generate rubric-based auto-graders [8], or using programming homework grades to infer students’ knowledge [2]. Elmadami et al., for example, built a data-driven misconception classifier in EER-Tutor, an Intelligent Tutoring System that provides tutorials for database design. Using association rule mining, EER-Tutor categorizes frequent failing patterns as indicators of misconceptions [9]. Similarly, Mao et al. developed a classifier that predicts a student’s success in completing a programming task based on their programming code trajectory with just one minute of data from a student [13]. This classifier, if implemented in programming education, could help instructors or learning systems to prompt students with suggestions or feedback when they most need it. In addition, our prior work has shown the efficacy of adding an automatic code classifier to a learning system. We developed an unsupervised classifier to identify completions of 11 sub-goals in a Snap! block-based programming task [27]. We then integrated this classifier in a programming environment to detect sub-goal completions and provide timely positive feedback to students, which significantly increased the time students spent engaged with the programming task [14]. These results suggest that, in programming learning environments, automatic code classification can help provide adaptive and scalable feedback to support students.
2.2 AST Structural Feature Extraction
Researchers have used various approaches to extract a fixed set of features from code to use in machine learning models. Structural feature extraction looks for patterns in an AST and creates a binary input vector, indicating the presence or absence of these patterns, or counts of the frequency of their occurrence. For example, Bag-of-Words (BoW) is a common feature extraction approach, adapted from natural language processing, where each possible type of AST node becomes a binary feature. Figure 1 shows how BoW features transform a piece of code into an input vector by indicating the presence or absence of each feature in the programming code AST. The BoW approach has been used in various code classification tasks, such as to predict students’ success in completing a program, or to summarize functions of code snippets [6, 18, 11]. For example, Azcona et al. used BoW to represent students’ code, and found that after using BoW feature extraction to convert program code into vectors, a simple Naïve Bayes model predicted correctness of short pieces of Python code submissions with 59.4% accuracy [6]. This suggests that even a relatively simple feature extraction approach, such as BoW, extracts useful information that can predict meaningful labels for student code with some success.
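As a concrete illustration, a minimal Python sketch of BoW feature extraction over a toy AST; the tuple-based AST encoding and the vocabulary are ours, not from the paper:

```python
def bow_features(ast, vocabulary):
    """Return a binary vector marking which node types occur anywhere
    in the AST.  A node is encoded as (node_type, [children])."""
    present = set()

    def visit(node):
        node_type, children = node
        present.add(node_type)
        for child in children:
            visit(child)

    visit(ast)
    return [1 if t in present else 0 for t in vocabulary]

# "if touching(bullet) then stop" as a toy AST
ast = ("if", [("touching", []), ("stop", [])])
vocab = ["if", "touching", "stop", "forever", "move"]
print(bow_features(ast, vocab))   # [1, 1, 1, 0, 0]
```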
BoW features represent a single AST node, regardless of its neighbors. However, meaningful programming patterns usually consist of nodes that are structurally connected to each other in the AST. For example, in Figure 2(b), the piece of programming code implements a behavior in which the game ends when a sprite 1 touches a bullet. To accomplish this behavior, the “Stop” block must be inside the “if” block; otherwise, the behavior would be different. To capture such structural information (e.g., the requirement that the “Stop” block be inside the “if”, shown in Figure 2), more complex features can be extracted. Researchers have, for instance, extracted features corresponding to paths within the AST: the root path for a given node consists of the path from that node to the root node, and it has been used for data-driven hint generation [21]. The Code2Vec algorithm [4] also decomposes the AST into a collection of paths for use in a deep neural network (discussed further in Section 2.3), and was used to predict method names of code from Java GitHub repositories (not student code). Others have used n-Grams, which are n-length sequences of nodes extracted from a flattened representation of the AST: a vertical n-Gram is created by a depth-first iteration of the AST’s nodes, and a horizontal n-Gram by a breadth-first iteration of all children in an AST subtree (shown in Figure 2(c)). Akram et al. used code n-Grams as features to predict the rubric-based grades of students’ block-based programs with a Gaussian Process model that achieved an R-squared of 0.94, higher than the 0.88 achieved by the baseline BoW approach [2].
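One plausible reading of these n-Gram definitions, sketched in Python over the same nested-dict AST representation as above (again our illustration, not the cited authors’ code):

```python
def vertical_ngrams(node, n, path=()):
    """n-length node-type sequences along root-to-leaf paths (depth-first)."""
    path = path + (node["type"],)
    if len(path) >= n:
        yield path[-n:]
    for child in node.get("children", []):
        yield from vertical_ngrams(child, n, path)

def horizontal_ngrams(node, n):
    """n-length node-type sequences over the children of each node."""
    kids = [c["type"] for c in node.get("children", [])]
    for i in range(len(kids) - n + 1):
        yield tuple(kids[i:i + n])
    for child in node.get("children", []):
        yield from horizontal_ngrams(child, n)
```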
Many of these structural features can be represented more generally as a type of pq-Grams. A pq-Gram is a subtree that includes a target node, along with its (p − 1) ancestor nodes and q of its child nodes. For example, Figure 2(b) shows how a pq-Gram can be extracted for the target node “script (2)”, with its (p − 1) ancestor nodes and q child nodes. If a node has fewer than (p − 1) ancestors or q children, the pq-Gram records these missing nodes as “null”. pq-Grams were introduced as part of a method to calculate differences between tree-structured data [5], such as between a student’s AST and a correct solution’s AST, in order to provide automated hints [28]. Using this notation, we can also view the features extracted by the BoW and root path approaches as pq-Grams: p(1)q(0)-Grams and p(∞)q(0)-Grams, respectively. Most horizontal n-Grams can be represented as p(0)q(n)-Grams, and vertical n-Grams can be viewed as sub-arrays of root paths and represented as p(n)q(0)-Grams. These AST structural feature extractions have been shown to be effective for representing and analyzing student code [27]. Despite the variety of feature extraction approaches, no work has compared their efficacy, and pq-Gram feature extraction in particular has not previously been used for code classification tasks. This comparison is important: features with too little expressivity (e.g., BoW) will not capture important AST structural information, causing the resulting model to underfit, while features with too much expressivity (e.g., treating an entire AST as a single feature) will be overly specific and will not generalize to new, unseen instances, causing the resulting model to overfit. Empirical evaluation is needed to find an appropriate balance.
1 A sprite in Scratch is similar to an object-oriented class. In Scratch game design projects, an actor of the game is usually represented by a sprite.
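To make the definition concrete, here is a small Python sketch of pq-Gram extraction (ours, not an implementation from [5] or [27]; it pads missing ancestors and children with “null” as described, and simplifies the canonical sliding-window construction):

```python
def pq_grams(node, p, q, ancestors=()):
    """Yield pq-Grams as tuples: (p-1 ancestors, target node, q children)."""
    # Last (p-1) ancestor labels, left-padded with "null" when too few.
    anc = ancestors[-(p - 1):] if p > 1 else ()
    anc = ("null",) * ((p - 1) - len(anc)) + anc
    # Child labels, right-padded with "null" up to length q.
    kids = tuple(c["type"] for c in node.get("children", ()))
    kids = kids + ("null",) * max(0, q - len(kids))
    # One gram per window of q consecutive children (one gram if q == 0).
    windows = [()] if q == 0 else [kids[i:i + q]
                                   for i in range(len(kids) - q + 1)]
    for w in windows:
        yield anc + (node["type"],) + w
    for child in node.get("children", ()):
        yield from pq_grams(child, p, q, ancestors + (node["type"],))
```

Under this sketch, `pq_grams(ast, 1, 0)` yields one 1-tuple per node, recovering exactly the BoW features, in line with the p(1)q(0)-Gram view above.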
Researchers have also explored clustering these simple features to represent more complex structural relationships. For example, Zhi et al. automatically clustered $pq$-Grams into what they called “features” [27]: groups of $pq$-Grams that together perform a meaningful programming sub-goal. However, this clustering method was not applied to feature engineering for supervised classification. Mao et al. used Recent Temporal Patterns (RTPs) to transform a highly condensed feature set into a Multivariate State Sequence, including information such as feature co-occurrence and precedence. They used this feature engineering approach to predict whether a student is unlikely to succeed in a given task. Using RTPs, their model performed better than simple feature extraction approaches, with an 18.8% increase in classification accuracy, showing that feature combinations offer more information about students’ programming status [13]. However, these existing approaches have only been applied to short programming tasks with specific goals.
3. EXPERIMENT AND RESULTS
Our goal in this study was to compare the existing code feature extraction approaches outlined in Section 2.2. To do so, we evaluated the effectiveness of the BoW, n-Gram, and $pq$-Gram approaches in classifying game behaviors.
3.1 Dataset and Classification Task: Labeling Open-Ended Scratch Projects
The student code used in our evaluation comes from the Scratch community [22], an online, novice-friendly, block-based programming website, where users create and remix interactive programming projects, such as games and animations. We chose the Scratch repository because it includes diverse, open-ended programs from learners around the world, which are not constrained to a single assignment or goal; these projects might be analogous to submissions for an open-ended final programming course project. On Feb 18th, 2020, we scraped the 6,247 top trending projects from the Game genre in the Scratch community, and selected the first 457 projects ordered by creation date. Of these 457 projects, we excluded 44 that had over 50% broken or unused code. Our dataset for the classification task therefore includes 413 projects, with an average of 1,201 AST nodes per project.
An important classification task for Scratch game projects is to identify whether a given project includes a specific game behavior (i.e., a game mechanic), for example, whether the player can jump (as in the classic Mario games). Algorithm 1 shows one pseudocode example of how a platformer jump can be implemented in Scratch. In this example, the behavior is implemented by two threads, to ensure that the actor jumps under gravity and stops when landing on top of a platform. Based on our observations of students’ code, PlatformerJump is the most complex of the five behaviors, usually comprising a large amount of code spread across different sprites and scripts. However, even for a relatively simple behavior (e.g., CollisionChangeVar), students can still implement the behavior in a wide variety of ways.
The ability to detect these game behaviors automatically would allow researchers to better understand novice programming behavior by profiling the whole Scratch repository, including millions of projects, to find popular combinations of behaviors (e.g., in [1]). It would also enable researchers to instantly identify what type of game a student is currently working on, in order to offer them highly customized feedback or examples. This task also represents a difficult challenge for code classification, since these game behaviors are comprised of many code elements, which may be dispersed throughout a student’s code (e.g., the jump behavior, shown in Algorithm 1), and which may be implemented in diverse ways, creating a large and sparse programming state-space. These challenging properties are shared by many other programming code classification tasks, such as identifying misconceptions and predicting learner performance.
In order to create meaningful categories of game behaviors, the first author investigated 13 student game design project submissions from an undergraduate programming course at a large, public research university. After thoroughly examining the submissions, the first author decomposed each game into a set of discrete game behaviors, under the criterion that these behaviors are general enough to be reused in other games. We identified 24 game behaviors and, from these, selected five that represent a diverse range of complexity and frequency of use. One author developed a definition for each behavior label (see Table 1) and trained another author to identify whether a given project includes a specific game behavior. The exact game logic behind each behavior is not important for this labeling; it simply illustrates the complexity of Scratch game behaviors.
Table 1 shows the description and the commonness of the behaviors in the 413 Scratch game projects. These behaviors have the following characteristics:
1. These behaviors are implemented by a variety of blocks, sometimes more than 30 blocks across different sprites (i.e., similar to object-oriented classes) or code scripts (i.e., threads).
2. Students implemented these behaviors in a variety of different ways, using varying types and numbers of blocks. This makes expert-authored rule-based static analysis (e.g., [25, 7]) ineffective at detecting the presence of these behaviors.
3. Although the selected game behaviors are typical within certain game genres, individual behaviors are often quite infrequent, as shown by the project counts in Table 1, which range from 25/413 (6%) to 197/413 (48%). This creates imbalanced datasets, which pose a challenge for training classifiers, since class imbalance can bias a model towards the majority class.
While the above characteristics make our classification task challenging, they are also common characteristics in many important code classification tasks. Code indicating misconceptions, low performers, or notable strategies may also be complex, diverse, or rare. The results of our evaluation, therefore, may be able to generalize to these tasks as well.
3.2 Experiment Setup
In order to understand how well these feature extraction approaches capture meaningful information for predicting the presence of complex game behaviors, we compared the BoW, n-Gram, and pq-Gram approaches. Here we present our experiment setup.
Table 1: Target Labels of Game Behaviors
<table>
<thead>
<tr>
<th>Label Name</th>
<th>Label description</th>
<th># of projects with this label</th>
<th># of blocks (estimate)</th>
</tr>
</thead>
<tbody>
<tr>
<td>KeyboardMove</td>
<td>An actor moves in the direction indicated by the player on the keyboard</td>
<td>197/413</td>
<td>3 - 10</td>
</tr>
<tr>
<td>CollisionChangeVar</td>
<td>When one actor touches another, a variable changes (e.g. score)</td>
<td>146/413</td>
<td>4 - 6</td>
</tr>
<tr>
<td>PlatformerJump</td>
<td>An actor can jump and then falls down with gravity</td>
<td>81/413</td>
<td>20 - 50</td>
</tr>
<tr>
<td>MoveWithMouse</td>
<td>An actor moves when the user moves or clicks the mouse</td>
<td>49/413</td>
<td>2 - 4</td>
</tr>
<tr>
<td>CollisionStopGame</td>
<td>The game ends when an actor touches another</td>
<td>25/413</td>
<td>3 - 4</td>
</tr>
</tbody>
</table>
**Feature extraction.** We first extracted BoW, \(n\)-Gram, and \(pq\)-Gram features from the training dataset. To reduce the number of irrelevant features, we kept only features with more than 5% support (i.e., the percentage of projects in which the feature occurs). When extracting \(n\)-Grams for a specific \(n\) value, we considered both horizontal and vertical ones, as introduced in Section 2.2, since they each extract different AST structural information. We extracted \(n\)-Grams with \(n \in \{1, 2, \ldots, 10\}\), and \(pq\)-Grams with \(p \in \{1, 2, 3, 4\}\) and \(q \in \{1, 2, 3, 4\}\). The exact subset of these \(n\)-Grams or \(pq\)-Grams used was determined by hyperparameters, as discussed below. At each increase of \(n\), we kept the features extracted by smaller values of \(n\), but removed redundant smaller features under the rule that, when two features always co-occur and one is a subset of the other, the smaller feature is dropped. We extracted \(pq\)-Grams using the same approach.
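The 5% support filter can be sketched as follows (hypothetical names; one counting pass over the per-project feature sets):

```python
from collections import Counter

def filter_by_support(project_feature_sets, min_support=0.05):
    """Keep features that occur in at least `min_support` of the projects.

    `project_feature_sets` holds one set of extracted features
    (e.g., pq-Grams) per project in the training split.
    """
    counts = Counter(f for feats in project_feature_sets for f in feats)
    threshold = min_support * len(project_feature_sets)
    return {f for f, c in counts.items() if c >= threshold}
```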
**Training and evaluation.** We trained a Support Vector Machine (SVM) with a linear kernel, treating the regularization parameter as a hyperparameter with values in \(\{0.01, 0.1, 1, 10, 100\}\). We employed five-fold cross-validation to evaluate each feature set. Within each round of cross-validation, we used \(\frac{1}{4}\) of the training set as a validation set to tune the hyperparameters. For \(n\)-Grams, we used the maximum \(n\) as a hyperparameter, with \(n \in \{1, 2, \ldots, 10\}\); similarly, for \(pq\)-Grams, we used the maximum \(p\) and \(q\) as hyperparameters, with \(p \in \{1, 2, 3\}\) and \(q \in \{1, 2, 3, 4\}\). Hyperparameter values were selected by their F1 scores on the validation set in each round of cross-validation. Since many of our target labels are highly imbalanced, accuracy offers little information about how well a model predicts the target labels, so we used F1 scores for tuning.
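A minimal scikit-learn sketch of this setup, assuming binary feature matrices built as described above (the pipeline is our approximation: the inner 4-fold grid search stands in for the paper’s single 1/4 validation split, and the real search would also range over the maximum n, p, and q):

```python
import numpy as np
from sklearn.model_selection import GridSearchCV, StratifiedKFold, cross_val_score
from sklearn.svm import SVC

# X: (n_projects, n_features) binary matrix; y: 0/1 behavior labels.
X = np.random.randint(0, 2, size=(413, 800))  # stand-in for real features
y = np.random.randint(0, 2, size=413)         # stand-in for real labels

inner = GridSearchCV(                  # tunes C on held-out validation data
    SVC(kernel="linear"),
    param_grid={"C": [0.01, 0.1, 1, 10, 100]},
    scoring="f1",                      # F1, because the labels are imbalanced
    cv=4,                              # ~1/4 of each training fold held out
)
outer = StratifiedKFold(n_splits=5, shuffle=True, random_state=0)
scores = cross_val_score(inner, X, y, scoring="f1", cv=outer)
print("F1 per fold:", np.round(scores, 2))
```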
3.3 Results
Figure 3 shows how the feature sets perform across the five target game behaviors. We present the F1 score for each target behavior, with its precision (P) and recall (R) shown in parentheses. In this first experiment, our results show that all classifiers perform similarly. One exception is the **PlatformerJump** behavior, where BoW (F1 = 0.58) does notably worse than \(n\)-Gram (F1 = 0.72) or \(pq\)-Gram (F1 = 0.68). We note that **PlatformerJump** is easily the most complex behavior (shown in Algorithm 1), and can be completed in many ways. This suggests that there may be some advantage to more expressive feature representations for identifying more complex program properties.
[Figure 3: F1 scores (with precision and recall) of the BoW, n-Gram, and pq-Gram feature sets on the five target game behaviors.]
Figure 3 also shows that the F1 scores of all approaches decrease as the prevalence of positive samples decreases (i.e., with more class imbalance). On the y-axis of Figure 3, we have marked each label with the prevalence of its positive samples. As this prevalence decreases from 197/413 (48%) for KeyboardMove to 25/413 (6%) for CollisionStopGame, all feature extraction methods perform increasingly poorly. For more common behaviors such as KeyboardMove, all approaches had sufficient data to accurately identify the behavior (F1 = 0.83-0.87), and even simpler approaches such as BoW were expressive enough to find discriminating features, e.g., the “WhenKeyPressed” block. However, when less positive training data are available, none of the approaches perform well, suggesting that the models are overfitting.
We therefore investigated the training F1 scores for each model, shown in Figure 4. The results confirm that the models are likely overfitting with all three feature extraction approaches, especially when the prevalence of positive samples is small, such as in MoveWithMouse and CollisionStopGame. This is unsurprising, given that all feature extraction approaches produced hundreds (BoW) to over a thousand (pq-Gram) features, and the training data never exceeded 200 positive instances. Because even our simplest feature extraction approach (BoW) was clearly overfitting, it is unclear whether more expressive feature representations (n-Grams, pq-Grams) would hold an advantage under other circumstances (e.g., more training data, with additional feature selection).
4. DISCUSSION AND CONCLUSION
We have compared different approaches for extracting predictive features from program code, and we have presented a first step towards automated classification of open-ended block-based program behaviors, going beyond existing rule-based analysis [17, 1]. Here we discuss the insights derived from our results, as well as future work to improve our current classification approach and address its underlying challenges.
Overall, we found no evidence that different feature extraction approaches led to better or worse code classification results. However, we note that in all cases this was likely due to overfitting, caused by a large number of features and a relatively small amount of training data. This is supported by the fact that all approaches did worse on behaviors with more class imbalance. We are therefore unable to conclude whether our results would generalize to a larger dataset, where the richer features of n-Grams or pq-Grams may be better leveraged. We also note that, in the domain of computer science education, courses often have relatively few students, so training datasets are also small. This means that, for programming code feature extraction, we should explore ways to reduce the number of features through feature selection and dimensionality reduction. It may also be helpful to develop ways to label data more efficiently, since labeled data serve as the building block for many intelligent algorithms.
Our classification performance also varied considerably across tasks: in some cases it was quite good (e.g., KeyboardMove), but in others quite poor (e.g., CollisionStopGame). To understand why, we note that, unlike prior work, our code classification task used a relatively small dataset (n = 413) consisting of large programming projects: the Scratch projects we analyzed had an average of 1,201 AST nodes. By contrast, code classification tasks in prior work have generally been applied to smaller code inputs with much more training data. For example, Code2Vec was applied to a code classification task predicting method names from programs with an average length of 7 lines [4], but with a training sample of 14M examples. Iyer et al. implemented an LSTM-based code summarization model, but only on programming code with an average of 38 tokens (i.e., elements in the program). Within the computer science education domain specifically, many programming code analysis approaches are evaluated on short programming tasks, such as drawing a geometric shape using nested loops [26, 13, 27], or implementing a short algorithm (e.g., bubble sort [2]). Mou et al. evaluated their code classifier on classifying function methods, and concluded that longer programs had relatively lower classification performance than shorter ones [18]. This may explain why our results were not as strong, and more prone to overfitting.
In conclusion, in this work we compared features with different levels of expressivity (Bag-of-Words, n-Grams, and pq-Grams) on a challenging task: classifying meaningful game design behaviors in open-ended Scratch projects. Our results show that our models likely overfit with all three feature extraction approaches, and that we need to explore ways to reduce feature dimensionality and increase dataset size to improve performance.
5. ACKNOWLEDGEMENTS
This material is based upon work supported by the National Science Foundation under Grant No. 1917885.
6. REFERENCES
Monitoring Strategic Goals in Data Warehouses with Awareness Requirements
Vitor E. Silva Souza
University of Trento, Italy
vitorsouza@disi.unitn.it
Jose-Norberto Mazón
University of Alicante, Spain
jnamazon@dlsi.ua.es
Irene Garrigós
University of Alicante, Spain
igarrigos@dlsi.ua.es
Juan Trujillo
University of Alicante, Spain
jtrujillo@dlsi.ua.es
John Mylopoulos
University of Trento, Italy
jm@disi.unitn.it
ABSTRACT
A data warehouse (DW) system stores data from multiple data sources in integrated form and provides capabilities for monitoring business operations to ensure compliance with strategic goals. As such, DWs constitute a fundamental building block for Business Intelligence (BI) operations. In this paper, we introduce the notion of Awareness Requirements (AwReqs) in the requirements analysis and elicitation phase for DWs. In this context, AwReqs provide analysts with the means for eliciting and modeling requirements over performance measures (indicators) to appraise the success or failure of strategic goals. To demonstrate the benefit of our approach, we present a typical business example throughout the paper and show how we can establish, in the early stages of DW design, the adequacy of the design for BI operations.
Categories and Subject Descriptors
D.2.1 [Software Engineering]: Requirements/Specifications;
H.2.8 [Database Management]: Database Applications
Keywords
Data Warehouse; Requirements; Awareness; Goals; Monitoring; Key Performance Indicators
1. INTRODUCTION
A data warehouse (DW) is commonly described as an integrated collection of historical data sources in support of decision making that structures information into facts (composed of measures) and dimensions (which are the contexts for analyzing facts) based on multidimensional (MD) modeling [7]. DWs constitute a fundamental building block for Business Intelligence (BI) operations. Although the development of the MD model has been traditionally guided by the data sources [6], several approaches [3, 11, 14, 18] advocate a requirements-driven DW design process in order to define a MD model that better agrees with user needs and expectations. For example, a UML profile based on \(i^*\) [20] has been defined in [9] in order to capture strategic goals of an organization, the decisions made to achieve them, and the required information to be analyzed for supporting these decisions. From these information requirements, the MD model of the DW is automatically obtained through model transformations, thus allowing us to structure data in a suitable manner to be analyzed for achieving strategic goals.
Unfortunately, current proposals for engineering DW requirements overlook how the DW should be queried to monitor the decision making process and evaluate the fulfillment of the strategic goals. This evaluation is a crucial issue for measuring the success of an organization, and it is commonly carried out by analyzing metrics collected during the execution of business processes (BPs), such as Key Performance Indicators (KPIs), normally defined based on the facts and dimensions of the MD model. Therefore, queries for these measurements should be defined from the very beginning of the development in order to (i) prevent designers from deploying an entire DW that does not meet the decision makers’ data analysis needs, and (ii) implement them in a coherent and integrated manner with the rest of the DW in the subsequent design stages (as basic queries normally evolve into further analysis queries).
To this aim, in this paper we introduce the notion of Awareness Requirements (AwReqs) in the user’s requirements analysis and elicitation phase for DWs. AwReqs have been proposed in [17], motivated by the use of feedback loops as a generic mechanism for self-adaptation. Here, we use them in the context of DW modeling in order to explicitly specify the different queries over KPIs that allow us to monitor if the strategic goals are being fulfilled.
In this paper, we make the following contributions: (i) include AwReqs in the DW’s MD model and, for this purpose, extend the UML profile for \(i^*\) proposed in [9] with new elements for AwReqs; (ii) show how AwReqs can be used to model monitoring requirements on KPIs at different levels of abstraction; (iii) illustrate a systematic process that starts with high-level, close-to-natural-language requirements (business rules and vocabulary) and ends with low-level rules tailored to a specific DW implementation; and (iv) provide some experimental evaluation by applying the proposed ideas in an experiment, in which the DW requirements for a BP are augmented with monitoring requirements modeled as AwReqs and then implemented in the open source BI tool Pentaho\(^1\). The chosen BP is inspired by one of Pentaho’s samples (SteelWheelsSales) and the results of our experiments are presented throughout the paper as a running example.
The remainder of this paper is structured as follows: section 2 summarizes the work used as baseline for the proposals in this paper; section 3 presents our proposal for the specification of AwReqs in the requirements of DWs to monitor strategic goals; section 4 discusses the implementation of monitors based on such a specification; section 5 compares our proposal with the existing state-of-the-art in requirements-oriented DW modeling; finally, section 6 concludes and presents future work.
2. BASELINE
This section summarizes the main building blocks of our approach.
\(^1\)http://www.pentaho.com/
2.1 User’s Requirements for Data Warehouses
A requirements analysis stage for DWs aims at obtaining the informational requirements of decision makers, which are related to interesting measures of business processes and the contexts for analyzing these measures. However, decision makers often do not know how to suitably describe information requirements, since they are instead concerned with the goals which the DW should help satisfy. Therefore, a requirements analysis phase for DWs should start by discovering the goals of decision makers. Afterwards, the information requirements will be more easily discovered from these goals. Finally, the information requirements will be related to the required MD concepts, i.e., the measures of the business process or the contexts for analyzing these measures.
To ease the task of discovering and eliciting goals and requirements for DWs, we classify the different kinds of goals that decision makers expect to fulfill with the envisaged DW according to [9]: strategic goals are the main objectives of the business process, such as “increase sales”, “increase number of customers”, “decrease cost”, etc.; decision goals aim to take the appropriate actions to fulfill a strategic goal, for example “define some kind of promotion” or “open new stores”; finally, information goals are related to the information required by a decision goal if it is to be achieved, examples of which might be “analyze customer purchases” or “examine stocks”. Once these goals have been defined, information requirements can be obtained directly from the information goals. The various MD elements, such as facts or dimensions, will be discovered from these information requirements in order to specify the corresponding conceptual MD model of the DW.
Several concepts from DW design have been represented by matching and extending the most convenient concepts of the i* framework [20] in order to model the aforementioned hierarchy of goals and their corresponding information requirements. This framework provides mechanisms with which to represent the various actors and their dependencies, and to structure the business goals that the organization wishes to achieve. In order to model goals and information requirements, a UML profile for the i* modeling framework has been defined, adapting i* to the DW domain. This profile is shown in Figure 1.
In order to define i* models for DWs, goals, tasks, and resources are represented as intentional elements for each decision maker. Goals of decision makers are defined using the Strategic, Decision, and Information stereotypes, which specialize the Goal stereotype, together with intentional means-end relationships between them. From information goals, information requirements (Requirement) are derived and represented as tasks. Furthermore, the requirements analysis for DWs needs some MD concepts to be added (in the sense of [3]). Thus, the following concepts are added as Resource stereotype extensions: business processes related to the goals of decision makers (BusinessProcess stereotype), relevant measures related to information requirements of decision makers (Measure), and contexts needed for analyzing these measures (Context). Foreseen relations between contexts of analysis are also modeled; for instance, city and country contexts could be related because cities can be aggregated into countries. For modeling these relationships, we use UML’s (shared) aggregation relationship.
As a running example for this paper, Figure 2 shows an i* model — using the DW UML profile — for a DW of a company in the retail business that wishes to increase its sales each year. In the context of the SteelWheelSales business process, this objective is represented by the strategic goal SG: Increase sales by 10% each year. In order to accomplish this goal, decision makers included decision goal DG1: Launch adequate promotions.
To fulfill DG1, the DW will have to provide adequate information. This objective for the DW is modeled first as information goal IG1: Analyze sales, which is then further refined into the information requirement tasks R1.1 (Quantity of products sold by promotion) and R1.2 (Quantity of products sold in general). The latter are then associated with measures and contexts, allowing us to proceed with the implementation of a DW that provides the correct data for a successful decision making process, thus achieving SG.
Unfortunately, a major drawback of this model is that it only accounts for the static part of the DW², i.e., a plan for achieving strategic goals and the required information to be further structured in an MD model. However, requirements should also address how the decision making process could be monitored at runtime through the analysis of specific KPIs, allowing analysts to successfully evaluate the fulfillment of the strategic goals. In order to overcome this limitation, we propose the use of Awareness Requirements (AwReqs) to specify monitoring requirements in our DW models. The next subsection introduces the concept of AwReq.
2.2 Awareness Requirements
Awareness Requirements (AwReqs) have been proposed in [17], motivated by the use of feedback loops as a generic mechanism for self-adaptation. The purpose of such loops is to maintain properties of the system’s output at, or as close as possible to, its reference input. In software systems, the reference input is the requirements, whereas the output is measured at runtime by monitoring the system.
We therefore define AwReqs as requirements that refer to the success or failure of other requirements. In goal-oriented approaches, an AwReq could specify, for instance, that a goal of the system should never fail (i.e., it should always be satisfied). More complex types of AwReqs can refer to specific parameters (e.g., requirement R1 should be satisfied within 10 minutes), aggregate many execution instances and refer to the success rate (e.g., requirement R2 should be satisfied 95% of the time over one week periods) or even the trend of the success rate over a period of time (e.g., the success rate of requirement R3 in a month should not be lower than the previous month for two months in a row).
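As a sketch of the aggregate form (ours; the method names loosely follow the monitoring framework described below), the 95%-per-week AwReq on R2 could be checked as:

```python
class SuccessRateAwReq:
    """AwReq: a requirement should succeed at least `target` of the time
    within one aggregation window (here, a week)."""

    def __init__(self, target=0.95):
        self.target = target
        self.successes = 0
        self.failures = 0

    def success(self):  # monitored requirement instance succeeded
        self.successes += 1

    def fail(self):     # monitored requirement instance failed
        self.failures += 1

    def new_week(self):
        """Close the window: evaluate the AwReq, then reset the counters."""
        total = self.successes + self.failures
        satisfied = total == 0 or self.successes / total >= self.target
        self.successes = self.failures = 0
        return satisfied
```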
Furthermore, since AwReqs are themselves requirements, we allow for the creation of meta-AwReqs, meta-meta-AwReqs, and so forth, while disallowing the specification of circular references. To ease the task of modeling, we have also proposed patterns that simplify the description of AwReqs and a syntax for their graphical representation in the goal model.
---
²The static part of the DW refers to the MD data structure, while the dynamic one refers to the operations that can be performed on it (e.g., obtaining some tables, OLAP cubes, reports, etc.).
After they are specified, AwReqs have to be formalized in order to be monitored at runtime. Any formal language could be used, as long as it: (i) allows references to requirements; (ii) can reason over periods of time; and (iii) is supported by the framework that implements the adaptivity. For example, Figure 3 shows the formalization of two of the above AwReqs, using OCL\(_{TM}\) [16].
As described in [17], a monitoring framework represents requirements as UML classes, which are instantiated at runtime; the target system to which these requirements belong is instrumented to send messages (such as start(), end() and fail()) to instances of these requirements, representing changes in their states. Given this infrastructure, the AwReqs formalized in Figure 3 can be explained: AR0 states that it should never be the case that requirement R0 enters the failed state at any point of its life cycle (between the start() and end() messages). AR2, on the other hand, processes the success() and fail() messages received in a one-week period (delimited by calls to method newWeek() performed by the framework) and ensures that requirement R2 succeeded at least 95% of the time.
Therefore, once AwReqs are formalized, they can be fed into this monitoring framework that will indicate during system operation when they have succeeded or failed, assuming that the system provides the appropriate log messages. To this aim, in [17] we describe a monitoring framework implemented over the Event Engineering and Analysis Toolkit (EEAT\(^3\)), formerly known as ReqMon [15].
3. MONITORING STRATEGIC GOALS
We would like to augment our DW models with requirements that help assuring the fulfillment of strategic goals. This can be accomplished by specifying monitoring requirements on KPIs that can influence decision and information goals. Such measures would then be monitored at runtime, leading to one of two possible outcomes: (a) through some dashboard-like user interface, decision makers could be informed if the decisions they have taken are indeed fulfilling the organization’s strategic goals in time for them to change plans (new decision/information goals) if needed; or (b) the DW system itself could adopt new strategies that would better fulfill the organization’s objectives if adaptivity requirements were also provided in the DW’s design.
Our focus on this paper, however, is on monitoring only. We propose the use of AwReqs (see §2.2) to model such monitoring requirements in our DW models. Going back to the example of Figure 2, we start by stating we do not want the strategic goal SG: Increase sales by 10% in each year to fail — AR1: NeverFail(SG). However, adding such an AwReq to a strategic goal will only make the system monitor if, at the end of the year, this goal has been accomplished or not. To be useful, our monitors should tell decision makers if there are any problems with their decisions ahead of time. In other words, during the period in consideration (i.e., throughout the year in question), indicators (KPIs) that could tell if we are on the right path to satisfying SG should be regularly extracted from the DW in order to be checked.
We should, thus, break down AwReq NeverFail(SG) into KPI-related AwReqs (or K-AwReqs for short), which specify the monitoring requirements over given KPIs. The first step towards this, however, is eliciting from stakeholders and domain experts what these indicators are. In our running example, we have elicited four monitoring requirements, namely:
- **AR\(_{1.1}\)** — after every month, the quantity of products sold in that month should be at least 10% greater than the quantity sold in the same month last year;
- **AR\(_{1.2}\)** — after every quarter, the quantity of products sold so far in the year should be at least 10% greater than the quantity of products sold in the same period last year;
- **AR\(_{1.3}\)** — for every month, the quantity of products sold in promotions should account for at least 50% of the total quantity of products sold in the month;
- **AR\(_{1.4}\)** — for every month, a promotion on a given product should not decrease the quantity of sales of other products of the same type by 10% or more.
*K-AwReqs* AR\(_{1.1}\) and AR\(_{1.2}\) verify if we are indeed increasing the amount of sales, while AR\(_{1.3}\) and AR\(_{1.4}\) check that, if sales are increasing, this is most likely due to a good decision goal: making promotions to sell more. After eliciting AwReqs and K-AwReqs, the next step is to formalize them.
3.1 AwReq Formalization
As said in Section 2.2, any formal language that allows us to reference the requirements and reason over periods of time can be used to formalize AwReqs. K-AwReqs refer to data that can be extracted from the DW, so we need a model of these data before we can write constraints that reference them. For instance, Fig. 4 shows a conceptual MD model for our running example based on the notation of [8]. The model depicts the relationship between facts and dimensions in the DW: a SteelWheelsSales fact stores the quantity of product sold, which can be analyzed using the promotion, time, or product dimensions. The time dimension has a hierarchy consisting of month, quarter and year aggregation levels. This MD model can be obtained from the \(i^*\) model of Fig. 2 by using the approach proposed in [9].
Although we could use OCL\(_{TM} \) for the formalization of \( K-AwReqs \), a more suitable language for formalizing \( AwReqs \) that refer to DW elements is an extension of OCL that includes a set of pre-defined OLAP (On-Line Analytical Processing) operators [13]. OLAP is one of the most popular kinds of applications that can be used to analyze data in the DW, since it allows human analysts to navigate through MD structures in order to access data in a more natural manner. Therefore, OLAP operators can be useful for specifying queries over KPIs according to the \( K-AwReqs \). Although OCL for OLAP\(^4\) does not include temporal logic operators like OCL\(_{TM} \), time periods (years, months, days, etc.) are always considered as relevant dimensions when aggregating and disaggregating data in DWs. Hence, we can refer to information in the DW from different moments in time by slicing and dicing over these dimensions.
Based on the conceptual model of Figure 4, OCL for OLAP can be used to formalize our monitoring requirements. As an example, Figure 5 shows the formalization of \( AR_{1.1} \) and \( AR_{1.3} \). Unlike \( AwReqs \), which refer to requirements and are triggered by log messages coming from the monitored system, \( K-AwReqs \) have predefined moments in which they need to be checked. In our examples, \( AR_{1.1} \), \( AR_{1.3} \) and \( AR_{1.4} \) should be checked monthly, while \( AR_{1.2} \) is checked at the end of every quarter. Therefore, we introduce a \( @trigger \) annotation in comments to specify when the \( K-AwReq \) is supposed to be verified, passing a Cron expression\(^5\) as a parameter. Cron’s predefined scheduling definitions (e.g., \( @monthly \), \( @weekly \), etc.) are also allowed. Both \( K-AwReqs \) shown in Figure 5 are supposed to be checked monthly (on the 1\(^{st} \) day of the month at hour 00:00).
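For intuition, the monthly check behind AR\(_{1.1}\) can also be sketched procedurally (a Python sketch over an assumed relational fact table, not the OCL for OLAP or MDX artifacts of the paper):

```python
import sqlite3

def check_ar_1_1(conn, year, month, growth=0.10):
    """AR1.1: quantity sold in (year, month) must exceed the quantity
    sold in the same month of the previous year by at least `growth`.
    Assumes a fact table sales(year, month, product, quantity)."""
    q = "SELECT COALESCE(SUM(quantity), 0) FROM sales WHERE year=? AND month=?"
    this_year = conn.execute(q, (year, month)).fetchone()[0]
    last_year = conn.execute(q, (year - 1, month)).fetchone()[0]
    return this_year >= (1 + growth) * last_year

# A scheduler (cf. the @trigger Cron annotation) would invoke this on the
# first day of each month, e.g.: check_ar_1_1(sqlite3.connect("dw.db"), 2004, 1)
```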
3.2 AwReq Implementation
Since our MD models contain \( K-AwReqs \) written by using OCL for OLAP, they can be directly implemented in any final BI technology platform by using exactly the same existing methods and tools able to generate code from UML/OCL schemas. These methods do not need to be extended to cope with our \( K-AwReqs \) due to the fact that functions of our OCL for OLAP approach are defined by using standard OCL operations. Furthermore, \( K-AwReqs \) are defined at the model-level and thus they are technologically independent. A set of rules to deal with the transformation between OCL for OLAP and SQL is proposed in [13]. In this paper we exemplify this transformation by defining some ad-hoc rules for the MultiDimensional eXpressions (MDX) language.\(^6\) This language provides a specialized syntax for managing MD data stored in an OLAP cube. Since we aim to query data, we are interested in the \( SELECT \) statement in order to retrieve data from a specified cube. A simplified generic \( SELECT \) statement template is described in Figure 6.
[Figure 5: OCL for OLAP formalizations of AR1.1 and AR1.3, each annotated with a @trigger Cron expression.]
[Figure 6: A simplified generic template of the MDX SELECT statement.]
---
4Despite its name, OCL for OLAP is still applied over UML classes, but adds OLAP operators to “vanilla” OCL.
Each OCL for OLAP operation is mapped onto a clause of this template: `sliceAndDice(e)` corresponds to a `SELECT slicer axis clause`, and the expression `e` is mapped to the `Tuple_Expression` in MDX; `dimensionalProject(e1::e2)` corresponds, at the same time, to the `SELECT subcube clause` (since the expression `e1` in the OCL function is the `Cube_Name` in the MDX statement) and to a `SELECT query axis clause` containing an `ON COLUMNS` (since expression `e2` maps to the measure to be analyzed in the MDX statement). The resulting MDX implementation is shown in Figure 7.
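As a rough illustration of such ad-hoc rules (a sketch of the idea, not the transformation defined in [13]), the mapping can be read as filling the slots of the SELECT template:

```python
def to_mdx(cube, measure, slicer):
    """Assemble an MDX SELECT from the pieces the OCL operations map to:
    dimensionalProject(cube::measure) -> subcube clause + ON COLUMNS axis;
    sliceAndDice(slicer)              -> WHERE (slicer) clause."""
    return (f"SELECT {{ {measure} }} ON COLUMNS "
            f"FROM [{cube}] WHERE ({slicer})")

print(to_mdx("SteelWheelsSales", "[Measures].[Quantity]",
             "[Time].[2004].[Jan]"))
```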
3.3 SBVR formalizations of AwReqs
Our approach focuses on giving decision makers the right mechanisms to specify their KPIs and giving designers the right mechanisms for formalizing them in OCL using the concept of AwReqs. These AwReqs can be translated into queries over the MD model in some technology-dependent language such as MDX. Monitoring requirements are elicited from stakeholders and domain experts and are then translated into OCL for OLAP by technical people in order to be used in the DW. This process has two shortcomings: (i) requirements are formalized solely in a language that decision makers do not understand and, therefore, cannot further contribute to; (ii) if stakeholders request changes to the requirements, technical people must once again interpret the ideas presented by the domain experts and translate them into OCL for OLAP. It would be preferable to formalize these requirements in an intermediary language that both technical and business people understand and that could be systematically or automatically translated into a more formal language. In this way, our approach could be used in the context of a model-driven development process.
The Object Management Group (OMG) has developed the Semantics of Business Vocabulary and Business Rules (SBVR) specification [1]. SBVR has been created to be useful for business purposes, independently of information systems designs. Specifically, it defines a meta-model for documenting the semantics of business vocabulary, facts and rules in a language close enough to a natural language to allow business experts to manage them, and at the same time formal enough (based on predicate logic) to be suitable for being used in a model-driven process. Thus, according to [2], it is specially suited for acting as an intermediate representation between the business users and the IT designers.
It is worth noting that within SBVR there is a user-friendly notation based on Structured English that allows us to express AwReqs in natural language. There are four font styles with formal meaning in this language: the `term` font is used for noun concepts that are part of a vocabulary being used or defined; the `name` font is used for individual concepts; the `verb` font is used for verbs, preposition, or combination thereof; and the `keyword` font is used for linguistic symbols used to construct statements (e.g., quantifications as “each” or “at least one”).
Therefore, in order to bridge the gap between business and IT perspectives, K-AwReqs would first be documented in SBVR before being formalized (as in Figure 5). Due to space constraints, we show here the SBVR version of only one K-AwReq, AR\(_{1.1}\): it is necessary that the sum of the quantity of SteelWheelsSales of each product in month M of year Y increases by 10% over the sum of the quantity of SteelWheelsSales of each product in month M of year Y-1.
Once the AwReqs are defined in SBVR, the next step towards a model-driven process is to automatically derive the corresponding OCL for OLAP formalizations. However, this is a challenging research task [2] that is out of the scope of this paper.
3.4 Context K-AwReqs
Monitoring KPIs with K-AwReqs can alert decision makers to undesirable situations, such as not increasing sales by 10% in the previous month (AR\(_{1.1}\)) or in the cumulative result of the past months (AR\(_{1.2}\)). These warnings, however, could be more or less important given the current context of the organization. Such context can also be monitored through the analysis of indicators and, therefore, can be included in the requirements for the DW using K-AwReqs.
In our running example, suppose that an analysis of the past 3 years shows us that, on average, 80% of the sales of our company happen in the months of November and December due to shopping for the holidays. In this case, if we increased sales in these two months by 12.5%, we would guarantee the 10% sales increase for the whole year even if sales did not increase (nor decrease) in all of the other months. Therefore, if a K-AwReq like AR\(_{1.1}\) fails, it could be interesting to know whether the period that has been analyzed is a critical one or not.
A context K-AwReq to monitor this information can be written in SBVR as follows — cAR\(_{1.5}\): it is necessary that the sum of the quantity of SteelWheelsSales of each product in month M of years Y-3 to Y-1 is not lower than 5% of the sum of the quantity of SteelWheelsSales of each product of years Y-3 to Y-1.
As it can be seen from this SBVR formalization, Context K-AwReqs are just like regular K-AwReqs, only differing in their purpose. Regular K-AwReqs failures should trigger warnings on dashboards or self-adaptive behavior of the system itself. Context K-AwReqs failures, on the other hand,
just indicate that the current context for a given set of regular $K$-$AwReqs$ is not a critical one (in the above example, a $cAR_{1.5}$ failure indicates the current month is traditionally not a high sales month). Decision makers are, thus, advised to consider a $K$-$AwReq$ failure as less important if one or more of its associated Context $K$-$AwReqs$ have failed.
3.5 $K$-$AwReqs$ in the $i^*$ profile for DW
In order to graphically represent $AwReqs$ and $K$-$AwReqs$ in requirement models for DW, such as the one in Figure 2, we need to extend the $i^*$ profile for DW that we have previously explained. Figure 1 shows the extended profile (with new and changed meta-classes presented in gray), while an example of the graphical representation of $AwReqs$ in the DW requirements model is depicted in Figure 8.
By extending the profile, we could benefit from the transformations proposed in [9] and the CASE tool developed in [5], facilitating the implementation of the DW. A screen shot of the CASE tool being used to create the model of Figure 8 is shown in Figure 9.
4. EXPERIMENTS USING A BI TOOL
Once $K$-$AwReqs$ are defined using the MDX language (like $AR_{1.3}$ in Figure 7), it is possible to insert them into a specific business intelligence tool. It is worth noting that together with the MDX queries, the rest of the MD structures must be implemented. However, this is out of the scope of this paper and the reader should refer to [10] for a detailed explanation of this implementation.
To test the queries generated with our approach in the context of the running example presented in this paper, the Mondrian open-source OLAP server – which is part of the Pentaho BI Suite – was chosen, since it uses MDX as a query language. In this section, it is shown how the MDX derived from one of the $K$-$AwReqs$ – $cAR_{1.5}$ – was validated.
The aim of $cAR_{1.5}$ is to query sales of products during different periods of time in order to know periods that are critical. Therefore, a couple of MDX queries are executed in Mondrian: the first one, shown in Figure 10a, retrieves sales of products in a certain month for three consecutive years, whilst the second one, in Figure 10b, considers these three years as a whole.
Both figures show the Pentaho user console with the MDX query being executed. Shaded in the bottom we can see the result of the queries, namely the quantity of sales during the specified periods. Other MDX queries generated from the $K$-$AwReqs$ presented in this paper were tested in a similar fashion. The idea is that these queries could be later integrated into a dashboard to inform decision makers how their decisions are affecting the business process.
5. RELATED WORK
Requirements analysis is a crucial task in the early stages of DW development. However, only a few approaches in this field have considered this task. In [19], a method is proposed to both determine the information requirements of DW users and match these requirements with the available data sources. The work in [12] presents the DW requirements definition (DWARF) approach, which adapts the traditional requirements engineering process for requirements definition and management of DWs. The approach described in [14] focuses on a requirements elicitation process for DWs that identifies the goals of the decision makers and the information required to support the decision making process. Finally, in [4], the authors present a goal-oriented framework to model requirements for DWs, obtaining a conceptual MD model from them, in which data sources are used for shaping hierarchies, while user requirements are used to choose facts, dimensions and measures.
Unfortunately, these approaches present one common drawback: they overlook business users’ requirements about how the DW should be queried to monitor the decision making process and evaluate the fulfillment of strategic goals. Consequently, metrics used for monitoring purposes, such as KPIs, are overlooked in these requirements engineering approaches for DWs. To overcome this situation, in this paper we have introduced the notion of $AwReqs$ in the user’s requirements analysis and elicitation phase for DWs, with the aim of explicitly specifying the different queries over KPIs that allow us to monitor whether the strategic goals are being fulfilled. The approach is not a BI tool per se, but is useful for designing BI solutions on top of existing BI tools.
6. CONCLUSIONS AND FUTURE WORK
In this paper, we have introduced the notion of Awareness Requirements in the requirements analysis and elicitation phase for Data Warehouses in order to model requirements over KPIs, with the purpose of generating monitors for the success of the organization’s strategic goals and warning decision makers if their decisions may not have been adequate.
We proposed a model-driven approach, in which: (i) $AwReqs$ are included in the DW’s MD model and broken down into $K$-$AwReqs$; (ii) $K$-$AwReqs$ are elicited from stakeholders and written in OCL for OLAP, which formalizes them; (iii) MDX queries are derived from the $K$-$AwReqs$’ formalizations in order to be used in a specific BI tool. The approach has been validated with an experiment, presented throughout the paper as a running example, that used the OLAP server Mondrian and its sample business process SteelWheelsSales.
As immediate future work, we plan to obtain better validation results by performing a case study (preferably using the aforementioned tools) in order to evaluate the benefits of strategic goal monitoring and the effectiveness of our approach in a real-world scenario. Another direction we find interesting is to further apply research on self-adaptive systems in the area of DW, developing a framework that implements a feedback controller that not only detects failures, but diagnoses and compensates them and reconciles the business process behavior to satisfy the strategic goals.
7. ACKNOWLEDGMENTS
This work has been partially supported by MESOLAP (TIN2010-14860), from the Spanish Ministry of Science and Innovation, by the DADS (PBC-05-012-2) project, from the Castilla-La Mancha Ministry of Education and Science (Spain) and by the ERC advanced grant 267856 “Lucretius: Foundations for Software Evolution” (unfolding during the period of April 2011 – March 2016, www.lucretius.eu).
8. REFERENCES
One possible approach is to see it as a structuring technique for software:
- modules, units, packages, subsystems
Software objects combine data structures and functions:
- type → "class", i.e., a template (ADT)
- instance → similar to a variable, exists in memory
- implement “information hiding” principle
An implementation technique for programs:
- encapsulation
- generalization and specialization (inheritance)
- naming and scoping, polymorphism
Object-Orientation
> On a more abstract level a system is a collection of distinct interacting objects ("things", "entities")
> An object has
* structure - data, state
* behavior - methods, functions, operations
> An object is characterized
* by a number of operations (methods)
* and a state which remembers the effect of these operations
> Objects can be seen on two levels:
* Type – class – interface
* Variable – instance – value / reference
Example: "Truck"
<table>
<thead>
<tr>
<th>Behavior / Methods, operations</th>
<th>Attributes / state variables</th>
</tr>
</thead>
<tbody>
<tr>
<td>load()</td>
<td>int: load, weight;</td>
</tr>
<tr>
<td>unload()</td>
<td>Type: something;</td>
</tr>
<tr>
<td>move()</td>
<td>Coordinates: position;</td>
</tr>
<tr>
<td>...</td>
<td>...</td>
</tr>
</tbody>
</table>
Example - Object
<table>
<thead>
<tr>
<th>Domain 1: Car</th>
<th>Domain 2: Car</th>
</tr>
</thead>
<tbody>
<tr>
<td>Attributes: Model, Year, Value, Tax, Debt, ...</td>
<td>Attributes: Model, Year, Color, Horsepower, Maxspeed, ...</td>
</tr>
<tr>
<td>Operations: calcTax, remind, ...</td>
<td>Operations: race, showoff, tune, ...</td>
</tr>
</tbody>
</table>
The set of attributes and operations is determined by the application, i.e., it is defined by the required behavior during requirements definition and refined during requirements analysis.
Only those operations which determine the behavior are visible from the outside; all internal structure and functions are hidden.
Object-Orientation
A system is a collection of distinct objects having
- structure - data, state
- behavior - methods, functions
The seven magic characteristics of true O-O:
- Identity – name, handle, reference, capability
- Abstraction – concentration on the essential
- Classification – belongs to a certain category; instance
- Encapsulation – information hiding, limited access
- Inheritance – specialization / generalization
- Polymorphism – generic behavior, overloading
- Persistence – state and behavior transcend time, space
Objects – Class and Instance
[Diagram: an Abstract Class (Type) generalizes the class Car (Template); Racecar is a subclass of Car; the instances my_car and your_car: Car (each with attributes and methods) are created from these classes via "instantiates".]
**Object-Orientation**
- **A system is a set of interacting objects.**
- material objects: car, plane, brake shoe, runway, ...
- immaterial objects: government, management, flight_no, ...
- **Modeling:** mapping real objects (things in the real world) onto conceptual models in the solution space using abstraction = “extracting the essential aspects”
- **Implementing:** mapping the conceptual models onto software archetypes (modules, data structures, procedures)
---
**Relationships**
[Diagram: class ABS (with sensor and control parts, and the methods increase and apply) related to the classes Engine, Actuator, and Brakepedal (each with attributes and methods) via composition, aggregation, and association.]
Object-Orientation - 2
"object" means an entity which has two significant properties (object: class-type / instance-variable):
- attributes – a set of values describing the state
- operations – a set of functions which describe the behavior of the object – the methods
State changes are allowed only through methods! (information hiding): set-/get-attribute();
O-O programming languages must enforce these concepts by an appropriate encapsulation mechanism
- "get", "set" functions: instance.method(params);
- "private", "public", ("friend") keywords
Object-Orientation - 3
An object is a "black box" and acts as a server for other objects called clients
The client objects have to "know" the server and its interface – this combination of identity (internal name, "handle") and functionality is frequently called "capability"
- that is, the client must have the capability to call a server object – send it a "message" to perform a method
Classes and Instances
“Object” is used in two senses:
• as a “type”, i.e. a template for instances – Class
• as an instance of a Class – an object entity, “variable”
Example:
– Class Car; Instances: my_car, your_car, her_car, . . .
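A hedged C++ sketch of the class/instance distinction (the choice of attributes is ours):

```cpp
// One class (a template for instances), several independent instances.
struct Car {
    int year;
    double value;
};

int main() {
    Car my_car{2015, 9000.0};     // my_car is an instance of class Car
    Car your_car{2021, 24000.0};  // same class, independent state
    my_car.value -= 500.0;        // changes my_car only
    return 0;
}
```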
Polymorphism
Polymorphism allows the responsibility to be placed on the called object (callee) instead of the caller
• object_x.print() instead of printX(object_x)
Sending a “stimulus” to an object to perform a certain operation
Using a unique symbol for the intended operation
• examples:
– x + y ; k + j ; x/y ; k/j
– bag.sort(); depending on what types of objects are contained in bag
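A minimal C++ sketch of this idea (ours, with illustrative shape classes):

```cpp
#include <iostream>
#include <memory>
#include <vector>

// The callee decides how to perform the operation:
// object_x.print() rather than printX(object_x).
struct Shape {
    virtual ~Shape() = default;
    virtual void print() const { std::cout << "shape\n"; }
};
struct Circle : Shape {
    void print() const override { std::cout << "circle\n"; }
};
struct Square : Shape {
    void print() const override { std::cout << "square\n"; }
};

int main() {
    std::vector<std::unique_ptr<Shape>> bag;
    bag.push_back(std::make_unique<Circle>());
    bag.push_back(std::make_unique<Square>());
    // One unique symbol (print); behavior depends on the contained objects.
    for (const auto& s : bag) s->print();
}
```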
Inheritance
- Basic idea: collect all common characteristics of a collection of very similar objects in one "parent" class
- If this parent class does not have any immediate instances it is considered an "abstract class"
- All specific objects are created from this class by generating child classes which inherit the common properties and add some specializations
- Subclassing allows the
- addition of new attributes
- redefinition ("overloading") of operations which they inherit from their ancestor classes
- overriding (virtual function calls) redefines the characteristics of the ancestor class – hence, operations with the same name can have different semantics – attention!!!
- Must be supported by an inheritance mechanism of the programming language (genericity, templates)
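A hedged C++ sketch of these points (abstract parent, added attribute, overriding; names are illustrative):

```cpp
#include <iostream>

// An abstract parent class collects the common characteristics; it has
// no immediate instances. Subclasses add attributes and may override
// (redefine) inherited operations -- same name, different semantics.
class Vehicle {
public:
    virtual ~Vehicle() = default;
    virtual const char* describe() const = 0;  // pure virtual: abstract
};

class Racecar : public Vehicle {
    int horsepower = 600;  // added attribute (specialization)
public:
    const char* describe() const override {    // overriding
        return horsepower > 500 ? "racecar" : "car";
    }
};

int main() {
    Racecar r;
    const Vehicle& v = r;              // client sees the parent interface
    std::cout << v.describe() << "\n"; // virtual call: prints "racecar"
}
```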
Main Purposes for Inheritance
- Re-use of code;
- use ready-made classes from library
- combine similar parts of classes
- Subtyping – create behaviorally compatible descendants of a class
- all should have the same interface
- should only perform an extension, not a redefinition (overriding)
- Specialization
- descendant is modified and no longer behaviorally identical to parent class
- Conceptual view
- corresponds to the well-known “is-a” relation
Diagrams
Class Diagrams describe the objects of a system and the various kinds of static relationships
- associations - aggregation, interaction, ...
- subtypes – specialization, generalization
Interaction Diagrams describe how groups of objects collaborate in some behavior
- two forms are used:
- sequence diagrams
- collaboration diagrams
Diagrams are models!
Views and Diagrams
Models and Views
- A model is an abstraction of a system or a context
- A model can be documented in UML or some other form of notation/language
- An architectural view is an abstraction of a model,
• taken from a specific perspective / view
• enabling the extraction of architecturally essential elements
- Starting point is always the "user view" as defined by the use cases
- The different model views are:
• the structural view - static relationships
• the behavioral view - dynamic relationships
• the implementation view – modules and components
• the environmental view - deployment
Views and Diagrams
- Structural view:
• concentrates on the static relationships
• described by class and object diagrams
- Behavioral view:
• concentrates on the dynamic relationships
• described by one or several of the following
– Sequence diagrams
– Collaboration diagrams
– State chart diagrams
– Activity diagrams
Caveat: when writing the use cases do NOT concentrate on the functions but on the interactions!
Views and Diagrams - 2
- Starting point are the Use Cases and diagrams
- System development is a gradual transformation of a series of models described and documented by a set of diagrams
- Structural view:
- class diagrams (generic templates, "types")
- object diagrams – instances, "variables"
- Behavioral view:
- sequence diagrams
- collaboration diagrams
- state chart diagrams
- activity diagrams
- Implementation view:
- component diagrams – modules
- Environment view:
- deployment diagrams
Models
\- **The requirements model**
• captures the functional requirements
\- **The analysis model**
• describes a robust and changeable object structure for the system
\- **The design model**
• Adopts and refines the object structure to the current implementation environment
\- **The implementation model**
• is an implementation in a specific progr. Language
\- **The test model**
• aims to validate and verify the system
Requirements Model
\- **Actors and Use Cases**
• a use case model
• interface description
• a problem domain model
The use case model uses actors and use cases to define what exists outside the system and what should be performed by the system.
Object-Oriented Analysis
Contains the following activities, not necessarily in this order
- finding the objects - AM
- organizing the objects - AM
- describing how the objects interact - AM
- defining the objects internally – A/DM
Construction
*Construction = design + implementation*
*Main steps:*
- identify the implementation environment
- transform and refine the objects of the analysis model into the design model
- describe how the objects interact for each use case
*Traceability - correspondence between the analysis and design models*
*Interactions - stimuli, events, signals and messages*
---
**Class-Diagrams**
Class Diagrams
Class diagrams and models can be looked at from three different perspectives:
- conceptual – represents the concepts in the domain without regard to the software; the concepts relate to the classes that implement them, but there is often no direct mapping
- specification – looks at the software but concentrates on the interfaces, not the implementation – the "Type"; O-O languages usually combine interface and implementation (Modula-2/Ada: DEFINITION and IMPLEMENTATION MODULE/package; Java: class/interface/body)
- implementation – the actual implementation level, frequently very close to or directly in the programming language used
Associations
Associations represent relationships between instances of classes
- conceptual – relationships between classes
- each association has 2 ends – each can be labeled with a role name (adornment)
- an association end has a multiplicity indicating how many instances may participate in this relationship (adornment)
- specification – relationships represent responsibilities
- realized through one or more methods (get, update, . . .)
- one cannot infer structure from the interface description
- implementation – here the details become visible
- e.g. a doubly-linked pointer structure
- a “ref” implementation (Java)
Vocabulary
- Classes are the most important building blocks
- A class represents a set of objects sharing the same properties (structure, behavior)
- A class implements one or more interfaces
- Classes may represent:
- software things
- hardware things
- things that are purely conceptual
- Modeling involves identifying the things important to the particular view
- These things form the vocabulary of the system
Simple and Path Names
- Temperature Sensor
- Customer
- Car
- BusinessRules::FraudAgent
- java.awt::Rectangle
Things have names - identity
An attribute is a named property of a class that describes a range of values that instances may hold. Usually a short noun or noun phrase that represents some property.
**Things have state - attributes**
### Attributes - 1
<table>
<thead>
<tr>
<th>Customer</th>
</tr>
</thead>
<tbody>
<tr>
<td>name</td>
</tr>
<tr>
<td>address</td>
</tr>
<tr>
<td>phone</td>
</tr>
<tr>
<td>birthDate</td>
</tr>
<tr>
<td>. . .</td>
</tr>
</tbody>
</table>
### Attributes - 2
<table>
<thead>
<tr>
<th>Wall</th>
</tr>
</thead>
<tbody>
<tr>
<td>height: Float</td>
</tr>
<tr>
<td>width : Float</td>
</tr>
<tr>
<td>thickness : Float = 10.0</td>
</tr>
<tr>
<td>isLoadBearing: Boolean</td>
</tr>
<tr>
<td>. . .</td>
</tr>
</tbody>
</table>
An attribute can be further specified by giving its class (type) and possibly an initial value.
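In C++, this maps directly onto member declarations with default values (a sketch of the Wall box above):

```cpp
// The Wall class box rendered as C++: attribute : type = initial value.
class Wall {
public:
    float height;
    float width;
    float thickness = 10.0f;   // initial value, as in the diagram
    bool  isLoadBearing;
};
```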
Operations - 1
<table>
<thead>
<tr>
<th>Rectangle</th>
</tr>
</thead>
<tbody>
<tr>
<td></td>
</tr>
<tr>
<td></td>
</tr>
<tr>
<td></td>
</tr>
<tr>
<td>add()</td>
</tr>
<tr>
<td>grow()</td>
</tr>
<tr>
<td>move()</td>
</tr>
<tr>
<td>isEmpty()</td>
</tr>
<tr>
<td>. . .</td>
</tr>
</tbody>
</table>
An operation is the realization of a service that can be requested from any instance of the class; it is something one can do to an object. Usually a short verb or a verb phrase.
Things have behavior - operations
Operations - 2
<table>
<thead>
<tr>
<th>TemperatureSensor</th>
</tr>
</thead>
<tbody>
<tr>
<td></td>
</tr>
<tr>
<td></td>
</tr>
<tr>
<td>reset()</td>
</tr>
<tr>
<td>setAlarm(t: Temperature )</td>
</tr>
<tr>
<td>value() : Temperature</td>
</tr>
<tr>
<td>. . .</td>
</tr>
</tbody>
</table>
An operation can be further specified by stating its signature
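As a sketch, the UML signatures above translate to C++ declarations as follows (the slides leave the Temperature type open, so a simple alias is assumed here):

```cpp
// The UML signatures above as C++; 'Temperature' is an assumed alias.
using Temperature = double;

class TemperatureSensor {
    Temperature alarm = 0.0;
    Temperature current = 0.0;
public:
    void reset() { current = 0.0; }
    void setAlarm(Temperature t) { alarm = t; }    // setAlarm(t: Temperature)
    Temperature value() const { return current; }  // value() : Temperature
};
```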
Attributes and Operations
- When drawing a class it is not necessary to show every attribute and every operation.
- In most cases there are too many and only the ones relevant to a specific view are included.
- For these reasons it is common to elide a class and show only some or none of the attributes and operations.
- An empty compartment does not mean that there are no attributes or operations.
- To better organize long lists of attributes and operations one can prefix each group with a descriptive category, using "stereotypes" in «guillemets», e.g. «registers»
### Responsibilities
<table>
<thead>
<tr>
<th>FraudAgent</th>
</tr>
</thead>
<tbody>
<tr>
<td>Responsibilities</td>
</tr>
<tr>
<td>-- determine the risk of a customer order</td>
</tr>
<tr>
<td>-- handle customer-specific criteria for fraud</td>
</tr>
</tbody>
</table>
A responsibility is a contract or an obligation of a class; responsibilities are derived from the UseCases. Usually given in free-form text, written as a phrase, a sentence or a short paragraph.
**Things have responsibilities**
- services
### Modeling Techniques
*Classes rarely stand alone; when building models one usually focuses on groups of classes that interact. These societies of classes form collaborations and are usually visualized in class diagrams.*
**Modeling the Vocabulary of a System**
- Identify those things that users or implementers use to describe the problem or solutions. UseCase-based analysis helps find these abstractions
- For each abstraction identify the responsibilities. Make sure that each class is crisply defined and that there is a good balance of responsibilities among all classes
- Provide the attributes and operations that are needed to carry out the responsibilities for each class
Modeling the Vocabulary of a System
- As the models get larger, classes tend to cluster in groups that are conceptually and semantically related.
- In the UML packages can be used to model these clusters of classes.
- The models will rarely be completely static; most include interactions between the elements of the vocabulary.
- The UML provides a number of ways to model the dynamic behaviour.
Modeling the Distribution of Responsibilities in a System
- Identify a set of classes that work together closely to carry out some behavior
- Identify a set of responsibilities for each of the classes
- Split classes that have too many responsibilities into smaller abstractions
- Collapse tiny classes that have trivial responsibilities into larger ones
- Reallocate responsibilities so that each abstraction reasonably stands on its own
- Consider the ways in which those classes collaborate with one another and redistribute their responsibilities accordingly so that no class within a collaboration does too much or too little
Modeling the Vocabulary
<table>
<thead>
<tr>
<th>Model</th>
<th>View</th>
<th>Controller</th>
</tr>
</thead>
<tbody>
<tr>
<td>Responsibilities<br>-- manage the state of the model</td>
<td>Responsibilities<br>-- render the model on the screen<br>-- manage movement and resizing of the view<br>-- intercept user events</td>
<td>Responsibilities<br>-- synchronize changes in the model and its views</td>
</tr>
</tbody>
</table>
Modeling Primitive Types
- Primitive data types such as integers, characters, floating point numbers, strings and enumerations available in most programming languages can be modeled in UML.
- To model primitive types:
- model the thing to be abstracted as a type or an enumeration which is rendered using class notation with the appropriate stereotype.
- If the range of values associated with this type needs to be specified, make use of constraints (a short code sketch follows the boxes below).
<table>
<thead>
<tr>
<th>«datatype»<br>int</th>
<th>«enumeration»<br>Status</th>
<th>«enumeration»<br>Boolean</th>
</tr>
</thead>
<tbody>
<tr>
<td></td>
<td>idle<br>working<br>error</td>
<td>true<br>false</td>
</tr>
</tbody>
</table>
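As a sketch, the «enumeration» box above maps directly onto a C++ scoped enum (our rendering, not from the slides):

```cpp
// The «enumeration» Status as a C++ scoped enum; the type itself
// constrains the range of values to the three literals.
enum class Status { idle, working, error };

Status machineState = Status::idle;
```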
Hints and Tips
- Every class should map to some tangible or conceptual abstraction in the user or implementer domain. A well-structured class:
- provides a crisp abstraction of something drawn from the vocabulary of the problem domain or the solution domain
- embodies a small well defined set of responsibilities and carries them out very well
- provides a clear separation of the abstraction's specification and its implementation
- is understandable and simple yet extensible and adaptable.
Hints and Tips - cont
When drawing a class in UML:
• show only those properties of the class that are important to understand the abstraction in its context;
• organize long lists of attributes and operations by grouping them according to their category;
• show related classes in the same class diagrams.
Keywords: Feature-oriented programming, generics, program customization, software reuse, software product lines.
Abstract: With feature-oriented programming (FOP) and generics, programmers have proper means for structuring software so that its elements can be reused and extended. This paper addresses the question of whether both approaches are equivalent. While FOP targets large-scale building blocks and compositional programming, generics provide fine-grained customization at the type level. We contribute an analysis that reveals the individual capabilities of both approaches with respect to program customization. From this analysis we extract guidelines on which approach suffices in which situations. Furthermore, we present a fully implemented language proposal that integrates FOP and generics in order to combine their strengths. Our approach facilitates two-staged program customization: (1) selecting sets of features; (2) parameterizing these features subsequently. This covers a broader spectrum of code reuse, reflected by proper language-level mechanisms. We underpin our proposal by means of a case study.
1 INTRODUCTION
Feature-oriented programming (FOP) aims at feature modularity in software product lines (Batory et al., 2004). Features are increments in program functionality and reflect stakeholder requirements. The key idea of FOP is to map features one-to-one to feature modules. A feature module encapsulates all software artifacts that contribute to a feature in one cohesive unit. FOP targets mainly large-scale components and compositional programming. Hence, program customization takes place at the level of feature modules, i.e., by selecting and composing a set of desired modules. It is not obvious how this scales down to fine-grained customization needs.
Fine-grained program customization is exactly the aim of an alternative approach, generic and parameterized programming (GPP) (Goguen, 1989; Austern, 1998). The key idea of GPP is to implement program structures as generically as possible and to use them in different contexts by parameterization. This approach is known to be very fine-grained since it enables adjusting the types of program elements; but it is not well explored whether GPP is capable of program customization and reuse at a larger scale.
In this paper we examine the differences between FOP and GPP and how they influence their program customization capabilities. From this we derive a set of guidelines on the situations in which to use each paradigm. Furthermore, we propose a language-driven approach to integrating FOP and GPP that covers a broad spectrum of scales of customization, which we call generic feature modules. Generic feature modules support two-staged program customization: (1) the desired features of a program are selected; (2) the corresponding feature modules are parameterized for fine-grained customization. Besides customizability, this promotes reuse of feature modules and offers potential for reasoning about explicitly represented configuration knowledge in the form of parameters.
To underpin our proposal, we present a fully functional compiler on top of FEATUREC++. We use our compiler to apply generic feature modules to a case study.
In this paper we make the following contributions:
- We compare FOP and GPP with respect to program customization; we infer guidelines on which paradigm suffices in which situations.
- We propose a language-level approach that integrates FOP and GPP.
- We contribute a fully functional compiler that implements our language proposal.
- We underpin our proposal by means of a case study.
¹ http://wwwiti.cs.uni-magdeburg.de/iti_db/fcc
2 BACKGROUND
2.1 Feature-Oriented Programming
FOP studies the modularity of features in product lines (Batory et al., 2004). The idea of FOP is to build software (individual programs) by composing features that are first-class entities in design and implementation. Features refine other features incrementally. Hence, the term refinement refers to the changes a feature applies to others. This step-wise refinement leads to conceptually layered software designs.
Figure 1: Mixin layers.
Feature modules. Feature modules implement features. Mixin layers is one implementation technique that aims at source code artifacts (Smaragdakis and Batory, 2002). Typically, features are not implemented by single classes; often, a whole set of collaborating classes contributes to a feature. Classes play different roles in different collaborations. A mixin layer is a static component encapsulating fragments of several different classes (roles) so that all fragments are composed consistently. Figure 1 depicts a stack of three mixin layers ($L_1 - L_3$) in top down order. Mixin layers crosscut multiple classes ($C_A - C_C$). White boxes represent mixins and gray boxes feature modules.
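As a hedged C++ sketch of the mixin-layer technique (simplified: real mixin layers additionally nest one inner mixin per class role $C_A - C_C$; all names here are illustrative):

```cpp
// Each layer is a class template parameterized by its superclass, so a
// stack L1-L3 is built by template composition (names are illustrative).
class Bottom {};

template <class Super> class L1 : public Super { /* roles for feature 1 */ };
template <class Super> class L2 : public Super { /* refinements of feature 2 */ };
template <class Super> class L3 : public Super { /* refinements of feature 3 */ };

// Composing the stack in top-down order: L3 refines L2 refines L1.
using Stack = L3<L2<L1<Bottom>>>;
```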
Figure 2: Refining a list with code for determining the size.
2.2 Generic and Parameterized Programming
GPP is about generalizing software components so that they can be easily reused in a wide variety of situations (Goguen, 1989; Czarnecki and Eisenecker, 2000). It serves the need for customizing components to specific requirements. In this paper we use C++ templates as a representative approach of GPP (Austern, 1998).
The key idea of GPP is that software components often do not rely on specific data types, but can operate on arbitrary types. In order to reuse these components for all possible kinds of types, they are implemented against type parameters that act as placeholders for different concrete data types. To use a generic component in a specific context, it has to be instantiated by passing a concrete type to the component.
Figure 3 depicts a standard GPP example: a generic list implementation (Lines 1-5). It is generic because it relies on a template parameter _ItemT (Line 1). Therefore, it can be used polymorphically in different contexts. When instantiating a list object, a programmer has to pass a concrete data type to the list, e.g., Item or Thread (Line 6).
3 ANALYSIS OF FOP AND GPP
In this section, we analyze the different capabilities of FOP and GPP to implement customizable software by means of an example.
3.1 A Product Line of Linked Lists
As an example, we choose a standard problem: a product line of linked lists (list product line, LPL), adopted
```
class List {
  Item *head;
  void put(Item *i) { i->next = head; head = i; }
};

refines class List {
  int size;
  int getSize() { return size; }
  void put(Item *i) { super::put(i); size++; }
};
```
```
template <typename _ItemT> class List {
  typedef _ItemT ItemT;
  ItemT *head;
  void put(ItemT *i) { i->next = head; head = i; }
};
int main() { List<Item> il; List<Thread> tl; }
```
Figure 3: A generic list and two concrete variants.
---
² Feature modules may contain manifold types of software artifacts, not only source code (Batory et al., 2004).
from (Czarnecki and Eisenecker, 2000). Figure 4 depicts the feature model of LPL.
The feature ItemT abstracts over the possible data types of items that are stored in the list; it is mandatory. Lists can have different ownership relationships to their stored items: (1) references to items are maintained externally; (2) lists own references to items and are therefore responsible for releasing the allocated memory when items are removed; (3) lists copy the items that are stored and are responsible for cleaning up when items are removed. These features are alternative – only one variant can be selected for a concrete list. Furthermore, lists have different morphologies: a list stores items of the same type only (monomorphic) or of different types (polymorphic); again, only one variant can be selected. Finally, lists may have an optional length counter feature that counts the number of stored elements.
3.2 Implementation of LPL
Ideally, when implementing this product line the programmer implements each feature via one feature module. This is in line with the methodology and principles of FOP. Applying this methodology to our example, we would have to implement at least 11 feature modules, assuming one basic list, three different length counter types, and one item type.
FOP implementation. Figure 5 shows a basic list and a tracing feature implemented in FEATUREC++. Both features are implemented as mixins, encapsulated in feature modules (not shown). Up to this point FOP works fine, but implementing other features reveals its weaknesses. For example, it is not obvious how to implement different variants of the item type feature, i.e., different types of items. One can implement a distinct basic list for each type, e.g., a list of item objects and a list of thread objects. Unfortunately, this results in replicated code (one feature module per type) for the base feature and all subsequently added features!
Another approach would be to introduce an abstract base class for all items. The list implementation then becomes invariant with respect to the item type; new item types inherit from the abstract base class. This solution imposes performance penalties and higher resource consumption due to dynamic binding, and it may demand dynamic type checking.
Similar problems occur when implementing other features that come in slightly different variants, e.g., the length counter type, an allocator or iterator type.
GPP implementation. Readers familiar with GPP may notice that the problematic features could easily be implemented using template parameters or alternative mechanisms for generics. Figure 6 depicts a generic list that expects an item type and a type for a length counter, as well as two concrete variants: an item-object list with an integer counter and a thread list with a short integer counter. With GPP, a programmer can define a concrete variant at compile time in a type-safe manner. However, one always has to anticipate a potential variation point when implementing a feature. Moreover, it is not obvious how GPP can implement larger program features, e.g., the size feature.
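Figure 6 itself is not reproduced here; a minimal sketch of what it describes (names and layout are our assumption) could look like:

```cpp
struct Item   { Item*   next; };
struct Thread { Thread* next; };

// A generic list expecting an item type and a length counter type.
template <typename ItemT, typename SizeT>
class List {
    ItemT* head = nullptr;
    SizeT  size = 0;
public:
    void put(ItemT* i) { i->next = head; head = i; ++size; }
};

// Two concrete variants, fixed at compile time and statically type-checked:
List<Item, int>     itemList;    // item objects with an integer counter
List<Thread, short> threadList;  // threads with a short integer counter
```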
3.3 A Comparison of FOP and GPP
It seems that both FOP and GPP are necessary to implement highly customizable software. While feature modules encapsulate large-scale features, GPP allows for fine-grained tuning. The questions that arise are: Which approach is useful under which circumstances? Are both approaches equally expressive? How can both be integrated in a consistent way? In this section we shed more light on these issues and provide guidelines for the efficient use of FOP and GPP.
FOP. Feature modules usually contain a set of classes. These classes are introductions or refinements of existing classes. Thus, feature modules mainly implement increments to a program's functionality. A refinement may extend existing methods by executing code around a method execution. Although refinements rely on a reasonable structure of the base program, they do not expect explicitly represented variation points (e.g., hooks) in order to be applicable. A feature module binds to the natural structure of the base program and may apply unanticipated changes. A consequence of its encapsulation property is that a feature module can implement a variant that concerns multiple variation points, e.g., a synchronization feature extends a list and its iterator simultaneously. However, the fact that feature modules rely on given structural abstractions defines the minimal granularity of customization of software built of feature modules: it is not possible to refine a base feature at statement level or to change existing types, etc. Achieving customizability at type level nevertheless (1) imposes performance penalties due to dynamic binding, i.e., implementing a feature against an abstract class, and (2) results in redundant code, i.e., a distinct feature module for each type. That is, FOP imposes a complexity overhead at small scales.
GPP. GPP supports program customizability at a smaller scale than FOP. For example, templates enable the programmer to customize program structures down to the type level by parameterizing the types of used variables and arguments. This methodology implies that programmers have to anticipate changes to and variants of a program. Variation points are explicit and fixed; they are an inherent part of the referring modules: n variation points demand n parameters. This in-language approach to customization facilitates static type safety.
Although templates can be used to implement entire feature modules (Smaragdakis and Batory, 2002), they are mainly suited for fine-grained customization. This is because the overhead of complexity to maintain the template expressions grows considerably for large-scale features.
Table 1 summarizes our observations of the properties of FOP and GPP with respect to customizability and reusability. It is intended to serve as a guideline for programmers to decide when to use which technique.
4 GENERIC FEATURE MODULES
As our analysis revealed, both GPP and FOP have strengths at different scales of customization. Consequently, we propose the notion of generic feature modules, which integrates mechanisms of GPP into FOP.
<table>
<thead>
<tr>
<th></th>
<th>FOP</th>
<th>GPP</th>
</tr>
</thead>
<tbody>
<tr>
<td>scale</td>
<td>large</td>
<td>small</td>
</tr>
<tr>
<td>granularity</td>
<td>methods, classes</td>
<td>statements, types</td>
</tr>
<tr>
<td>var. points</td>
<td>implicit</td>
<td>explicit</td>
</tr>
<tr>
<td>extensions</td>
<td>unanticipated</td>
<td>anticipated</td>
</tr>
<tr>
<td>locality</td>
<td>multiple points</td>
<td>single point</td>
</tr>
</tbody>
</table>
Templates. Mixins within feature modules can declare a list of template parameters (Fig. 7). In contrast to traditional classes, subsequently applied refinements of a class may extend its (possibly empty) template parameter list. This is useful because the set of parameterizable types then does not have to be anticipated up front. Figure 8 depicts a refinement of the basic list that implements the size feature. The type of the counter is passed via a template parameter; for that, the template list is extended (Line 1).
Figure 7: A list template.
```c
/* Figure 7: the basic list template. */
template <typename _ItemT> class List {
  typedef _ItemT ItemT;
  ItemT *head;
  void put(ItemT *i) { i->next = head; head = i; }
};

/* Figure 8 (reconstructed sketch; the extracted code was garbled):
   the refinement extends the template parameter list by _SizeT. */
template <typename _SizeT> refines class List {
  _SizeT size;
  void put(ItemT *i) { super::put(i); size++; }
};
```
Figure 8: Extending the parameter list in a refinement.
However, extending the parameter list implies that clients have to provide the set of expected parameters, which may vary depending on the current feature selection. We address this issue below.
Parameterizing feature modules. Templates can be used to parameterize feature modules statically by a set of types. Figure 9 depicts a stack of generic feature modules. Each feature module extends existing structures, but also the template parameter list. This enables the individual features to declare new parameters that are intended for customizing themselves. Composing a concrete program out of this set of features allows the final program to be parameterized with concrete types.
This example illustrates the two-staged nature of the configuration process imposed by generic feature modules: (1) a subset of features is selected; (2) the selected features are parameterized.
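As a hedged plain-C++ approximation of this two-staged process (FeatureC++'s `refines` is emulated here with parameterized inheritance; all names are illustrative):

```cpp
struct Item { Item* next; };

// The Base feature module as a template (stage 2 parameter: ItemT).
template <typename ItemT>
class Base {
protected:
    ItemT* head = nullptr;
public:
    typedef ItemT item_type;
    void put(ItemT* i) { i->next = head; head = i; }
};

// The Size feature refines its superclass and extends the parameter
// list by SizeT, as described for generic feature modules.
template <class Super, typename SizeT>
class Size : public Super {
    SizeT size = 0;
public:
    void put(typename Super::item_type* i) { Super::put(i); ++size; }
};

// Stage 1: select features (Base + Size); stage 2: parameterize them.
using MyList = Size<Base<Item>, int>;
```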
Configuration repositories. Since each refinement may potentially extend the template parameter list, the programmer may easily get lost in the mass of parameters and values (constants and types). In order to simplify parameterization and to improve code readability without constraining expressibility and flexibility, we adopt the notion of configuration repositories (Czarnecki and Eisenecker, 2000). A configuration repository encapsulates the overall configuration knowledge that is passed via parameters (Fig. 10).
The key benefit of using configuration repositories in generic feature modules is that classes and refinements now expect only one parameter, namely a repository. Each refinement takes only the configuration information that it depends on. This solves the problem that each client would have to know which set of parameters is expected by the current selection of feature modules. Instead, the configuration repository can be defined in one location; clients do not need to know about its overall structure.
Figure 11 depicts a reimplementation of the basic list and the size feature. Both use different subsets of this repository.
```cpp
/* Reconstructed sketch of Figure 11 (the extracted code was garbled);
   member names are illustrative. A configuration repository bundles
   all parameters; each mixin takes out only the types it depends on. */
class ListConfig {
public:
  typedef Item ItemT;
  typedef int  SizeT;
};

template <typename _Config> class List {
  typedef typename _Config::ItemT ItemT;
  ItemT *head;
  void put(ItemT *i) { i->next = head; head = i; }
};
```
Figure 11: Customizing features via repositories.
Table 2: Features and numbers of their parameters.
<table>
<thead>
<tr>
<th>Feature</th>
<th>No.</th>
<th>Feature</th>
<th>No.</th>
<th>Feature</th>
<th>No.</th>
</tr>
</thead>
<tbody>
<tr>
<td>Base</td>
<td>6</td>
<td>Alloc</td>
<td>3</td>
<td>Contain</td>
<td>3</td>
</tr>
<tr>
<td>Length</td>
<td>3</td>
<td>Bounded</td>
<td>3</td>
<td>Connect</td>
<td>3</td>
</tr>
<tr>
<td>Trace</td>
<td>3</td>
<td>Sync</td>
<td>4</td>
<td>Delete</td>
<td>2</td>
</tr>
<tr>
<td>DblLink</td>
<td>4</td>
<td>Array</td>
<td>4</td>
<td>Stack</td>
<td>3</td>
</tr>
<tr>
<td>Iter</td>
<td>3</td>
<td>Compare</td>
<td>3</td>
<td>Queue</td>
<td>3</td>
</tr>
<tr>
<td>Sorted</td>
<td>3</td>
<td>Clone</td>
<td>2</td>
<td>Set</td>
<td>2</td>
</tr>
<tr>
<td>Insert</td>
<td>3</td>
<td>Persist</td>
<td>3</td>
<td>Map</td>
<td>4</td>
</tr>
</tbody>
</table>
Discussion. An alternative version of LPL that omits templates could be implemented using abstract classes. Each parameter would be passed as an object reference via the list's constructor. Different parameter settings would be implemented by subclassing the abstract classes that represent the parameters.
Besides the mentioned penalties imposed by abstract classes, it is not obvious how to bundle parameters in repositories without type definitions and templates. Nevertheless, many design and customization decisions are made upfront. In contrast to using FOP standalone, generic feature modules provide a well-aligned symbiosis between GPP and FOP that supports customizable large-scale components with configuration support and static type safety.
6 RELATED WORK
**GenVoca** is an architectural model for large-scale components and collaboration-based designs (Batory and O’Malley, 1992). Principally, GenVoca distinguishes between horizontal and vertical parameters. The vertical parameters are instrumental in defining the vertical refinement hierarchies of layers, whereas the horizontal parameters provide for variability within a single layer (Goguen, 1996). Mapping this to our approach, concrete configuration repositories encapsulate horizontal parameters; vertical parameters are the classes that will be refined. Interestingly, we integrate the configuration repositories into the GenVoca layers themselves, enabling subsequent refinement.
Our notion of configuration repositories builds on an earlier proposal: They are implemented as trait classes, i.e., classes that aggregate a set of types and constants to be passed to a template as a parameter (Myers, 1995). Additionally, we provide means...
for integrating configuration repositories into feature modules.
Consul is an integrated approach to managing variability and customization (Beuche et al., 2004). It provides a proprietary component model and a logic-based representation of configuration knowledge. The component model lacks the flexibility of mixin composition and compositional reasoning; the logic-based approach to customization is powerful but relies on a complex program transformation approach. Issues such as type safety are not discussed.
Making configuration knowledge and management explicit is a kind of meta-programming. A comprehensive overview of static meta-programming in C++ is given in (Czarnecki and Eisenecker, 2000). There it is shown how configuration repositories can be further processed to automatically determine parameter settings on the basis of partially specified configurations.
7 CONCLUSION
In this paper, we examined the capabilities of FOP and GPP for implementing reusable software: FOP performs well for implementing composable large-scale building blocks, but it imposes a complexity overhead when implementing fine-grained customizable features; GPP focuses mainly on reuse in the small by providing proper means for fine-grained program customization, but lacks abstraction and composition capabilities for programming in the large. Consequently, we proposed an integrated language-level approach that supports both kinds of customization and reuse. Generic feature modules impose a two-staged program customization: features are first selected and composed, and can then be parameterized to adapt them to a specific application context. A distinguishing feature of our approach is that we integrate the configuration knowledge into the associated feature modules to improve their encapsulation properties. For implementation, C++ templates are only a first attempt to demonstrate our ideas. Exploring sophisticated mechanisms for representing and reasoning about configuration knowledge is part of further work.
ACKNOWLEDGEMENTS
We thank Don Batory and Christian Kästner for useful comments on earlier drafts of this paper. This research is sponsored in parts by the German Research Foundation (DFG), project number SA 465/31-1, as well as by the German Academic Exchange Service (DAAD), PKZ D/05/44809.
Verifying the configuration of Virtualized Network Functions in Software Defined Networks
Johan Pelay, Fabrice Guillemin, Olivier Barais
To cite this version:
Johan Pelay, Fabrice Guillemin, Olivier Barais. Verifying the configuration of Virtualized Network Functions in Software Defined Networks. b<>com, France, pp. 1-6, 2017. ⟨hal-01657866⟩
Abstract—The deployment of modular virtual network functions (VNFs) in software defined infrastructures (SDI) enables cloud and network providers to deploy integrated network services across different resource domains. It leads to a large interleaving between network configuration through software defined network controllers and VNF deployment within this network. Most of the configuration management tools and network orchestrators used to deploy VNFs lack an abstraction to express assume-guarantee contracts between the VNF and the SDN configuration. Consequently, VNF deployments can be inconsistent with network configurations. To tackle this challenge, in this paper we present an approach to check the consistency between a VNF description, given as a set of structural models and flow-chart models, and a proposed deployment on a real SDN infrastructure with its own configuration manager. We illustrate our approach on a virtualized Evolved Packet Core function.
I. INTRODUCTION
The emergence of virtualization techniques is revolutionizing the architecture of telecommunications networks. In particular, network functions, which so far were designed and deployed on dedicated hardware, are progressively migrating onto virtual infrastructures (see for instance [1]). The dissociation between functions and hosting hardware allows network operators to be more agile in the deployment of new services. The goal for a network operator is eventually to be able to deploy network functions on demand according to customers' needs and thus create new businesses.
Several functions are currently redesigned in order to be virtualized and flexibly instantiated on common hardware. This is notably the case of Radio Access Network (RAN) and Evolved Packet Core (EPC) functions for mobile networks. The virtualization of these two sets of functions enables a network operator to instantiate a mobile network on demand according to the needs of a company or a (virtual) mobile operator.
The virtualization of network functions urges network operators to change their business models. Instead of offering connectivity and information transport, they become IT technology providers and have to operate distributed storage and compute facilities in addition to their traditional role of connectivity provider. It is very likely that VNFs will be hosted and maybe split over several data centers disseminated throughout the network.
The decomposition of a global VNF into several components (or micro-services [2] [3]), which can be instantiated on distant servers, raises the problem of interconnecting them. This is deeply related to the method of programming the network. With the emergence of Software Defined Networking (SDN), it becomes possible to program a network by means of external controllers and thus to completely configure how different entities communicate between each other. In some sense, for a network operator, SDN and Network Function Virtualization (NFV) will rapidly become intimately interleaved and will raise the problem of consistency between the service which has to be offered by a VNF and the way the VNF is deployed within the network.
Beyond performance issues, which pose the problem of placing the various components of a VNF in the network so as to meet the associated grade-of-service objectives, a problem that network operators will have to solve in the near future is the consistency between the exchange of information among the components of a VNF and the configuration of the network. In this paper, we precisely address the problem of consistency between call flows and network configuration. We propose an approach where the VNF call flow is attached to the VNF definition as a behavioral contract, and we define the SDN behavior using the NetKAT formalism [4] [5] [6]. Then, on the basis of a deployment model of VNF micro-services on a real network infrastructure, we check the consistency between the VNF network assumptions and the SDN guarantees [7].
NetKAT relies on the fact that network procedures acting on packets can be viewed as regular expressions over a certain alphabet (namely, the one formed by the fields of packets). By introducing the concept of a history capable of tracking the progression of a generic packet through the network, notably depending on the forwarding decisions in each switch along the data path, and using the theory developed by Kozen in the '90s [8] [9], it is possible to show that the resulting equational system is decidable, so properties can be proved or refuted mechanically. While the NetKAT framework is capable of proving whether any given property is satisfied by the network when configured by means of SDN, and in particular by using OpenFlow, this formalism does not account for VNFs.
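To give a flavor of the formalism (our illustration, not taken from the paper): a NetKAT policy is built from tests ($f = n$), modifications ($f \leftarrow n$), union ($+$), sequencing ($\cdot$) and iteration ($^{*}$). A toy forwarding policy $p$ over switch and port fields might read $p = (\mathit{sw}=1 \cdot \mathit{pt}\leftarrow 2) + (\mathit{sw}=2 \cdot \mathit{pt}\leftarrow 1)$, and, given a topology relation $t$, the reachability question "can traffic entering at $\mathit{in}$ reach $\mathit{out}$?" becomes an equational check of the form $\mathit{in} \cdot (p \cdot t)^{*} \cdot \mathit{out} \not\equiv \mathit{drop}$; the exact field names and formulation here are our assumptions.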
To remedy this situation, we introduce in this paper an artifact that allows us to describe the micro-services of a VNF as switches and to verify that the history of a generic packet in this augmented network topology is compliant with the targeted call flow of a VNF.
This paper is organized as follows: In Section II, we motivate our proposal by using a detailed example of EPC VNF. Through this example, we illustrate that a VNF provider cannot easily attach to its VNF modular implementation a contract that defines its assumptions regarding the network configuration. Section III provides an overview of our approach based on NetKAT and we illustrate its use on the virtual EPC (vEPC) use case. Section IV discusses related works. Section V presents some conclusions and future work.
II. CONTEXT AND MOTIVATING EXAMPLE
A. Decomposition of a VNF into micro-services
A VNF is a complete software suite composed of several modules and accomplishing a number of tasks. The current trend is to decompose a complete VNF in the form of micro-services [2], [10], [3], [11], [12] interacting between each other, each micro-service executing a set of elementary tasks. Once a VNF is decomposed into micro-services, the subsequent task is to instantiate them onto a virtualized architecture.
Before proceeding to the instantiation phase, let us stress the fact that a micro-service is a software package that is developed by independent entities (companies specialized in software development or open-source communities) and used as plug-and-play by a network operator. Engineers, who are in charge of software development or who design services, do a job different from that of network administrators and have different skills.
The gap between these two worlds can cause misunderstandings or configuration errors between the desired VNF logical architecture [13] and the actual setup. Services are often designed without taking into account the use of the network by others, which can lead to problems such as security of communications, latency, congestion, etc.
There exist several configuration management tools and network orchestrators that allow operators to quickly describe VNF architectures, i.e., the micro-services using VMs or containers, micro-service configuration, and assembly. We can cite Ansible, Chef, Puppet, Docker Compose, etc. Currently, to the best of our knowledge, only openMANO and of course the SONATA NFV service platform propose a way to declare network configuration assumptions before deployment.
In the following, we pay special attention to the way the micro-services of a VNF are interconnected. When dealing with the implementation correctness of a VNF, we can definitely identify two aspects:
- The semantic correctness: The various micro-services exchange messages between them according to a given protocol. The semantic checking of the VNF amounts to verifying the correctness of the implementation of the protocol. This can be done off-line when coding the VNF in the form of micro-services. Usual model checking tools [14] can be used to check behavioral consistency between services [15].
- The correctness of the exchange of information: When micro-services are implemented on various servers, they have to communicate between them across the network. Micro-services are hosted by a server attached to the network. The key point is to check that the messages are correctly exchanged between micro-services interconnected by a network configured by means of OpenFlow.
In this paper, we focus on the second issue.
B. Motivating example: A modular implementation of vEPC
1) The various functions of vEPC: An EPC is composed of data and control plane functions as depicted in Figure 1, which displays the various functions for cellular and WiFi radio access. In the following, we focus on 4G access composed of HSS (Home Subscriber Server), S/PGW (Serving/Packet data network Gateway), MME (Mobility Management Entity). Note when considering non cellular access, additional modules are necessary (e.g., ePDG, WiFi controllers, etc.).
2) Attach and Authentication: To get attached to the radio network, a UE (User Equipment), identified by its SIM card, connects to an eNodeB base station. Before a UE is authorized to access the network, its identity is verified by the MME against the HSS, a database that describes the entities of the network and contains the list of users with their rights and permissions, as well as the current sessions. For the sake of conciseness, our next examples focus on these first steps.
3) Default Radio Bearer Setup: The UE's mobility is managed by the 4G control plane. A packet is routed using an address that is generally linked to a fixed location. The solution chosen in 4G is to pass traffic through a Packet Gateway (PGW). When the UE moves and changes base station, the PGW is informed of its new location by the MME.
According to the LTE standard, the S/PGW must provide an IP address to the client. This step is divided into two distinct parts: on the one hand, the request followed by the transmission of the IP address; on the other hand, the choice of the IP address. This choice can be made directly by the S/PGW, if it holds a database with the pool of IP addresses, or by querying a DHCP server. The IP address can be returned to the client via the MME, or only after the opening of the tunnel (bearer) by the S/PGW, when the client requests it through the tunnel.
The call flow of the attachment of a UE is depicted in Figure 2. The various elements of the EPC have to exchange information, which has to be forwarded through the network. In current networks, all servers (MME, HSS) and data plane network elements (eNodeB, S/PGW) have fixed IP addresses and routing is static. The challenge of NFV is to implement these functions dynamically on data centers.
C. Implementation issues
We tested the deployment of an open source EPC (as illustrated in Figure 3) in the Network Architecture Lab at b<>com, namely the Open Air Interface (OAI) EPC [17]. For deploying this vEPC with a simple configuration manager, we used Ansible scripts to define roles and a Vagrantfile to mount and connect all of the resources. A role is defined by a list of files to be installed or imported and a list of values that change for each resource associated with the role (name, IP address, etc.). The same type of equipment usually plays several roles; a playbook lists the roles for all equipment types and passes to the associated roles the variables to apply for each resource. The configuration of the various services, as well as the routing tables, is fixed and written in files that are copied by Ansible after the launch of the VMs.
A total of 1047 lines of Ansible code and 344 lines of configuration files was necessary to deploy the six micro-services that form the vEPC, as well as the network equipment and their controller. No verification could be made to ensure that the network configuration allows the necessary exchanges. When the deployment was done on a different architecture than the development lab (namely, when implementing the vEPC on the platform of another project), network problems appeared (forwarding loops). Tests performed while writing the configurations are valid only for the network tested at a given time.
This simple example motivates us to develop a methodology for testing the consistency between the decomposition of a VNF into micro-services and its deployment in a multi-cloud environment.
III. PROPOSED SOLUTION
The NFV approach promises to encapsulate network functions into reusable boxes that operate predictably without requiring network operators to know the details of how they do so. To detect inconsistencies between NFV assumptions and actual SDN configurations, this section introduces an extended VNF model with software contracts. It then shows how these contracts can be used in combination with existing approaches such as NetKAT to check the overall consistency of a VNF deployment. We define below the four main steps of our approach and illustrate each of them on the EPC case study:
1. Define a reusable VNF model (subsection III-A);
2. Define the augmented topology of the network and IT infrastructure (subsection III-B);
3. Define the VNF deployment model (subsection III-C);
4. Check the consistency of the VNF deployment model (subsection III-D).
A. Step 1 - Reusable VNF model
We first introduce an abstract definition of a VNF.
Definition 1: A VNF is defined as:
- A set of micro-services $S = \{s_1, s_2, \ldots, s_n\}$.
- A set of hubs $H = \{h_1, h_2, \ldots\}$, where each hub represents a logical link between a set of services together with meta-data. For example, $h_1 = \{services : \{s_1, s_2\}, secured : false\}$ and $h_2 = \{services : \{s_2, s_3\}, secured : false\}$ mean that $s_1$ can send messages to $s_2$ and $s_2$ can send messages to $s_3$, but $s_1$ cannot exchange messages with $s_3$ and cannot view the messages between $s_2$ and $s_3$. A hub $h_3 = \{services : \{s_3, s_4, s_5\}, secured : true\}$ means that communication between $s_3$, $s_4$, and $s_5$ must be encrypted.
- A behavioral contract (see definition 2).
- A set of micro-service implementations.
In the above definition, we have used the concept of behavioral contract defined as follows.
Definition 2: A behavioral contract is defined through a set of messages. Each message is a tuple $msg = (s_1, s_2, order, \{a_1, a_2, ..., a_n\})$ where:
- $s_1$ is the micro-service source of the message,
- $s_2$ is the target micro-service of the message,
- $order$ is the order number of the message in the corresponding sequence diagram structure tree,
- $a_1, a_2, ..., a_n$ is a set of routing rule actions (addition, removal, modification) that can be triggered before the message reception or after its emission (see Definition 3).
All the order numbers of the messages in the sequence diagram form a partial order. The order number of a message is given according to its position in the tree; the root node corresponds to the starting micro-service of the sequence diagram.
Definition 3: A routing rule action $a$ is defined using NetKAT syntax; it has a name and handles virtual nodes and virtual ports of a VNF. A virtual node ($VN(s_i)$) maps to the node where micro-service $s_i$ is deployed. A virtual port ($VP_k(s_i)$) maps to port $k$ of the node where micro-service $s_i$ is deployed.
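As a minimal illustration of Definitions 1–3, the following Python sketch encodes the model; the class and attribute names (Hub, Message, VNF, may_communicate) are ours and purely illustrative, and the NetKAT routing-rule actions are kept as opaque strings:

```python
from dataclasses import dataclass
from typing import FrozenSet, List, Tuple

@dataclass(frozen=True)
class Hub:
    """Logical link between a set of micro-services, plus meta-data (Definition 1)."""
    services: FrozenSet[str]
    secured: bool

@dataclass(frozen=True)
class Message:
    """One message of a behavioral contract (Definition 2)."""
    source: str                     # emitting micro-service s1
    target: str                     # receiving micro-service s2
    order: int                      # position in the sequence-diagram tree
    actions: Tuple[str, ...] = ()   # NetKAT routing-rule actions (Definition 3)

@dataclass
class VNF:
    """Reusable VNF model (Definition 1); implementations omitted here."""
    services: FrozenSet[str]
    hubs: List[Hub]
    contract: List[Message]

    def may_communicate(self, s1: str, s2: str) -> bool:
        """Two services may exchange messages only if some hub contains both."""
        return any({s1, s2} <= h.services for h in self.hubs)
```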
1) vEPC attachment illustration: To illustrate this formalism, a vEPC can be characterized as follows.
Proposition 1: A vEPC can be described as a set $S = \{eNodeB, MME, HSS\}$ with
- a set of hubs $H = \{h_1, h_2\}$ with
$h_1 = \{services : \{eNodeB, MME\}, secured : false\}$
$h_2 = \{services : \{MME, HSS\}, secured : false\}$
This model shows that the vEPC has three micro-services connected through two unsecured hubs \((h_1, h_2)\). The behavioral contract contains five messages, \(\text{Attach\_Request}\), \(\text{Auth\_info\_Req}\), \(\text{Auth\_info\_Ans}\), \(\text{Ciphered\_Req}\), and \(\text{Ciphered\_Rep}\):
\[
BC = \{\text{Attach\_Request, Auth\_info\_Req, Auth\_info\_Ans, Ciphered\_Req, Ciphered\_Rep}\},
\]
with: \(\text{Attach\_Request} = (\text{eNodeB}, \text{MME}, 1)\),
\(\text{Auth\_info\_Req} = (\text{MME}, \text{HSS}, 2)\),
\(\text{Auth\_info\_Ans} = (\text{HSS}, \text{MME}, 3)\),
\(\text{Ciphered\_Req} = (\text{MME}, \text{eNodeB}, 4)\),
\(\text{Ciphered\_Rep} = (\text{eNodeB}, \text{MME}, 5)\);
the action sets of Definition 2 are empty here and therefore omitted.
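Using the sketch of Definitions 1–3 above, Proposition 1 can be written down directly (again, a purely illustrative instantiation):

```python
# Hypothetical instantiation of the vEPC attachment model (Proposition 1).
vepc = VNF(
    services=frozenset({"eNodeB", "MME", "HSS"}),
    hubs=[
        Hub(services=frozenset({"eNodeB", "MME"}), secured=False),  # h1
        Hub(services=frozenset({"MME", "HSS"}), secured=False),     # h2
    ],
    contract=[
        Message("eNodeB", "MME", 1),  # Attach_Request
        Message("MME", "HSS", 2),     # Auth_info_Req
        Message("HSS", "MME", 3),     # Auth_info_Ans
        Message("MME", "eNodeB", 4),  # Ciphered_Req
        Message("eNodeB", "MME", 5),  # Ciphered_Rep
    ],
)

assert vepc.may_communicate("eNodeB", "MME")
assert not vepc.may_communicate("eNodeB", "HSS")  # no common hub
```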
B. Step 2 - Augmented topology based on NetKAT
To describe the network configuration, we propose to use NetKAT directly. In the following subsection, we recall the basic elements of NetKAT.
1) Basic elements of NetKAT: The basic assumption of NetKAT is that functions implemented in a network can be viewed as regular expressions acting on packets. In this framework, a packet \(pk\) is a series of fields, namely
\[
pk := \{f_1 = v_1, \ldots, f_k = v_k\}
\]
where the \(f_i\), \(i = 1, \ldots, k\), are fields expressed as series of bits. The most common fields in IP networks are source and destination IP addresses, source and destination port numbers, protocol type, DiffServ Code Point (DSCP), etc. Moreover, to track the position of a packet in the network, NetKAT introduces two additional fields: the switch label and the port at which the packet appears when arriving at the switch. The key observation is that these two fields change at each switch, while the other fields are relevant end-to-end (except the DSCP field, which may be updated inside the network in case of untrusted marking, but which should in theory keep an end-to-end value). With the above definition, packets form an alphabet of \(2^N\) elements, where \(N = |f_1| + \ldots + |f_k|\). This number is potentially very large, but most network procedures act only on a restricted number of fields (IP addresses in case of rerouting inside the network, switch and port labels).
The fundamental idea of NetKAT is to recognize that usual network functions such as forwarding, rerouting, firewalling, etc. can be viewed as regular expressions which are:
- either predicates \((f = v\), where \(f\) is a field and \(v\) a given value\);
- or else policies, for instance updating a field \((f \leftarrow v)\).
The basic procedures (policies and predicates) of NetKAT are given in Table I. The Kleene star
\[
p^* = \sum_{n \geq 0} p^n, \qquad p^0 = 1, \; p^{n+1} = p \cdot p^n,
\]
is the sum of the finite iterates of the procedure \(p\). The dup policy duplicates the current packet in the construction of histories (namely, it prepends a copy of the current packet to the history); histories are introduced below and are instrumental in NetKAT's capability to prove properties of the network.
The set of policies, with the identity 1 (no action) and the null 0 (packet drop) procedures, and equipped with the operations +, · and the Kleene star *, constitutes a Kleene algebra. The set of predicates, with the identity 1 and the drop 0, equipped with the operations +, · and ¬, is a Boolean algebra. The predicates with the + and · operations form a subalgebra of the policies. Together, policies and predicates with the above operations constitute a Kleene Algebra with Tests (KAT).
With the above notation, a firewall which drops all packets towards a given IP address (say, \(A_0\)) can be written as
\[
(\mathrm{IP}_{dst} = A_0) \cdot 0,
\]
where, as stated above, 0 is the policy that drops all packets. Similarly, forwarding a packet from port \(pt_1\) on switch \(A\) to port \(pt_2\) on switch \(B\) reads
\[
(sw = A \cdot pt = pt_1) \cdot (sw \leftarrow B \cdot pt \leftarrow pt_2)
\]
To record the path taken by a packet through the network, NetKAT introduces the concept of history, the list of states occupied by a generic packet when traversing the network. A history \(h\) has the form \(\langle pk_1, \ldots, pk_n \rangle\), where \(pk_i\) is the state of the packet at the \(i\)-th most recently visited switch (the list reads from right to left). All NetKAT predicates and policies act on the packet history to create a new set of histories (possibly empty if the packet is dropped), as detailed in Section 3 of [4].
More precisely, a predicate applied to a history \(h\) returns either the singleton \(\{h\}\) or the empty set \(\{\}\). A field modification \((f \leftarrow v)\) returns a singleton history in which field \(f\) of the current packet has been set to \(v\). Thus, predicates and policies induce functions on histories. The function induced by a policy \(p\) is denoted by \([\![p]\!]\); it maps the set of histories \(H\) to \(\mathcal{P}(H)\), the powerset of \(H\).
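To make this denotational reading concrete, the following minimal Python sketch interprets a small NetKAT fragment over packet histories, following the semantics of [4]; the encoding ((field, value) pairs for packets, tuples for histories) and all names are our own illustrative choices, and histories are assumed non-empty:

```python
from typing import Callable, FrozenSet, Tuple

Packet = Tuple[Tuple[str, object], ...]   # immutable (field, value) pairs
History = Tuple[Packet, ...]              # most recent packet first, non-empty
Policy = Callable[[History], FrozenSet[History]]

def test(f: str, v) -> Policy:
    """Predicate f = v: keep the history iff the current packet matches."""
    return lambda h: frozenset([h]) if dict(h[0]).get(f) == v else frozenset()

def assign(f: str, v) -> Policy:
    """Modification f <- v on the current (left-most) packet."""
    def run(h: History) -> FrozenSet[History]:
        pk = dict(h[0])
        pk[f] = v
        return frozenset([(tuple(sorted(pk.items())),) + h[1:]])
    return run

def seq(p: Policy, q: Policy) -> Policy:
    """Sequential composition p . q."""
    return lambda h: frozenset(h2 for h1 in p(h) for h2 in q(h1))

def par(p: Policy, q: Policy) -> Policy:
    """Union p + q."""
    return lambda h: p(h) | q(h)

dup: Policy = lambda h: frozenset([(h[0],) + h])   # prepend current packet
drop: Policy = lambda h: frozenset()               # the 0 policy

# The firewall from the text, (IP_dst = A0) . 0 ("match then drop"):
firewall = seq(test("ip_dst", "A0"), drop)
```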
Using the same formalism, NetKAT describes the network topology in order to check that rules do not try to use links that do not physically exist (as in the example of Proposition 2).
A key property of NetKAT is that the language is sound and complete: with the KAT axioms and the NetKAT axioms (see Section 2 of [5]), every equivalence provable from the axioms also holds in the equational model (soundness), and conversely, every equivalence in the equational model is provable from the axioms (completeness); these two statements are proved in Section 4 of [4].
In particular, it is possible to prove that a packet follows a given route (namely, a sequence of switches in the network). This simple remark motivates us to introduce an augmented version of the network.
2) Augmented network topology: The network is configured by means of SDN, for instance OpenFlow. By acquiring the configuration of the network (namely, the OpenFlow rules pushed into the network elements via the controllers), it is possible to completely translate the network configuration into NetKAT. We can then abstract the network topology in terms of nodes, ports, and links.
To build the augmented topology of the virtualized infrastructure, we consider micro-services exchanging messages as (virtual) switches exchanging packets. Indeed, from a transport point of view, micro-services receive messages, process them, and transmit them to other micro-services or to end users. Everything happens as if micro-services were switches and messages were (virtual) packets.
On the basis of the (physical) network topology and the (virtual) topology of micro-services, we are able to build the augmented topology of the network combined with the VNFs. With this artifact and the power of the NetKAT formalism, we can verify that virtual packets are routed through the augmented topology so that the call flows of a VNF can be implemented.
3) Illustration with the UE attachment: The micro-services are illustrated in Figure 3 and the associated call flow in Figure 2. We introduce an augmented topology with four nodes (host1, host2, host3, sw1); each host has one port and sw1 has three ports. There is a link between each host and sw1. Considering micro-services as switches, the augmented topology of our example can be described as follows.
**Proposition 2:** The augmented topology of the UE attachment procedure for the example depicted in Figure 3 is:
\[
\begin{aligned}
t = {} & (sw = host1 \cdot pt = 1 \cdot sw \leftarrow sw1 \cdot pt \leftarrow 1) \\
 + {} & (sw = sw1 \cdot pt = 1 \cdot sw \leftarrow host1 \cdot pt \leftarrow 1) \\
 + {} & (sw = host3 \cdot pt = 1 \cdot sw \leftarrow sw1 \cdot pt \leftarrow 2) \\
 + {} & (sw = sw1 \cdot pt = 2 \cdot sw \leftarrow host3 \cdot pt \leftarrow 1) \\
 + {} & (sw = host2 \cdot pt = 1 \cdot sw \leftarrow sw1 \cdot pt \leftarrow 3) \\
 + {} & (sw = sw1 \cdot pt = 3 \cdot sw \leftarrow host2 \cdot pt \leftarrow 1)
\end{aligned}
\]
Note that, since each host has a single port, every link of \(t\) connects a host port to a port of sw1 and back.
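With the combinators of the interpreter sketch above, this topology is simply a union of test-and-assign policies (illustrative encoding):

```python
# Proposition 2's topology t, encoded with the combinators above (illustrative).
def link(sw_in: str, pt_in: int, sw_out: str, pt_out: int) -> Policy:
    return seq(seq(test("sw", sw_in), test("pt", pt_in)),
               seq(assign("sw", sw_out), assign("pt", pt_out)))

t = par(link("host1", 1, "sw1", 1),
    par(link("sw1", 1, "host1", 1),
    par(link("host3", 1, "sw1", 2),
    par(link("sw1", 2, "host3", 1),
    par(link("host2", 1, "sw1", 3),
        link("sw1", 3, "host2", 1))))))
```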
We can combine this topology with a deployment model of a new VNF to check the consistency between the VNF model and the current network topology before performing the actual VNF deployment.
**C. Step 3 - VNF deployment model**
To deploy a reusable VNF on a real network, the deployment model creates a mapping between each micro-service belonging to the VNF model and a node belonging to the network configuration model.
**Definition 4:** A deployment model \( D \) is defined as \( D = \{m_1, m_2, ..., m_l\} \), where each \( m \) is a tuple \( m = (n_i, s_j) \) mapping a node \( n_i \) to a micro-service \( s_j \).
In our illustrative use case (UE attachment), the proposed mapping is as follows:
**Proposition 3:** The VNF deployment model for the UE attachment procedure of the example depicted in Figure 3 is \( D = \{m_1, m_2, m_3\} \) with \( m_1 = (host1, eNodeB) \), \( m_2 = (host2, HSS) \), and \( m_3 = (host3, MME) \).
The micro-service eNodeB is deployed on the host host1. The micro-service HSS is deployed on the host host2. The micro-service MME is deployed on the host host3.
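In code, a deployment model is just a finite map from nodes to micro-services; the following illustrative snippet also sketches the virtual-node mapping \(VN(s_i)\) of Definition 3:

```python
# Proposition 3 as a plain node-to-service mapping (Definition 4), illustrative.
deployment = {"host1": "eNodeB", "host2": "HSS", "host3": "MME"}

def virtual_node(service: str) -> str:
    """VN(s): the node hosting micro-service s (cf. Definition 3)."""
    return next(n for n, s in deployment.items() if s == service)
```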
Once the topology and the VNF description are combined, we can write the desired network rules in NetKAT, which are then translated into OpenFlow.
**Proposition 4:** The NetKAT policies for the UE attachment procedure in the example of Figure 3 are:
\[
\begin{aligned}
  & (sw = sw1 \cdot pt = 1 \cdot sw \leftarrow MME \cdot pt \leftarrow 1) \\
+ {} & (sw = sw1 \cdot pt = 2 \cdot dst = HSS \cdot sw \leftarrow HSS \cdot pt \leftarrow 1) \\
+ {} & (sw = sw1 \cdot pt = 2 \cdot dst = eNodeB \cdot sw \leftarrow eNodeB \cdot pt \leftarrow 1) \\
+ {} & (sw = sw1 \cdot pt = 3 \cdot sw \leftarrow MME \cdot pt \leftarrow 1) \\
+ {} & (dst = HSS \cdot src = eNodeB \cdot 0)
\end{aligned}
\]
**D. Step 4 - Checking consistency of a VNF deployment model**
1) Running a call flow on the augmented topology: For a call flow of a VNF, a virtual packet is introduced and a history is created to record the journey of this virtual packet in the augmented network. NetKAT can then be used to prove that the exchanges in the call flow are achieved by the virtual packet. This method can notably be used to detect loops, undue packet discards, missing forwarding rules, etc.
To check the compliance between histories and call flows, we build two message-sequence traces and check that there exists a weak simulation [18] relation between histories and call flows, which entails trace inclusion.
2) History applied to our use case: To illustrate the last step, we focus on the Attach and Authentication behavioral contract defined in Figure 2. In the NetKAT formalism, for a packet issued from the eNodeB virtual switch, we obtain the following history (recall that it must be read from last to first):
\[
\begin{aligned}
h := \langle \, & pk_1[src=eNodeB;\ dst=MME;\ sw=sw1;\ port=2], \\
& pk_2[src=eNodeB;\ dst=MME;\ sw=eNodeB;\ port=1], \\
& pk_3[src=MME;\ dst=eNodeB;\ sw=sw1;\ port=1], \\
& pk_4[src=MME;\ dst=eNodeB;\ sw=MME;\ port=1], \\
& pk_5[src=HSS;\ dst=MME;\ sw=sw1;\ port=3], \\
& pk_6[src=HSS;\ dst=MME;\ sw=HSS;\ port=1], \\
& pk_7[src=MME;\ dst=HSS;\ sw=sw1;\ port=3], \\
& pk_8[src=MME;\ dst=HSS;\ sw=MME;\ port=1], \\
& pk_9[src=eNodeB;\ dst=MME;\ sw=sw1;\ port=2], \\
& pk_{10}[src=eNodeB;\ dst=MME;\ sw=eNodeB;\ port=1] \, \rangle
\end{aligned}
\]
Based on this history, we can build two labelled transition systems (LTSs) in which the labels are defined using the source and target names. Then we check the trace inclusion between the VNF behavioral contract and the NetKAT history. To check trace inclusion between LTSs, we check weak simulation between them; it is known that weak simulation implies trace inclusion [19]. To tool the approach, on top of NetKAT we use the LTSA [20] model checker, which can check weak simulation by representing the VNF behavioral contract as a safety property process.
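As a simplified illustration of this last step, the following sketch extracts (source, target) labels from the behavioral contract and from a NetKAT history and compares the resulting traces; it reuses the illustrative encodings introduced earlier and only covers the degenerate case of a single linear trace, whereas the actual tooling relies on weak simulation in LTSA:

```python
# Simplified trace comparison for the linear attachment scenario; actual
# tooling checks weak simulation with LTSA [20].
def contract_trace(vnf) -> list:
    """(source, target) labels of the behavioral contract, in message order."""
    return [(m.source, m.target)
            for m in sorted(vnf.contract, key=lambda m: m.order)]

def history_trace(history) -> list:
    """One (src, dst) label per emission point, i.e. where sw equals src."""
    labels = []
    for pk in reversed(history):       # histories read from last to first
        fields = dict(pk)
        if fields["sw"] == fields["src"]:
            labels.append((fields["src"], fields["dst"]))
    return labels

def traces_equal(vnf, history) -> bool:
    return contract_trace(vnf) == history_trace(history)
```

Applied to the history above and the contract of Proposition 1, both traces are (eNodeB, MME), (MME, HSS), (HSS, MME), (MME, eNodeB), (eNodeB, MME), so the check succeeds.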
**IV. RELATED WORK**
The interplay between SDN and NFV is one of the challenges that has recently been investigated in the technical literature. In [21], Medhat et al. explore the limitations of current service function chaining approaches in next-generation networks, both architectural and conceptual, by providing a brief analysis of each solution in the state of the art. The article also proposes new research directions. With regard to the placement of functions, they discuss the automatic placement/migration of VNFs to ensure their correct execution. In such a scenario, checking the consistency between the VNF assumptions and the network configuration could be used as an oracle to accept a placement/migration.
Closer to our approach, Spinoso et al. [22] check that the functionalities implemented in VNFs are not disturbed by the modifications made by middle-boxes or other VNFs. For this purpose, they use a satisfiability modulo theories (SMT) solver that quickly provides formal proofs before a deployment. The network topology is not managed, only the network graph/service chaining. With respect to their approach, we focus in this paper on possible errors in forwarding rules that can cause loops, reachability issues, security breaches, etc. Our work could easily be extended to packet modifications by middle-boxes and VNFs, viewed as virtual switches in our augmented topology.
In [23], Shin et al. provide formal foundations for supporting the development of reliable network services. Their solution uses a packet-based Algebra of Communicating Shared Resources to check whether there is any inconsistency between chosen specifications and the implementation. All the studies cited above stress, as we do in this paper, that there is a real need for formally proving the correctness of the implementation of VNFs in an SDN context.
V. CONCLUSION
We have addressed in this paper the correctness of the deployment of VNFs distributed over distant data centers. We have advocated the development of a unified view of all resources involved in the implementation and deployment of VNFs, notably how they are interconnected. On the basis of this view, we have introduced the concept of an augmented topology in which micro-services appear as switches. By associating virtual packets with the call flows of VNFs, it is possible to use the NetKAT formalism to verify that the histories of virtual packets are compliant with the call flows of a VNF, and thus that the VNF is correctly implemented.
We furthermore believe that such an approach is of utmost relevance to the development of ONAP (Open Network Automation Platform) [24], which aims at automating the creation and instantiation of VNFs. By adopting the solution proposed in this paper, ONAP would be capable of safely instantiating VNFs in a network programmed by means of SDN. ONAP is in its first development phase but has already identified a number of features required for the automatic creation and instantiation of VNFs. In particular, ONAP aims at developing a holistic view of network resources, including traditional network assets (types of connectivity and bandwidth) managed with OpenDaylight, together with IT resources (storage and compute) managed by OpenStack. Hence, ONAP can develop a unified view of all resources so as to optimize and configure them.
The developed solution is valid for a static environment, where micro-services do not migrate from one data center to another. Building such a model, in particular the network configuration model, which can be highly dynamic, could be error-prone; however, it can be obtained through system introspection.
REFERENCES
Learning a Classifier for Prediction of Maintainability based on Static Analysis Tools
Markus Schnappinger
Technical University of Munich
Munich, Germany
schnappi@in.tum.de
Mohd Hafeez Osman
Technical University of Munich
Munich, Germany
osmanm@in.tum.de
Alexander Pretschner
Technical University of Munich
Munich, Germany
pretschn@in.tum.de
Arnaud Fietzke
itestra GmbH
Munich, Germany
fietzke@itestra.de
Abstract— Static Code Analysis Tools are a popular aid to monitor and control the quality of software systems. Still, these tools only provide a large number of measurements that have to be interpreted by the developers in order to obtain insights about the actual quality of the software. In cooperation with professional quality analysts, we manually inspected source code from three different projects and evaluated its maintainability. We then trained machine learning algorithms to predict the human maintainability evaluation of program classes based on code metrics. The code metrics include structural metrics such as nesting depth, cloning information and abstractions like the number of code smells. We evaluated this approach on a dataset of more than 115,000 Lines of Code. Our model is able to predict up to 81% of the threefold labels correctly and achieves a precision of 80%. Thus, we believe this is a promising contribution towards automated maintainability prediction. In addition, we analyzed the attributes in our created dataset and identified the features with the highest predictive power, i.e. code clones, method length, and the number of alerts raised by the tool Teamscale. This insight provides valuable help for users needing to prioritize tool measurements.
Index Terms— Software Quality, Software Maintenance, Code Comprehension, Static Code Analysis, Maintenance Tools
I. INTRODUCTION
Software vendors aim to develop software systems that fulfill all functional requirements, are economical to build in the first place, and are easy to maintain in the future. The largest share of the development costs is actually maintenance costs [1], [2]. Therefore, there is a direct relation between the maintainability of a system and its economic profitability. Over time, maintenance costs increase as the code base becomes larger and errors are propagated [3]. It is therefore critical for software vendors to establish continuous quality management to avoid cost explosions. Code reviews, for example, can help to evaluate and control the quality of source code [4]. Also, software health checks by external quality auditors are a tried and tested remedy [5], [6]. Though these manual inspection techniques are effective and well established, they are also expensive and time-consuming. Instead of continuously performing extensive and expensive reviews during development, many companies use static analysis tools to track the quality of their systems. These tools analyze source code and provide measurements about the program without actually executing it. Unfortunately, several studies confirm the high number of inadequate warnings emitted by these tools [7], [8]. Developers quickly feel overwhelmed by the large number of measurements provided by such tools. Thus, several approaches use polynomial functions to aggregate multiple measurements into one single numerical value. One of the first approaches was the Maintainability Index introduced by Oman [9]. Several years later, Benestad et al. [10] pointed out the need to define a strategy for every metric-based assessment, consisting of well-defined selection, combination, aggregation, and interpretation techniques. Still, this strategy has to be defined manually, and especially the interpretation of the results requires considerable expertise. Instead of using a predefined polynomial with fixed weights, we try to capture the intuition of professional quality experts with artificial intelligence. The company itestra brings 15 years of industrial experience to this joint research. In addition to software engineering projects, itestra also offers post-release software audits, sometimes referred to as software health checks [5].
Our research models the experience of professional quality analysts using machine learning. The goal is to establish an automated, metrics-based evaluation that reflects expert judgment. This paper hence evaluates the following approach towards automated assessments: with the help of quality consultants working at itestra, we manually analyzed source code from three different projects, accounting for 115,373 lines of Java code. The experts labeled the corresponding classes with respect to their maintainability to create a labeled dataset. Then, we retrieved the output of three static analysis tools for these classes and attached the labels. Eventually, we trained supervised machine learning algorithms to assess the maintainability of source code based on static measurements. Analyzing the created dataset, we also identified the measurements with the highest predictive power. In our prediction experiments, we obtained promising results, with an accuracy of 81% and an F-Score of 80%. While we consider this a promising first step towards automated quality assessments, it is not yet sufficient for a stand-alone tool. However, our approach offers a valuable quick assessment for developers without access to professional, time-consuming software assessments. In addition, our feature evaluation showed that cloning information, Teamscale-Findings, and method length are the metrics with the highest correlation to maintainability as perceived by the experts.
II. RELATED WORK
Coleman et al. [11] investigated the relations between manual and metric-based automated assessments. Their study shows that the results of the automated maintainability analysis and the qualitative assessments performed by maintenance engineers strongly correlate. In their study, polynomial models were used to compare software systems. Benestad et al. [10] also predicted software maintainability based on metrics in their paper in 2006. Their approach mostly considered measurements describing the relation of one class to other classes of the system, e.g. coupling. While we use machine learning to capture the experience of professionals, they discuss different selection-, aggregation-, combination- and interpretation methods to deduce the maintainability. In [8], Yüksel and Sözer applied machine learning techniques to classify alerts emitted by static analysis tools. In contrast to our work, these alerts are bug-related and do not consider possible maintainability issues. Koc et al. [7] follow a similar approach and focus on bug alerts, too. In a first step, they isolate the code that was highlighted by the analysis tool. Applying machine learning to these code snippets, they either confirm or refute the finding. Hegedüs et al. [12] proposed an approach to build a method-level maintainability prediction model based on human evaluation. Three surveys were conducted resulting in three datasets of source code maintainability. They found that none of the datasets was suitable to build a reliable regression model.
Since labeling source code is challenging and time-consuming, Kumar et al. referred to the number of changed lines per file to quantify maintainability [13], while Hegedüs et al. conducted a survey to collect maintainability labels [12]. In contrast to these works, our study refers to maintainability as perceived by professional quality consultants. In summary, we capture the maintainability perception of quality experts using metrics. In contrast to Oman [9] and Coleman [11], we do not aggregate the metrics using polynomial functions with fixed parameters, but apply machine learning algorithms to learn the expert evaluation. Opposed to work from Hegedüs et al., this research focuses on class-level maintainability.
III. EXPERIMENT SETUP
In contrast to other studies, this paper does not measure task-completion time as a proxy for comprehensibility [14], nor the number of revisions as a proxy for maintainability [13], [15]. Instead, we work together with professionals from industry and their experience-based definition of maintainability. For this purpose, we define maintainability as the ease of change, leading to two sub-characteristics:
1) As a developer, can I understand what the code does and identify where certain aspects are implemented?
2) As a developer, do I have to worry about hidden dependencies of the code I am currently modifying?
While the first aspect addresses the need to comprehend the source code, the second focuses on where else the developer has to apply changes. For example, duplications of a code snippet have to be found and modified as well.
Provided the expert judgment, this research answers the following questions:
- Is it possible to predict a human intuition of the maintainability of source code based on tool measurements?
- Are there relations between metrics and expert judgment, and which metrics have the highest predictive power?
A. Overall Approach
Figure 1 depicts the overall framework of our approach. In the data preparation phase, we extracted metrics from the code sample using static analysis tools, performed data cleaning, and combined the metrics with the labels. Next, we train and validate the models. We selected a diverse set of 21 algorithms representative of different approaches. For each classifier, the train → evaluate → parameter-tuning iteration continued until all possible parameter combinations were evaluated. At this stage, we also analyzed the predictive power of the features.
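As a rough scikit-learn analogue of this train → evaluate → tune loop (the study itself uses Weka, as described in Section IV), one could write the following, where the classifier, the parameter grid, and the feature matrix X and labels y are illustrative stand-ins rather than the study's actual setup:

```python
# Rough scikit-learn analogue of the train/evaluate/tune loop (the study
# itself uses Weka 3.9.3 and 21 classifiers; names below are illustrative).
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.tree import DecisionTreeClassifier

cv = StratifiedKFold(n_splits=10, shuffle=True, random_state=0)
search = GridSearchCV(
    DecisionTreeClassifier(),                 # stand-in for a J48/C4.5-style tree
    param_grid={"max_depth": [3, 5, 10, None],
                "min_samples_leaf": [1, 5, 10]},
    scoring="f1_weighted",
    cv=cv,
)
# X: metric matrix (345 classes x 67 features), y: labels in {"A", "B", "C"}
# search.fit(X, y); print(search.best_params_, search.best_score_)
```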
B. Study Objects
To evaluate the approach, a dataset of source code and its evaluation has to be created. We took our sample from three software systems written in Java. The chosen sample includes 115,373 Lines of Code (LoC), distributed over 345 classes. To ensure a high diversity among the study objects, we chose one small project with approx. 45k LoC, one medium-sized system with around 380k LoC and one big project with more than 3M LoC. The age of the systems lies between 4 and 19 years. The projects cover in-house, off-shore, and open-source development. Two of the systems are industrial projects located in the insurance domain. The third system is the software testing framework JUnit 4 (Version 4.11). Table I provides an overview of the systems.
C. Static Analysis Tools for Data Collection
Static code analysis tools analyze source code without actually executing it. Their measurements serve as input for
TABLE I
ANALYZED SOURCE CODE
<table>
<thead>
<tr>
<th></th>
<th>System A</th>
<th>JUnit 4 (4.11)</th>
<th>System C</th>
</tr>
</thead>
<tbody>
<tr>
<td>Domain</td>
<td>Insurance</td>
<td>Software Dev.</td>
<td>Insurance (Damage Evaluation System)</td>
</tr>
<tr>
<td>First Release</td>
<td>2000</td>
<td>2014</td>
<td>2014</td>
</tr>
<tr>
<td>Development</td>
<td>Outsourced</td>
<td>Open-source</td>
<td>In-house</td>
</tr>
<tr>
<td>Size</td>
<td>3.1M LOC</td>
<td>44.6k LOC</td>
<td>380k LOC</td>
</tr>
<tr>
<td>Chosen Sample</td>
<td>63k LOC, 160 Classes</td>
<td>40k LOC, 75 Classes</td>
<td>41k LOC, 110 Classes</td>
</tr>
</tbody>
</table>
our experiments. We aimed to use both commercial and free-to-use tools, and to integrate both basic measurements and complex metrics. Among the various available tools, we chose the following three:
- **ConQAT**: The tailorable, open-source framework integrates clone detection and structural assessments [16].
- **Teamscale**: This commercial tool evaluates both structural properties and code style to identify code anomalies. These anomalies are called findings and are automatically categorized according to their severity [17].
- **SonarQube**: The open-source tool offers tailorable quality gates. It also provides aggregated measurements like code smells and potential vulnerabilities or bugs [18].
Examples of the extracted attributes are the following:
- **Size**: Lines of Code, Source Lines of Code, Method Lines of Code, Number of Conditions
- **Structural**: Max. Method Length, Avg. Method Length, Max. Block Depth, Loop Length, Max. Loop Depth
- **Cloning**: Clone Coverage, Clone Units
- **Complex Measurements**: Cognitive Complexity, Code Smells, Teamscale-Findings (i.e. the number of quality violations identified by Teamscale)
D. Labeling
In order to learn from our code base, the source code is analyzed and labeled by experts. On the one hand, it is impossible to evaluate source code without context; on the other hand, we had to draw a line regarding what to take into account and what to omit from the analysis. Hence, we chose class-level granularity. The classification is threefold: A, B, and C.
- **Label A** indicates the absence of indicators for maintainability problems with respect to the ease of change.
- **Label B** covers classes with some room for improvement.
- **Label C** is assigned to code that is clearly hard to maintain and requires high effort to be changed.
Our experiment aims to capture the experience of professional experts. Therefore, it is imperative to label the data according to that expertise. Although this limits the size of the dataset, we still managed to label 345 classes, representing more than 115k Lines of Java Code that had to be inspected and evaluated. In this context, it is not advisable to automatically label large datasets with, for example, a rule-based script: the machine learning algorithm would not capture the expert opinion, but would simply reverse-engineer the rules used for the automated labeling. During the joint assessment of the study objects, both the quality consultants and the researchers evaluated the source code. The judgment of the researchers was then discussed in joint validation sessions, ensuring the provided labels matched the opinion of the experts. The labeling procedure resulted in 182 out of 345 instances (52.75%) being assigned label A, 51 instances (14.78%) label B, and 112 instances (32.46%) label C. The distribution of the labels among the single projects is shown in Figure 2.
IV. EXPERIMENT
Though our dataset covers more than 115k Lines of Code, it accounts for just 345 instances. To avoid bias introduced by splitting the 345 data points into fixed training, validation, and test sets, we used 10-fold stratified cross-validation. Since we use a threefold label and thus face a multiclass classification problem, we use accuracy, precision, recall, and F-Score to evaluate the performance of the algorithms, as suggested by Sokolova [19]. In addition, we analyzed differences in performance between the classes A, B, and C.
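For a threefold label, the weighted variants of these measures can be computed as follows (an illustrative scikit-learn sketch; the study reports Weka's weighted averages):

```python
# Weighted multiclass metrics for the threefold label (illustrative sketch).
from sklearn.metrics import (accuracy_score, f1_score,
                             precision_score, recall_score)

def evaluate(y_true, y_pred) -> dict:
    return {
        "accuracy":  accuracy_score(y_true, y_pred),
        "precision": precision_score(y_true, y_pred, average="weighted"),
        "recall":    recall_score(y_true, y_pred, average="weighted"),
        "f_score":   f1_score(y_true, y_pred, average="weighted"),
    }

# Per-class F-Scores as in Table III:
# f1_score(y_true, y_pred, average=None, labels=["A", "B", "C"])
```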
A. Prediction Results
Our experiments are implemented using the Waikato Environment for Knowledge Analysis (Weka) [20] Version 3.9.3. Every algorithm was run once in its default configuration before hyperparameter optimization was applied. The results discussed in the remainder of this subsection correspond to the best observed performance of each classifier.
The algorithm with the best results was J48, a decision tree based algorithm. It was able to classify 279 instances (81%) correctly. It achieved a precision of 79.7% with a recall of 80.9%, combining for an F-Score of 80.1%. The performance of the best classifiers and baseline comparisons are denoted in Table II. The table also shows that J48 outperforms the other classifiers in all four performance measures.
In addition to the performance over the whole dataset, we also investigated differences between the categories. Table III denotes the F-Score per class for the three best-performing classifiers. Indeed, a significant drop for files from category B...
TABLE II
<table>
<thead>
<tr>
<th>Classifier</th>
<th>Accuracy</th>
<th>Precision</th>
<th>Recall</th>
<th>F-Score</th>
</tr>
</thead>
<tbody>
<tr>
<td>J48</td>
<td>0.8087</td>
<td>0.7967</td>
<td>0.8087</td>
<td>0.8009</td>
</tr>
<tr>
<td>LMT</td>
<td>0.8078</td>
<td>0.7893</td>
<td>0.7971</td>
<td>0.7757</td>
</tr>
<tr>
<td>SimpleLogistic</td>
<td>0.6666</td>
<td>0.7071</td>
<td>0.6667</td>
<td>0.7566</td>
</tr>
<tr>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<td>OneR</td>
<td>0.7102</td>
<td>0.6430</td>
<td>0.7101</td>
<td>0.6540</td>
</tr>
<tr>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
<td>...</td>
</tr>
<tr>
<td>Multilayer Perceptron</td>
<td>0.6667</td>
<td>0.6667</td>
<td>0.6667</td>
<td>0.5667</td>
</tr>
<tr>
<td>ZeroR</td>
<td>0.5275</td>
<td>n/a</td>
<td>0.5275</td>
<td>n/a</td>
</tr>
</tbody>
</table>
TABLE III
<table>
<thead>
<tr>
<th>Classifier</th>
<th>Category A F-Score</th>
<th>Category B F-Score</th>
<th>Category C F-Score</th>
</tr>
</thead>
<tbody>
<tr>
<td>J48</td>
<td>0.874</td>
<td>0.449</td>
<td>0.842</td>
</tr>
<tr>
<td>LMT</td>
<td>0.859</td>
<td>0.290</td>
<td>0.862</td>
</tr>
<tr>
<td>SimpleLogistic</td>
<td>0.860</td>
<td>0.161</td>
<td>0.860</td>
</tr>
</tbody>
</table>
can be observed. J48 only achieves an F-Score of 44.9%, while the performance is even worse for LMT and SimpleLogistic, with 29.0% and 16.1%, respectively.
B. Attribute Evaluation
Given the combination of the tool output and the categories assigned by manual inspection, we analyzed the resulting matrix for the most influential features. We applied six different feature selection algorithms to our data. Table IV lists the results of the InfoGain and OneR attribute evaluators; a rough code analogue of such evaluators is sketched after the table. Due to space limitations, we list neither the results of all algorithms nor all 67 attributes. For presentation reasons, we count the number of times a feature was part of the 10 highest-ranked attributes; this number is provided in the right-most column of Table IV, and features with fewer than four votes are omitted. The attribute evaluation identified clone coverage as one of the most predictive features. Clone units were selected by five out of six techniques, whereas Teamscale-Findings and the maximum size of a method were selected in four of the six cases. Hence, these characteristics are considered the most influential features.
TABLE IV
<table>
<thead>
<tr>
<th>Attribute</th>
<th>InfoGain Score</th>
<th>OneR AttrEval</th>
<th>Top10 Appearances</th>
</tr>
</thead>
<tbody>
<tr>
<td>Clone Coverage</td>
<td>0.3070</td>
<td>69.86</td>
<td>6</td>
</tr>
<tr>
<td>Clone Units</td>
<td>0.2633</td>
<td>71.01</td>
<td>5</td>
</tr>
<tr>
<td>Teamscale-Findings</td>
<td>0.2777</td>
<td>66.38</td>
<td>4</td>
</tr>
<tr>
<td>Max. SLOC per Method</td>
<td>0.2415</td>
<td>64.03</td>
<td>4</td>
</tr>
<tr>
<td>Max. LOC per Procedure</td>
<td>0.2226</td>
<td>63.80</td>
<td>4</td>
</tr>
<tr>
<td>Max. LOC per Method</td>
<td>0.2164</td>
<td>65.51</td>
<td>4</td>
</tr>
</tbody>
</table>
(i) non-normalized, minimum length of 50 units
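As a rough code analogue of these attribute evaluators (Weka's InfoGain and OneR implementations differ in detail), one can rank features by mutual information and by the cross-validated accuracy of a one-feature decision stump:

```python
# Illustrative analogues of the attribute evaluators: mutual information
# (InfoGain-like) and a one-feature decision stump (OneR-like).
import numpy as np
from sklearn.feature_selection import mutual_info_classif
from sklearn.model_selection import cross_val_score
from sklearn.tree import DecisionTreeClassifier

def rank_features(X: np.ndarray, y, names, top: int = 10):
    """Return the `top` features by mutual information, with stump accuracy."""
    mi = mutual_info_classif(X, y, random_state=0)
    stump_acc = [cross_val_score(DecisionTreeClassifier(max_depth=1),
                                 X[:, [j]], y, cv=10).mean()
                 for j in range(X.shape[1])]
    order = np.argsort(mi)[::-1][:top]
    return [(names[j], mi[j], stump_acc[j]) for j in order]
```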
V. DISCUSSION
This experiment uses a threefold label, as we think a threefold classification best captures the expert understanding of maintainability. We did not compare the results with other label schemes, such as a twofold label. Binary labels do not reflect the real world and, even more importantly, do not reflect the way experts perceive quality. For the very same reason, we decided to use a classification model instead of regression models as implemented in [12]. In our experience, a numerical value does not correspond to the way experts perceive quality: quality analysts do not aim to retrieve a numerical value, but to develop a general understanding of existing problems.
A. Interpretation of the Prediction Results
The results presented in Section IV show that the assigned label corresponds to the experts' categorization up to 80.87% of the time. The classifiers J48, LMT, and SimpleLogistic delivered the best results in our experiment. They clearly outperform baseline classifiers such as ZeroR by a large margin. The best-performing algorithm, J48, is based on C4.5, a decision tree algorithm described in detail in [21]. It achieved an accuracy of more than 80% and an F-Score greater than 80% as well. LMT, the second-best performing algorithm, also implements a decision tree. In contrast to J48, LMT uses logistic functions at the leaves [22].
Analyzing the performance of these three algorithms, we observed significant differences between the three categories A, B, and C. As illustrated in Table III, the F-Score for category B only ranged from 16% to 45%, while being above 84% for the other classes. We interpret this finding as follows: our prediction approach is able to identify classes with good quality and classes with bad quality, but performs poorly for the mediocre label. To solidify this interpretation, we analyzed the misclassifications. Using J48, 7 instances of category A were erroneously classified as C (4%), 12 instances were mistaken for B (7%), and 163 instances (89%) were labeled correctly. In contrast, 10 instances of category C were misclassified as A (9%), 6 instances (5%) were classified as B, and 96 instances (86%) were labeled correctly.
Given these observations, the classification results can be interpreted both optimistically and pessimistically. The goal of industrial software quality assessments is to identify which parts of the system suffer from bad quality. Based on the identified issues, measures are taken: rebuild the system, renovate certain components, or restructure the development team [6]. The analysis of the misclassifications shows that the automated approach is not yet suitable to replace the human expert in finding these trouble spots. Not only does it assign wrong labels 19% of the time, but some of the misclassifications are severe: hard-to-maintain code was misclassified as easy to maintain 9% of the time. Missing that share of potential trouble spots precludes relying on the classification in critical software assessments. System owners should not derive far-reaching actions from a classification with just 81% accuracy.
However, we still consider the achieved results a first step towards automated quality analysis. Static analysis tools are not only used for external quality assessments, but also for continuous quality control. Using the tools SonarQube, Teamscale, and ConQAT, one obtains 67 different measurements, making it hard to reason about maintainability at a glance. This work presents a method to aggregate the different metrics in a way that is learned from experienced experts. Though not comparable with a human expert, the automated classification helps developers identify a large share of the code with maintainability issues.
B. Interpretation of Attribute Evaluation
As mentioned earlier, static code analysis tools analyze source code and report the measured characteristics. The user must draw conclusions and interpret the metrics based on their experience and expertise. In this research, we created a dataset of source code, the static tool output for this code, and its expert evaluation. The most influential metrics presented in Table IV give developers insight into the expert evaluation. Hence, we believe our feature analysis provides valuable guidance on which metrics to focus on to predict the expert opinion. The first two metrics to take into account are clone coverage and clone units, since they have the highest correlation with the expert judgment. Teamscale-Findings should be considered next, as well as the maximum method length. This does not mean that all other metrics should be ignored, but this set already offers a good indication of code maintainability.
In the context of this research, maintainability was defined as the ease of change, i.e., a combination of the comprehensibility of the code itself and the ease of understanding which dependencies have to be updated as well. Clone coverage and clone units refer to code duplication. Modifying a code snippet that has a duplicate in another place forces the developer to search for the clone and apply the change there as well [23]. With code duplication thus leading to decreased maintainability, it is not surprising that cloning measurements show high predictive power. Interestingly, as opposed to the size of a method, the size of a class is not amongst the most influential features. Teamscale and other static analysis tools automatically rate large classes with more than 750 Source Lines of Code as hard to maintain [24]. We did not apply such fixed thresholds but actually rated every program class manually. Hence, we consider the results of the feature analysis a valuable contribution to research, as they reverse-engineer the intuition of the human experts.
C. Threats to Validity and Future Work
In this study, the maintainability of classes was evaluated manually. To mitigate this threat to internal validity, validation sessions were performed to discuss the evaluation. Still, the assessment was performed by quality consultants from just one company. We also note that our dataset consists of only three systems, covers just two domains, and only includes Java code. Moreover, the dataset is imbalanced, with classes A and C dominating the distribution.
This work presents initial findings and promising results on using static analysis metrics to classify the maintainability of source code. For future work, we plan to explore metrics derived from mining identifiers, method names, and comments as well. Investigating the influence of class network metrics on maintainability is also part of our plan. In the meantime, reducing the number of features and increasing the size of the dataset are our priorities, in order to reduce the risk of overfitting and increase the reliability of the classification model. Finally, there is one major limitation to the chosen approach: while inspecting and evaluating the source code, we observed that several negative findings are of a semantic nature. Static code analysis tools have their strengths in assessing structural characteristics, but they cannot detect semantic flaws. For example, discrepancies between implemented behavior and documentation lower perceived comprehensibility but are not reflected by structural metrics.
VI. CONCLUSION
The goal of this study is to model the experience of professional quality analysts using machine learning. Therefore, a sample of 115,373 Lines of Code was selected from three study objects, including two industrial systems. In joint work with professional quality analysts, the source code was inspected and evaluated at class level. The evaluation is based on the ease of change, i.e., the comprehensibility of the source code and the ease of understanding which external dependencies have to be updated after a change. The experts assigned the labels A, B, and C to each file; label C indicates code that is hard to maintain, while A corresponds to the absence of indications of low maintainability. While manual assessments are a well-established method to evaluate the quality of software, many developers use static analysis tools to monitor quality. In this study, we used metrics emitted by such tools to learn and predict the expert judgment. The algorithm J48 achieved an accuracy of 81% and a precision of 80%. We consider this approach a promising first step toward automated software evaluation. While the performance is sufficient for quick assessments, it is not yet suitable to replace an expert review. In addition, we analyzed the used features and investigated their predictive power. We found that clone coverage and clone units are the most influential features. Teamscale-Findings, i.e., the number of quality violations computed by Teamscale, and the maximum method length also have high predictive power. This result provides guidance on which metrics to prioritize for maintainability evaluations, based on their correlation with the expert judgment.
ACKNOWLEDGMENT
The authors would like to thank itestra for their participation and their commitment to publishing this joint work. In particular, we appreciate the valuable and constructive knowledge transfer and the opportunity to learn from industrial experience.
REFERENCES
On the Correctness of Metadata-based SBOM Generation: A Differential Analysis Approach
Sheng Yu∗†, Wei Song†, Xunchao Hu†, Heng Yin∗†
∗University of California, Riverside
†Deepbits Technology Inc.
syu061@ucr.edu, wei@deepbits.com, xchu@deepbits.com, heng@cs.ucr.edu
Abstract—Amidst rising concerns of software supply chain attacks, the Software Bill of Materials (SBOM) has emerged as a pivotal tool, offering a detailed listing of software components to manage vulnerabilities, dependencies, and licensing. While many SBOM generation tools are extensively used in both commercial and open-source realms, the correctness of these tools remains largely unscrutinized. To date, there has not been a systematic study addressing the correctness of contemporary SBOM generation solutions. In this paper, we conduct a large-scale differential analysis of the correctness of four popular SBOM generators. Surprisingly, our evaluation reveals all four SBOM generators exhibit inconsistent SBOMs and dependency omissions, leading to incomplete and potentially inaccurate SBOMs. Moreover, we construct a parser confusion attack against these tools, introducing a new attack vector to conceal malicious, vulnerable, or illegal packages within the software supply chain. Drawing from our analysis, we propose best practices for SBOM generation and introduce a benchmark to steer the development of more robust SBOM generators.
I. INTRODUCTION
Software Supply Chain Attacks (e.g., SolarWinds [18], PyTorch dependency confusion attack [9]) have increased by 742% between 2019 and 2022 [16]. In 2022 alone, 185,572 software packages were affected by these attacks [1]. The lack of visibility and transparency in the software supply chain makes defending against such attacks challenging. Recently, the Software Bill of Materials (SBOM) [10], a list of “ingredients” used to build software, has demonstrated its efficacy in protecting the software supply chain by enhancing visibility from software development to consumption. Driven by regulations, such as Biden’s executive order [3] and the National Cybersecurity Implementation Plan [7], the industry is adopting SBOM-based solutions to safeguard the software supply chain.
An essential step in adopting SBOM is to generate accurate SBOMs. While SBOMs have the potential to enhance vulnerability detection and facilitate license compliance, these benefits can only be realized if the SBOMs themselves are precise and correct. Discrepancies or omissions in the SBOM can lead to false assurances of security or compliance, exposing systems to potential risks. Many SBOM generation tools [4], [6], [12], [13] are extensively used in both commercial and open-source realms. However, the correctness of these tools remains largely unscrutinized. To date, there has not been a systematic study addressing the correctness of contemporary SBOM generation solutions.
Given the diversity of programming languages, build tools, and development practices, constructing a ground truth for SBOM generation evaluation is inherently challenging. In this paper, we adopt a differential analysis approach: we analyze the discrepancies in SBOMs produced by different tools for the same software to assess both their correctness and weaknesses in SBOM generation. More specifically, we 1) select four popular SBOM generators: Trivy [13], Syft [12], Microsoft SBOM Tool [6], and GitHub Dependency Graph [4]; 2) collect 7,876 open-source projects written in Python, Ruby, PHP, Java, Swift, C#, Rust, Golang and JavaScript; 3) evaluate the correctness of the SBOMs by conducting a differential analysis on the outputs from these four tools.
Surprisingly, our evaluation reveals all four SBOM generators exhibit inconsistent SBOMs and dependency omissions, leading to incomplete and potentially inaccurate SBOMs. Moreover, we construct a parser confusion attack against these tools, introducing a new attack vector to conceal malicious, vulnerable, or illegal packages within the software supply chain. To assist in creating more effective SBOM generators, we have developed best practices for SBOM generation and a benchmark to facilitate their development based on our evaluation findings.
In summary, we make the following contributions in this paper:
- We are the first to conduct a large-scale differential analysis to examine the correctness of SBOM generation solutions.
- Our evaluation reveals significant deficiencies in current SBOM generators. We also conduct a comprehensive case study to uncover how each SBOM tool detects dependencies during the generation process.
- We construct a parser confusion attack against SBOM generators, introducing a new attack vector to inject malicious, vulnerable, or illegal software packages into the software supply chain.
- We develop best practices for developing SBOM generators and a benchmark to facilitate their development.
II. BACKGROUND
A. Software Bill of Materials
An SBOM [10] is a formal, machine-readable inventory of software components and dependencies that includes information about those components and their hierarchical relationships. It can be shared and exchanged automatically among stakeholders (e.g., software vendors and consumers) to enhance software development, software supply chain management, vulnerability management, asset management, and procurement. This results in reduced costs, security risks, license risks, and compliance risks.
SBOM Types: Based on the stages of the software lifecycle at which SBOMs are generated, they can be categorized into six types [14]: Design, Source, Build, Analyzed, Deployed, and Runtime. Depending on what information is available in each stage, these types of SBOMs focus on different aspects. In this paper, we evaluate Source SBOM, a type of SBOM derived from the development environment. It mainly contains dependencies used for development and compilation, and is widely supported by SBOM tools. Also, our survey suggests that, owing to its simplicity and precision, metadata parsing is the industry’s leading SBOM generation technique. Thus, this paper focuses on the Source SBOM generated using the metadata-based approach.
SBOM Applications: The increasing complexity and interdependence in software development have amplified the importance of SBOMs. They provide clarity by listing software components explicitly, enabling swift vulnerability tracking and identification for developers and security professionals. Their compatibility with the Vulnerability Exploitability eXchange (VEX) [15], a structured format detailing product vulnerabilities, is noteworthy. Additionally, the comprehensive dependency information aids in license assessment, ensuring compliance and mitigating legal exposure. SBOMs enable quality assessment of closed-source software through component reputation checks, and their transparency fortifies the software supply chain by thwarting the introduction of potential backdoors and vulnerabilities via third-party components.
B. Metadata
At the heart of Source SBOM generation lies metadata, an important element of modern software development. Metadata files encapsulate parameters, settings, dependencies, and version constraints, all of which are indispensable for reproducibility and for consistent, reliable deployment; they also support package management, version control, and even automated build processes. Nowadays, almost every programming language comes with at least one package manager, and each package manager defines its own metadata.
At a high level, there are two kinds of metadata. The first is “raw” metadata, in which only direct dependencies are specified and their versions are often given as a range or a constraint instead of a specific (pinned) one. Raw metadata, such as requirements.txt for Python and package.json for Node.js, mainly serves dependency declaration while preserving a degree of flexibility and future compatibility. The other kind is the lockfile, such as package-lock.json for Node.js. Lockfiles provide a precise, deterministic snapshot of the exact dependency tree, including transitive dependencies. Locking prevents unexpected updates or changes in the dependencies when installing the project across different environments, ensuring reproducibility and avoiding compatibility issues. Although lockfiles contain the richest information for SBOM generation, they are not always available: library developers are discouraged from sharing lockfiles, which could otherwise lead to version conflicts, and some package managers lack a native locking mechanism. Without lockfiles, the missing transitive dependencies and pinned versions pose a great challenge for SBOM tools in generating accurate and complete SBOM files.
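As a concrete illustration, the following minimal sketch (with hypothetical file contents) contrasts a raw package.json version range with the pinned snapshot a package-lock.json provides; it is our own example, not tool code.

```python
import json

# Hypothetical raw metadata: a version *range*; the installed version may vary.
raw_package_json = '{"dependencies": {"lodash": "^4.17.0"}}'

# Hypothetical lockfile fragment: an exact, pinned version (npm lockfile v2+
# records resolved packages under a "packages" key).
lockfile_fragment = '{"packages": {"node_modules/lodash": {"version": "4.17.21"}}}'

print(json.loads(raw_package_json)["dependencies"])   # {'lodash': '^4.17.0'}
print(json.loads(lockfile_fragment)["packages"])      # pinned snapshot
```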
III. METHODOLOGY
Despite the growing significance and adoption of SBOMs, a notable gap exists in systematically assessing the quality of the SBOM files generated. The reliability of security-centric applications, including vulnerability detection and license compliance, highly depends on the correctness of SBOM data, which raises concerns regarding the trustworthiness of such information.
This work aims to investigate the correctness and completeness of the dependency information present in generated SBOMs. The objective is to not only measure the correctness but also to unravel the underlying factors contributing to high-quality SBOMs. Due to the lack of ground truth, we adopt a differential analysis approach to obtain insights into the performance of SBOM generators.
A. SBOM Generators
In this work, we evaluate four SBOM tools: Trivy 0.43.0, Syft 0.84.1, Microsoft SBOM Tool (sbom-tool) 1.1.6, and GitHub Dependency Graph (GitHub DG). Notably, the first three are popular open-source projects and offer cross-platform support for Linux, Windows, and macOS. In contrast, the GitHub Dependency Graph is tightly integrated with GitHub repositories. We choose Trivy and Syft because they are the de facto SBOM generators used by industry and open-source communities. We pick the Microsoft SBOM Tool because it is developed and maintained by Microsoft, and the GitHub Dependency Graph because it is provided by the most widely used Git platform. All the evaluated SBOM tools implement metadata-based approaches, meaning they read metadata files and extract the dependency information declared in them.
B. Setup
The evaluation was conducted by downloading popular GitHub repositories associated with each programming language onto the local file system and then scanning the repository directories with the SBOM tools. Each tool generates an SBOM report in either CycloneDX [8] or SPDX [5] format, depending on which format the tool supports. Dependencies in these reports are then extracted and compared against each other.
**Dataset:** GitHub repositories were sourced from the well-regarded awesome-LANGUAGE repositories, which are uniquely tailored to the respective programming languages. Our dataset contains 535 Python, 819 Ruby, 384 PHP, 398 Java, 1,019 Swift, 700 C#, 994 Rust, 2,367 Golang, and 660 JavaScript repositories. We do not evaluate C/C++ projects due to the absence of an “official” build toolset and extremely limited support provided by the SBOM tools. C/C++ projects can be configured and built via various tools such as Bazel, Makefile, CMake, Visual Studio project files, and more. Consequently, Trivy and Syft only analyze conan.lock, while GitHub Dependency Graph exclusively focuses on *.vcxproj files.
**Metrics:** For our large-scale evaluation, given the absence of ground truth, we adopt a differential analysis approach. First, we compare the number of dependencies reported by each SBOM tool. We then use Jaccard similarity to measure the reported dependency names. This tells us the degree of overlap and commonality among the dependencies reported by different tools. In addition, we identify duplicate packages reported by the SBOM tools. While these metrics may not provide a direct ranking, they do shed light on the performance of these tools.
**IV. LARGE-SCALE SBOM COMPARISON**
After analyzing 7,876 high-quality repositories, we made the following major findings. The reasons behind such discrepancies will be discussed in Section V.
**A. Discrepancies in Package Counts within SBOM Reports Generated by Different Tools**
The SBOM tools exhibited notable differences in the number of packages they identified. Figure 1 depicts this variation; the x-axis is the repository ID, sorted by the number of dependencies detected by the GitHub Dependency Graph. For Python, PHP, Ruby, and Rust, the GitHub Dependency Graph discovers the most packages. For .NET repositories, the Microsoft SBOM Tool identifies the most packages, which is unsurprising as it is tailored to Microsoft's own ecosystem. For Go and Swift, Trivy and the Microsoft SBOM Tool are the frontrunners, consistently identifying the most packages in the majority of cases. Syft detects the highest number of packages for JavaScript repositories. The disparities in this figure underscore that different tools possess varying capabilities and strategies for identifying dependency packages across programming languages. It is important to note, however, that identifying more packages does not necessarily mean better results, because false positives may also be included.
**B. Low Package Jaccard Similarities**
To measure whether the SBOM tools detect similar dependencies for each repository, we compute a Jaccard similarity for each SBOM tool pair for each repository as Equation 1 shows. A and B are two sets of dependencies generated by two different SBOM tools. Each set contains dependency (name, version) pairs.
\[
J(A, B) = \frac{|A \cap B|}{|A \cup B|} \tag{1}
\]
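A minimal sketch of this metric as used here, with illustrative (hypothetical) dependency sets:

```python
def jaccard(a: set, b: set) -> float:
    """Jaccard similarity (Equation 1) of two dependency sets.

    Each element is a (name, version) pair, matching the comparison above.
    """
    if not a and not b:
        return 1.0  # convention chosen for this sketch
    return len(a & b) / len(a | b)

sbom_a = {("requests", "2.31.0"), ("flask", "2.3.2")}
sbom_b = {("requests", "2.31.0"), ("jinja2", "3.1.2")}
print(jaccard(sbom_a, sbom_b))  # 0.333... : one shared pair out of three total
```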
Our evaluation result is illustrated in Figure 2. The majority of these pairs show significant dissimilarity, with only a very small portion being similar. As shown in Figure 2(a), the GitHub Dependency Graph and Syft are the most similar pair, although the majority of SBOM reports still exhibit substantial differences.
**C. Duplicate Packages in SBOMs**
During our analysis of the generated SBOMs, we identified instances of duplicate packages: the same package appearing in different entries with varying or the same version requirements. To ensure accurate calculations, we excluded repositories in which tools could not find any packages.
Table I presents the rate of duplicate packages for the four SBOM tools. The problem is widespread across all of them, suggesting a common occurrence. It is important to note, however, that duplicate packages are expected in some cases: for example, a repository may contain multiple independent projects that happen to share a common subset of dependencies.
**TABLE I: Rate of Duplicate Packages per Tool**

| Language | Syft | Trivy | GitHub DG | sbom-tool |
|---|---|---|---|---|
| Python | 14.05% | 12.56% | 13.54% | 13.71% |
| Java | 12.76% | 15.01% | 19.93% | 18.89% |
| JavaScript | 17.46% | 17.34% | 18.89% | 19.42% |
| Go | 9.97% | 6.69% | 11.03% | 6.58% |
| .NET | 17.38% | 12.43% | 18.01% | 20.94% |
| PHP | 13.76% | 11.77% | 14.53% | 23.76% |
| Ruby | 13.56% | 9.1% | 15.84% | 12.39% |
| Rust | 13.19% | 11.37% | 19.18% | 13.83% |
| Swift | 1.37% | 2.28% | 6.98% | 3.39% |
**V. SBOM GENERATION ANALYSIS**
To uncover the root causes behind the large disparities in SBOM outputs, we conducted an in-depth analysis of the source code of the SBOM tools. Our examination revealed several critical issues in SBOM generation, which are summarized below.
**A. Limited Support for Metadata**
All the evaluated tools employ a metadata-based approach where they analyze metadata to identify the components used in the project. The supported metadata file types for each tool are detailed in Table II. It is important to note that the table indicates the tools’ actual capability to extract dependencies from metadata, which may differ from their claims.
The table illustrates that each tool supports only a subset of commonly used metadata files. Overall, the SBOM tools have good support for lockfiles in which transitive dependencies and pinned versions are available, but they struggle with raw metadata. The GitHub Dependency Graph has the best support for raw metadata such as `Gemfile` and `Cargo.toml`, while other tools show limited or no support for raw metadata. Despite claims by Trivy and Syft to support `package.json`, they do not extract dependencies from the JSON file. In our evaluation, we found that 93% of Python repositories, 47% of JavaScript repositories, and 56% of Rust repositories contain raw metadata only.
B. Incomplete Metadata Parsing
Our evaluation shows that all the evaluated SBOM tools implement custom parsers for metadata. However, certain metadata, like `requirements.txt` defined in PEP 508, poses challenges due to its complex syntax. The self-implemented parsers only support common syntaxes, leading to false negatives. For instance, the lack of support for the backslash “\” as a line continuation in all the SBOM tools causes parsing errors, resulting in incorrect versions or missed dependencies. About 1.8% of Python repositories are affected by this.
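A minimal sketch (our own illustration, not any tool's actual code) of how a line-based parser silently mishandles a backslash continuation, and the one-line fix of joining continued lines first, as pip's requirements parser does:

```python
metadata = "requests \\\n    ==2.31.0\nflask==2.3.2\n"

def naive_parse(text):
    deps = {}
    for line in text.splitlines():
        if "==" in line:
            name, _, version = line.partition("==")
            deps[name.strip()] = version.strip()
    return deps

def continuation_aware_parse(text):
    # Join backslash-continued lines before splitting into declarations.
    return naive_parse(text.replace("\\\n", " "))

print(naive_parse(metadata))
# {'': '2.31.0', 'flask': '2.3.2'} -- 'requests' is lost entirely
print(continuation_aware_parse(metadata))
# {'requests': '2.31.0', 'flask': '2.3.2'}
```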
C. Transitive Dependency
Because most of the SBOM tools work offline (the Microsoft SBOM Tool being the exception), they make no attempt to resolve transitive dependencies. When lockfiles are not present, the absence of transitive dependencies adversely affects SBOM applications. The Microsoft SBOM Tool attempts to resolve transitive dependencies by querying package managers for each detected dependency, but this functionality is not well implemented and often fails to retrieve dependency information. About 74% of Python dependencies are transitive.
D. Limited Support for Version Constraints
Raw metadata often contains version ranges or constraints instead of pinned versions; for example, developers use `>=1.2.3 <2.0.0` to get the latest compatible version while guarding against breaking changes. Trivy and Syft handle version constraints by silently discarding dependencies without pinned versions, resulting in false negatives. The GitHub Dependency Graph reports version ranges as they appear in the metadata, introducing additional parsing challenges for SBOM management. In our evaluation, only 46% of dependencies declared in requirements.txt have pinned versions, indicating that Trivy and Syft may miss more than half of the dependencies even before transitive dependencies are considered. The Microsoft SBOM Tool addresses this by pinning a version after querying the corresponding package manager for the latest version within the specified range.
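A sketch of interpreting such a constraint instead of discarding it, using the `packaging` library (which pip itself builds on); note that Python's specifier syntax separates clauses with a comma rather than a space:

```python
from packaging.specifiers import SpecifierSet
from packaging.version import Version

constraint = SpecifierSet(">=1.2.3,<2.0.0")
candidates = [Version(v) for v in ("1.2.2", "1.5.0", "2.0.0")]

# Pin the newest release satisfying the range, mimicking what a package
# manager (or the Microsoft SBOM Tool's registry lookup) would do.
matching = [v for v in candidates if v in constraint]
print(max(matching))  # 1.5.0
```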
E. Inconsistent Package Naming Convention
When dealing with packages that have compound names, SBOM tools name them differently. For Java, a package is located by its group ID and artifact ID. Syft uses the artifact ID alone as the package name, the Microsoft SBOM Tool concatenates the group and artifact IDs with a dot “.”, while Trivy and the GitHub Dependency Graph use a colon “:” for this purpose. Similarly, the dependency manager CocoaPods supports subspecs when declaring a dependency. Subspecs are a way of chopping up the functionality of a library, allowing people to install a subset of it. Syft and Trivy report the subspecs, while the Microsoft SBOM Tool reports their main dependency names. Additionally, Golang uses a leading letter “v” when specifying versions (e.g., v1.0.0). Syft and the Microsoft SBOM Tool adhere to this convention, while Trivy and the GitHub Dependency Graph omit the leading letter. Such inconsistencies can potentially compromise the accuracy of vulnerability detection.
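A minimal normalization sketch (our own illustration) for comparing SBOMs across tools; note that the dot-joined Maven form is inherently ambiguous, since group IDs themselves contain dots:

```python
def normalize(ecosystem: str, name: str, version: str):
    """Best-effort canonicalization of the inconsistencies described above."""
    if ecosystem == "golang":
        version = version.lstrip("v")        # unify 'v1.0.0' vs '1.0.0'
    if ecosystem == "maven" and ":" not in name:
        # 'group.artifact' cannot be split back reliably:
        # 'org.apache.commons.commons-lang3' offers several plausible splits.
        return name, version, "ambiguous"
    return name, version, "ok"

print(normalize("maven", "org.apache.commons:commons-lang3", "3.12.0"))  # ok
print(normalize("maven", "org.apache.commons.commons-lang3", "3.12.0"))  # ambiguous
print(normalize("golang", "github.com/pkg/errors", "v0.9.1"))
```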
**F. Different Dependency Definition**
SBOM tools employ different strategies regarding whether to include development dependencies (e.g., test suites and linters) in SBOM files. Trivy focuses solely on production dependencies and ignores development dependencies, whereas Syft and the GitHub Dependency Graph include both. Our evaluation reveals that in JavaScript, 76% of dependencies declared in package.json are development dependencies. It is crucial to note that there is no definitive answer as to which approach is better: including development dependencies in the SBOM report offers advantages such as more comprehensive vulnerability assessments and license violation checks, but it may also introduce false alarms, as the code of development dependencies rarely ends up in the final product.
The root problem lies in the absence of a field in current SBOM formats representing the dependency scope. While most metadata formats have distinct fields for this purpose, such as the scope field in pom.xml and devDependencies in package.json, the SBOM formats lack this support, which may cause confusion in downstream applications.
**G. Multiple Projects and Metadata**
Our evaluation indicates that, on average, over 10% of the detected dependencies appear more than once in a repository, causing duplicate entries in SBOM files. This is primarily due to multiple metadata files being present in a repository, either because of multiple subprojects or submodules, or because both raw metadata and lockfiles are present. The SBOM tools analyze metadata files individually without merging dependencies belonging to the same project. Duplicate entries in SBOMs can lead to confusion and inflate the apparent package count. On average, our dataset contains 5.7 metadata files per Python repository and 12.8 per JavaScript repository.
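A minimal sketch of merging per-file results into a single deduplicated inventory, with hypothetical inputs:

```python
from collections import Counter

per_file_results = [
    [("requests", "2.31.0"), ("flask", "2.3.2")],  # e.g. a requirements.txt
    [("requests", "2.31.0")],                      # e.g. a subproject's lockfile
]
flat = [dep for deps in per_file_results for dep in deps]
duplicates = [d for d, n in Counter(flat).items() if n > 1]
merged = sorted(set(flat))   # one entry per (name, version) pair
print(duplicates)  # [('requests', '2.31.0')]
print(merged)
```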
**H. Accuracy on Ground Truth**
Our large-scale evaluation employed differential analysis due to the lack of ground truth. In this section, we quantify the accuracy of each SBOM tool on requirements.txt using manually crafted ground truth. The ground truth is obtained by dry-running pip install (Python 3.11, pip 23.1.2), and we count a reported dependency as correct when its (name, version) pair matches the ground truth. A dry run simulates the installation process, so the dependencies reported by pip install are exactly those that would be installed in our environment. This evaluation highlights the differences between the reported libraries and the ones actually installed.
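A minimal sketch of the scoring used here, with hypothetical sets:

```python
def precision_recall(reported: set, ground_truth: set):
    true_pos = reported & ground_truth           # exact (name, version) matches
    precision = len(true_pos) / len(reported) if reported else 0.0
    recall = len(true_pos) / len(ground_truth) if ground_truth else 0.0
    return precision, recall

ground_truth = {("requests", "2.31.0"), ("urllib3", "2.0.3"), ("idna", "3.4")}
tool_report  = {("requests", "2.31.0"), ("idna", "3.3")}  # wrong idna version
print(precision_recall(tool_report, ground_truth))        # (0.5, 0.333...)
```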
The evaluation result is presented in Table III. Most SBOM tools fail to detect over 90% of the dependencies in requirements.txt due to incomplete syntax support and the lack of transitive dependency resolution. The Microsoft SBOM Tool excels in this test because it attempts to resolve transitive dependencies; although it ignores the extras field, it achieves high accuracy on requirements.txt. Trivy and Syft perform better on requirements.txt files whose versions are fully pinned, where the recall is higher than 70%.
**TABLE III: SBOM Accuracy on requirements.txt**
|  | Trivy | Syft | sbom-tool | GitHub DG |
|---|---|---|---|---|
| **Precision** | 0.25 | 0.25 | 0.74 | 0.13 |
| **Recall** | 0.10 | 0.10 | 0.73 | 0.08 |
The low recall suggests that relying solely on these SBOM tools in practice may have serious negative impacts on downstream applications, such as vulnerability detection and license violation checks.
**VI. PARSER CONFUSION ATTACK**
Motivated by the findings in Section V-H, we present a parser confusion attack [20] to illustrate how adversaries can obscure malicious dependencies. A parser confusion attack exploits inconsistencies among different parsers processing the same input, enabling malicious actors to craft input that is benign for one parser but harmful for another. Our case study shows that SBOM tools, employing custom metadata parsers, introduce a new attack vector for constructing parser confusion attacks within the SBOM ecosystem. In this study, we use Python’s requirements.txt as an illustrative example.
**Constructing the attack:** Given that requirements.txt lacks a locking mechanism and exhibits a rich syntax, it is a suitable candidate for this type of attack. For instance, none of the SBOM tools support the backslash as a line continuation; Trivy and Syft rely on the double-equals sign to separate package names and versions; installation from wheel packages is not universally supported; among others. Table IV provides input patterns that can be used to bypass detection, based on our manual analysis and benchmark (discussed in Section VII). It shows how attackers can leverage different syntax elements to either conceal specific dependencies or confuse SBOM tools, leading to inaccurate results. In the table, a dash (“-”) signifies that the corresponding SBOM tool cannot detect anything from the given dependency declaration.
**Achieving Damage:** When the SBOM tools encounter unsupported syntax, the default behavior is to silently ignore the associated dependency. Adversaries can exploit this to inject malicious or vulnerable dependencies into metadata using unsupported syntax, evading the tools' detection entirely. In our dataset, the two most common such patterns are installing from other requirements files (-r) and installing from version control systems, each appearing in over 50 requirements.txt files.
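In the spirit of Table IV, a minimal sketch (hypothetical package names and URL) of declarations that a naive name==version parser drops silently while pip resolves them:

```python
evasive = """\
-r extra-requirements.txt
git+https://example.com/attacker/evil-package.git@v1.0.0
requests==2.31.0
"""

def naive_parse(text):
    # Only understands 'name==version' lines, like the simplest tool parsers.
    return [line.split("==")[0] for line in text.splitlines() if "==" in line]

print(naive_parse(evasive))  # ['requests'] -- the first two lines vanish silently
```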
VII. BEST PRACTICE AND BENCHMARK
Drawing from our evaluation, we present what we believe are the most optimal solutions to address identified issues and minimize the attack surface. We propose the following best practices for metadata-based approaches:
**Package Manager Dry Run for Lockfile Generation:** The root cause of the large discrepancies lies in the limitations of self-implemented parsers, particularly in their support for metadata and metadata syntax. Instead of relying on these parsers, we recommend employing a package manager dry run to generate lockfiles. This simulates the dependency installation process, providing both transitive dependencies and accurate version information for each package. Adopting this approach ensures the creation of a precise and reliable SBOM file, thereby enhancing resilience against confusion attacks.
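A sketch of this recommendation for pip (assuming pip >= 22.2, which introduced --dry-run and the JSON installation report); the report schema is pip's own, and the requirements file name is illustrative:

```python
import json
import subprocess
import sys

# Resolve the full, pinned dependency tree without installing anything.
subprocess.run(
    [sys.executable, "-m", "pip", "install", "--dry-run", "--quiet",
     "--ignore-installed", "--report", "report.json", "-r", "requirements.txt"],
    check=True,
)
with open("report.json") as f:
    report = json.load(f)
pinned = {(p["metadata"]["name"], p["metadata"]["version"])
          for p in report.get("install", [])}
print(sorted(pinned))  # exact names and versions, transitive deps included
```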
**PURL and CPE Support:** Each dependency should include a PURL (Package URL) entry and a CPE (Common Platform Enumeration) entry to ensure a consistent package naming convention, maximize compatibility with vulnerability databases, and facilitate software identification.
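A minimal sketch of emitting a PURL following the spec's pkg:type/namespace/name@version shape (a full implementation must also percent-encode special characters):

```python
def to_purl(ptype: str, name: str, version: str, namespace: str = "") -> str:
    ns = f"{namespace}/" if namespace else ""
    return f"pkg:{ptype}/{ns}{name}@{version}"

print(to_purl("pypi", "requests", "2.31.0"))
# pkg:pypi/requests@2.31.0
print(to_purl("maven", "commons-lang3", "3.12.0", namespace="org.apache.commons"))
# pkg:maven/org.apache.commons/commons-lang3@3.12.0
```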
In addition, we plan to release our evaluation benchmark and add support for more programming languages. The benchmark includes manually crafted metadata files and ground-truth datasets for common languages. These metadata files aim to cover all supported syntaxes for each language and can be used to evaluate the SBOM tools' ability to handle corner cases. This initiative aims to guide the development of SBOM tools, emphasizing completeness and accuracy.
VIII. DISCUSSION
This study aims to assess the quality of SBOMs produced by widely used SBOM tools. Our analysis exposes deficiencies in the SBOM generation process employed by these tools. Trivy, Syft, and GitHub Dependency Graph do not identify transitive dependencies or determine an appropriate version when no pinned version is provided. In contrast, the Microsoft SBOM Tool reaches out to package managers to validate package names and ascertain a suitable version.
While conducting our evaluation, we encountered a significant challenge stemming from the absence of a well-defined benchmark for accurately assessing the quality of the generated SBOMs. Currently, the industry lacks a standardized dataset and uniform statistical methods for conducting evaluations in this area. In response to this issue, we created our own dataset.
Our experiment focuses on metadata-based Source SBOM generation on the file system. It is important to note that certain SBOM tools, such as Trivy, may exhibit different behaviors depending on the specific targets of their scans. For example, metadata file scanning is enabled for both file system and git repository scans, whereas wheel package analysis is enabled only for Docker image and rootfs scans.
It is worth mentioning that our evaluation was specifically limited to a subset of SBOM tools, namely Trivy, Syft, Microsoft SBOM Tool, and GitHub Dependency Graph. Despite our careful selection of these prominent tools, the dynamic and ever-evolving landscape of SBOM generation solutions implies that our findings may not cover the entirety of available options. There is a possibility that subtle variations presented by other tools might have been inadvertently overlooked.
While metadata-based SBOM generation is relatively simple to implement, this approach has inherent limitations. First, declared dependencies may only be partially built into the final product or not be used at all, potentially leading to false alarms. Transitive dependencies are not well-captured, causing false negatives. Moreover, developers might add code directly to the project for experiments or testing, and metadata-based approaches are unable to detect such cases. We recommend implementing def-use analysis to determine whether each library within the project has been used or not. Additionally, code clone detection can identify libraries introduced via copy & paste. Employing these techniques helps eliminate false positives and false negatives, enhancing the overall correctness of the SBOM.
IX. RELATED WORK
**Software Supply Chain Attacks** Malicious or vulnerable packages have resulted in increasing software supply chain attacks (SolarWinds, NotPetya, etc.). Various approaches have been proposed. SBOM demonstrates its efficiency in managing risks in the software supply chain and has been advocated by both the industry and government stakeholders.
**SBOM & Vulnerability Exploitability eXchange (VEX)** VEX, as defined by NTIA, is a “companion artifact” to an SBOM, allowing manufacturers to share product vulnerability exploitability in a standardized, automatable format. Ahmed et al. applied SBOM tools to assess how code debloating reduces vulnerabilities in Docker images. Numerous tools (DependencyTrack, DeepSCA, Nadgowa, Girdha, etc.) have been developed to support SBOM generation and consumption. In particular, DeepSCA is a complimentary online service that generates different types of SBOMs and conducts risk analysis for most popular languages and platforms, with or without source code.
**Software Composition Analysis (SCA)** Apart from metadata-based parsing, SCA is also a promising technique for generating SBOMs. When source code is available, SCA solutions such as CENTRIS and Tamer can be combined with program analysis to identify components that are actively invoked in the software, yielding more accurate SBOMs. When the source code is not available, binary-focused SCA tools like BAT, OSSPolice, B2SFinder, and LibScout utilize string literals and other language-specific features to discern components in the examined binaries. Though their accuracy might not be optimal, they still enhance transparency to a certain degree.
X. CONCLUSION AND FUTURE WORK
In this paper, we conducted the first large-scale differential analysis to examine the correctness of SBOM generation solutions. We generated SBOMs using four popular SBOM generators for 7,876 open-source projects and systematically studied the correctness of these SBOMs. Our evaluation uncovered significant deficiencies in current SBOM generators. Additionally, we identified design flaws in each SBOM generator and devised a parser confusion attack against these generators, introducing a new path for injecting malicious, vulnerable, or illegal packages. Finally, based on our findings, we established best practices for creating SBOM generators and introduced a benchmark to aid their development.
In the future, we plan to extend our benchmark to support languages beyond just Python. Additionally, we aim to establish a ranking system to qualitatively measure the quality of SBOM generators in the market, allowing security professionals to select the most suitable tools and SBOM generator vendors to evaluate and improve their offerings.
REFERENCES
[16] “Why 2023 is the year for software supply chain attacks,” Hadrian blog, May 23, 2023. https://hadrian.io/blog/why-2023-is-the-year-for-software-supply-chain-attacks
Hierarchical Conditional Dependency Graphs as a Unifying Design Representation in the CODESIS High-Level Synthesis System
Apostolos A. Kountouris
MITSUBISHI ELECTRIC ITE
80, Av. Des Buttes de Coesmes
35700 Rennes, FRANCE
kountouris@tcl.ite.mee.com
Christophe Wolinski
IRISA
Campus Universitaire de Beaulieu
F-35042 Rennes CEDEX, FRANCE
wolinski@irisa.fr
Abstract
In high-level hardware synthesis (HLS) there is a gap in the quality of the synthesized results between data-flow and control-flow dominated behavioral descriptions: heuristics designed for the former usually perform poorly on the latter. To close this gap, the CODESIS interactive HLS tool relies on a unifying intermediate design representation and adapted heuristics that accommodate both types of designs as well as designs of mixed data-flow and control-flow nature. Preliminary experimental results in mutual exclusiveness detection and in efficiently scheduling conditional behaviors are encouraging and motivate more extensive experimentation.
1. Introduction
The topic of efficiently scheduling conditional behaviors with a complex conditional structure has been thoroughly investigated in previous research, mainly because traditional DFG-based heuristics do not handle this kind of description efficiently [1].
Several better adapted heuristics were proposed ([1], [2], [3], [4], [5], [6]). The quality of their results depends heavily on the ability to exploit conditional resource sharing ([2], [4], [6], [7]) and speculative execution ([3], [5], [16], [17]) possibilities as well as shorter path lengths using node duplication techniques [3].
In resource-constrained scheduling, these techniques permit better utilization of the hardware resources in the datapath and produce better schedules, resulting in shorter execution paths and less control logic.
An important issue, also underlined in previous work ([9], [10]), relates to the effects of the syntactic variance of the input descriptions on the synthesis results. These negative effects intervene at two distinct but interrelated levels as far as scheduling conditional behaviors is concerned: mutual exclusiveness detection and operation scheduling. CDFG-based mutual exclusiveness detection techniques [3], [11], which use the structure of the input description, produce different schedules for semantically equivalent but syntactically different descriptions. This is due to the variability in the amount of detected mutual exclusiveness [9]. Furthermore, CFG-based scheduling (i.e., PBS [6]) is very sensitive to the statement order in the input description.
From the above it is clear that efficient HLS for control dominated designs relies on the combination of the above techniques and in effectively coping with the problem of syntactic variance.
1.1. A unifying approach
In our previous work [22], [19], we aligned with the view supported by others [5], [8], [9] in advocating the need for more flexible internal design representations to optimize HLS results and effectively handle both control-flow and data-flow dominated designs.
In this paper we explain why the adoption of an intermediate design representation like the Hierarchical Conditional Dependency Graph (HCDG) unifies and enhances the high-level synthesis of behavioral descriptions. Unification is mainly achieved because the HCDG is well adapted to describing both control-flow and data-flow designs. Representing control and data flow in a uniform manner is key to efficient scheduling/allocation heuristics that combine the aforementioned optimization techniques under a single framework.
Thanks to its origins in formal specification, the HCDG constitutes a formal framework on which HLS design activities can be optimized and freed from the negative effects of structural syntactic variance (i.e., nesting, statement order).
Though benchmark results are a good indication of the interest of the proposed approach, further refinement and validation on larger designs is needed. To this end, the CODESIS interactive synthesis tool has been developed.
2. The HCDG Internal Design Representation
The HCDG [20] is a special kind of directed graph that represents data and control dependencies from a uniform dataflow perspective. It consists of the Conditional Dependency Graph (CDG) and the Guard Hierarchy (GH). To better illustrate the notions of the HCDG, a small example will be used throughout this paper. Taken from [8], its CDG-like representation is shown in figure 1 and its HCDG in figure 2. For details on the HCDG construction process, the interested reader is referred to [19].
The HCDG obeys the principle of static single assignment: a variable may have more than one definition only under mutually exclusive conditions (e.g., !u). Table 1 gives the guard definitions for the example.
Table 1. Guard Definitions (the boolean definitions of guards $H_2$–$H_{10}$ of the running example; the formulas are not legibly recoverable from the source).
2.1. Formal Semantics and the Guard Hierarchy
Initially, the HCDG was developed as the internal representation for systems described in SIGNAL, a synchronous formal specification language used to specify reactive, real-time systems; the interested reader is referred to [25] for more details. As such, it comes with a formal calculus that allows compile-time proofs of correctness properties as well as the definition of correctness-preserving graph transformations useful in optimizing synthesis results [24].
In a discrete time model, where time is considered an infinite sequence of logical instants, a guard is the set of logical instants at which the boolean condition defining it evaluates to true. The theoretical foundations of the HCDG treat guards as sets and guard formulas as applications of set operations on these sets. In [21] it is shown how an equivalent representation of guard formulas as boolean functions can be obtained, and vice versa. Guards are equivalence classes of the HCDG nodes, grouping together nodes labeled by the same guard and thus active at the same logical instants.
The guard nodes of an HCDG are organized in a Guard Hierarchy (GH), a hierarchical, tree-like representation of the design control (figure 2, bottom). The GH represents the inclusion relation between guards.
The rest of the nodes (ovals) correspond to operations (I/O, computation, data multiplexing, and state storage with either register or transparent-latch semantics) that compute or assign values to variables. Output and input node names are prefixed by “!” and “?”, respectively.
Edges represent control and data dependencies. Control dependencies (most of them omitted in figure 2 for readability) run from guard nodes to the CDG nodes labelled by them and are drawn as dashed arrows; solid arrows represent data (computation) dependencies.
The behavioral description of the example (from [8]) is reproduced below as extracted; the +2 tokens appear to match the operation-node names used in figure 2:

```
Process jian(a, b, c, d, e, f, g, x, y)
  in port  a[8], b[8], c[8], d[8], e[8], f[8], g[8];
  in port  x, y;
  out port u[8], v[8];
  static T1;
  static T2[8], T3[8], T4[8], T5[8];

  T1 = (a + b) < c;
  T2 = d + e;
  T3 = c + 1;
  if (y)
    if (T1)     u = T3 +2 d;   /* u1 */
    else if (x) u = T2 +2 d;   /* u2 */
    if (!x)     u = T3 +2 e;   /* u3 */
  else {
    T4 = T3 + e;
    T5 = T4 +2 f;
    u  = T5 + g;               /* u3 */
  }
```
Inclusion relation. Let $h_i$ denote the boolean function corresponding to guard $H_i$; $h_i$ evaluates to true whenever $H_i$ is present, and to false otherwise. The inclusion relation represented by the tree-like structure of the GH simply states that $\forall H_j \in \text{descendants}(H_i): H_j \subseteq H_i$. In terms of the boolean definitions, the inclusion relation between two guards is expressed as $H_2 \subseteq H_1 \Leftrightarrow (h_2 \Rightarrow h_1)$. In addition, inclusion can be extended to the following cases:
$$H_k = H_i \cup H_j \Rightarrow H_i \subseteq H_k,\; H_j \subseteq H_k$$
$$H_k = H_i \cap H_j \Rightarrow H_k \subseteq H_i,\; H_k \subseteq H_j$$
In [21], the guard hierarchy is implemented as a hierarchy of BDDs. Control representations based on BDDs have already been used in previous work ([15], [4], [5]); the originality of the GH lies in the hierarchy construction, not in the use of BDDs, which are employed simply for their efficiency. Using BDDs, two things can be achieved efficiently. First, equivalence between guard formulas can be easily established to avoid redundancy. Second, during hierarchization, it is easy to find the maximum depth in the tree at which a guard node can be inserted, by means of a special factorization algorithm (see [21] for details). This yields an optimally refined inclusion hierarchy.
Some of the advantages of using the inclusion hierarchy information will be shown later on. Briefly, it makes it possible to minimize the number of mutex tests [19] in guard exclusiveness detection used for conditional resource sharing, which is especially useful in interactive design environments where speed matters. The hierarchy also enables the development of probabilistic priority functions used in HCDG-based list scheduling that efficiently account for conditional behavior [24]. Finally, in [20] it is shown that guard inclusion information is very important for triangularizing a larger number of systems of guard equations than would be possible with a rewriting system based only on the axioms of boolean algebra.
### 2.2. Efficient static mutual exclusiveness detection
Mutual guard exclusiveness will be denoted by $\otimes$. Since in the formal foundations of the HCDG guards are sets of logical instants, two guards are mutually exclusive if their intersection is empty: $(H_1 \cap H_2 = \emptyset) \Leftrightarrow H_1 \otimes H_2$. In terms of the boolean function representation of guards, this translates to $h_1 \wedge h_2 = \text{false} \Leftrightarrow H_1 \otimes H_2$, which is the mutex test of [15].
Guard inclusion, as shown in [19], permits minimizing the number of mutual exclusion tests significantly. This optimization relies on the following proposition: let $\text{subhier}(H) = \text{descendants}(H) \cup \{H\}$; then
$$H_1 \otimes H_2 \Rightarrow \forall (H_i, H_j) \in \text{subhier}(H_1) \times \text{subhier}(H_2),\; H_i \otimes H_j$$
meaning that if two guards $H_1, H_2$ are mutually exclusive then every guard in the sub-hierarchy of $H_1$ is mutually exclusive to every guard in the sub-hierarchy of $H_2$.
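A minimal sketch modelling guards as sets of logical instants, illustrating the proposition with made-up instants:

```python
# Guards as sets of logical instants; children are subsets of their parents.
H1, H2   = frozenset({1, 2, 3}), frozenset({4, 5})
H1a, H2a = frozenset({1, 2}), frozenset({4})     # H1a ⊆ H1, H2a ⊆ H2

def mutex(a, b):
    return not (a & b)       # H_a ⊗ H_b  ⇔  empty intersection

# One test on the roots suffices: every pair drawn from the two
# sub-hierarchies inherits mutual exclusiveness for free.
if mutex(H1, H2):
    assert all(mutex(x, y) for x in (H1, H1a) for y in (H2, H2a))
```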
A set of benchmarks was used to experimentally evaluate the mutual exclusiveness identification capabilities of the proposed approach against the methods of [26], [8], which are the most powerful methods so far in terms of coverage and insensitivity to syntactic variance. The benchmark from [19] was included to test the capability of our approach to reason on conditions defined by simple arithmetic relations [23]. Two semantically equivalent but syntactically different descriptions of each benchmark were used (desc.1, desc.2): the first has maximal conditional nesting, as opposed to the second, where conditions are flattened and each assignment sits in its own conditional block. Our results show that our method achieves at least as much coverage as the other two methods while requiring a smaller number of mutex tests.
### 2.3. Mutual exclusiveness representation
Guard mutual exclusiveness is represented by a compatibility graph, the MEG (Mutual Exclusiveness Graph), whose vertices represent guards and whose edges represent the mutual exclusiveness relation between the guards they connect. The resulting MEG for the example is shown in figure 3. Cliques in the MEG correspond to groups of pairwise mutually exclusive guards. Depending on the resource sharing context (FUs, registers, interconnects), each vertex has an associated list of specification objects that are active under this guard and can be allocated to a resource of that type. For instance, during scheduling such a structure makes it easy to find groups of mutually exclusive operations that may share the same functional unit of a specific type.
In [22] it is argued that the best adapted algorithm to find such cliques is based on the initial-graph-partition algorithm presented in [13]. Other heuristics e.g. [14] are not as well adapted to satisfy our clique construction objectives since clique maximality is not always a good optimization criterion when scheduling is considered.

Amongst other applications, HCDGs and guard exclusiveness have also been used for false path identification (see [23] for more details), useful in path-based scheduling heuristics as well as in more accurate static timing analysis.
### 2.4. Optimization by HCDG transformations
Constructing the HCDG reflects the way the design is described by the designer. By applying graph transformations, semantically equivalent representations are produced. Using guard information, transformations like dead code elimination, code motion, node duplication, and path length reduction by dependency rearrangement can be easily performed. In our approach, transformations are of two types: pre- and post-scheduling.
The objective of pre-scheduling transformations is to remove syntactic variance and bring the HCDG into a form that will eventually yield better scheduling results. Such transformations include the lazy-execution guard transformation to increase conditional resource sharing possibilities, dependency rearrangement, and node duplication at mutually exclusive guards to shorten path lengths.
The term lazy execution denotes the situation where a node produces a value only as often as this value is used by other nodes. Computing the appropriate node guards for lazy execution may introduce additional guards into the guard hierarchy, and some control paths may become longer. However, the transformed graph contains more conditional resource sharing possibilities, and in a scheduling scheme where conditional resource sharing is combined with speculative execution, this lengthening of control paths can be effectively amortized. Finally, in certain cases where the result of a node is used at mutually exclusive guards, the node can be duplicated at these guards without increasing hardware cost, since the duplicated operation nodes are mutually exclusive and may share the same resource during scheduling. Post-scheduling transformations incorporate scheduling information (i.e., conditional resource sharing and speculative execution) into the HCDG, so the transformed graph can be used in subsequent scheduling iterations or post-scheduling high-level synthesis activities (i.e., allocation/binding, etc.).
Comparing figure 2 to figure 4, in the HCDG of the example the initial node guards were modified to enforce lazy node execution (e.g. $+1, +2, +3, <$ initially labelled by guard $H_1$). Also, the node $+3$, used under mutually exclusive conditions ($H_6 \otimes H_3$), was duplicated to shorten the control paths. The data merge node (triangle $u$) is introduced to enforce the single assignment principle for variable $u$ (in the behavioral description) which has multiple definitions ($u_1, u_2, u_3$) under mutually exclusive conditions, represented by guards $H_3, H_6, H_{10}$ respectively.

### 3. HCDG based List Scheduling Heuristic
In this section, a modified list scheduling heuristic that takes advantage of the HCDG features is described. One important advantage of list scheduling is that its quality depends on the choice of the priority function [1]. In [22] we exploit the guard hierarchy to define a probabilistic priority function that better accounts for the conditional nature of the design. This is combined with an intelligent scheduling policy that employs pre-scheduling optimizing transformations (lazy execution, node duplication), conditional resource sharing, and speculative execution.
This process has several advantages. The list scheduling priority criterion is satisfied for the greatest number of distinct execution instances (paths) simultaneously, because the cliques constructed for conditional resource sharing always contain the highest-priority node together with the largest number of other high-priority nodes that can share a resource with it. In contrast to [9] and [5], speculative execution is considered only after normally executing nodes have been scheduled; in this way, the risk of lengthening execution paths by displacing normally executing operations in favor of speculatively executing ones is avoided. Finally, conditional resource sharing is exploited during scheduling and not before, so lengthening of execution paths due to inappropriate conditional resource sharing (i.e., [2], [11]) is also avoided.
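A toy resource-constrained list scheduler (our own illustration, far simpler than the CODESIS heuristic) showing the key mechanism: two mutually exclusive operations may occupy the same functional unit in the same cycle.

```python
deps  = {"a": [], "b": [], "c": ["a"], "d": ["b"]}   # op -> predecessors
mutex = {frozenset({"c", "d"})}                       # c and d never execute together
ALUS  = 1                                             # resource constraint

def priority(op):
    # Critical-path priority: longest path from op to any sink.
    succs = [s for s, preds in deps.items() if op in preds]
    return 1 + max((priority(s) for s in succs), default=0)

done, cycle = set(), 0
while len(done) < len(deps):
    ready = sorted((op for op in deps
                    if op not in done and all(p in done for p in deps[op])),
                   key=priority, reverse=True)
    slots = []  # each slot is one ALU; ops in a slot are pairwise mutex
    for op in ready:
        for slot in slots:
            if all(frozenset({op, other}) in mutex for other in slot):
                slot.append(op)
                break
        else:
            if len(slots) < ALUS:
                slots.append([op])
    scheduled = [op for slot in slots for op in slot]
    print(f"cycle {cycle}: {scheduled}")
    done.update(scheduled)
    cycle += 1
# cycle 0: ['a']; cycle 1: ['b']; cycle 2: ['c', 'd'] -- c and d share the ALU,
# saving one control step versus scheduling them sequentially.
```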
3.1. Experimental results
The HCDG-based list scheduling heuristic is compared to other similar heuristics (Kim [2], CVLS [7], [3], PBS [6], Brewer [5], ADD-FDLS [9]) using benchmarks from previous work (kim, waka, maha, and jian from [2], [7], [12], and [8], respectively). For each benchmark, the HCDG was constructed, the guard hierarchy was refined, the HCDG was transformed for lazy execution, and guard mutual exclusiveness was established using the techniques described in [18]. Results are given in Tables 2 to 5 for various resource constraints (cmp, +, −: one-cycle resources) and chaining lengths (cn = 1 means no chaining), reported as “total / longest path / shortest path” numbers of states.
### Table 2. Results for the “maha” benchmark
| Resources | Kim | PBS | crit. path | Brewer | ours |
|---|---|---|---|---|---|
| cmp: 0, +: 1, −: 1, cn: 1 | 8/8/3 | - | - | 5/5/4 | |
| cmp: 0, +: 1, −: 1, cn: 2 | 6/5/2 | 9/5/2 | 8/6/6 | - | 5/5/4 |
| cmp: 0, +: 2, −: 3, cn: 1 | - | - | - | 4/4/2 | |
| cmp: 0, +: 2, −: 3, cn: 3 | 3/3/2 | - | 4/4/2 | - | 3/3/2 |
| cmp: 0, +: 2, −: 3, cn: 5 | - | 4/3/1 | - | 3/3/2 | |
### Table 3. Results for the “waka” benchmark
<table>
<thead>
<tr>
<th>Resources</th>
<th>CVLS</th>
<th>Kim</th>
<th>PBS</th>
<th>Brewer</th>
<th>ours</th>
</tr>
</thead>
<tbody>
<tr>
<td>cmp: 1, +: 1, −: 1, cn: 1</td>
<td>7/7/5</td>
<td>7/7/5</td>
<td>-</td>
<td>-</td>
<td>7/7/4</td>
</tr>
<tr>
<td>cmp: 1, +: 1, −: 1, cn: 2</td>
<td>-</td>
<td>-</td>
<td>8/7/3</td>
<td>-</td>
<td>6/6/3</td>
</tr>
<tr>
<td>cmp: 1, ALU: 2, cn: 1</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>7/7/4</td>
<td></td>
</tr>
<tr>
<td>cmp: 1, ALU: 2, cn: 2</td>
<td>-</td>
<td>-</td>
<td>8/6/5</td>
<td>8/6/5</td>
<td>-</td>
</tr>
</tbody>
</table>
### Table 4. Results for the “kim” benchmark
<table>
<thead>
<tr>
<th>Resources</th>
<th>Kim</th>
<th>Brewer</th>
<th>ADD</th>
<th>ours</th>
</tr>
</thead>
<tbody>
<tr>
<td>cmp: 2, +: 2, −: 1, cn: 1</td>
<td>8/8/6</td>
<td>-</td>
<td>6/6/5</td>
<td>6/6/6</td>
</tr>
<tr>
<td>cmp: 1, +: 2, −: 1, cn: 1</td>
<td>-</td>
<td>-</td>
<td>6/6/6</td>
<td>-</td>
</tr>
<tr>
<td>cmp: 2, ALU: 2, cn: 1</td>
<td>-</td>
<td>-</td>
<td>-</td>
<td>6/6/6</td>
</tr>
</tbody>
</table>
### Table 5. Results for the “jian” benchmark
<table>
<thead>
<tr>
<th>Resources</th>
<th>ours (cn=1)</th>
<th>ours (cn=2)</th>
</tr>
</thead>
<tbody>
<tr>
<td>cmp: 1, +: 1, cn: 1</td>
<td>4/4/5</td>
<td>4/4/5</td>
</tr>
<tr>
<td>cmp: 1, +: 2, cn: 1</td>
<td>4/4/2</td>
<td>3/3/2</td>
</tr>
</tbody>
</table>
### Table 6. Insensitivity to syntactic variance
<table>
<thead>
<tr>
<th>Bench.</th>
<th>cmp: 1</th>
<th>cmp: 2</th>
<th>ALU: 2</th>
<th>cmp: 0</th>
<th>cmp: 0</th>
<th>ALU: 2</th>
<th>cmp: 1</th>
<th>cmp: 1</th>
</tr>
</thead>
<tbody>
<tr>
<td>maha</td>
<td>7/7/4</td>
<td>9/7/4</td>
<td>9/5/2</td>
<td>5/5/2</td>
<td>4/4/2</td>
<td>6/6/6</td>
<td>6/6/6</td>
<td>4/4/4</td>
</tr>
</tbody>
</table>
### 4. The CODESIS tool
In order to validate our results in more realistic contexts and to quantitatively evaluate the effectiveness of the HCDG and the HCDG-based heuristics, the CODESIS interactive CAD tool has been developed. Currently the specification front-end is the SIGNAL formal specification language, but in the future other standard description languages (e.g. C, VHDL) will be supported. Translation of the HCDG into C and VHDL already exists and allows us to interface to existing implementation tools such as software compilers and hardware synthesis (behavioral and RTL) tools. A graphical user interface makes it possible to visualize the HCDG, interactively apply graph transformations and scheduling heuristics, and visualize the obtained results.
CODESIS screenshots in Figures 5 and 6 show conditional resource sharing for functional units and registers, as used in the scheduling and register allocation algorithms, for the example. Figure 7 shows the automatically derived and optimized control FSM.
The tool is designed and developed entirely in an object-oriented fashion in Java, allowing easy extension and the incorporation and use of new features in a plug-and-play fashion. For instance, new scheduling heuristics can be introduced, different priority functions can be tested, and pre- and post-scheduling transformations can be applied in variable order. Due to its interactivity, extensibility, and visualization capabilities, this tool will be very useful for research, experimentation, and educational purposes.
### 5. Conclusions
The HCDG is a powerful internal design representation with the ability to treat both data-flow and control-flow designs within the same framework. Techniques and heuristics developed for data-flow-oriented designs can be readily adapted to the HCDG. In addition, several others have been developed to tackle the problems related to control-flow-intensive designs.
The HCDG-based scheduling approach exploits most of the existing scheduling optimization techniques, enjoying their combined benefits. Both speculative execution and conditional resource sharing are combined in a uniform and consistent framework, similarly to the dynamic CVs of [3] and the guards in [4], [5]. Moreover, it does not suffer from the effects of syntactic variance at either the mutual exclusiveness detection or the scheduling level, as CDFG- or CFG-based approaches do. The hierarchical control representation makes it possible to minimize the number of mutual exclusiveness tests and to develop probabilistic priority functions that account for the conditional nature of the design.
Finally, to test our ideas in more realistic contexts, a user-friendly HLS tool has been built using the HCDG as its internal representation.
References
|
{"Source-Url": "https://hal.archives-ouvertes.fr/file/index/docid/545528/filename/isss00.pdf", "len_cl100k_base": 6352, "olmocr-version": "0.1.53", "pdf-total-pages": 7, "total-fallback-pages": 0, "total-input-tokens": 23825, "total-output-tokens": 7854, "length": "2e12", "weborganizer": {"__label__adult": 0.0005054473876953125, "__label__art_design": 0.0007963180541992188, "__label__crime_law": 0.0005254745483398438, "__label__education_jobs": 0.0006604194641113281, "__label__entertainment": 0.00014019012451171875, "__label__fashion_beauty": 0.00026035308837890625, "__label__finance_business": 0.0003962516784667969, "__label__food_dining": 0.00045371055603027344, "__label__games": 0.0007791519165039062, "__label__hardware": 0.00901031494140625, "__label__health": 0.0007085800170898438, "__label__history": 0.0004029273986816406, "__label__home_hobbies": 0.0002027750015258789, "__label__industrial": 0.0016613006591796875, "__label__literature": 0.00025844573974609375, "__label__politics": 0.0005006790161132812, "__label__religion": 0.0008063316345214844, "__label__science_tech": 0.2763671875, "__label__social_life": 9.697675704956056e-05, "__label__software": 0.008575439453125, "__label__software_dev": 0.69482421875, "__label__sports_fitness": 0.0003974437713623047, "__label__transportation": 0.0011644363403320312, "__label__travel": 0.0002856254577636719}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 28336, 0.06501]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 28336, 0.50124]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 28336, 0.85357]], "google_gemma-3-12b-it_contains_pii": [[0, 1158, false], [1158, 5719, null], [5719, 10017, null], [10017, 14392, null], [14392, 18733, null], [18733, 22651, null], [22651, 28336, null]], "google_gemma-3-12b-it_is_public_document": [[0, 1158, true], [1158, 5719, null], [5719, 10017, null], [10017, 14392, null], [14392, 18733, null], [18733, 22651, null], [22651, 28336, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 28336, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 28336, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 28336, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 28336, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 28336, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 28336, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 28336, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 28336, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 28336, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 28336, null]], "pdf_page_numbers": [[0, 1158, 1], [1158, 5719, 2], [5719, 10017, 3], [10017, 14392, 4], [14392, 18733, 5], [18733, 22651, 6], [22651, 28336, 7]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 28336, 0.20231]]}
|
olmocr_science_pdfs
|
2024-12-08
|
2024-12-08
|
67e175ebdc9ad7b0288de8ce7fd7341eaebe2c12
|
An Aspect-Oriented Approach to Consistency-Preserving Caching and Compression of Web Service Response Messages
Wubin Li, Johan Tordsson, and Erik Elmroth
Department of Computing Science and HPC2N
Umeå University
Umeå, Sweden
{wubin.li, tordsson, elmroth}@cs.umu.se
Abstract—Web Services communicate through XML-encoded messages and suffer from substantial overhead due to verbose encoding of transferred messages and extensive (de)serialization at the end-points. We demonstrate that response caching is an effective approach to reduce Internet latency and server load. Our Tantivy middleware layer reduces the volume of data transmitted without semantic interpretation of service requests or responses and thus improves the service response time. Tantivy achieves this reduction through the combined use of caching of recent responses and data compression techniques to decrease the data representation size. These benefits do not compromise the strict consistency semantics. Tantivy also decreases the overhead of message parsing via storage of application-level data objects rather than XML-representations. Furthermore, we demonstrate how the use of aspect-oriented programming techniques provides modularity and transparency in the implementation. Experimental evaluations based on the WSTest benchmark suite demonstrate that our Tantivy system gives significant performance improvements compared to non-caching techniques.
Keywords- Web Services; response caching; consistency-preservation; hashing; data compression; aspect-oriented programming;
I. INTRODUCTION
Reducing the response time of costly remote service invocations in Web Services environments is a critical challenge in many real-life scenarios. Our previous work [1] describes a consistency-preserving mechanism for Web Service response caching that reduces the volume of data transmitted, without semantic interpretation of service requests or responses, and thus improves the service response time. It achieves this reduction through the use of hashing to detect similarities with previous results. In this paper, we propose an aspect-oriented middleware solution called Tantivy that extends our previous results. Unlike previous approaches that exploit the structure of cache entries [2], Tantivy treats each SOAP message as a black box and converts it into an application-specific data object (henceforth referred to as an application object) before it is cached. Moreover, Tantivy reduces the size of the data that is transmitted between client and services through the use of data compression techniques.
A Web Service framework that supports caching (as opposed to having to add this functionality to each application that invokes the services) is particularly beneficial, as it enables the application developer to ignore caching issues [3]. Some earlier research on how to transparently add caching to an application exists [4], [5], but these efforts ignore the consistency aspect. Other solutions provide consistency but ignore transparency and thus require substantial effort in the implementation of caching-enabled applications [6]. In our Tantivy solution, we keep caching and data compression transparent by casting them as aspects and using an AOP framework to add a proxy layer that intercepts Web Service messages.
We evaluate the performance of our solution with the WSTest [7] benchmark suite. Our results show that Tantivy can improve performance significantly, especially for Web Services that communicate with large amounts of data and/or commonly exchange the same messages.
In summary, our contributions are the following:
1) We investigate how textual data compression techniques and optimized cache representation can further improve response time in an existing solution [1] for consistency-preserving Web Service response caching.
2) We demonstrate that caching of Web Service responses can be considered a crosscutting aspect and illustrate how AOP methods can be used to achieve simple, yet flexible, development of our system.
3) We evaluate the performance of our proposed system through industry-standard benchmarks and demonstrate that our approach achieves significant performance improvements.
The rest of this paper is organized as follows. Section II describes background information about Web Services, data compression techniques and AOP. Section III introduces the proposed mechanism and describes its overall structure. Section IV describes the implementation of the Tantivy system using AOP techniques and discusses the related application transparency aspects. Section V presents the experimental evaluation. Finally, some conclusions are presented in Section VI followed by a presentation of continued and future work, acknowledgments, and a list of references.
II. BACKGROUND AND RELATED WORK
A. Web Services
In Web Service environments, SOAP provides interoperability between the clients and the service. However, as the client and the hosted services are connected by a network and communicate through XML-encoded messages, substantial overhead is induced due to (de)serialization. Special care must hence be taken to reduce the response latency for Web Service invocations, and thus improve service throughput. In this paper, we adopt caching and data compression techniques to address this performance issue of Web Services invocations.
B. Web Service Response Caching
Research on remote object caching for distributed systems [8] has received substantial attention, including efforts that target CORBA [9], SOAP objects [10], and Java RMI [11]. An efficient response cache mechanism appropriate for the Web Services architecture is proposed by Takase et al. [12]. This mechanism reduces the overhead of XML processing and application object copying through an optimized data representation. However, consistency problems are not addressed; instead, the discussion is based on the assumption that it is the responsibility of the client application to avoid consistency problems by configuring a short enough time-to-live for each operation. It should be remarked that this approach also depends on the service semantics.
The SigsitAccelerator solution [1] tackles the consistency problem through the use of cryptographic hashing [13]. We extend this approach and add two additional techniques to further improve performance: textual data compression and optimized data representation of cached entries. To avoid caching features that are hard-coded on the client and service sides, AOP techniques are applied to improve software modularization, reusability, transparency, and portability.
C. XML Compression and SOAP Performance
XML, the foundation of the SOAP protocol, is a self-descriptive textual format for structured data. XML provides a good basis for interoperability and facilitates the adaptation of services, but it is also notoriously verbose. This verbosity, mainly due to the excessive use of markup and metadata, can cause communication and processing overhead in resource-constrained environments such as small wireless devices and in environments with network limitations. Fortunately, the impact of this verbosity can be alleviated through the use of text compression techniques. According to a summary [14], three categories of compression algorithms can be used to reduce the verbosity of XML: general-purpose compression agnostic of XML, algorithms based on the general knowledge that the data is XML-based, and techniques that take advantage of the schema used for the particular XML documents to be compressed. Tantivy does not depend on any specific compression algorithm. While we currently use GZip [15], a general-purpose compression algorithm for textual data, replacing this with another compression algorithm is straightforward.
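For illustration, a GZip round-trip over an XML-encoded message can be written with the standard `java.util.zip` classes; this is a generic sketch, not Tantivy's actual code.

```java
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

// Generic GZip round-trip for an XML-encoded message; any other textual
// compressor could be substituted, as the paper notes.
public final class XmlGzip {
    static byte[] compress(String xml) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();
        try (GZIPOutputStream gz = new GZIPOutputStream(bos)) {
            gz.write(xml.getBytes(StandardCharsets.UTF_8));
        }
        return bos.toByteArray();
    }

    static String decompress(byte[] data) throws IOException {
        try (GZIPInputStream gz =
                 new GZIPInputStream(new ByteArrayInputStream(data))) {
            return new String(gz.readAllBytes(), StandardCharsets.UTF_8);
        }
    }

    public static void main(String[] args) throws IOException {
        String soap = "<soap:Envelope>...</soap:Envelope>".repeat(100);
        byte[] raw = soap.getBytes(StandardCharsets.UTF_8);
        byte[] packed = compress(soap);
        System.out.printf("original %d bytes, compressed %d bytes%n",
                          raw.length, packed.length);
        assert decompress(packed).equals(soap);
    }
}
```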
D. Aspect-Oriented Programming
Aspect-Oriented Programming (AOP) is a programming methodology for separating crosscutting concerns (behaviors that cut across the typical divisions of responsibility, such as logging and caching) into single units called aspects. An aspect is a modular unit that provides a functionality with crosscutting concern. It encapsulates behaviors that affect multiple classes into reusable modules. Through weaving rules specified by the developer, aspects are incorporated to form the final system. As a result, a single aspect can contribute to the implementation of multiple methods, modules, or classes, and can thus increase both the reusability and the maintainability of the code.
Figure 1. Conceptual illustration of how weaving rules incorporate a caching aspect into an application to make it caching-enabled.
Figure 1 shows the basic principle of adding caching transparently to an application through aspect weaving. AOP allows us to dynamically modify the static model to include the code required to fulfill the secondary requirements without having to modify the original model. Better still, we can often keep this additional code in a single location rather than having to scatter it across the existing model, as we would have to if we were using object-oriented techniques only. There is a considerable number of technologies that support the AOP paradigm. We choose AspectJ [16] for Tantivy as it is among the most mature and full-featured frameworks available today.
III. THE TANTIVY SOLUTION
A. Main Principles of Tantivy
The two aspects of Web Service communication overhead are data representation and data transfer. Each of these provides an opportunity to reduce communication overhead. In the context of Web Services, data representation requires the transformation of application data into internal representations in the form of XML Infosets [17]. The type of
representation determines the amount of data that has to be transmitted. One additional transformation required is that from the internal representation of what should be sent, to something that actually can be sent. For Web Services, this means that the internal XML Infoset representation is serialized into an XML document before it is transferred over the network.
As Web Services are platform neutral and thus cannot depend on a specific wire protocol, it is not possible to affect the data transfer step. This leaves us with only one opportunity for communication performance improvements, namely data representation. In this paper, we adopt two optimization methods to improve the performance of Web Service responses. The first improvement is caching of the post-parsing representation (application object) instead of the XML message itself in the client side. The second optimization method is to reduce the size of the data representation transmitted over the network through caching and data compression techniques.
### B. Proxy-Based Caching
To guarantee consistency, a caching layer is added to the Web Service framework. This layer is provided through AOP techniques instead of hard-coded implementations in the service engine. The Tantivy Client shown in Figure 2(b) is a lightweight component that mediates communication between the Web Services client and the remote Tantivy Proxy. It forwards requests from the Web Services client, buffers the complete results, and responds to the Web Services client with these results. The Tantivy Client provides the ability to retrieve results from hash-based descriptions (digests) sent by the proxy by maintaining an in-memory cache of recently received results.

**Figure 2. Conceptual overview of the Tantivy architecture as compared to a native Web Service.**
The Tantivy Proxy shown in Figure 2(b) does not examine any request messages received from the Tantivy Client but directly forwards them to the Web Service. Instead, the proxy is responsible for inspecting response results received from the Web Service provider. The proxy rapidly generates hash-based encodings of the results and caches these encodings. If the results are similar to previous ones, only the hash digests are sent to the Tantivy Client. Note that the proxy does not need to keep the actual response messages, only the digests. This enables the proxy to scale well even when many clients are using the same service.
### C. Results Handling at Service and Client Side
Figure 3 shows the dataflow for results handling at the Tantivy Proxy side. The Tantivy Proxy first receives response results from the Web Services provider and then checks the size of each result. If the size of the result is less than a threshold value (e.g. 200 KB), the proxy does not generate a hash digest for it but forwards it directly to the Tantivy Client.

**Figure 3. Dataflow for results handling at the Tantivy Proxy side.**
Otherwise, the proxy generates a digest of the result. Tantivy does not depend on any specific hash function. Modern hash functions [13] compute hash digests very quickly. The size of the digest depends on the hash function used, but it is in general much smaller than the size of the original response. In our prototype, we currently use SHA-1 [18] as the hash function, and the size of each hashed result is thus 160 bits.
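For reference, such a 160-bit digest of a serialized response can be computed with the standard `java.security` API, as in this illustrative sketch (not the prototype's code):

```java
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;

// Computes the 160-bit (20-byte) SHA-1 digest used as a cache key.
public final class ResponseDigest {
    static byte[] sha1(String responseXml) throws NoSuchAlgorithmException {
        MessageDigest md = MessageDigest.getInstance("SHA-1");
        return md.digest(responseXml.getBytes(StandardCharsets.UTF_8));
    }

    public static void main(String[] args) throws Exception {
        byte[] digest = sha1("<soap:Envelope>...</soap:Envelope>");
        StringBuilder hex = new StringBuilder();
        for (byte b : digest) hex.append(String.format("%02x", b));
        System.out.println("SHA-1 (" + digest.length * 8 + " bits): " + hex);
    }
}
```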
The next step is to check whether the hashed result is already stored in the cache. If so, the client has requested this result before, and the proxy only needs to transmit the hashed result. Otherwise, the hashed result is new and the Tantivy Proxy stores it in the cache. The proxy also compresses the original response message into a compact one before finally transmitting it to the client side. This way, large messages are always compressed, and the amount of data transmitted over the network is reduced even if cache misses occur at the Tantivy Proxy side.
Figure 4 shows the overall dataflow in the Tantivy Client. The first step in the client is to inspect the type of a result received from the Tantivy Proxy. If the result message is a hash digest, Tantivy retrieves the stored response result from cache through the use of the received hash digest as key.
Otherwise, Tantivy checks whether the result is compressed and if so, the result is decompressed to the original one. Next, the response result is stored in cache with the hash digest as key before it is finally passed to the Web Service client.
At the client side, the data representation for cached data is made efficient by deserializing responses only once and storing the resulting application objects in the cache. This way, upon a cache hit, the client can immediately fetch the application object from the cache without any parsing or deserialization, and the response latency is further reduced. In detail, before a response message is delivered to the client, the response result is converted to an application object. This conversion is performed by an XML parser, which can be based either on DOM [19] or SAX [20]. With a DOM parser, a DOM tree object is created from the XML message as the post-parsing representation. With a SAX parser, the parser reads the XML document and notifies the deserializer of the SAX events sequentially. The deserializer constructs the application objects from the DOM tree object or the SAX event sequence. As the parsing and deserialization of XML messages constitute a large part of the Web Services overhead, caching application objects instead of XML objects can significantly improve the performance of service response caching.
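Putting the client-side steps together, a minimal sketch of the Figure 4 dataflow might look as follows; all types and helpers are hypothetical stand-ins, and the digest is assumed to accompany a full result (or to be recomputed on arrival).

```java
import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.Map;

// Sketch of Tantivy Client result handling: digests are resolved from the
// local cache; full results are decompressed if needed, deserialized once
// into an application object, and cached under their digest.
final class TantivyClientSketch {
    private final Map<String, Object> cache = new HashMap<>(); // digest -> app object

    Object handleResult(Result result) {
        if (result.isDigest()) {
            // Proxy-side cache hit: the digest alone identifies the result.
            return cache.get(result.digest());
        }
        byte[] payload = result.isCompressed()
                ? decompress(result.payload())  // e.g. GZip, as sketched above
                : result.payload();
        // Deserialize only once and cache the application object, not XML.
        Object appObject = deserialize(payload);
        cache.put(result.digest(), appObject);
        return appObject;
    }

    // Hypothetical transport-level view of a proxy response.
    interface Result {
        boolean isDigest();
        boolean isCompressed();
        String digest();
        byte[] payload();
    }

    // Placeholders for the real decompression and XML deserialization layers.
    private byte[] decompress(byte[] data) { return data; }
    private Object deserialize(byte[] xml) {
        return new String(xml, StandardCharsets.UTF_8);
    }
}
```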
IV. DESIGN AND IMPLEMENTATION
Aspect-oriented programming provides an elegant approach to caching by treating it as a concern that cuts across the application. We choose Codehaus XFire [21] as the Web Service framework on which to build Tantivy, adding a caching aspect as a new feature. XFire, which has been merged with the Celtix project [22] to constitute Apache’s CXF incubation project [23], is a Java SOAP framework. It facilitates service-oriented development through its easy-to-use API and support for standards such as WS-I [24]. We have also made similar experiments with other frameworks such as Axis [25], Axis2 [26], and JBossWS [27], and we believe that all of these frameworks could be used to implement Tantivy through AOP techniques.
We choose AspectJ as the AOP framework. AspectJ is a seamless aspect-oriented extension to the Java programming language that enables clean modularization of crosscutting concerns. The AspectJ language exposes a set of join points that are well-defined places in the execution of a Java program flow.
```java
public aspect Tantivy {
    // Pointcut: the execution of sendMessage() in ServiceInvocationHandler
    // (or any subclass) taking a MessageContext as its first argument.
    pointcut hashing(MessageContext context) :
        execution(* ServiceInvocationHandler+.sendMessage(MessageContext, ..))
        && args(context, ..);

    // Crosscutting actions for cryptographic hashing, executed after the
    // response value has been produced.
    after(MessageContext context) : hashing(context) {
        Object msg = context.getDataContextProperty(
                PostInvocationHandler.RESPONSE_VALUE);
        if (msg.size() > Threshold) {
            // Compute the hash key for the message.
            Object key = Cryptographic_Hashing(msg);
            if (IsCacheHit(key)) {
                // Cache hit: only the key needs to be sent.
                msg = key;
            } else {
                // Cache miss: save the message in the cache ...
                Save2Cache(key, msg);
                // ... and compress it to compact data for transmission.
                Object compactData = Data_Compression(msg);
                msg = compactData;
            }
        }
        // Message size below the threshold: no need to handle it.
    }
}
```
Figure 5. Code example that outlines the caching aspect in the Tantivy Proxy.
Figure 5 shows the implementation of the Tantivy Proxy through a pointcut (an expression that picks out join points) and an advice declaration in the AspectJ language. This code follows the dataflow process described in Figure 3. The code includes a pointcut called `hashing` that matches the execution of the `sendMessage` method in the `ServiceInvocationHandler` class (or any subclass) taking a first argument of type `MessageContext`. The pseudocode also defines an advice that executes after the specified pointcut, i.e. after `sendMessage` has produced the response.
The client side of the Tantivy system (see Figure 4) is similar. Note that the pointcuts and advices that define the weaving rules to be applied are specified as entities separate from the individual aspect modules. The weaving of the final system from individual aspects is performed by the AspectJ compiler, *ajc*.
V. EXPERIMENTAL EVALUATION
A. Web Service Benchmarking
The WSTest [7] benchmark suite is used to evaluate the performance of the Tantivy framework. These benchmarks were developed by Sun Microsystems and later extended by Microsoft. WSTest is designed to measure the performance of various types of Web Services calls and includes the following benchmarks:
- **EchoVoid** - sends and receives an empty message. This benchmark performs neither serialization nor deserialization.
- **EchoStruct** - receives an array of arbitrary length as input parameter and returns this array. The structures in the array contain one element each of type integer, float, and string. The longer the array, the more work is required in deserialization and re-serialization of the SOAP object to and from XML.
- **EchoList** - sends and receives a linked list of any size, where each element in the list consists of the same structure as used in EchoStruct.
- **EchoSynthetic** - sends and receives a structure that contains a byte array of varying length.
- **GetOrder** - simulates a request for a complete purchase order for an e-commerce service. This benchmark takes three integer input parameters and returns an order object. The order object is a complex structure that includes order header information, a customer structure with shipping address and billing address structures, as well as any number of objects.
WSTest (version 1.5) consists of a multi-threaded application that performs multiple Web Service calls in parallel in order to simulate a real life scenario with multiple clients that access the services. To avoid the overhead of other platform components, the Web Service operations perform no business logic but simply return the input parameters. WSTest measures the throughput of a system handling multiple types of Web Service invocations. The notion of a Web Service invocation here corresponds to one request-response cycle. WSTest reports the throughput (average number of Web Service invocations executed per second) and the response time (average time it takes to process a request). These metrics are reported separately for each of the five operations.
B. Experimental Setup
The experimental setup consists of a client side extended with a Tantivy Client module. This client has five threads, one for each benchmark. The other part in the setup is the server side that implements the WSTest services. The server side is extended with a Tantivy Proxy module. The two sides have identical system configurations, shown in Table I.
The client side and the services are connected by a network router that allows us to control the bandwidth and latency settings on the network. We focus our evaluation on three network configurations; 5 Mb/s, representative for severely constrained network paths, 20 Mb/s, representative for moderately constrained network paths, and 100 Mb/s, representative for unconstrained networks. The last setup is used to investigate any potential overhead of Tantivy in situations where bandwidth is not a limiting factor.
Each client submits a mix of invocations, with 20% of the calls going to each of the five benchmarks. After a warm-up period of 300 seconds, each client thread initiates invocations of the benchmark services as per the defined mix. A new invocation is started as soon as the prior one completes. The number of invocations executed and the response time are accumulated during a steady-state period of 600 seconds and reported at the end of the execution. Moreover, invocations during each execution have a certain repetition rate; for example, in our second experiment, 8% of the invocations are duplicates. For repeated invocations the same request parameters are used, and the response results from the server are thus the same. Varying this repetition rate, i.e., the cache-hit ratio, enables us to study the performance impact of caching in the Tantivy system.
Using this setup, we measured results for various combinations of number of clients, cache-hit ratio, and network bandwidth for the following two configurations:
- The Native configuration, corresponding to Figure 2(a) where no proxy is used.
- The Tantivy configuration, corresponding to Figure 2(b) where the Tantivy layer is used. For a given number of client threads and a certain network bandwidth, comparing these results to the corresponding Native ones reveals the potential performance improvements.
C. Performance Analysis
Figure 6 presents the average number of requests served per second and the average response time for these requests in the scenario with 5 Mb/s bandwidth. There are no duplicate requests during this test run and the cache-hit ratio is thus zero. Due to the computation overhead for hashing and data compression, Tantivy is a bit slower than Native for the EchoVoid, EchoStruct, and EchoSynthetic benchmarks. For the EchoList and EchoOrder tests, both with larger amounts of data being sent, the use of compression in Tantivy results in slightly better performance than Native despite the 0% cache-hit ratio.
When the cache-hit ratio is increased to 8%, we observe in Figure 7 that the benefits of caching balance out the overhead induced by Tantivy, and the performance of Native and Tantivy is almost identical for the EchoVoid, EchoStruct, and EchoSynthetic benchmarks. Furthermore, for this configuration, Tantivy gives around half the response time of Native for EchoList and EchoOrder. Increasing the cache-hit ratio even further to 13%, we see additional improvements for the EchoList and EchoOrder benchmarks, with Tantivy being almost five times faster than Native for the former benchmark. As Figure 8 shows, at this higher cache-hit ratio Tantivy results in a significant performance improvement over Native also for the EchoStruct benchmark.
We observe in Figure 9 that for an 8% cache-hit ratio and a 20 Mb/s network, the response times of Native are similar to those of Tantivy, except for the EchoList benchmark, where Tantivy performs substantially better. As caching and data compression are more beneficial for slower networks, we note that the performance improvement of using Tantivy is much higher for 5 Mb/s networks than for 20 Mb/s ones.
As illustrated in Figure 10, Tantivy gives only minor performance improvements over Native for a network with a bandwidth of 100 Mb/s. We thus conclude there is no significant advantage in using Tantivy in a 100 Mb/s network for services that exchange messages with sizes representative for the WSTest experiments. However, we foresee that for very large message sizes, the compression capabilities in Tantivy can improve performance also for high bandwidth networks.
Comparing the relative impact of cache-hit ratio and network bandwidth on Tantivy performance, we observe that the setup with a 5 Mb/s network and a cache-hit ratio of 13% is faster than one with a 20 Mb/s network and an 8% cache-hit ratio, and in fact very close to the performance of Tantivy for a 100 Mb/s network and an 8% cache-hit ratio. We thus conclude that the main performance improvement of the Tantivy system is achieved for services where the message exchange patterns result in high cache-hit ratios, whereas the network bandwidth is a minor factor for Tantivy performance.
Figure 11. Throughput and average response time with an 8% cache-hit ratio, a bandwidth of 20 Mb/s, and different numbers of concurrent client threads.
Figure 11 illustrates, for a varying number of client threads, the performance of Native and Tantivy for a 20 Mb/s network and a cache-hit ratio of 8%. We note that the performance improvement of Tantivy over Native increases with the number of client threads. This suggests that Tantivy is a scalable solution that can improve both response time and throughput for highly loaded services.
VI. CONCLUSIONS AND FUTURE WORK
Web Services have received substantial attention, and there is a great deal of industry excitement around the opportunities they provide. Most of the attention today has focused on Web Services architectures, leaving the performance problem of Web Service responses largely unaddressed. With applications based on Web Services, site-to-site traffic volume grows significantly, which can be a problem for performance-sensitive applications. In this paper, we focus on the response latency issue that arises in Web Services invocations. Our solution demonstrates that the impact of low network performance can be substantially reduced through caching and compression without any compromise to the strict consistency semantics of service response messages.
The essence of our Tantivy architecture is the use of computation at the edges of the Internet to reduce communication overhead in the network. Tantivy uses hashing to detect similarity with previous results and sends either hashes of results or compressed results rather than original results. Our experimental evaluation based on the WSTest benchmark confirms that Tantivy, while conceptually simple, can be highly effective in improving service throughput and response time.
To the best of our knowledge, Tantivy is the first system that combines the use of hash-based techniques, application object data representation, and data compression techniques with caching of response messages to improve Web Service performance. Finally, we foresee several future directions to tackle the performance problem for Web Service responses:
- Web Service cache consistency management protocols would help in avoiding consistency problems and in achieving higher cache-hit ratios.
- The performance of the Tantivy middleware system relies on some parameters, e.g., the compression threshold value mentioned in Section III-C, which must be configured before the system is started. Preferably, these parameters should be adjusted according to the current network conditions. Work on self-tuning approaches may be a solution to address this issue.
- Another interesting topic would be to investigate if the herein proposed technique with SOAP message response caching could be applied also to commonly occurring subelements of a SOAP message, instead of only to whole messages. This would allow further performance improvements for services that frequently exchange a common set of data objects, e.g., shopping basket contents for e-commerce services.
ACKNOWLEDGMENTS
We thank Zhuofeng Zhao, Jun Fang, and the other members of the SIGSIT Laboratory for their contributions to the previous prototype of this work. We are also grateful to the anonymous reviewers for their constructive comments. Financial support is provided by the European Community’s Seventh Framework Programme (FP7/2007-2013) under grant agreement no. 215605 (RESERVOIR).
REFERENCES
|
{"Source-Url": "https://people.cs.umu.se/viali/papers/icws2010.pdf", "len_cl100k_base": 5622, "olmocr-version": "0.1.53", "pdf-total-pages": 8, "total-fallback-pages": 0, "total-input-tokens": 29015, "total-output-tokens": 7505, "length": "2e12", "weborganizer": {"__label__adult": 0.0003275871276855469, "__label__art_design": 0.00025773048400878906, "__label__crime_law": 0.0003077983856201172, "__label__education_jobs": 0.0003237724304199219, "__label__entertainment": 5.996227264404297e-05, "__label__fashion_beauty": 0.00014495849609375, "__label__finance_business": 0.00019216537475585935, "__label__food_dining": 0.00031495094299316406, "__label__games": 0.00030922889709472656, "__label__hardware": 0.0008702278137207031, "__label__health": 0.0005164146423339844, "__label__history": 0.00020575523376464844, "__label__home_hobbies": 6.097555160522461e-05, "__label__industrial": 0.0003085136413574219, "__label__literature": 0.00020134449005126953, "__label__politics": 0.00020956993103027344, "__label__religion": 0.0003883838653564453, "__label__science_tech": 0.020172119140625, "__label__social_life": 7.82012939453125e-05, "__label__software": 0.0075531005859375, "__label__software_dev": 0.96630859375, "__label__sports_fitness": 0.00026154518127441406, "__label__transportation": 0.0004367828369140625, "__label__travel": 0.0002143383026123047}, "weborganizer_max": "__label__software_dev", "avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_v1__avg_fraction_numbers_in_line_ratio": [[0, 33148, 0.01656]], "fineweb_edu_fasttext_gt2__fineweb_edu_fasttext_gt2__score": [[0, 33148, 0.20783]], "ft_lang_id_en_doc_v2__ft_lang_id_en_doc_v2__en": [[0, 33148, 0.88322]], "google_gemma-3-12b-it_contains_pii": [[0, 4784, false], [4784, 9751, null], [9751, 14028, null], [14028, 18437, null], [18437, 23630, null], [23630, 25135, null], [25135, 29824, null], [29824, 33148, null]], "google_gemma-3-12b-it_is_public_document": [[0, 4784, true], [4784, 9751, null], [9751, 14028, null], [14028, 18437, null], [18437, 23630, null], [23630, 25135, null], [25135, 29824, null], [29824, 33148, null]], "google_gemma-3-4b-it_v2tag__is_academic_paper": [[0, 5000, true], [5000, 33148, null]], "google_gemma-3-4b-it_v2tag__is_class_syllabus": [[0, 5000, false], [5000, 33148, null]], "google_gemma-3-4b-it_v2tag__is_completion_certificate": [[0, 5000, false], [5000, 33148, null]], "google_gemma-3-4b-it_v2tag__is_court_notice": [[0, 5000, false], [5000, 33148, null]], "google_gemma-3-4b-it_v2tag__is_homework_assignment": [[0, 5000, false], [5000, 33148, null]], "google_gemma-3-4b-it_v2tag__is_news_article": [[0, 5000, false], [5000, 33148, null]], "google_gemma-3-4b-it_v2tag__is_public_order": [[0, 5000, false], [5000, 33148, null]], "google_gemma-3-4b-it_v2tag__is_resume_cv": [[0, 5000, false], [5000, 33148, null]], "google_gemma-3-4b-it_v2tag__is_test_or_quiz": [[0, 5000, false], [5000, 33148, null]], "google_gemma-3-4b-it_v2tag__is_textbook": [[0, 5000, false], [5000, 33148, null]], "pdf_page_numbers": [[0, 4784, 1], [4784, 9751, 2], [9751, 14028, 3], [14028, 18437, 4], [18437, 23630, 5], [23630, 25135, 6], [25135, 29824, 7], [29824, 33148, 8]], "pipe_delimited_lines_v1__pipe_delimited_lines_v1__pipe_delimited_lines_ratio": [[0, 33148, 0.0]]}
|
olmocr_science_pdfs
|
2024-12-12
|
2024-12-12
|
59ae4702e27ce66d96c8e3bf1019917d174f25bc
|
Efficient Software Testing Technique based on Hybrid Database Approach
Humma Nargis Aleem¹, Prof. Dr. Mirza Mahmood Baig², Dr. Muhammad Mubashir Khan³
NED University of Engineering and Technology, Karachi, Pakistan
Abstract—In the field of computer science, software testing is referred to as a critical process that is executed in order to assess and analyze the performance of, and risks existing in, software applications. There is an emphasis on integrating specific approaches to carry out testing activities in an effective mode; the efficient strategy explored recently is the adoption of a hybrid database approach. For this purpose, a hybrid algorithm is proposed to ensure the functionality and outcomes of the testing procedure. The technical processes and their impact on the current methodology help to evaluate its effectiveness in software testing, from which specific conclusions can be drawn. The findings of the research elaborate the effectiveness of the proposed algorithm for use in software testing. It is suggested that the new technology makes it easier and simpler to assess and analyze the reliability of software. Basically, the hybrid database approach comprises traditional and modern techniques that are deployed in order to achieve testing outcomes. It is evident from various testing methods that challenges have been identified with focusing on traditional techniques, due to which hybrid approaches are now being developed in most areas. In the light of these concepts, the paper aims to investigate the complexity and efficiency of the hybrid database approach in software testing, as well as its scope in the IT industry.
Keywords—Software testing; database testing; hypothetical database testing; traditional database testing; test case(s); grey box testing; software quality assurance
I. INTRODUCTION
Computer science is a vast field distributed across innumerable sections that address different technicalities. Software testing is one of the fields in computer science, referring to the critical processes of assessing and analyzing possible risks and the performance of software [1]. A number of professional software testers across the world have explored a variety of mechanisms for testing vulnerabilities as well as efficiency, which are the core areas of software testing. In the recent IT industry, the major concern that software developers and testers have raised is the incompetent approaches being applied in the field of testing, which hinder functionality, while business needs are not catered to as appropriately as they should be [2].
The IT industry has always performed strategically in providing the utmost facilities to businesses so that no technical issues could affect their performance and productivity [3]. In everyday technical activities, a number of software techniques are adopted and used according to the specifications so that significant outcomes can be met. However, difficulty is faced when appropriate methods do not satisfy technical needs, due to which businesses are affected at large [4]. All of these professionals ensure that developed software is bug-free because they follow a specific software development life cycle in order to make sure that each component of the software is developed under full consideration. Among these phases, software testing is applied to assure quality and the necessary fixes that improve the software's functions [5].
Nowadays, reliable software development needs are not properly reviewed, due to which businesses, as well as consumers, face difficulty in benefiting from software use [6, 7]. Software testing has been given more attention in every aspect, but due to outdated methods and techniques, certain technical needs are not properly addressed. In this regard, concerns have been raised about improving traditional methods by integrating modern approaches in order to improve software testing. Among current methods, the database approach is getting more attention due to its reliability and efficiency in fulfilling testing needs; thus, hybrid approaches have recently become a research focus. The following study is developed to address the need to overcome these problems and to introduce a hybrid approach to software database testing [8, 9].
A. Paper Structure
Section II presents a brief description of the utilization of software testing methodologies, along with their key research contributors, under the umbrella of a literature review. Section III puts forward the proposed software testing methodology based on the hybrid database approach. Section IV focuses more specifically on the algorithm and execution of the proposed software testing approach in an illustrative manner, with a brief note on the limitations of the study. Sections V and VI compare the performance of the proposed methodology with a methodology based on the traditional database testing approach by considering various parameters for testing goals and their accomplishment. Section VII concludes that the proposed methodology performs better than the traditional approaches and foresees even more refined methodologies. It also discusses future aspects of this research from quantum computing and machine learning perspectives.
II. LITERATURE REVIEW
A. Software Testing
According to Bajaj, Kamini Simi [2], the process of software testing is not complicated, but its approaches have increased its complexity to the greatest extent. The author further sheds light on the definition of software testing in terms of an evaluation process in which the software is tested to ensure whether or not it is developed to meet system originality. Furthermore, the author adds that the process of software testing comprises validation and verification aspects that check whether the developed software meets the criteria defined by the user [10]. The analysis of the study determines another important part of software testing, namely results, which capture the major difference between actual and expected outcomes.
B. Existing Testing Methods
In software testing, the pre-defined traditional methods are recognized in almost every technical area, and thus their functionalities and approaches vary with the level of the testing method. Based on the study of Arnicans, Guntis, and Vineta Arnicane [5], the fundamental software testing methods incorporated in every aspect are black box testing, white box testing, and grey box testing. Different forms of database testing types and techniques have already been developed and are preferably used according to the suitability and applicability of the specific type of database on a specific platform. The generally discussed types of database testing techniques are structural (internal) database testing, non-functional (external) database testing, and functional (logical/conceptual) database testing [11, 12].
C. Black Box Testing
The paper proposed by Jamil, Muhammad Abid, Muhammad Arif, Normi Sham Awang Abubakar, and Akhlaq Ahmad [1] describes that black box testing only evaluates a software's functionalities, rather than focusing on its implementation in detail. It is identified that black box software testing is appropriate at every level of the SDLC in order to examine bugs and errors within major functionalities. The basic function of this testing method is to assess the required functions and compare them with user requirements to verify whether the application is developed according to the desired needs [13, 14]. The existing method is efficient in finding adequate functionalities by testing each phase at its minimum and maximum case values. Jamil et al. [1] also explain that black box testing is one of the simplest and most widespread methods, mainly carried out by professionals across the globe.
D. White Box Testing
Mutlu, Kivang, Yurii Brun, and Alexandra Meliou [3] define the significance of white box testing in terms of its effectiveness and important functions. Basically, white box testing is one of the approaches known for testing the internal structure of the developed software. It is also evident that to perform white box testing, the IT industry requires specific programming skills and knowledge as a prerequisite in order to develop test cases. Another study provides more information regarding white box testing [15]. In that study, the method is also described as clear box or glass box testing, due to the fact that it validates and verifies the internal mechanism to satisfy the development process. In addition, white box testing is known to be applied at different levels, such as unit, integration, and even system testing. It is also explained that among all other testing methodologies, white box testing stands out due to its nature and complexity [16].
E. Grey Box Testing
With respect to grey box testing, Arnicans, Guntis, and Vineta Arnicane [5] state that it is hybrid in nature because it accompanies all the basic requirements and functions performed by black box and white box testing. As the approach carries the advantages of both black box and white box testing, grey box testing is widely used across different areas in order to evaluate vulnerabilities and the security of the developed software. Inputs are provided from the front interface of the application in order to verify the back-end data structure through a debugging process that reveals internal culpabilities of the database schema [17].
III. PROPOSED METHODOLOGY
To display the functions and phases of testing, the following research proposes an algorithm for a hybrid software testing database approach that can be used to develop an efficient and effective testing methodology for software developers [18]. Secondary sources were used to collect specific information regarding testing methods and the emergence of the new technology. For this purpose, different scholarly articles and tech blogs were reviewed. The research is designed to address the technical processes and their impact on software development, to ensure the effectiveness and efficiency of the designed procedure. “Fig. 1” beneath depicts the proposed methodology [19].

IV. HYBRID DATABASE APPROACH
It is evident from recent studies that the mode of testing practices is changing due to software preferences, as well as concerns regarding quality. A new way to test software applications has been proposed, but limited information and evidence are available to prove its efficiency. As illustrated in the studies by MM Baig [20, 21, 22], the database approach is one of the effective ways of adding new features to current testing methods in order to evaluate the performance and risks of the software by building effective test cases. It is also examined that by using hybrid technology, the upgraded features enable developers to assess the improved quality of the application while reducing the number of critical bugs in it. Moreover, the proposed methods are also effective in providing early information regarding issues that might affect the performance of the software and deviate its results from the expected outcomes [23].
An algorithm to execute the hybrid testing method is proposed, which is more efficient and reliable as compared to the traditional and hypothetical database testing methods. In the process, when a request is generated by the initiator, the algorithm analyzes it, and based on the evaluation, different states are maintained prior to loading it into the original database [24, 25]. Initiated requests are treated as follows:
1) Request initiated by end user / customer / online order:
Requests generated by customer(s) and online order(s) are approved by default, and the original database is updated in the traditional way upon reaching the timestamps defined for the system.
2) Request initiated by Developer(s): Requests generated by developer(s) are treated through the hypothetical database testing method, in which a new database state, known as a hypothetical database, is generated, while it is ensured that the originality of the previously generated database remains intact. This is due to the association established between the original database and the differential table, subject to approval from the developer(s) side. Changes in the schema made by the developer can easily be implemented on the primary database at day end, after daily transactions have stopped.
3) Request initiated by Tester(s): When a tester executes test case(s), the results are displayed in the grid and are visible to the tester only. If required, these can be saved in differential files related to testing for future correspondence; otherwise, they are rolled back when the tester exits the system. This is done in order to facilitate the different anomalies that can be exercised through the differential file on hypothetical database states. The traditional approaches applied these anomalies to original databases, which is inappropriate, as their originality would be affected, due to which testers face problems in analyzing actual requirements.
By referring to “Fig. 2”, it can be seen that the tester implements the first test case, which is analyzed by the hybrid algorithm containing an instance of the primary database state. When the test case is successfully completed at this level, the updated results are stored in a differential file containing a differential table, ensuring that the originality of the primary database remains intact. When the second test case is run, the same procedure is followed. After execution of the entire test suite, the desired results can be stored in the original database under the tester's login for future referencing, but only after approval. All unapproved requests are rolled back automatically [26, 27, 28].
In “Fig. 3” below, a state transition diagram based on different states is illustrated. From the illustration, it can be seen that a transition occurs upon fulfillment of the requirements by the hybrid database, instead of the original or hypothetically generated database. From the analysis of this method, it is suggested that a hypothetical rollback can be performed to any state, rather than executing the action on the original database [29]. Moreover, the time complexity of the rollback is quite efficient, as it reaches the destination state quickly compared to traditional ways. With this fast and efficient approach, processing time is minimized, while the cost and budget of the whole method are also reduced. Based on the proposed plan, the testing algorithm includes a number of test cases, while each test case generates a number of differential tables in differential files. In the next step, the software tester would require some time to prepare more test cases, as the previously generated cases are not appropriate for the current database states in the hypothetical chain [30, 31].
Fig. 3. State Transition Diagram.
The state transition diagram illustrated above elaborates the mechanism that a tester follows in order to achieve the desired results. In this situation, another concept is proposed in order to fix the state, so that newly generated databases comply with the established standards. The concept of database rollback is also used here to let the generated databases fulfill the conditions of the developed test cases. The main purpose of using this option in the current testing methodology is that it increases the efficiency of database generation, while the preparation time of the current state is improved [32, 33].
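As a rough sketch of this mechanism (the paper does not publish its implementation, so the classes below are hypothetical), a differential table can be layered over the original state: reads consult the differential table first, approval merges it into the original database, and rollback simply discards it.

```java
import java.util.HashMap;
import java.util.Map;

// Sketch of hypothetical-database testing: writes from a test case land in
// a differential table; reads consult it first and fall back to the base
// state; rollback clears the deltas, leaving the original data untouched.
public class HypotheticalDb {
    private final Map<String, String> original;               // base state
    private final Map<String, String> diff = new HashMap<>(); // test-case deltas

    public HypotheticalDb(Map<String, String> original) { this.original = original; }

    public void write(String key, String value) { diff.put(key, value); }

    public String read(String key) {
        return diff.containsKey(key) ? diff.get(key) : original.get(key);
    }

    // Approval: merge the differential table into the base state.
    public void approve() { original.putAll(diff); diff.clear(); }

    // Default on tester exit: discard unapproved changes.
    public void rollback() { diff.clear(); }

    public static void main(String[] args) {
        Map<String, String> base = new HashMap<>();
        base.put("student:1", "enrolled");
        HypotheticalDb db = new HypotheticalDb(base);
        db.write("student:1", "graduated");        // test-case update
        System.out.println(db.read("student:1"));  // graduated (from diff)
        db.rollback();                             // tester exits unapproved
        System.out.println(db.read("student:1"));  // enrolled (base intact)
    }
}
```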
A. Hybrid Database Testing Algorithm
The HYBRID DATABASE TESTING ALGORITHM proposed for the hybrid plan is detailed in “Fig. 4” beneath.
B. Roadmap towards Hybrid Database Testing
The strategy for the proposed hybrid database testing plan is elaborated in Table I below.
C. Limitations of the Proposed Study
Different challenges are associated with database testing due to the nature of databases, which are complex both structurally and in magnitude. The testers' skill set raises a handful of challenges related to the design of test cases and the tactics to execute them with proper exploration. Core challenges were related to database schema structure; cleansing, synchronization, and reliability of quality data; and under-testing / incomplete testing of colossal databases. However, efforts were made to overcome these issues in the proposed plan so that a more effective method could be developed.
TABLE I. HYBRID DATABASE TESTING ROADMAP

<table>
<thead>
<tr>
<th>Phase</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>OBJECTIVE</td>
<td>To perform database testing in order to uncover incomplete schema, malfunctioning functionality, data corruption, deadlocks, data mapping issues and exceptions.</td>
</tr>
<tr>
<td>TESTING CRITERIA</td>
<td>Testing of all key database schema tables, methods, processes, sequences, functions, indices, views, cursors, triggers and stored procedures.</td>
</tr>
<tr>
<td>PRE-REQUISITES</td>
<td>Testing requirements are well communicated and documented to testers. Test cases covering all aspects of database testing are designed. Success criteria are established before the testing phase. The test environment is set up and frozen with the latest database schema.</td>
</tr>
<tr>
<td>EXCEPTIONAL CONTEMPLATION</td>
<td>Testing is conducted with real-time data in the actual environment, with automatic invocation of stored procedures and processes. For large databases, a DBMS development environment is required to populate data directly into the database from the backend in order to monitor its frontend adaptation. For small databases, limited records are generated to test non-acceptable events, triggers and exceptions.</td>
</tr>
<tr>
<td>TESTING TECHNIQUE</td>
<td>The testing technique or strategy is selected so that it supports the testing of all key use-case scenarios and complete business flows, i.e. the main features.</td>
</tr>
<tr>
<td>REQUIRED TOOLS</td>
<td>Test management tools, test script automation tool, SQL query analyzer, test data generator, bug tracking tool, backup and recovery tools.</td>
</tr>
<tr>
<td>PROCEDURAL STEPS</td>
<td>Testers work according to the database testing checklist and guidelines to inspect the database, ensuring that proper data is inserted and stored in the database for the correct reasons.</td>
</tr>
<tr>
<td>POST-REQUISITES</td>
<td>Executing each test case separately but sequentially, using valid and invalid data, should reveal the expected results for valid data and timely error messages that prevent insertion of invalid data into the database.</td>
</tr>
</tbody>
</table>
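As a sketch of the POST-REQUISITES step, the following hypothetical harness runs test cases sequentially, expecting valid inputs to succeed and invalid inputs to be rejected with an error. The `insert_record` entry point and the error type are assumptions, not part of the proposed tool.

```python
# Hypothetical sequential test-case runner for the POST-REQUISITES step.
# insert_record is an assumed application entry point that validates and
# stores a row; it is expected to raise ValueError on invalid data.

def insert_record(db, record):
    if not record.get("name"):
        raise ValueError("name is required")
    db[record["id"]] = record

def run_test_cases(db, cases):
    """Execute each case separately but sequentially and report results."""
    results = []
    for case in cases:
        try:
            insert_record(db, case["record"])
            outcome = "inserted"
        except ValueError as err:
            outcome = f"rejected: {err}"
        ok = (outcome == "inserted") == case["valid"]
        results.append((case["id"], outcome, "PASS" if ok else "FAIL"))
    return results

db = {}
cases = [
    {"id": "TC1", "record": {"id": 1, "name": "Ada"}, "valid": True},
    {"id": "TC2", "record": {"id": 2, "name": ""}, "valid": False},
]
for row in run_test_cases(db, cases):
    print(*row)
```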
Fig. 4. Hybrid Database Approach Algorithm.
From the above algorithm, the proposed strategy and its limitations, it is deduced that existing methods are inefficient and provide only limited support for basic software testing, which is why appropriate results are not achieved. Using the hybrid database testing approach, software developers and testers gain more assistance in the testing phase, while increased numbers of bugs and incidents are surfaced as exceptions. Moreover, the approach is efficient and performs adequately because it consumes less time, is cost-effective and uses limited resources. The algorithm is not only convenient but also gives easy access to the basic requirements that must be present in the developed software [34].
V. RESULTS WITH DISCUSSION
To check the efficacy of the algorithm, multiple queries and test cases were designed and run on the schemas separately in order to monitor their performance. The results of both the queries and the test cases are discussed below.
A. Query Processing Time
Multiple queries, as illustrated in “Fig. 5”, were designed of the following types: aggregate queries (Q1 – Q50), join queries (Q51 – Q100) and nested queries (Q101 – Q150), where “Qn” denotes the query number. These queries were run on sample databases of the following sizes: 400 MB, 4 GB, 40 GB and 400 GB. Owing to the classified database architecture and complex confidential queries, only general queries on a student database are shared in “Fig. 5” for reference.
To analyze the processing time in seconds, these queries were executed first through the traditional database testing approach, then through the hypothetical database testing approach, and lastly through the hybrid database approach. According to the results shown in Table II below and illustrated in “Fig. 6”, “Fig. 7” and “Fig. 8”, the processing time of the queries is reduced by approximately 70% with the hybrid database approach as opposed to accessing the data directly.
TABLE II. QUERY PROCESSING TIME RECORDED ON DIFFERENT SCHEMAS

[Table II records, for each query type (aggregate queries Q1 – Q50, join queries Q51 – Q100 and nested queries Q101 – Q150) and each database size (400 MB, 4 GB, 40 GB and 400 GB), the processing time measured on the direct, hypothetical and hybrid schemas; the individual timings are plotted in “Fig. 6”, “Fig. 7” and “Fig. 8”.]
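A measurement loop of the following shape could reproduce this kind of comparison. `run_query` and the schema handles are placeholders for whatever client API is in use, so treat this as an assumed harness rather than the study's actual instrumentation.

```python
import time

def run_query(schema, query):
    """Placeholder for executing `query` against `schema` (assumed API)."""
    raise NotImplementedError

def time_queries(schema, queries, repeats=3):
    """Return average wall-clock seconds to run all queries on a schema."""
    samples = []
    for _ in range(repeats):
        start = time.perf_counter()
        for query in queries:
            run_query(schema, query)
        samples.append(time.perf_counter() - start)
    return sum(samples) / len(samples)

# Hypothetical usage: compare the three approaches on the same workload.
# for name, schema in [("direct", direct), ("hypothetical", hypo),
#                      ("hybrid", hybrid)]:
#     print(name, time_queries(schema, aggregate_queries))
```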


B. Test Case(s) Execution Time
The test case matrix in “Fig. 9” was designed and executed on the hypothetically designed and hybrid database schemas in addition to the direct database schema. Functional test cases (TC₁ – TC₅₀) were designed to check the functional aspects of the database, and non-functional test cases (TC₅₁ – TC₁₀₀) the non-functional aspects, where “TCₙ” denotes the test case number. These test cases were run on the same sample databases of 400 MB, 4 GB, 40 GB and 400 GB used for query execution.
To analyze the execution time in minutes, the test cases were executed first through the traditional database testing approach, then through the hypothetical database testing approach, and lastly through the hybrid database approach. According to the results in Table III below and illustrated in “Fig. 10” and “Fig. 11”, the overall execution time of the test cases is reduced by almost 60% relative to executing them directly on the primary database.
---
**TABLE III. TEST CASE(S) EXECUTION TIME RECORDED ON DIFFERENT SCHEMAS**
<table>
<thead>
<tr>
<th>Test Case(s)</th>
<th>Database Size</th>
<th>Direct Schema</th>
<th>Hypothetical Schema</th>
<th>Hybrid Schema</th>
</tr>
</thead>
<tbody>
<tr>
<td rowspan="4">Functional Test Cases (TC₁ – TC₅₀)</td>
<td>400 MB</td><td>≈ 15 min</td><td>≈ 11 min</td><td>≈ 5 min</td>
</tr>
<tr>
<td>4 GB</td><td>≈ 20 min</td><td>≈ 16 min</td><td>≈ 8 min</td>
</tr>
<tr>
<td>40 GB</td><td>≈ 33 min</td><td>≈ 25 min</td><td>≈ 18 min</td>
</tr>
<tr>
<td>400 GB</td><td>≈ 45 min</td><td>≈ 31 min</td><td>≈ 23 min</td>
</tr>
<tr>
<td rowspan="4">Non-Functional Test Cases (TC₅₁ – TC₁₀₀)</td>
<td>400 MB</td><td>≈ 18 min</td><td>≈ 10 min</td><td>≈ 4 min</td>
</tr>
<tr>
<td>4 GB</td><td>≈ 22 min</td><td>≈ 11 min</td><td>≈ 7 min</td>
</tr>
<tr>
<td>40 GB</td><td>≈ 38 min</td><td>≈ 27 min</td><td>≈ 15 min</td>
</tr>
<tr>
<td>400 GB</td><td>≈ 56 min</td><td>≈ 35 min</td><td>≈ 21 min</td>
</tr>
</tbody>
</table>
---
Fig. 8. Nested Queries Processing Time.
Fig. 9. Test Case Matrix.
Fig. 10. Functional Test Case(s) Execution Time.
Fig. 11. Non-Functional Test Case(s) Execution Time.
VI. TESTING GOALS ACHIEVED
The testing goals achieved are listed below:
1) **Normalization rules:** No data in the database was repetitive and all columns were logically connected.
2) **Data types:** Since the systems accept data of a dynamic nature, deciding the type of each data column is crucial. In some scenarios extremely unusual inputs can be expected, so it must be ensured that data goes into the right columns and cells. This is taken very seriously, as a mismatch of columns can cause huge issues in the stability of the database.
3) **Retrieval of data and number of joins:** Developers usually avoid adding multiple joins or making a query complex, mainly because of the system's speed and response time.
4) **Data endpoints:** Data populating the database from the system is always checked against data generated by the database for the database (triggers or metadata).
5) **Usage of flags:** For the system to work properly, some columns act as flags for different things, e.g. 0 = admin user, 1 = developer, 2 = tester. These are also tested rigorously to control access-level permissions (see the sketch after this list).
6) **Write speed in big systems:** Sometimes the data coming into the database is huge and the database cannot keep up the pace, so the write-speed threshold was also tested. This mainly happens in e-commerce websites where user actions and activities are recorded against their profiles.
7) **Database security and password encryption issues.**
8) **Online testing:** Online testing of the software application in parallel with order processing or daily transactional operations, without a separate test environment, was possible with the add-on rollback functionality.
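As a sketch of the flag check in item 5, the role values below come from that item, while the permission sets and function names are illustrative assumptions rather than the tested system's actual values.

```python
# Illustrative role-flag check (flag values from item 5; permissions assumed).
ROLE_FLAGS = {0: "admin", 1: "developer", 2: "tester"}
PERMISSIONS = {"admin": {"read", "write", "grant"},
               "developer": {"read", "write"},
               "tester": {"read"}}

def can(flag, action):
    """Return True if the role encoded by `flag` may perform `action`."""
    role = ROLE_FLAGS.get(flag)
    if role is None:
        raise ValueError(f"unknown role flag: {flag}")
    return action in PERMISSIONS[role]

assert can(0, "grant") and can(1, "write") and not can(2, "write")
```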
VII. CONCLUSION AND FUTURE WORK
During software development, it is quite difficult for developers and testers to identify the bugs and the test cases that need to be executed to achieve the intended results. The hybrid database testing approach was formulated and tested by executing queries and test cases on sample databases of sufficient sizes, and the testing goals were achieved efficiently. Using the proposed hybrid plan, businesses can adopt effective software testing methods without creating separate environments, investing little while achieving productive results. The complexity and time-consuming work of recognizing errors in the software testing phase can also be managed easily, thanks to the flexibility of searching and updating records while keeping the primary database intact throughout. Requests generated by different initiators can also be assessed simultaneously, segregated as required.
Moreover, in the future we expect to see software testing tools with built-in support for hybrid state transitions between databases, integrating quantum technology and machine learning techniques. Such tools would incorporate different processes to provide accurate and unambiguous results in even less time. As these technologies gain popularity in the IT industry, further transformation of software testing is expected, with traditional approaches diminishing as hybrid technologies are integrated.
ACKNOWLEDGEMENT
NED University of Engineering & Technology is highly acknowledged for providing research support required to carry out this research.
REFERENCES
---
Problem 1 [32 points]:
Consider the following architecture.
<table>
<thead>
<tr>
<th>Functional Unit Type</th>
<th>Cycles in EX</th>
<th>Number of Functional Units</th>
<th>Pipelined</th>
</tr>
</thead>
<tbody>
<tr>
<td>Integer</td>
<td>1</td>
<td>1</td>
<td>No</td>
</tr>
<tr>
<td>FP Add/Subtract</td>
<td>3</td>
<td>1</td>
<td>Yes</td>
</tr>
<tr>
<td>FP/Integer Multiplier</td>
<td>6</td>
<td>1</td>
<td>Yes</td>
</tr>
<tr>
<td>FP/Integer Divider</td>
<td>24</td>
<td>1</td>
<td>No</td>
</tr>
</tbody>
</table>
- In this problem we will use the MIPS pipeline.
- The integer functional unit performs integer addition (including effective address calculation for loads/stores), subtraction, logic operations and branch operations.
- There is full forwarding and bypassing, including forwarding from the end of an FU to the MEM stage for stores.
- Loads and stores complete in one cycle. That is, they spend one cycle in the MEM stage after the effective address calculation.
- There are as many registers, both FP and integer, as you need.
- There is one branch delay slot.
- While the hardware has full forwarding and bypassing, it is the responsibility of the compiler to schedule such that the operands of each instruction are available when needed by each instruction.
- If multiple instructions finish their EX stages in the same cycle, then we will assume they can all proceed to the MEM stage together. Similarly, if multiple instructions finish their MEM stages in the same cycle, then we will assume they can all proceed to the WB stage together. In other words, for the purpose of this problem, you are to ignore structural hazards on the MEM and WB stages.
The following code implements the DAXPY operation, $Y = aX + Y$, for vectors of length 100. Initially, R1 is set to the base address of array X and R2 is set to the base address of Y. Assume the initial value of R3 = 0.
```
DADDIU R4, R1, #800
FOO: L.D F2, 0(R1)
MUL.D F4, F2, F0
L.D F6, 0(R2)
ADD.D F6, F4, F6
S.D F6, 0(R2)
DADDIU R1, R1, #8
DADDIU R2, R2, #8
DSLTU R3, R1, R4 // set R3 to 1 if R1 < R4
BNEZ R3, FOO
```
**NOTE FOR GRADERS**: For any of the following parts, no points should be deducted if
*DADDIU R4, R1, #800* is included when counting the number of cycles.
**Part A. [6 points]**
Consider the role of the compiler in scheduling the code. Rewrite this loop, but let every row take a cycle. If an instruction can’t be issued on a given cycle (because the current instruction has a dependency that will not be resolved in time), write STALL instead, and move on to the next cycle to see if it can be issued then. Assume that a NOP is scheduled in the branch delay slot (effectively stalling 1 cycle after the branch). Explain all stalls, but don’t reorder instructions. How many cycles elapse before the second iteration begins? Show your work.
```
FOO: L.D    F2, 0(R1)
(1)  STALL  RAW F2
     MUL.D  F4, F2, F0
     L.D    F6, 0(R2)
(2)  STALL  RAW F4, F6
(3)  STALL  RAW F4
(4)  STALL  RAW F4
(5)  STALL  RAW F4
     ADD.D  F6, F4, F6
(6)  STALL  RAW F6
     S.D    F6, 0(R2)
     DADDIU R1, R1, #8
     DADDIU R2, R2, #8
     DSLTU  R3, R1, R4
(7)  STALL  RAW R3
     BNEZ   R3, FOO
     NOP
```
17 cycles.
**Grading:** 1 point for each of stalls (1), (6), and (7). No credit without any explanation for the stall. Partial credit of ½ point if more than one stall cycle is indicated for the corresponding instruction.
2 points total for stalls (2) to (5). Partial credit is awarded as follows, assuming there is at least one correct explanation for the stall (e.g., at least one of the F4 or F6 dependence is listed for stall 2): 1 point if a stall is listed between these instructions, but the number of stalls is incorrect; ½ point if at least some of the reasons for the stalls are correct.
1 point for NOP (branch delay slot).
Negative ½ point for each unnecessary sequence of stalls.
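To sanity-check stall counts like the 17 cycles above, the use-latency rules can be mechanized in a few lines. The gap table below is an assumed encoding of this pipeline's forwarding behavior (load-use gap 2, FP multiply gap 6, FP add gap 3 except gap 2 into a store's data operand, integer-to-branch gap 2), not code from the course.

```python
# Minimal in-order stall counter (a sketch under the assumed gap table,
# not a full pipeline model). Each instruction: (kind, dests, srcs).
PROGRAM = [
    ("load",   ["F2"], ["R1"]),          # L.D    F2, 0(R1)
    ("fpmul",  ["F4"], ["F2", "F0"]),    # MUL.D  F4, F2, F0
    ("load",   ["F6"], ["R2"]),          # L.D    F6, 0(R2)
    ("fpadd",  ["F6"], ["F4", "F6"]),    # ADD.D  F6, F4, F6
    ("store",  [],     ["F6", "R2"]),    # S.D    F6, 0(R2)
    ("int",    ["R1"], ["R1"]),          # DADDIU R1, R1, #8
    ("int",    ["R2"], ["R2"]),          # DADDIU R2, R2, #8
    ("int",    ["R3"], ["R1", "R4"]),    # DSLTU  R3, R1, R4
    ("branch", [],     ["R3"]),          # BNEZ   R3, FOO
    ("nop",    [],     []),              # branch delay slot
]

def min_gap(producer, consumer):
    """Assumed min issue-cycle gap from producer to dependent consumer."""
    if producer == "load":
        return 2
    if producer == "fpmul":
        return 6
    if producer == "fpadd":
        return 2 if consumer == "store" else 3
    if producer == "int":
        return 2 if consumer == "branch" else 1
    return 1

ready = {}          # register -> (producer kind, producer issue cycle)
cycle = 0
for kind, dests, srcs in PROGRAM:
    issue = cycle + 1
    for reg in srcs:
        if reg in ready:
            pkind, pcycle = ready[reg]
            issue = max(issue, pcycle + min_gap(pkind, kind))
    cycle = issue
    for reg in dests:
        ready[reg] = (kind, issue)

print(cycle)   # prints 17 for the unscheduled loop body above
```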
**Part B. [6 points]**
Now reschedule the loop. You can change immediate values and memory offsets. You can reorder instructions, but don’t change anything else. Show any stalls that remain. How many cycles elapse before the second iteration begins? Show your work.
```
FOO: L.D    F2, 0(R1)
     L.D    F6, 0(R2)
     MUL.D  F4, F2, F0
     DADDIU R1, R1, #8
     DADDIU R2, R2, #8
     DSLTU  R3, R1, R4
     STALL
     STALL
     ADD.D  F6, F4, F6
     BNEZ   R3, FOO
     S.D    F6, -8(R2)
```
11 cycles
**Grading:** Full points for any correct sequence with minimum number of stalls. Partial credit only if the sequence does the same computation and reduces some stalls. Deduct ½ point for each error (e.g., incorrect index), and deduct ½ point for each stall in excess of 2.
**Part C. [6 points]**
Now unroll and reschedule the loop the minimum number of times needed to eliminate all stalls. You can remove redundant instructions. How many times did you unroll the loop? How many cycles elapse before the next iteration of the loop begins? Don’t worry about clean-up code. Show your work.
FOO:
```
L.D    F2, 0(R1)
L.D    F6, 0(R2)
MUL.D  F4, F2, F0
L.D    F14, 8(R1)
L.D    F16, 8(R2)
MUL.D  F18, F14, F0
DADDIU R1, R1, #16
DADDIU R2, R2, #16
ADD.D  F6, F4, F6
DSLTU  R3, R1, R4
S.D    F6, -16(R2)
ADD.D  F16, F18, F16
BNEZ   R3, FOO
S.D    F16, -8(R2)
```
There are two original iterations in an iteration of the new loop. 14 cycles elapse before the next iteration.
**Grading:** 1 point for the correct iteration count. Deduct ½ point for every error or stall cycle. Give partial credit (2 points) if three iterations are used instead of two and the solution is correct with three iterations.
**Part D. [8 Points]**
Consider a VLIW processor in which one instruction can support two memory operations (load or store), one integer operation (addition, subtraction, comparison, or branch), one floating point add or subtract, and one floating point multiply or divide. There is no branch delay slot. Now unroll the loop four times, and schedule it for this VLIW to take as few stall cycles as possible. How many cycles do the four iterations take to complete? Use the following table template to show your work.
<table>
<thead>
<tr>
<th>MEM 1</th>
<th>MEM 2</th>
<th>INTEGER</th>
<th>FP ADD</th>
<th>FP MUL</th>
</tr>
</thead>
<tbody>
<tr><td>L.D F2, 0(R1)</td><td>L.D F6, 0(R2)</td><td></td><td></td><td></td></tr>
<tr><td>L.D F14, 8(R1)</td><td>L.D F16, 8(R2)</td><td></td><td></td><td></td></tr>
<tr><td>L.D F24, 16(R1)</td><td>L.D F26, 16(R2)</td><td></td><td></td><td>MUL.D F4, F2, F0</td></tr>
<tr><td>L.D F34, 24(R1)</td><td>L.D F36, 24(R2)</td><td></td><td></td><td>MUL.D F18, F14, F0</td></tr>
<tr><td></td><td></td><td></td><td></td><td>MUL.D F28, F24, F0</td></tr>
<tr><td></td><td></td><td></td><td></td><td>MUL.D F38, F34, F0</td></tr>
<tr><td></td><td></td><td>DADDIU R1, R1, #32</td><td></td><td></td></tr>
<tr><td></td><td></td><td>DADDIU R2, R2, #32</td><td></td><td></td></tr>
<tr><td></td><td></td><td>DSLTU R3, R1, R4</td><td>ADD.D F6, F4, F6</td><td></td></tr>
<tr><td></td><td></td><td></td><td>ADD.D F16, F18, F16</td><td></td></tr>
<tr><td>S.D F6, -32(R2)</td><td></td><td></td><td>ADD.D F26, F28, F26</td><td></td></tr>
<tr><td>S.D F16, -24(R2)</td><td></td><td></td><td>ADD.D F36, F38, F36</td><td></td></tr>
<tr><td>S.D F26, -16(R2)</td><td></td><td></td><td></td><td></td></tr>
<tr><td>S.D F36, -8(R2)</td><td></td><td>BNEZ R3, FOO</td><td></td><td></td></tr>
</tbody>
</table>
14 cycles with unrolling 4 times.
Grading: ½ point for each correct row in the above table (no penalty for cascading errors due to a row being wrong). 1 point if the scheduled code is correct and takes 14 cycles.
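A quick way to validate such a schedule is to check each long-instruction word against the slot limits (two memory ops, one integer, one FP add, one FP multiply). The sketch below encodes just that structural check; it is an assumed helper, not part of the assignment.

```python
# Structural check for the VLIW format in Part D (assumed helper).
SLOT_LIMITS = {"mem": 2, "int": 1, "fpadd": 1, "fpmul": 1}

def check_word(ops):
    """ops: list of (slot_kind, text). Raise if the word overfills a slot."""
    counts = {}
    for kind, _ in ops:
        counts[kind] = counts.get(kind, 0) + 1
    for kind, n in counts.items():
        if n > SLOT_LIMITS.get(kind, 0):
            raise ValueError(f"too many {kind} ops in one word: {n}")

# Example: cycle 3 of the schedule above packs two loads and one multiply.
check_word([("mem", "L.D F24, 16(R1)"),
            ("mem", "L.D F26, 16(R2)"),
            ("fpmul", "MUL.D F4, F2, F0")])
```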
Part E. [6 Points]
Provide the steady-state code for a software pipelined version of the loop given in this question. Your code should give the minimum number of stalls using the minimum number of static instructions. Assume the loop will have at least four iterations. You do not have to show the start-up or finish-up code (i.e., prolog or epilog).
Solution:
(1) S.D F6, -24(R2) // x-3 instruction
(2) ADD.D F6, F4, F7 // x-2 instruction
(3a) L.D F7, -8(R2) // x-1 instruction
(3b) MUL.D F4, F2, F0 // x-1 instruction
(4) DADDIU R1, R1, #8
(5) DSLTU R3, R1, R4 // set R3 to 1 if R1 < R4
(6) L.D F2, -8(R1) // x instruction
(7) BNEZ R3, FOO
(8) DADDIU R2, R2, #8
Grading:
3 points for correct offsets of instruction (1), (3a) and (6).
½ point for (1) and (2), ¼ for (3a) and (3b), ½ for (6).
1 point if instructions at number (4), (5), (6), (7) and (8) are given in an order with no stall.
Alternate solution 1:
(1) ADD.D F6, F4, F6 // x-1 instruction
(2) L.D F2, 0(R1) // x instruction
(3) S.D F6, -8(R2) // x-1 instruction
(4) MUL.D F4, F2, F0 // x instruction
(5) DADDIU R1, R1, #8
(6) DSLTU R3, R1, R4 // set R3 to 1 if R1 < R4
(7) L.D F6, -8(R2) // x instruction
(8) BNEZ R3, FOO
(9) DADDIU R2, R2, #8
Grading:
3 points for correct offsets of instruction (2), (3) and (7).
1.5 for x instructions, ½ for x-1 instructions.
1 point if instructions at number (5), (6), (7), (8) and (9) are given in an order with no stall.
Alternate solution 2:
(1) S.D F6, 0(R2) // x instruction
(2) ADD.D F6, F4, F7 // x+1 instruction
(3) MUL.D F4, F2, F0 // x+2 instruction
(4) L.D F2, 24(R1) // x+3 instruction
(5) DADDIU R1, R1, #8
(6) DSLTU R3, R1, R4 // set R3 to 1 if R1 < R4 – see note below for loop bound
(7) L.D F7, 16(R2) // x+2 instruction
(8) BNEZ R3, FOO
(9) DADDIU R2, R2, #8
Grading:
3 points for correct offsets of instruction (1), (4) and (7).
½ point for (1), ½ for (2) and (4), ¼ for (3) and (7).
1 point if instructions at number (5), (6), (7), (8) and (9) are given in an order with no stall.
Note: Since this solution performs loads for future iterations, the loop bound (R4) needs to be reduced by 3 to avoid erroneous and out-of-bounds accesses. -½ point off if you did not do this. Points off capped at 6.
NOTE: ONLY GRADUATE STUDENTS SHOULD SOLVE THE NEXT TWO PROBLEMS.
Problem 2 - GRADUATE STUDENTS PROBLEM [10 points]
Consider the following C code fragment:
```c
for (i = 0; i < 100; i++) {
if (c == 0) {
...
c = ...;
... // code I
}
else {
...
c = ...;
... // code II
}
... // code III
}
```
The above translates to the MIPS fragment below. R5 and R6 store variables i and c, respectively.
```
Init: MOV.D R5, R0 // i = 0
If: BNEZ R6, Else // Branch1 (c == 0?)
... // Code I = 10 instructions; contains a write to R6
J Join
Else: ... // Code II = 100 instructions; contains a write to R6
Join: ... // Code III = 10 instructions
Loop: DADDI R5, R5, #1 // i++
DSUBI R7, R5, #100
BNEZ R7, If // Branch2 (i == 100?)
J Done
```
Suppose the segments “Code I” (if part), “Code II” (else part), and Code III (common part) contain 10, 100, and 10 assembly instructions respectively. You did a profile run of this program and found that on average, Branch1 is taken once in 100 iterations of the “for loop”.
Your boss suggests that you perform one of the following two transformations to speed up the above code: (1) Loop unrolling with an unrolling factor of 2. (2) Trace scheduling.
Which one of these would be more effective and why? Show the code with the more effective transformation applied. If you use trace scheduling, then include any repair code and branches into and out of it. Assume that only the values of c and i may need repair. Assume that registers R10 and higher are free for your use.
Solution:
You should perform trace scheduling. Loop unrolling would increase the code size significantly because of the huge else statement. Further, because of the if-else statement, loop unrolling will not provide any additional longer straight-line code snippet for the compiler to schedule. On the other hand, trace scheduling will be able to combine the “if” and “join” parts of the code together to provide a longer fragment of straight-line code. The large else part is moved out in repair code.
```
Init:   MOV.D R5, R0        // i = 0
Trace:  MOV.D R10, R6       // Save the old value of c
        ....                // Code I
        ....                // Code III
        BNEZ R10, Repair
Loop:   DADDI R5, R5, #1    // i++
        DSUBI R7, R5, #100
        BNEZ R7, Trace      // loop while i != 100
        J Done
Repair: MOV.D R6, R10
        ....                // Code II
        ....                // Code III
        J Loop
```
The above trace can be further increased by duplicating the loop index manipulation in the trace and repair parts. No points were taken off if this was not done.
Grading:
2 points for choosing/rejecting each option. 3 points for the trace (1 point for saving the old value of c, 1 point for combining code I and code III in the trace, 1 point for the correct branch to repair code).
3 points for the repair code (1 point for restoring c, 1 point for combining code II and III, 1 point for the jump back to the trace).
2 points for a fully correct answer.
If you answered Loop Unrolling, then you will be graded out of 4 for the unrolled code. 1 point for the unrolled loop body, 1 point for correct register usage, 1 point for correct branch index manipulation, and 1 point for a fully correct unrolled loop.
Problem 3- GRADUATE STUDENTS PROBLEM [8 points]
The example on page H-30 of the textbook uses a speculative load instruction to move a load above its guarding branch instruction. Read appendix H in the text for this problem and apply the concepts to the following code:
```
instr.1 ; arbitrary instruction
instr.2 ; next instruction in block
... ; intervening instructions
BEQZ R1, null ; check for null pointer
L.D F2, 0(R1) ; load using pointer
ADD.D F4, F0, F2 ; dependent ADD.D
... ;
null: ... ; handle null pointer
```
Part A. [4 points]
Write the above code using a speculative load (sL.D) and a speculation check instruction (SPECCK) to preserve exception behavior. Where should the load instruction move to best hide its potentially long latency?
**Solution:** The speculative load instruction defers the hardware response to a memory access fault if one occurs. In combination with the speculation check instruction, this allows the load to be moved above the branch. Because the load may have a long latency, it should be moved as early in the program as possible, in this case to the position of the first instruction in the basic block. If the speculation check finds no deferred exceptions, computation can proceed.
```
sL.D F2, 0(R1)
instr. 1
instr. 2
...
BEQZ R1, null
SPECCK 0(R1) ; check for exception deferred by sL.D on 0(R1)
ADD.D F4, F0, F2
...
null: ...
```
Grading: 2 points for the order of the instructions and 2 points for the explanation of where to move the load.
Part B. [4 points]
Assume a speculation check instruction that branches to the recovery code. Assume that the speculative load instruction defers both terminating and non-terminating exceptions. Write the above code speculating on both the load and the dependent add. Use a speculative load, a non-speculative add, a check instruction, and the block of recovery code. How should the speculated load and add be scheduled with respect to each other?
Solution: Potentially, this problem will have several different solutions. Only one is provided here. Please note that points will not be taken off if you have a different but correct solution. Any solution that fittingly recovers the LD and ADD instructions if the SPECCK check fails will be granted full credit.
With a speculation check instruction that can branch to the recovery code, instructions dependent on the load can also be speculated. Now, if the load fails because of an exception for high latency (e.g., page fault), rather than one that is a fatal error (e.g., a memory protection access violation), the speculated use instruction can take as an operand an incorrect value from the register that is the target of the delayed load. The speculation check instruction can distinguish these types of exceptions, terminating the program in the event of a protection violation and branching to recovery code for the case of a page fault, which will yield correct load behavior given sufficient time.
```
sL.D F2, 0(R1)
instr. 1
instr. 2
...
ADD.D F4, F0, F2 ; ADD.D speculated far from load for latency
BEQZ R1, null
SPECCK 0(R1), recover ; check for exception deferred by sL.D on 0(R1) and branch to “recover” on exception
back: ...
... ; etc.
recover: L.D F2, 0(R1)
ADD.D F4, F0, F2
JUMP back ; return to original path
...
null: ...
```
Grading: 1 point for rewriting the instructions correctly, 2 points for the recovery code, 1 point for the explanation.
Note: Although repair code would be needed in the “null” section of the code for correct behavior (the values of F2 and F4 need to be restored), the question explicitly does not ask for it. Therefore, there is no penalty for writing or not writing the “null” section’s repair code.
---
Validation of reactive embedded systems against specification requirements
Joanna Strug, Stanisław Deniziak, Krzysztof Sapiecha∗
Cracow University of Technology, Warszawska 24, 31-155 Kraków, Poland
Abstract
In this paper a method of automatic generation of test scenarios for verification of specification requirements (temporal and functional) for reactive embedded systems is presented.
1. Introduction
The aim of design-validation is to check whether or not the specification requirements (functional and temporal) imposed on a system are met [1,2]. Most recently proposed design-validation techniques use formal verification methods, like model checking [1,3] and theorem proving [4]. These methods typically use automata-based models [4] of a system and temporal logic (TL) [5] to express the required temporal properties. However, the temporal properties that can be expressed in this way are limited to safety and liveness [6,3]. Some extensions of TL can capture time properties more precisely. Timed CTL [1,2] introduces time-bounded versions of the temporal operators. Real-time logic (RTL) [6] includes special predicates which relate events that happen in a system to the time at which they occur. The duration calculus [7] adds operators to access intervals. On the basis of these extensions it is possible to verify certain design properties, including temporal requirements.
In [8] two proof methodologies are proposed, corresponding to two specification styles for real-time properties. A system is modeled as a real-time transition system. Time properties are expressed in time-bounded logic or by explicit reference to the current time through a special clock variable. A deductive proof is then conducted to show consistency with the specification.
The formal verification methods are limited to small and medium size designs or are restricted to some subproblems. For large systems, simulation-based validation techniques are still the most popular [9]. The main problem here is to develop a set of input stimuli giving high validation accuracy. Some efficient methods of automatic generation of test scenarios to validate a system against functional requirements have already been developed [10,11]. However, there are no comparably satisfactory methods for temporal requirements, and no efficient methods for validation of both types of specification requirements.

∗ Corresponding author: e-mail address: pesapiec@cyf-kr.edu.pl
The aim of this paper is to present a method of automatic generation of test scenarios for validation of embedded systems [12] against temporal and functional requirements. Test scenarios are derived from the system requirements and are then applied to a model or a prototype of the system. Each test scenario consists of a verification sequence (a sequence of stimuli to be applied to the system inputs) and the expected responses, which are compared with those generated by the system during simulation. The main features of the proposed method are described in Sections 2 and 3. Section 4 contains a short comparison, considerations and conclusions.
2. Embedded system model
It is assumed that a designer starts by gathering functional and temporal requirements (temporal constraints) for a system. These requirements are usually described in textual form, but it is assumed that each requirement has a unique identifier. Manual translation to a more formal specification (e.g. SCR [13]) is then performed, and a suitable model of the functional requirements is automatically developed (as described in [10]).
A model of an embedded system $S$ is defined as a couple $S = (T, G)$, where $T$ is a set of tasks\(^1\) that should be executed by the system and $G = (V, E)$ is a directed graph representing its functional requirements. Each functional requirement or its separated part (if any) and each task have unique identifiers denoted by $RId$ and $TId$ correspondingly. Execution time of a task is fixed and data-independent. $V$ is a finite set of nodes. Nodes belonging to $V$ correspond to stable states of the system. Values of state variables determine the state of $S$. A single node denoted by $v_0$ distinguished from $V$ represents initial state of the system. $E$ is a set of edges. Each edge belonging to $E$ represents transition between a given pair of nodes. Edges are labeled with stimuli, responses (if any is generated), requirements and tasks identifiers.
Graph $G$ can be a cyclic or an acyclic one. It depends on the system. Multiple edges are also enabled (in order to represent different causes of transition between the same states).
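As an illustration of this model, the sketch below encodes $G$ as a list of labeled edges. The field names are hypothetical and chosen only to mirror the definitions above; the example edge is the SIS transition for requirement R1.

```python
from dataclasses import dataclass

@dataclass(frozen=True)
class Edge:
    """Transition of the requirements graph G (hypothetical encoding)."""
    src: int                 # source node (stable state)
    dst: int                 # target node (stable state)
    stimulus: str            # input event triggering the transition
    response: str = ""       # generated response, if any
    rids: tuple = ()         # requirement identifiers (RId) on this edge
    tids: tuple = ()         # task identifiers (TId) executed

@dataclass
class SystemModel:
    """S = (T, G): task set plus functional-requirements graph."""
    tasks: set
    nodes: set
    edges: list
    initial: int = 0         # v0, the initial state

# One SIS transition: pressure falls below LOW, valve opens (task T1, R1).
sis = SystemModel(
    tasks={"T1", "T2", "T3", "T4", "T5", "T6"},
    nodes={1, 2, 3, 4, 5},
    edges=[Edge(1, 2, "WaterPres < LOW", "SafetyInjection = On",
                rids=("R1",), tids=("T1",))],
    initial=1,
)
```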
Safety Injection System (SIS) for nuclear reactor [10] serves as an example for our method. Functional requirements for the system are given in Table 1. Each of the requirements is supplemented with identifiers of tasks which are
\(^1\)Tasks are extracted from a task graph [14,15].
executed to meet the given requirement or its part. On this basis a model of the system is developed (Figure 1). The state variables and their admissible values are: $WP$ ($P$ – permitted water pressure, $TL$ – water pressure below the threshold $LOW$), $Overridden$ ($T$ – if $Block$ has been asserted and $F$ – if $Reset$ has been asserted), $TrefCnt$ (asserts counting of time, may have the values of 0, 1 and 2) and $SJ$ ($Off$ – if the valve is closed and $On$ – if the valve is opened).
Table 1. Functional requirements for SIS
<table>
<thead>
<tr>
<th>RId</th>
<th>Description</th>
</tr>
</thead>
<tbody>
<tr>
<td>R1</td>
<td>The system shall assert $SafetyInjection$ when $WaterPres$ falls below $LOW$ (opening a valve $T1$).</td>
</tr>
<tr>
<td>R2</td>
<td>(a) The system shall be blocked (blocking $T3$) in response to $Block$ being asserted while $Reset$ is not asserted and $WaterPres$ is below $LOW$, and shall remain blocked until either (c) $Reset$ is asserted or (b) $WaterPres$ crosses $LOW$ from a larger to a smaller value (unblocking $T4$ and setting $TrefCnt$ to zero $T6$).</td>
</tr>
<tr>
<td>R3</td>
<td>Once $SafetyInjection$ is asserted, it shall remain asserted until the system becomes blocked or $WaterPres$ becomes greater than or equal to $LOW$ (closing a valve $T2$).</td>
</tr>
<tr>
<td>R4</td>
<td>When the system is blocked and $WaterPres$ is less than $LOW$, the system shall (a) start counting (increasing $TrefCnt$ $T5$) and (b) automatically unblock ($T4$ and $T6$) itself after the third timing reference event is sensed on input $Tref$.</td>
</tr>
</tbody>
</table>
Fig. 1. Functional requirements graph for SIS
It is typical for reactive systems that they interact continuously with the environment in which they operate. Hence, constraints imposed on the system by the environment (external requirements) must be considered. These constraints include input signals frequency, time separation between signals occurrences on different inputs or inputs and outputs, etc [14].
There may also exist time constraints expressing a desired time relation between a system and its environment and between different tasks (some tasks or devices may require specific timing). In order to represent these constraints (internal requirements), minimal and maximal delays may be introduced. They define the amount of time allowed for execution of particular task(s). The minimal time delay determines the earliest moment at which the execution of the specified task(s) may be completed, whereas the maximal time delay determines the time by which it must be completed. A temporal constraint is violated if the execution of the task(s) is completed too early or too late. A unique Constraint Identifier (CId) is associated with each temporal requirement.
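A direct rendering of this rule (an assumed helper with hypothetical names): execution completing at time t violates the constraint (Tmin, Tmax) when t < Tmin or t > Tmax.

```python
def violates(t_completion, t_min, t_max):
    """True if task execution completed too early or too late."""
    return t_completion < t_min or t_completion > t_max

assert violates(1.2, 0, 1)        # e.g. valve opened too late for CId 1
assert not violates(0.8, 0, 1)    # within the allowed window
```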
Temporal requirements imposed on SIS are given in Table 2, where @A denotes A as an initial event for execution of tasks, ' and '' indicate paths associated with different tasks, and () and {} denote a constraint associated with a particular path and with marked subsets of nodes respectively. The requirement described in the second row of Table 2 belongs to the requirements associated with a group of paths. The remaining requirements are associated with particular tasks.
Table 2. Temporal requirements for SIS
<table>
<thead>
<tr>
<th>CId</th>
<th>Tmin</th>
<th>Tmax</th>
<th>Description</th>
<th>Notation</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td><td>0</td><td>1</td>
<td>Time required for opening a valve (SJ=On) when water pressure falls below the allowed threshold (@WaterPres < LOW).</td>
<td>(1,2)</td>
</tr>
<tr>
<td>2</td><td>0</td><td>0.5</td>
<td>Time required for transition to a proper state (WP=P) when water pressure rises above the allowed threshold (@WaterPres >= LOW).</td>
<td>{2,3,4,5} => {1}</td>
</tr>
<tr>
<td>3</td><td>0</td><td>2</td>
<td>Time required for manual unblocking of the system and opening of the valve (@Reset=On, SJ=On) when water pressure is lower than the allowed threshold (WP=TL).</td>
<td>(3,2)', (4,2), (5,2)</td>
</tr>
<tr>
<td>4</td><td>0</td><td>1.5</td>
<td>Time required for closing a valve (SJ=Off) when Block is asserted (@Block=On) and water pressure is lower than the threshold (WP=TL).</td>
<td>(2,3)</td>
</tr>
<tr>
<td>5</td><td>0</td><td>3.0</td>
<td>Time required for automatic unblocking and opening of a valve (SJ=On) when the system has been blocked and three timing references have been sensed on input Tref.</td>
<td>(3,2)''</td>
</tr>
</tbody>
</table>
3. Verification sequences
The solution applied here is based on the concept of critical paths. A path $S_{ij}$ from node $v_i$ to node $v_j$ in graph $G$ is defined as a sequence of edges $<e_{i,i+1}, e_{i+1,i+2}, ..., e_{j-1,j}>$, where $e_{k,k+1}$ belonging to $E$ denotes an edge between nodes $v_k$, $v_{k+1}$ belonging to $V$. Each path to which a temporal constraint is associated is called a critical path [16,17].
Generating verification sequences for all critical paths results in exhaustive verification of all temporal constraints, so reductions are necessary. In our approach a reduced set of critical paths is selected and then evaluated to check whether the paths also cover all functional requirements (the paths should include edges labeled with all $RId$). The set is then updated with one-edge paths for any missing $RId$.
Each critical path determines a subset of tasks, which should be executed in a time given by a temporal constraint. A constraint may be imposed on a path representing given (in specification) subset of tasks. This situation allows existence of multiple paths (between different pairs of nodes), but all of them represent the same subset of tasks. An example of such a constraint is presented in Figure 2. For task \( T1 \) three critical paths (\( <e_{1,2}> \), \( <e_{3,4}> \) and \( <e_{5,4}> \)) are determined.
A constraint may be also imposed on a transition between given states of the system (referred to as source and target nodes respectively). Hence all paths between these nodes are critical ones and may represent different subsets of tasks. Such a situation is shown in Figure 3. Paths \( <e_{2,3},e_{3,4}> \), \( <e_{2,5},e_{5,4}> \) and \( <e_{2,4}> \) are all critical ones.
Design-validation based on exhaustive verification sequences is always valid. On the contrary, design-validation based on reduced verification sequences might lead to optimistic conclusions.
The goal of our work is to generate a reduced but still comprehensive set of test scenarios for a system. To this aim some assumptions are taken. These are the following:
1. each temporal constraint requires at least one verification sequence to be verified, but all tasks associated with any constraint have to be checked,
2. execution time of each of the tasks belonging to $T$ is fixed and does not depend on the way the task is started. This assumption does not hold for general purpose systems but usually holds for embedded ones. However, it is not true for tasks whose execution time is data dependent; then the validation results are only approximate, but they can be improved by assuming WCET (Worst Case Execution Time) for maximum delays and/or BCET (Best Case Execution Time) for minimal delays.
On the basis of these assumptions, the number of paths to be generated and verified can be considerably limited. However, for some systems this might be too optimistic. Temporal correctness of execution of tasks is checked rather than of a particular critical path. Nevertheless, there is at least one verification sequence covering each temporal constraint in the generated set.
The selection of critical paths to be generated and combined is based on comparison of subsets of tasks associated with these paths. Let two critical paths $P$ and $P^*$, and two sets of tasks $T$ and $T^*$, associated with $P$ and $P^*$ respectively, be given. Path $P$ covers $P^*$, if $T^*$ is a subset of $T$.
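The covering relation is just subset inclusion on the associated task sets, so the reduction step can be sketched as follows (hypothetical function names, not the paper's implementation):

```python
def covers(tasks_p, tasks_q):
    """Path P covers P* when the task set of P* is a subset of P's."""
    return tasks_q <= tasks_p

def reduce_paths(paths):
    """Keep a minimal set: drop paths covered by an already-kept path.

    `paths` maps a path identifier to its associated task set.
    """
    kept = {}
    # Larger task sets first, so covering paths are kept before covered ones.
    for pid, tasks in sorted(paths.items(), key=lambda kv: -len(kv[1])):
        if not any(covers(kept_tasks, tasks) for kept_tasks in kept.values()):
            kept[pid] = tasks
    return kept

# Figure 3's example: <e2,3 e3,4> runs {T1, T2}; <e2,5 e5,4> runs {T1, T2}
# as well and is dropped as covered; <e2,4> runs {T3} and is kept.
print(reduce_paths({"P1": {"T1", "T2"}, "P2": {"T1", "T2"}, "P3": {"T3"}}))
```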
In Figure 4 draft and main procedures of the algorithm of generation of test scenarios are presented.
```
test_scenarios_generation()
{
    for (each constraint CId) do
        determine source and target nodes;
    for (each CId) do
        if (constraint CId imposed on tasks) then
            choose a random pair of nodes;
            generate and save a path;
        else
            generate and save path(s);
    combine critical paths;
    evaluate the set of paths;
    if (not all RId covered)
        update ST;
    save test scenarios;
}
```
Fig. 4. An algorithm of generation of test scenarios
At first source and target nodes for possible (not yet generated) paths are determined. Next, for each constraint paths are selected and generated according to the following rules:
1. If a constraint is imposed on a subset of tasks then verification of any path containing these tasks is sufficient (they cover each other). The choice of the path to be generated is not of primary importance and may be random. For example, path $<e_{1,2}>$ in Figure 2 may be chosen. The remaining paths associated with the constraint are rejected. Reductions performed at this step are the most effective, because a number of paths may be significantly limited without generating them.
2. If only source and target nodes are specified, paths are generated and associated with them subsets of tasks are determined and compared (covered paths are rejected). The minimal subset of paths associated with a given constraint consists of paths representing execution of different subsets of tasks. In Figure 3 path $<e_{2,4}>$ representing task $T3$ and $<e_{2,3},e_{3,4}>$ representing tasks $T1$ and $T2$ belong to the minimal set for the constraint. Path $<e_{2,5},e_{5,4}>$ may be dropped as a covered one.
The execution of this step produces a reduced set of critical paths. It is the smallest set that includes critical paths representing all different subsets of tasks.
Two path generation algorithms are used. The first one searches for all possible paths between specified nodes. The second one makes it possible to determine the edges belonging to a path when the tasks to be executed are specified. Both algorithms use similar techniques. During the generation of critical paths a Paths Tree ($PT$) is built and accepted nodes are added to it. The acceptance functions prevent exploring already visited nodes. Combination of the generated paths allows for further reductions. Minimal coverage of the generated paths is reached in a similar way as in [10], i.e. a Scenarios Tree ($ST$) is built and paths are added to it. In the next step the set is evaluated to determine whether all functional requirements are covered by paths from this set. This relies on checking whether all $RId$ are represented by labels of edges in $ST$. If not all $RId$ have been found, a procedure similar to that in [10] is started: it explores the state space of $G$ and adds one-edge paths labeled with the missing $RId$ to $ST$. The algorithm of test scenario generation ends after saving the stimuli and responses labeling the edges of $ST$.
In Table 3 the final result of applying the algorithm to SIS is given (${}^{(PId)}S_{ij}^{(CId)}$ denotes a critical path; the Path Identifier ($PId$) distinguishes paths generated for the time constraint $CId$). Initially ten critical paths were found. Four of them were rejected during the generation process and another one during combination of the remaining paths. Because these paths did not cover the $R2c$ requirement, one extra edge was added to satisfy it. Finally, a set of four test scenarios was produced
(Table 3). The experimentally calculated verification quality $Q_v$² [17] for the verification sequences from this set equals 1. This means that all errors, temporal as well as functional, randomly injected into the model were correctly detected.
<table>
<thead>
<tr>
<th>No</th>
<th>Test scenarios</th>
<th>${}^{(PId)}S_{s,t}^{(CId)}$</th>
</tr>
</thead>
<tbody>
<tr>
<td>1</td>
<td>WaterPres < LOW / SafetyInjection = On<br>WaterPres >= LOW / SafetyInjection = Off</td>
<td>${}^{1}S_{1,2}^{(1)}$, ${}^{2}S_{2,1}^{(2)}$</td>
</tr>
<tr>
<td>2</td>
<td>WaterPres < LOW / SafetyInjection = On<br>Block = On / SafetyInjection = Off<br>Tref /<br>WaterPres >= LOW /</td>
<td>${}^{1}S_{1,2}^{(1)}$, ${}^{1}S_{2,3}^{(4)}$, ${}^{3}S_{4,1}^{(2)}$</td>
</tr>
<tr>
<td>3</td>
<td>WaterPres < LOW / SafetyInjection = On<br>Block = On / SafetyInjection = Off<br>Reset = On / SafetyInjection = On</td>
<td>${}^{1}S_{1,2}^{(1)}$, ${}^{1}S_{2,3}^{(4)}$, ${}^{1}S_{3,2}^{(3)}$</td>
</tr>
<tr>
<td>4</td>
<td>WaterPres < LOW / SafetyInjection = On<br>Block = On / SafetyInjection = Off<br>Tref /<br>Tref / SafetyInjection = On</td>
<td>${}^{1}S_{1,2}^{(1)}$, ${}^{1}S_{2,3}^{(4)}$, ${}^{1}S_{5,2}^{(5)}$</td>
</tr>
</tbody>
</table>
The exhaustive set of test scenarios used for experimental evaluation of the reduced one consists of eight scenarios. The total length of all verification sequences belonging to the exhaustive set equals 31 stimuli, whereas the length of verification sequences in the reduced set is equal to only 14 stimuli.
4. Conclusions
An embedded system designer may currently choose one of the following approaches to verification of specification requirements: time-budget based [14], formal [1-4,8] and simulation-based verification [10,11].
Some knowledge about time budgets for the execution of tasks can help the designer keep the correctness of the system under control throughout the whole design flow, though it does not guarantee that no design error will occur. Moreover, accurate calculation of true budgets is usually not easy.
Formal verification techniques require the system specification requirements to be described in the form of logical expressions (formulas). It is assumed that the PRES+ model [1,2] is generated from an implementation of a system and that it reflects exactly the time relations in the real system. Such a model may represent data and control flow, as well as concurrency. This is an advantage with respect to other approaches. However, to start the verification one requires access to the exact execution times of tasks, so it can be conducted only very late in the design flow.

² Verification quality ($Q_v$) is defined as follows: $Q_v = 1 - C_0/C$, where $C_0$ is the number of optimistic verification conclusions (GO instead of NOGO), and $C$ is the total number of verifications [17].
Test scenarios generation for simulation-based verification does not require any time information and can be performed very early in the design process. Test scenarios can be reused for validation of the system (or its model) on multiple levels of design description and multiple design alternatives.
In this paper a simulation-based method for validating embedded systems against specification requirements is presented. Test scenarios obtained with the help of the method can be used to verify both functional and temporal requirements. The method is easy to use in practice, and the verification sequences are short. Automating test scenario generation makes the method fast and flexible.
Our solution is inspired by the method presented in [10], which addresses only the problem of functional validation. We extended this method with the possibility of verifying temporal requirements. Distinguishing tasks gives us insight into the internal behavior of the system and helps select the appropriate paths to be verified.
Although the method should usually provide good validation results, there are some problems to keep in mind. The reductions performed to obtain the sets of paths and test scenarios assume the rejection of covered paths. In some situations (when a covered path represents fewer tasks than the covering one) this may lead to an undetected violation of a temporal constraint, because the covering path can compensate for the time. It must also be taken into consideration that if the execution time of each task is not constant, then the verification sequences are only approximate.
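As a minimal sketch of the covered-path reduction discussed above (the edge-set representation of paths is hypothetical, not the paper's data structure):

```python
# Covered-path reduction: a path is dropped when another kept path
# exercises a superset of its edges. Paths are modeled as frozensets
# of edge labels; real paths would also carry task information.
def reduce_paths(paths):
    kept = []
    for p in sorted(paths, key=len, reverse=True):
        if not any(p <= q for q in kept):   # p covered by a kept path?
            kept.append(p)
    return kept

paths = [frozenset({"S1-S2"}),                        # covered
         frozenset({"S1-S2", "S2-S3", "S3-S2"})]      # covering
print(reduce_paths(paths))   # only the covering path survives
# Caveat from the text: if the covered path contains fewer tasks,
# checking only the covering path can mask a temporal violation.
```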
The Better Comparison between PHP, Python-web & Node.js
Harkirat Brar *1, TaranPreet Kaur *2, Yash Rajoria *3
*1Teacher, CSE Department, MIMIT, Malout, Punjab, India
*2Student, CSE Department, MIMIT, Malout, Punjab, India
*3Student, CSE Department, MIMIT, Malout, Punjab, India
ABSTRACT
Large scale, high concurrency, and a vast quantity of data are important trends for the new age of websites. Node.js has become popular and flourishing for building data-intensive web applications. To analyze and examine the performance of Node.js, Python-Web, and PHP, we used benchmark tests and scenario tests. The test results yield some valuable performance data, showing that PHP and Python-Web handle far fewer requests than Node.js in a given time. In conclusion, our results demonstrate that Node.js is quite lightweight and effective, an ideal fit for I/O-intensive websites among the three, while PHP is only fitting for small and middle-scale applications, and Python-Web is developer-friendly and good for large web structures. To the best of our knowledge, this is the first work to judge these Web programming technologies with both objective methodical tests (benchmark) and realistic user-behavior tests (scenario), especially taking Node.js as the main topic.
Keywords: Development; Performance Evaluation; Node.js; Benchmark Test; Scenario Test
I. INTRODUCTION
On the Web today, many sites face new obstacles, such as the problem of multiuser requests and high concurrency. The dynamic scripting language JavaScript has become enormously attractive for clients and is widely used in Web development. Node.js stands for one new technology in JavaScript. Node.js is a platform built on Chrome's JavaScript runtime for easily building fast, scalable network applications [1]. Node.js uses an event-driven, non-blocking I/O model that makes it lightweight and effective, ideal for data-intensive real-time applications that run across distributed devices [1]. Node.js popularity reviews conducted by the official website indicate that average downloads are over 35,000 since version 0.10 was released in March 2013. Corporations are quickly realizing the importance of Node.js, and five major PAAS providers have supported it [2]. Nowadays, JavaScript is the most popular language on GitHub [3]. Regarding the evaluation of Web technologies' performance, many researchers have done related work, but our work differs in two features. Firstly, we consider both objective methodical tests (benchmark) and realistic user-behavior tests (scenario), and use the innovative commercial testing tool LoadRunner. Secondly, we are mainly concerned with the performance of holding concurrent users to meet the demand of IO-intensive real-time websites.
This paper concentrates on the impact on Web performance of three separate Web technologies: Node.js, PHP, and Python-Web. Security and scalability issues are beyond the scope of the paper. We mainly use benchmark tests and scenario tests. A universal method for evaluating Web development techniques based on performance comparison is proposed in the paper, which can be used to evaluate any new Web technology. The main contributions of this paper are listed as follows. (1) We consider the new Web technology Node.js in our experiment and examine its results. Then we compare it with PHP and Python-Web, judging in which situations each ought to be used.
(2) Through benchmark tests and scenario tests, we can judge performance from both objective systematic experiments (benchmark) and realistic user-behavior tests (scenario). There is often a dual impact on Web server performance, from the computation involved and from the number of users. Our research has taken each of these effects into account.
The rest of this paper is organized as follows. Section II details our methodology and the experimental design of the tests. Section III presents and examines the results of all tests. Section IV concludes the paper with a summary of our study and a future direction.
II. METHODOLOGY
The experiment assessed the results from two respects: one from the server, doing benchmark tests; the other from the client, simulating the behavior of users in scenario tests. In all the tests, we follow a one-factor-at-a-time experimental design [16] to ensure the exactness and effectiveness of the tests.
A. Benchmark Test
1) Benchmark Test Methodology
According to the one-factor-at-a-time experimental design, we make three fundamental tests: "THE TEST", "Use of Sorting", and "Select Operation of DB". The "THE TEST" module is a basic module that builds a simple Web server, outputs "hello world", and distinguishes the differences among the three technologies. The "Use of Sorting" module calculates some values of Sorting to assess performance under compute-intensive tests. The "Select Operation of DB" module compares performance by querying some values from the DB in an IO-intensive situation. In all benchmark tests, we keep the number of requests at 10000 and vary the number of users from 10 to 1000. TABLE I summarizes the factors in our experiments.
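The sweep described above can be pictured as a small driver around ab (ApacheBench, modified later in this section); the endpoint URL is a hypothetical stand-in:

```python
# Sketch of the benchmark sweep: 10000 requests per run while the
# number of concurrent users varies from 10 to 1000. ab's -n flag
# sets the total request count and -c the concurrency level.
import subprocess

URL = "http://localhost:8080/the_test"   # hypothetical module endpoint
for users in (10, 50, 100, 200, 500, 1000):
    subprocess.run(["ab", "-n", "10000", "-c", str(users), URL],
                   check=True)
```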
2) Benchmark Test Configuration
In the process of the test, we found the results of the same module to be similar. For example, we chose PHP to make three tests with 10000 requests and 100 users. With the number of calls going from 0 to 10000, the results of the three tests are as shown in Fig. 2. The response time does not vary much across the three tests as the number of concurrent requests increases. The average times of the three tests are 0.311 ms, 0.305 ms, and 0.319 ms, respectively. So we use one test result to evaluate performance in our experiment. In addition, we reboot the server before every test to make the whole experiment fairer.
Fig. 2. Three tests of PHP under user 100
In the initial tests, we found some interruptions when concurrent requests increased to 200 in Python-Web. Thus, we modified the code of Ab, replacing some code at line 1449 with the code shown in TABLE II.
TABLE II. THE MODIFIED CODE OF AB
<table>
<tbody>
<tr>
<td><code>bad++;<br>close_connection(c);<br>return;</code></td>
</tr>
</tbody>
</table>
Meanwhile, we changed the Linux kernel parameters so that the system would not treat the high-concurrency load as a SYN flood attack. The modified parameters are shown in TABLE III.
TABLE III. THE MODIFIED CODE OF LINUX KERNEL PARAMETERS
<table>
<tbody>
<tr><td>net.ipv4.conf.default.rp_filter = 1</td></tr>
<tr><td>net.ipv4.conf.all.rp_filter = 1</td></tr>
<tr><td>net.ipv4.tcp_syncookies = 0</td></tr>
<tr><td>net.ipv6.conf.all.disable_ipv6 = 1</td></tr>
<tr><td>net.ipv4.tcp_max_syn_backlog = 819200</td></tr>
<tr><td>net.ipv4.tcp_synack_retries = 1</td></tr>
<tr><td>net.ipv4.tcp_max_tw_buckets = 819200</td></tr>
<tr><td>net.ipv4.tcp_tw_reuse = 1</td></tr>
<tr><td>net.ipv4.tcp_tw_recycle = 1</td></tr>
</tbody>
</table>
We also tuned Apache to ensure it could deal with as many requests as possible: we modified "MaxClients" to 5000, "ServerLimit" to 5000, and "MaxRequestsPerChild" to 0 in the prefork mode.
B. Scenario Test
1) Scenario Test Methodology
The scenario test aims to simulate realistic user behavior. In our analysis, we divide the scenario tests into two parts: a "Login" situation and an "Encryption" situation. The "Login" situation mainly makes concurrent users log in at the same time, and then analyzes the performance of the three Web technologies in a realistic IO-intensive situation. In the test, we choose 500 users as the rendezvous point because errors appear when users rise to 500; that is, the pressure is beyond the highest load the Web server can stand. We therefore use the results obtained at 500 users. In the test, we collect statistics on throughput, average transaction response time, and "hits per second" to compare the performance of the three technologies. Throughput displays the volume of data, in bytes, that the Vusers received from the server in any given second. "Hits per second" displays the number of hits made on the Web server by Vusers during each second of the load test. The "Encryption" situation simulates encrypting users' passwords when they log in. It mainly compares performance in a simple, realistic compute-intensive situation. We choose the same rendezvous as in the "Login" scenario.
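As a small illustration of the two metrics just defined (the log format is a hypothetical stand-in, not LoadRunner's):

```python
# Both metrics are per-second aggregates over request records of the
# form (timestamp_in_seconds, bytes_received).
from collections import defaultdict

def per_second_metrics(records):
    hits = defaultdict(int)        # "hits per second"
    throughput = defaultdict(int)  # bytes received per second
    for t, nbytes in records:
        hits[int(t)] += 1
        throughput[int(t)] += nbytes
    return dict(hits), dict(throughput)

hits, tput = per_second_metrics([(0.2, 512), (0.7, 1024), (1.1, 512)])
print(hits, tput)   # {0: 2, 1: 1} {0: 1536, 1: 512}
```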
2) Scenario Test Configuration
To be fair in our research, we make the same arrangements as in the benchmark tests, rebooting the server before every scenario test.
III. MODELING AND ANALYSIS
A. Outcomes and Analyses of Benchmark Tests
1) "THE TEST" module
With the increase in users, the performance of the three technologies shows a trend of increasing and then decreasing while the number of requests is kept at 10000. As shown in FIG.3 and FIG.4, the "mean requests per second" of Node.js is the highest, rising to 3703.5 per second when there are 100 users; meanwhile, its "mean time per request" is the lowest, at 0.27 ms. Then the "mean requests per second" slows down and maintains a steady state around 2700. As for Python-Web, the "mean requests per second" keeps stable at around 500, with a peak of 559.42, at which point the "mean time per request" is the shortest, 1.788 ms. PHP also peaks when users are 100, with a "mean requests per second" of 2977.54 and a shortest "mean time per request" of 0.336 ms. As users rise further, PHP's "mean requests per second" decreases to 200 and remains stable.


In short, the execution of Node.js is better than the two others at the same number of users. The performance of Node.js is two times that of PHP on the basic performance measurement and six to seven times that of Python-Web. In addition, the number of concurrent users Node.js can hold is far more than PHP, let alone Python-Web. So its performance is much better than PHP and Python-Web when there are lots of users.


Considering the "Use of Sorting (30)" test, all three technologies lose much performance: PHP's "mean requests per second" drops from 2000 to 2, Python-Web's from 600 to 3, and Node.js's from 2500 to 60. This phenomenon means the three Web technologies are all ill-suited to compute-intensive applications; however, Node.js still performs best among the three in that test.
TABLE IV. RESULTS FOR "Use of Sorting (10/20/30)"
<table>
<thead>
<tr>
<th>Web development technology</th>
</tr>
</thead>
<tbody>
<tr><td>Node.js</td></tr>
<tr><td>Python-Web</td></tr>
<tr><td>PHP</td></tr>
</tbody>
</table>
2) "Use of Sorting" module
FIG.5 and FIG.6 show the Web performance when estimating the tenth value of Sorting, with 10000 requests and the number of users rising. For Node.js, the "mean requests per second" reaches its highest value, up to 2777.72 per second, and the "mean time per request" is 0.36 ms when users are 100. In addition, the "mean requests per second" of Node.js stays between 2000 and 2800. The top requests per second in the "Use of Sorting (10)" tests decrease about 1.5 times compared with the same condition in the "THE TEST" benchmark tests. For Python-Web, this module gives results similar to the previous module, even slightly better, under the same conditions. PHP also reaches its top value when the number of users is 100, with a "mean requests per second" of 3127.98; there is not much difference between it and the "THE TEST" module. Because all the results are very similar to the "THE TEST" module under the same conditions, we choose other values of Sorting to validate the outcomes. We calculate the twentieth and thirtieth values of Sorting when users are 10. TABLE IV shows the results of these tests. The performance of the three Web technologies decreases to a different degree with the increase in the value of Sorting (see the discussion of the "Use of Sorting (30)" test above). According to the results above, we make several tests with Node.js to find performance differences across various numbers of concurrent users as the calculation rises. FIG.7 and FIG.8 show the results of "Use of Sorting (10/20/30)" as the users grow from 10 to 1000.

Fig. 7. Results for "Use of Sorting (10/20/30)": mean requests per second with Node.js

Fig. 8. Results for "Use of Sorting (10/20/30)": mean time per request with Node.js
Although there is a little variation between the results for various numbers of concurrent users at the same calculation, the overall trend keeps stable. The "mean requests per second" of Sorting (10) is between 2000 and 2800, Sorting (20) between 1300 and 2000, and Sorting (30) around 60. Meanwhile, the test of Sorting (30) is interrupted when users reach 500. From the above, the increase in calculation has a much greater effect than the increase in users. We draw a simple conclusion that Node.js is more adapted to IO-intensive applications than to compute-intensive applications, because compute-intensive applications do not exploit the advantages of Node.js.
3) “Select Operation of DB” module
To validate the conclusion to prove Node.js is adapted to IO-intensive applications, we design a "Select Operation of DB" module. FIG.9 and FIG.10 show the results of this module.

Fig. 9. Results for “Select Operation of DB” mean requests per second
The max "means requests per second" of Node.js is 3164.46, 20 times larger than Python-Web and 2 times larger than PHP. In expanding, the peak value of the "Select Operation of DB" module including Node.js doesn't have much difference from the "THE TEST" module. The "mean requests per second" of Python Web keep around 150 and supports stability as users rise. But to PHP, the “mean requests per second” slows down and the "meantime per request" is longer. It explains that Node.js is more suitable for IO-intensive applications among the three, while PHP applies to small-scale websites.
B. Outcomes and Judgments of Scenario Tests
To validate the results on benchmark tests, we choose two situations as follows.
1) "Login" situation
We choose a peak of 500 users for the tests in the "Login" situation. We mainly observe "hits per second", throughput, and average transaction response time as users go up. FIG.11 to FIG.14 exhibit the results of the "Login" situation. The horizontal axis represents the number of parallel users; the vertical axes represent hits per second, throughput, the throughput trend, and average transaction response time.
FIG.11 shows "hits per second" for different numbers of concurrent users. "Hits per second" measures the number of HTTP requests sent to the Web server by virtual users per second in performance tests; the higher it is, the greater the load on the Web server. It can be seen from FIG.11 that "hits per second" drops to a large degree when users reach 150. That is to say, the system cannot hold so many users.
In addition, throughput in FIG.13 drops when users reach 150. Meanwhile, the average transaction response time in FIG.14 increases greatly, meaning the load on the system is at a peak. This leads to a higher response time as the number of users grows, and some user requests time out; hence "hits per second" in FIG.11 decreases.
The throughput of Node.js is about 5,000,000 byte/s in FIG.12, whereas the throughput of PHP and Python-Web are both below 1,000,000 byte/s. More detailed data in FIG.13 show that the throughput of Node.js is between 4,000,000 and 5,000,000 byte/s. The throughput of PHP stays at 500,000 byte/s when there are fewer than 150 users and is similar to Python-Web after that, keeping to 70,000 byte/s. In general, the throughput of Node.js is far more than PHP and Python-Web, making it more adapted to IO-intensive requests. On the other hand, Node.js is more suited to the concurrent situation, while PHP applies to middle and small-scale websites.
The average transaction response times of PHP and Node.js are very close within the first 150 users; indeed, PHP's time is even less than Node.js's in FIG.14, and both are below 4 s. After that, the response time of PHP grows much larger than Node.js's as users rise. Node.js maintains a steady growth trend because its rate of dealing with requests is higher than PHP's as concurrent users go up. Thus Node.js takes a better place when user requests increase.
2) “Encryption” scenario
We simulate a real situation that encrypts the password, on the basis of the "Login" situation, to validate the performance of the three technologies in a compute-intensive situation. The experiments again go up to at most 500 users.
FIG.15 shows the results for "hits per second". It can be seen from the figure that "hits per second" drops to a large degree when users reach 50. The drop occurs earlier than in the "Login" scenario because the new "Encryption" situation is more complex and reduces the performance of PHP; the load the server can undertake reaches its limit when users are up to 50. It can also be seen in FIG.17 that the trend of throughput mirrors "hits per second" for PHP.
In FIG.16, the throughput of Node.js is 4,000,000 byte/s, down 1,000,000 byte/s compared with FIG.12. From the trend, the throughput of Node.js stays between 3,000,000 byte/s and 4,000,000 byte/s, while falling steadily as users grow. PHP behaves as in the "Login" scenario when users are fewer than 50, with its throughput staying at 500,000 byte/s; then it drops sharply to less than 1,000 byte/s. Python-Web behaves as it did in the "Login" situation, staying stable but with a slight decline, at around 50,000 byte/s. In short, the three technologies are not adapted to compute-intensive applications, and the "Encryption" situation makes PHP the least effective among the three. Node.js is more suitable for IO-intensive applications than for compute-intensive sites.
The average transaction response time is shown in FIG.18, and PHP's time is very unstable as users grow. On one hand, this is due to the effect of the "Encryption" situation; on the other hand, it is due to PHP's multi-process mode. Nevertheless, the response times of PHP and Node.js show a reasonable, slowly rising trend with increasing users.
C. The Universal Method
To sum up, PHP applies to small-scale, non-compute-intensive sites, while Node.js is more proper for IO-intensive websites with many users. Although Python-Web is suitable for neither compute-intensive nor IO-intensive websites, it may be the best choice for developers building large websites, since famous brands like Google use it. Node.js can respond quickly to IO requests thanks to its mechanism: it uses an event model based on asynchronous IO rather than multiple processes, responding quickly.
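The event-loop argument can be illustrated with Python's asyncio as an analogy (this is not Node.js code; the handlers and workload sizes are hypothetical):

```python
# A single-threaded event loop interleaves IO waits across many
# clients, but one CPU-bound handler blocks every other request.
import asyncio

async def io_handler():
    await asyncio.sleep(0.1)   # e.g. a DB query; the loop serves others meanwhile
    return "done"

def cpu_handler(n=22):
    # blocking computation: nothing else runs on the loop while it executes
    return sum(i * i for i in range(2 ** n))

async def main():
    # 100 concurrent IO-bound requests finish in about 0.1 s in total ...
    await asyncio.gather(*(io_handler() for _ in range(100)))
    # ... while CPU work must be pushed off the loop to avoid stalling it
    await asyncio.get_running_loop().run_in_executor(None, cpu_handler)

asyncio.run(main())
```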
From the research, we derive a universal way to compare the performance of various Web development technologies. Firstly, we must define the purpose of the comparison, that is, which kinds of Web technologies we want to match. After that, we design significant tests following the one-factor-at-a-time experimental design, choosing different conditions in the experiment according to the objectives. Then we compare the results under the various conditions and refine the experimental methods. At last, we judge in which situations those technologies are best used. In addition, we can find the bottleneck of each Web development technology and enhance our method so that we can make the performance better.
IV. CONCLUSION
This paper presents an analytical study of three Web techniques. To the best of our knowledge, this is the first to examine and test the performance of different Web development technologies, including the latest technology Node.js, from two aspects: objective systematic tests (benchmark) and realistic user-behavior tests (scenario). This removes the bias of a single kind of test and makes the results more reliable and practical.
In short, Node.js performs much better than the older technology PHP in high-concurrency situations, in both benchmark tests and scenario tests. PHP handles small requests well but struggles with large ones. Besides, Node.js is preferable in IO-intensive situations, not compute-intensive sites. Python-Web is also unsuitable for compute-intensive websites.
In general, Python-Web has many mature frameworks for developing large-scale websites, like YouTube and SourceForge. Node.js is an emerging technology with many benefits in IO-intensive situations, but it is a little hard for developers who are not familiar with asynchronous programming. As for PHP, it is an older technology that remains popular for small and middle-scale sites.
In our experiments, we only used the most important tests to compare and evaluate the performance of the Web technologies. We considered only the technologies themselves, not the architectural design, so our future work will focus on the architecture and try to improve performance. The paper mainly concerns the comparison of performance, but security and extensibility also require further investigation.
Deterministic Parallel DPLL ($(DP)^2LL$)
Youssef Hamadi\textsuperscript{1,2}, Said Jabbour\textsuperscript{3}, Cédric Piette\textsuperscript{3}, and Lakhdar Saïs\textsuperscript{3}
\begin{itemize}
\item \textsuperscript{1} Microsoft Research, Cambridge United Kingdom
\item \textsuperscript{2} LIX École Polytechnique, F91128 Palaiseau, France
\item \textsuperscript{3} Université Lille-Nord de France, Artois, CRIL, CNRS UMR 8188, F-62307 Lens
\end{itemize}
youssefh@microsoft.com
{jabbour,piette,sais}@cril.fr
Abstract. Parallel Satisfiability is now recognized as an important research area. The wide deployment of multicore platforms combined with the availability of open and challenging SAT instances are behind this recognition. However, current parallel SAT solvers suffer from non-deterministic behavior: a consequence of their architectures, which rely on weak synchronization in an attempt to maximize performance. This behavior is a clear downside for practitioners, who are used to both runtime and solution reproducibility. In this paper, we propose the first Deterministic Parallel DPLL engine. It is based on a state-of-the-art parallel portfolio architecture and relies on controlled synchronization of the different threads. Our experimental results clearly show that our approach preserves the performance of the parallel portfolio approach while ensuring full reproducibility of the results.
Keywords: SAT solving, Parallelism
1 Introduction
Parallel SAT solving has received a lot of attention in the last three years. This comes from several factors, like the wide availability of cheap multicore platforms combined with the relative performance stall of sequential solvers. Unfortunately, the demonstrated superiority of parallel SAT solvers comes at the price of non-reproducible results in both runtime and reported solutions. This behavior is the consequence of their architectures, which rely on weak synchronization in an attempt to maximize performance.
Today SAT is the workhorse of important application domains like software and hardware verification, theorem proving, computational biology, and automated planning. These domains need reproducibility, and therefore cannot take advantage of the improved efficiency coming from parallel engines. For instance, a software verification tool based on a parallel SAT engine can report different bugs within different runtimes when executed against the same piece of code; an unacceptable situation for a software verification engineer. More broadly in science and engineering, reproducibility of the
results is fundamental, and in order to benefit to a growing number of application domains parallel solvers have to be made deterministic. More specifically, evaluating or comparing highly non-deterministic SAT solvers is clearly problematic. This problem led the organizers of the recent SAT race and competitions to explicitly mention their preferences for deterministic solvers and to introduce specific rules for the evaluation of parallel SAT solvers. For example, we can find the following statement at the SAT race 2008 "In order to obtain reproducible results, SAT solvers should refrain from using non-deterministic program constructs as far as possible. For many parallel solver implementations it is very hard to achieve reproducible runtimes. Therefore, this requirement does not apply to the parallel track. As runtimes are highly deviating for parallel SAT solvers, each solver was run three times on each instance. An instance was considered solved, if it could be solved in at least one of the three runs of a solver". For the parallel track, different evaluation rules are used in the SAT 2009 competition and SAT race 2010, illustrating the recurring problems of the evaluation of parallel SAT solvers.
In this work, we propose a deterministic parallel SAT solver. Its results are fully reproducible both in terms of reported solution and runtime, and its efficiency is equivalent to a state-of-the-art non-deterministic parallel solver. It defines a controlled environment based on a total ordering of the solvers' interactions through synchronization barriers. To maximize efficiency, information exchange (conflict clauses) and checks for termination are performed on a regular basis. The frequency of these exchanges greatly influences the performance of our solver. The paper explores the trade-off between frequent synchronization, which allows fast integration of foreign conflict clauses at the cost of more synchronization steps, and infrequent synchronization, which avoids costly synchronization at the cost of delayed integration of foreign conflict clauses.
The paper is organized as follows. We detail the main features of modern sequential and parallel SAT solvers in Section 2 and provide the motivation for this work in Section 3. Our main contribution is described in Section 4. In Section 5, our implementation is fully evaluated. Before the concluding discussion in Section 7, a comprehensive overview of previous works is given in Section 6.
2 Technical Background
2.1 Modern SAT solvers
The SAT problem consists in finding an assignment to each variable of a propositional formula expressed in conjunctive normal form (CNF). Most state-of-the-art SAT solvers are based on a reincarnation of the historical Davis, Putnam, Logemann, and Loveland procedure, commonly called DPLL [1]. A modern sequential DPLL solver performs a backtrack search, selecting at each node of the search tree a decision variable which is set to a Boolean value. This assignment is followed by an inference step that deduces and propagates forced unit literal assignments. These are recorded in the implication graph, a central data structure that records the partial assignment together with its implications. This branching process is repeated until a feasible assignment (model) is found or a conflict is reached. In the first case, the formula is declared satisfiable and the model is reported, whereas in the second case, a conflict clause
(called asserting clause) is generated by resolution following a bottom-up traversal of the implication graph [2,3]. The learning process stops when a conflict clause containing only one literal from the current decision level is generated. Such a conflict clause (or learnt clause) expresses that such a literal is implied at a previous level. The solver backtracks to the implication level and assigns that literal to true. When an empty conflict clause is generated, the literal is implied at level 0, and the original formula can be reported unsatisfiable.
In addition to this basic scheme, modern solvers use additional components such as an activity-based heuristic and a restart policy. Let us give some details on these two important features. The activity of each variable encountered during each resolution process is increased; the variable with the greatest activity is selected to be assigned next. This corresponds to the so-called VSIDS variable branching heuristic [3]. During branching, after a certain number of conflicts, a cutoff limit is reached and the search is restarted. The evolution of the cutoff value is controlled by a restart policy (e.g. [4]). Thanks to the variable activities accumulated during the previous runs, the objective of the restarts is to focus early (at the top of the tree) on important areas of the search space. This differs from the original purposes behind randomization and restarts introduced in [5]. For an extensive overview of current techniques to solve SAT, the reader is referred to [6].
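To make the backtrack-search skeleton concrete, here is a minimal DPLL sketch (illustrative only; it omits the implication graph, clause learning, VSIDS, and restarts described above):

```python
# Clauses are lists of non-zero ints, DIMACS-style (-2 means "not x2").
def simplify(clauses, assignment):
    """Drop satisfied clauses and false literals; None signals a conflict."""
    out = []
    for clause in clauses:
        lits, satisfied = [], False
        for lit in clause:
            value = assignment.get(abs(lit))
            if value is None:
                lits.append(lit)
            elif (lit > 0) == value:
                satisfied = True
                break
        if satisfied:
            continue
        if not lits:
            return None            # empty clause: conflict
        out.append(lits)
    return out

def dpll(clauses, assignment=None):
    assignment = dict(assignment or {})
    clauses = simplify(clauses, assignment)
    while clauses is not None:     # unit propagation to fixpoint
        units = [c[0] for c in clauses if len(c) == 1]
        if not units:
            break
        assignment[abs(units[0])] = units[0] > 0
        clauses = simplify(clauses, assignment)
    if clauses is None:
        return None                # conflict under this assignment
    if not clauses:
        return assignment          # all clauses satisfied: model found
    var = abs(clauses[0][0])       # naive decision (real solvers: VSIDS)
    for value in (True, False):
        model = dpll(clauses, {**assignment, var: value})
        if model is not None:
            return model
    return None                    # both branches failed: backtrack

# (x1 or x2) and (not x1 or x2) and (not x2 or x3)
print(dpll([[1, 2], [-1, 2], [-2, 3]]))
```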
2.2 Parallel SAT solvers
There are two approaches to parallel SAT solving. The first one implements the historical divide-and-conquer idea, which incrementally divides the search space into subspaces, successively allocated to sequential DPLL workers. These workers cooperate through some load balancing strategy which performs the dynamic transfer of subspaces to idle workers, and through the exchange of interesting learnt clauses [7,8].
The parallel portfolio approach was introduced in 2008. It exploits the complementarity between different sequential DPLL strategies to let them compete and cooperate on the same formula. Since each worker deals with the whole formula, there is no need to introduce load balancing overheads, and cooperation is only achieved through the exchange of learnt clauses. With this approach, the crafting of the strategies is important, especially with a small number of workers. In general, the objective is to cover the space of good search strategies in the best possible way.
3 Motivations
As mentioned above, all current parallel SAT solvers suffer from non-deterministic behavior. This is the consequence of their architectures, which rely on weak synchronization in an attempt to maximize performance. Conflict-clause sharing, usually exploited in parallel SAT solvers to improve their performance, accentuates their non-deterministic behavior even more, i.e., the non-reproducibility of runtimes, reported solutions, or unsatisfiability proofs.
To illustrate the variations in terms of solutions and runtimes, we ran the same parallel solver, ManySAT (version 1.1) [9], several times on all the satisfiable instances used in the SAT Race 2010 and report several measures. Each instance was tested 10 times, and the time limit for each run was set to 40 minutes. The obtained results are depicted in Table 1. For each instance, we give the number of variables ($nbVars$), the number of successful runs ($nbModels$), and the number of different models ($diff$) obtained by those runs. Two models are different if their Hamming distance is not equal to 0. We also report the normalized average Hamming distance $n\bar{H} = \frac{\bar{H}}{nbVars}$, where $\bar{H}$ is the average of the pairwise Hamming distances between the different models.
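The normalized distance can be computed directly from the set of distinct models; a small sketch, assuming models are Boolean tuples over the same variables (the table values suggest the metric is reported as a percentage):

```python
from itertools import combinations

def normalized_avg_hamming(models, nb_vars):
    pairs = list(combinations(models, 2))
    if not pairs:                 # a single distinct model: distance 0
        return 0.0
    avg_h = sum(sum(a != b for a, b in zip(m1, m2))
                for m1, m2 in pairs) / len(pairs)
    return 100.0 * avg_h / nb_vars

print(normalized_avg_hamming([(True, False, True),
                              (False, False, True)], nb_vars=3))
```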
<table>
<thead>
<tr>
<th>Instance</th>
<th>nbVars</th>
<th>nbModels (diff)</th>
<th>$n\bar{H}$</th>
<th>avgTime (σ)</th>
</tr>
</thead>
<tbody>
<tr>
<td>12pipe_bug8</td>
<td>117526</td>
<td>10 (1)</td>
<td>0</td>
<td>2.63 (53.32)</td>
</tr>
<tr>
<td>ACG-20-10p1</td>
<td>381708</td>
<td>10 (10)</td>
<td>1.42</td>
<td>1452.24 (40.61)</td>
</tr>
<tr>
<td>AProVE09-20</td>
<td>33054</td>
<td>10 (10)</td>
<td>33.84</td>
<td>19.5 (9.03)</td>
</tr>
<tr>
<td>dated-10-13-s</td>
<td>181082</td>
<td>10 (10)</td>
<td>0.67</td>
<td>6.25 (9.30)</td>
</tr>
<tr>
<td>gss-16-s100</td>
<td>31248</td>
<td>10 (1)</td>
<td>0</td>
<td>38.77 (18.75)</td>
</tr>
<tr>
<td>gss-19-s100</td>
<td>31435</td>
<td>10 (1)</td>
<td>0</td>
<td>441.75 (35.78)</td>
</tr>
<tr>
<td>gss-20-s100</td>
<td>31503</td>
<td>10 (1)</td>
<td>0</td>
<td>681 (58.27)</td>
</tr>
<tr>
<td>itox_ve1138</td>
<td>150680</td>
<td>10 (10)</td>
<td>26.62</td>
<td>0.65 (22.99)</td>
</tr>
<tr>
<td>md5_47_4</td>
<td>65604</td>
<td>10 (10)</td>
<td>34.8</td>
<td>173.9 (31.03)</td>
</tr>
<tr>
<td>md5_48_1</td>
<td>66892</td>
<td>10 (10)</td>
<td>34.76</td>
<td>704.74 (74.65)</td>
</tr>
<tr>
<td>md5_48_3</td>
<td>66892</td>
<td>10 (10)</td>
<td>34.16</td>
<td>489.02 (68.96)</td>
</tr>
<tr>
<td>safe-30-h30-sat</td>
<td>135786</td>
<td>10 (10)</td>
<td>22.32</td>
<td>0.37 (0.79)</td>
</tr>
<tr>
<td>sha0_35_1</td>
<td>48689</td>
<td>10 (10)</td>
<td>33.18</td>
<td>45.4 (21.88)</td>
</tr>
<tr>
<td>sha0_35_2</td>
<td>48689</td>
<td>10 (10)</td>
<td>33.25</td>
<td>61.65 (29.93)</td>
</tr>
<tr>
<td>sha0_35_3</td>
<td>48689</td>
<td>10 (10)</td>
<td>32.76</td>
<td>72.21 (21.93)</td>
</tr>
<tr>
<td>sha0_35_4</td>
<td>48689</td>
<td>10 (10)</td>
<td>33.2</td>
<td>105.8 (35.22)</td>
</tr>
<tr>
<td>sha0_36_5</td>
<td>50073</td>
<td>10 (10)</td>
<td>34.19</td>
<td>488.16 (58.58)</td>
</tr>
<tr>
<td>sortnet-8-ipc5-h19-sat</td>
<td>361125</td>
<td>4 (4)</td>
<td>15.86</td>
<td>2058.39 (47.5)</td>
</tr>
<tr>
<td>total-10-19-s</td>
<td>331631</td>
<td>10 (10)</td>
<td>0.5</td>
<td>5.31 (6.75)</td>
</tr>
<tr>
<td>UCG-20-10p1</td>
<td>259258</td>
<td>10 (10)</td>
<td>2.12</td>
<td>768.17 (31.63)</td>
</tr>
<tr>
<td>vmpc_27</td>
<td>729</td>
<td>10 (2)</td>
<td>2.53</td>
<td>11.95 (32.62)</td>
</tr>
<tr>
<td>vmpc_28</td>
<td>784</td>
<td>10 (2)</td>
<td>3.67</td>
<td>34.61 (25.92)</td>
</tr>
<tr>
<td>vmpc_31</td>
<td>961</td>
<td>8 (1)</td>
<td>0</td>
<td>583.36 (88.65)</td>
</tr>
</tbody>
</table>
Table 1. Non-deterministic behavior of ManySAT
As expected, the solver exhibits a non-deterministic behavior in terms of reported models. Indeed, on many instances, one can see that each run leads to a different model. These differences are illustrated in many cases by a large $n\bar{H}$. For example, on the sha0_*_* family, the number of different models is 10 and the $n\bar{H}$ is around 30%, i.e., the models differ in the truth values of about 30% of the variables. Finally, the average time ($avgTime$) and the standard deviation ($\sigma$) illustrate the variability of the parallel solver in terms of solving time. The 12pipe_bug8 instance illustrates an extreme case: to find the same model ($diff = 1$), the standard deviation of the runtime is about 53.32 seconds while the average runtime is 2.63 seconds. This first experiment clearly illustrates to what extent the non-deterministic behavior of parallel SAT solvers affects the reproducibility of the results.
Algorithm 1: Deterministic Parallel DPLL
Data: A CNF formula $F$;
Result: true if $F$ is satisfiable; false otherwise
begin
1. $\langle \text{inParallel}, 0 \leq i < \text{nbCores} \rangle$
2. $\text{answer}[i] = \text{search}( ext{core}_i)$;
3. for ($i = 0; i < \text{nbCores}; i++$) do
4. if ($\text{answer}[i] \neq \text{unknown}$) then
5. return $\text{answer}[i]$;
end
4 Deterministic Parallel DPLL
In this section, we present the first deterministic portfolio-based parallel SAT solver. As sharing clauses has proven important for the efficiency of parallel SAT solving, our goal is to design a deterministic approach while maintaining clause sharing. To this end, our determinization approach is first based on the introduction of a barrier directive ($\langle \text{barrier} \rangle$) that represents a synchronization point at which a given thread waits until all the other threads reach the same point. This barrier is introduced to synchronize both clause sharing between the different computing units and termination detection (Section 4.1). Secondly, to enter the barrier region, a synchronization period for clause sharing is introduced and dynamically adjusted (Section 4.2).
4.1 Static Determinization
Let us now describe our determinization of non-deterministic portfolio-based parallel SAT solvers. Recall that a portfolio-based parallel SAT solver runs different incarnations of a DPLL engine on the same instance. Lines 2 and 3 of Algorithm 1 illustrate this portfolio aspect by running these different search engines in parallel on the available cores. To avoid non-determinism in the reported solution or unsatisfiability proof, a global data structure called $answer$ records the satisfiability answer of the different cores. The threads or cores are ordered according to their thread IDs (from 0 to nbCores-1). Algorithm 1 returns the result obtained by the first core in this ordering that answered the satisfiability of the formula (lines 4-6). This is a necessary but not sufficient condition for the reproducibility of the results. To achieve a complete determinization of the parallel solver, let us take a closer look at the DPLL search engine associated with each core (Algorithm 2). In addition to the usual components of DPLL-based SAT solvers, we can see that two
Algorithm 2: search(core_i)
Data: A CNF formula $F$
Result: $answer[i] = true$ if $F$ is satisfiable; $false$ if $F$ is unsatisfiable, $unknown$ otherwise
1 begin
2 nbConflicts=0;
3 while (true) do
4 if (!propagate()) then
5 nbConflicts++;
6 if (topLevel) then
7 answer[i] = false;
8 goto barrier1;
9 learntClause=analyze();
10 exportExtraClause(learntClause);
11 backtrack();
12 if (nbConflicts % period == 0) then
13 barrier1: <barrier>
14 if ($\exists j \mid answer[j] \neq unknown$) then
15 return answer[i];
16 updatePeriod();
17 importExtraClauses();
18 <barrier>
19 else
20 if (!decide()) then
21 answer[i] = true;
22 goto barrier1;
23 end
successive synchronization barriers (<barrier>, lines 13 and 18) are added to the algorithm. To understand the role of these synchronization points, we need to note both their placement inside the algorithm and the content of the region circumscribed by the two barriers. First, the barrier labeled $barrier_1$ (line 13) is placed just before any thread can return a final statement about the satisfiability of the tested CNF. This barrier prevents cores from returning their solution (i.e., model or refutation proof) in an anarchic way, and forces them to wait for each other before stating the satisfiability of the formula (lines 14 and 15). This is why the search engine of each core goes to the first barrier when unsatisfiability is proved (backtrack to the top level of the search tree, lines 6-8), or when a model is found (lines 20-22). At line 14, if the satisfiability of the formula has been answered by one of the cores ($\exists j \mid answer[j] \neq unknown$), the algorithm returns its own $answer[i]$. If no thread can return a definitive answer, they all share information by importing conflict clauses generated by the other cores during the last period (line 17). After each of them has finished importing clauses (second barrier, line 18), they continue to explore the search space looking for a solution. This second synchronization barrier is integrated to prevent each core from leaving the synchronization region before the others. In other words, when a given core enters this second barrier, it waits until all other cores have finished importing the foreign clauses. As different clause orderings induce different unit-propagation orderings and consequently different search trees, the clauses learnt by the other cores are imported (line 17) following a fixed order of the cores w.r.t. their thread IDs.
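A minimal Python sketch of this two-barrier region (not the authors' implementation; `answer`, `export_buffers`, and the import callback are illustrative stand-ins) could look as follows:

```python
# Two threading.Barrier objects reproduce the synchronization region:
# every core enters together, termination is checked deterministically,
# and foreign clauses are imported in a fixed thread-ID order.
import threading

NB_CORES = 4
answer = ["unknown"] * NB_CORES
export_buffers = [[] for _ in range(NB_CORES)]   # clauses exported per core
barrier1 = threading.Barrier(NB_CORES)
barrier2 = threading.Barrier(NB_CORES)

def sync_region(core_id, import_clause):
    barrier1.wait()                       # all cores reach the region together
    if any(a != "unknown" for a in answer):
        return answer[core_id]            # deterministic termination check
    for j in range(NB_CORES):             # fixed import order over thread IDs,
        if j != core_id:                  # so every run builds the same clause
            for clause in export_buffers[j]:   # ordering and search tree
                import_clause(clause)
    barrier2.wait()                       # nobody resumes searching early
    return None
```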
To complete this detailed description, let us specify the usual functions of the search engine. First, the \texttt{propagate()} function (line 4) applies classical unit propagation and returns \texttt{false} if a conflict occurs, and \texttt{true} otherwise. In the first case, a clause is learnt by the function \texttt{analyze()} (line 9); such a clause is added to the formula and exported to the other cores (line 10, the \texttt{exportExtraClause()} function). These learnt clauses are periodically imported in the synchronization region (line 17). In the second case, the \texttt{decide()} function chooses the next decision variable, assigns it, and returns \texttt{true}; otherwise it returns \texttt{false}, as all the variables are assigned, i.e., a model is found.
Note that to maximize the dynamics of information exchange, each core could synchronize with the others after each conflict, importing each learnt clause right after it has been generated. Unfortunately, this solution proves empirically inefficient, since a lot of time is wasted by threads waiting. To avoid this problem, we propose to synchronize the threads only after some fixed number of conflicts, \texttt{period} (line 12). This approach, called $(DP)^2LL\_static(period)$, does not update the period during search (no call to the function \texttt{updatePeriod()}, line 16). However, even with an "optimal" value of the parameter \texttt{period}, the problem of threads waiting at the synchronization barrier cannot be completely eliminated. Indeed, as the different cores usually exhibit different search behaviors (different search strategies, different sizes of learnt databases, etc.), using the same value of the \texttt{period} for all cores inevitably leads to wasted waiting time at the first barrier.
In the next section, we propose a speed-based dynamic synchronization approach that exploits the current size of the formula to deduce a specific value of the \texttt{period} for each core (call to \texttt{updatePeriod()} function, line 16).
4.2 Speed-based Dynamic Synchronization
As mentioned above, the DPLL search engines used by the different cores develop different search trees, hence thread waiting cannot be avoided even if the static value is optimally fine-tuned. In this section, we propose a speed-based dynamic adjustment of the value of the period. Our goal is to reduce as much as possible the time wasted by the different cores at the synchronization barrier. The time needed by each core to perform the same number of conflicts is difficult to estimate in advance; however, we propose an approximation measure that exploits the current state of the search engine. As decisions and unit propagations are two fundamental operations that dominate a SAT solver's runtime, estimating their cost leads to a better approximation of the progression speed of each solver. More precisely, as the different cores run on the same formula, we exploit the size of each core's current learnt clause set as an estimation of the cost of these basic operations. Consequently, our speed-based dynamic adjustment of the period is a function of this important information.
Let us formally describe our approach. In our dynamic synchronization strategy, for each core or computing unit \( u_i \), we consider a synchronization-time sequence as a
set of steps $t_i^k$ with $t_i^0 = 0$ and $t_i^k = t_i^{k-1} + period_i^k$, where $period_i^k$ represents the time window, defined in terms of the number of conflicts, between $t_i^{k-1}$ and $t_i^k$. Obviously, this synchronization-time sequence differs across the computing units $u_i$ ($0 \leq i < nbCores$). Let us define $\Delta_i^k$ as the set of clauses currently in the learnt database of $u_i$ at step $t_i^k$. In the sequel, when there is no ambiguity, we sometimes write $t_i^k$ simply as $k$.
Let us now formally describe the dynamic computation of these synchronization-time sequences. Let $m = \max_{u_i}(|\Delta_i^k|)$, where $0 \leq i < nbCores$, be the size of the largest learnt clause database, and let $S_i^k = \frac{|\Delta_i^k|}{m}$ be the ratio between the size of the learnt clause database of $u_i$ and $m$. This ratio $S_i^k$ represents the speedup of $u_i$: when it tends to one, the progression of core $u_i$ is closer to the slowest core, while when it tends to 0, core $u_i$ progresses more quickly than the slowest one. For $k = 0$ and for each $u_i$, we set $period_i^0$ to $\alpha$, where $\alpha$ is a natural number. Then, at a given time step $k > 0$, and for each $u_i$, the next value of the period is computed as follows: $period_i^{k+1} = \alpha + (1 - S_i^k) \times \alpha$, where $0 \leq i < nbCores$. Intuitively, the core with the highest speedup $S_i^k$ (tending to 1) should have the lowest period; conversely, the core with the lowest speedup $S_i^k$ (tending to 0) should have the highest value of the period.
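A direct transcription of this update rule (a sketch, with `learnt_sizes[i]` standing for $|\Delta_i^k|$):

```python
def update_periods(learnt_sizes, alpha=300):
    m = max(learnt_sizes)
    # S_i = |Delta_i| / m tends to 1 for the core with the largest
    # learnt database, which gets the smallest period.
    return [alpha + (1 - size / m) * alpha for size in learnt_sizes]

# the largest-database cores keep the base period alpha; the others
# get up to 2*alpha conflicts between synchronizations
print(update_periods([1000, 500, 250, 1000]))  # [300.0, 450.0, 525.0, 300.0]
```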
5 Evaluation
<table>
<thead>
<tr>
<th>period</th>
<th>solving time</th>
<th>waiting time</th>
<th>waiting/solving time (in %)</th>
</tr>
</thead>
<tbody>
<tr>
<td>static(1)</td>
<td>10,276</td>
<td>4,208</td>
<td>40.9</td>
</tr>
<tr>
<td>static(100)</td>
<td>9,715</td>
<td>2,559</td>
<td>26.3</td>
</tr>
<tr>
<td>static(10000)</td>
<td>9,124</td>
<td>1,605</td>
<td>17.6</td>
</tr>
</tbody>
</table>
Table 2. Waiting time w.r.t. the synchronization period
All the experiments were conducted on an Intel Xeon 3GHz under Linux CentOS 4.1 (kernel 2.6.9) with a RAM limit of 2GB. Our deterministic DPLL algorithm has been implemented on top of the portfolio-based parallel solver ManySAT (version 1.1). The timeout was set to 900 seconds per instance; if no answer was delivered within this time, the instance was declared unsolved. We used the 100 instances proposed during the recent SAT Race 2010, and for each experiment we report the number of solved instances (x-axis) together with the total time needed to solve them (y-axis). Each parallel solver runs with 4 threads. Note that in the following experiments, we consider the real time used by the solvers instead of the classic CPU time. Indeed, on most architectures, CPU time does not increase while threads are asleep (e.g., waiting at the barriers), so taking CPU time into account would give an illegitimate advantage to our deterministic solvers.
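The real-time-versus-CPU-time point is easy to reproduce (a standalone illustration, not taken from the paper):

```python
# A thread blocked at a barrier accrues wall-clock time but almost no
# CPU time, so CPU-time accounting would hide the synchronization cost.
import time

start_wall, start_cpu = time.perf_counter(), time.process_time()
time.sleep(1.0)   # stands in for a thread waiting at a barrier
print(f"wall: {time.perf_counter() - start_wall:.2f}s "
      f"cpu: {time.process_time() - start_cpu:.2f}s")
# typically prints: wall: 1.00s cpu: 0.00s
```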
5.1 Static Period
In a first experiment, we evaluated the performance of our Deterministic Parallel DPLL ($(DP)^2LL$) with various static periods. Figure 1 presents the results. First, a sequential version of the solver was used. Unsurprisingly, this version obtains the worst overall results, solving only 68 instances in more than 11,000 seconds. This result shows the improvement obtained by using parallel engines. We also report the results obtained by the non-deterministic solver ManySAT 1.1. Note that, as shown in Section 3, executing this version several times may lead to different results. This non-deterministic solver solved 75 instances within 8,850 seconds. Next, we ran a deterministic version of ManySAT where each core synchronizes with the others after each clause generation ($(DP)^2LL\_static(1)$). We can observe that the synchronization barrier is computationally heavy: the deterministic version is clearly less efficient than the non-deterministic one, solving only 72 instances out of 100 in more than 10,000 seconds.
This negative result is mainly due to the time wasted by the cores waiting for each other on a (very) regular basis. To overcome this issue, we also tried synchronizing the different threads only after a given number of conflicts (100, 10000). Figure 1 shows that those versions outperform the "period=1" one, but they stay far from the results obtained by the non-deterministic version.
The barriers that make our algorithm deterministic are clearly a drawback to its efficiency. We therefore evaluated the amount of time the threads spend waiting for each other. In Table 2, we report the proportion of time spent waiting for synchronization with respect to the total solving time for the different versions of $(DP)^2LL$. As the results show, this waiting time can be really significant when synchronizing after each conflict.
Indeed, in this case, more than 40% of the time is spent by the threads waiting for each other. As expected, this rate is reduced when synchronization happens less often, but it is never insignificant. Note that even though the "period=10000" version wastes less time waiting than "period=100", it obtains worse overall results, solving only 72 instances. The "period=100" version obtains the best results (discarding the non-deterministic version). Based on extensive experiments, this parameter seems to be a good trade-off between the time spent at the barrier and the dynamics of information exchange. Indeed, we believe that information is exchanged too late in the "period=10000" version, which explains its less satisfying behavior.
Accordingly, we tried to reduce the waiting time of the threads by adopting the speed-based dynamic strategy presented in Section 4.2. Empirical results on this dynamic technique are presented in the next section.
5.2 Dynamic Period
In a second experiment, we empirically evaluated our dynamic strategy. We compare the results of this version with those obtained by ManySAT 1.1, by the best static version (period=100), and by the sequential one. The results are reported in Figure 2. The dynamic version is run with parameter $\alpha = 300$.

This experiment empirically confirms the intuition that each core should have a different period value, depending on the size of its own learnt-clause database, which heuristically indicates its unit-propagation speed. Indeed, we can observe in Figure 2 that the "solving curve" of this dynamic version is very close to that of ManySAT 1.1. This means that the two solvers solve about the same number of instances within about the same time. Moreover, this adaptive version is able to solve 2 more instances than the non-deterministic one, which makes it the most efficient version tested in our experiments.
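The exact speed-based formula is the one given in Section 4.2; the sketch below is purely illustrative of the idea, with `reference_size` a hypothetical tuning constant. A thread with a larger learnt-clause database, and hence slower unit propagation, is given a smaller conflict budget between barriers so that arrival times at the barrier stay roughly balanced:

```python
ALPHA = 300  # value of the alpha parameter used in the experiments

def dynamic_period(num_learnt_clauses, reference_size=1000):
    """Illustrative only: shrink the conflict budget of threads whose
    learnt-clause database (and thus propagation cost) has grown large."""
    scale = reference_size / max(num_learnt_clauses, reference_size)
    return max(1, int(ALPHA * scale))
```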
6 Previous Works
In the last two years, portfolio-based parallel solvers have become prominent, and we are not aware of a recently developed divide-and-conquer approach (the latest being [8]). We describe here all the parallel solvers qualified for the 2010 SAT Race\(^4\). We believe that these parallel portfolio approaches represent the current state of the art in parallel SAT.
In plingeling [10], the original SAT instance is duplicated by a boss thread and allocated to worker threads. The strategies used by these workers are mainly differentiated by the amount of preprocessing, random seeds, and variable branching. Conflict-clause sharing is restricted to units, which are exchanged through the boss thread. This solver won the parallel track of the 2010 SAT Race.
ManySAT [9] was the first parallel SAT portfolio. It duplicates the instance of the SAT problem to solve, and runs independent SAT solvers differentiated by their restart policies, branching heuristics, random seeds, conflict-clause learning, etc. It exchanges clauses through various policies. Two versions of this solver were presented at the 2010 SAT Race; they finished second and third, respectively.
In SArTagnan [11], different SAT algorithms are allocated to different threads and differentiated with respect to restart policies and VSIDS heuristics. Some threads apply a dynamic resolution process [12] or exploit reference points [13]. Others try to simplify a shared clause database by performing dynamic variable elimination or replacement. This solver finished fourth.
In Antom [14], the SAT algorithms are differentiated by decision heuristic, restart strategy, conflict-clause detection, lazy hyper binary resolution [12], and dynamic unit-propagation lookahead. Conflict-clause sharing is implemented. This solver finished fifth.
7 Discussion
In this paper, we tackle the important issue of non-determinism in parallel SAT solvers by proposing \((DP)^2\)LL, a first implementation of a deterministic parallelized procedure for SAT. To this end, a simple but efficient idea is presented; it mainly consists in introducing two synchronization barriers into the algorithm. We propose different synchronization strategies, and show that this deterministic version proves empirically very efficient; indeed, it can compete with a recent non-deterministic algorithm.
This first implementation opens many interesting research perspectives. First, the barrier added to the main loop of the parallelized CDCL to make the algorithm deterministic can be seen as a drawback of the procedure. Indeed, every thread that has terminated its partial computation has to wait for all the other threads to finish. Nevertheless, our synchronization policy proves effective while keeping the heart of the parallelized architecture: clause exchanges. Moreover, we think that it is possible to take even better advantage of this synchronization step. New ways for the threads of execution to interact can be imagined at this particular point; we think this is a promising path for future research.

\(^4\)http://baldur.iti.uka.de/sat-race-2010
References
1. Davis, M., Logemann, G., Loveland, D.W.: A machine program for theorem-proving. Communications of the ACM 5(7) (1962) 394–397
Implementation of Invariant Code Motion Optimization in an Embedded Processor Oriented Compiler Infrastructure
Ivan Považan, Marko Krnjetin, Miodrag Dukić, Miroslav Popović
Abstract—In order to achieve better performance of the generated code, various compiler optimizations are performed during the compilation process. Even though an optimization is expected to improve code quality, for embedded processors with a small amount of resources applying certain optimizations can sometimes lead to the generation of a larger number of instructions (i.e. larger code size and slower execution). One such optimization is a loop optimization: invariant code motion. In this paper we present improvements to the implementation of this optimization in an embedded-processor-oriented compiler infrastructure which give good results in cases of high register pressure. The enclosed test results for code translated with and without the proposed improvements show that our new approach generates code with better performance in all cases.
Index Terms—Compiler optimization, invariant code motion, high register pressure
I. INTRODUCTION
Having a high-quality compiler for a given processor is not an uncommon practice. Quite the opposite: nowadays there is high demand for fast, robust and reliable compilers that can generate code of high quality. Quality of the code most often refers to code size (number of instructions in memory) or code speed (number of cycles needed to execute the code).
In the embedded-systems domain both characteristics are highly desirable, and ideally the generated code performs close to hand-written assembly code. In other words, the generated code is small and fast. However, designing and developing such compilers is not a trivial job, especially if the quality of the source code is taken into account. The compiler has to understand and recognize certain constructs from the source code and apply the best compilation strategy in order to generate the best code, regardless of how those constructs are written in the source language. There are two approaches for dealing with this problem:
• Source code adaptations
• Compiler optimizations
The first approach amounts to adapting the source code for a certain compiler according to compiler-specific coding guidelines, which can help the compiler in code generation.
On the other hand, and more ideally, the compiler should perform the same transformations automatically and should not depend on the shape of the source code. Therefore compilers implement various optimizations, performed automatically on the source code or made available to the user through compiler switches, in order to achieve the best performance. Compiler optimizations are not independent: applying one can affect another, which can result in better or worse code. Choosing the combination of optimizations that generates the best code is a complex problem. Furthermore, compilation passes like instruction selection, resource allocation and instruction scheduling also need to be interleaved and interconnected for embedded processors to achieve better results, which further complicates choosing the best compilation strategy [1]. Clearly, for DSP applications the most critical parts of the source code, which need to be condensed as much as possible, are loops. Optimizations like loop unrolling, software pipelining, loop tiling, invariant code motion, induction variable detection, hardware loop detection and similar generally improve the quality of the generated code by a significant percentage. However, the question is: is this always true?
Most of the mentioned loop optimizations are machine-independent and are performed on a high-level intermediate representation of the translated code. This means that target-architecture specifics such as available resources or available machine instructions are not considered at that point in the compilation process, and applying such an optimization can have undesirable effects. Moving code out of a loop usually increases the number of interferences between operands and can cause spills, which increases the overall number of instructions. In this paper we present improvements to the implementation of the invariant code motion optimization which solve these problems.
The paper is organized in six sections. This section gives an introduction to the problem domain. The next section describes the compiler framework used, while the third describes the initial algorithm for invariant code motion and the introduced improvements. The following section presents test results and a discussion of the improvements achieved with the modifications. The fifth section covers related work, and the last section gives a conclusion.
II. RTCC
The implementation of the invariant code motion optimization described in this paper is part of an embedded-processor-oriented compiler infrastructure called RTCC [2]. It is a class library which includes compiler modules for creating backends of embedded-processor compilers. The framework defines sets of modules for:
- analyses – control flow, data dependency, call graph, liveness, alias, static single assignment;
- optimizations – common sub-expression elimination, dead code elimination, constant propagation, hardware loop detection, invariant code motion, induction variable detection, copy propagation;
- compilation passes – instruction selection, resource allocation, instruction scheduling, code generation;
- modelling the target architecture – resources, instructions, hazards, latencies, parallelization rules;
- IR (intermediate representation) – translation from FE IR (front-end intermediate representation), definition of BE IR (back-end intermediate representation).
The IR is defined in the form of a graph of connected basic blocks, each containing a list of operations. The HLIR (high-level intermediate representation) includes high-level operations which are independent of the target architecture, while the LLIR (low-level intermediate representation) contains target-specific instructions. Creating a custom compiler requires using an existing front-end and modelling the target processor with RTCC modules, where some of them can be used directly while others can be derived and adjusted according to target-architecture specifics.
Source-code translation is performed in the compiler's FE until the IR is reached. After that, the FE IR is transformed into RTCC HLIR and compilation resumes in the BE of the compiler. Control flow, data dependency, call graph, and other analyses are then performed on the HLIR in order to provide additional information used by other compilation passes and high-level code optimizations. Afterwards, the HLIR is lowered to LLIR by the instruction selection module, applying rewriting rules which translate the abstract representation into target-specific instructions. Once the LLIR is reached, low-level optimizations, instruction scheduling and resource allocation are performed. Finally, the assembly code is generated.
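As a rough sketch of the IR shape just described (hypothetical field names, not the actual RTCC classes), a function is a graph of basic blocks, each holding an ordered list of operations that are progressively lowered from HLIR to LLIR:

```python
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class Operation:
    opcode: str                      # e.g. "add", "load", "cmp"
    operands: List[str]              # source operands
    result: Optional[str] = None     # destination operand, if any
    low_level: bool = False          # False on HLIR, True once lowered to LLIR

@dataclass
class BasicBlock:
    name: str
    ops: List[Operation] = field(default_factory=list)
    successors: List["BasicBlock"] = field(default_factory=list)  # CFG edges

@dataclass
class Function:
    entry: BasicBlock
    blocks: List[BasicBlock] = field(default_factory=list)
```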
One of the optimization modules available within the RTCC library is the invariant code motion module described in this paper.
III. INVARIANT CODE MOTION
A. HLICM – High level invariant code motion
Invariant code motion is a loop optimization technique which detects a computation within a loop whose result does not change over loop iterations, and moves that computation out of the loop body in order to improve the performance of the generated code. This motion is also called hoisting. The technique is most often used on the HLIR to hoist invariant expressions, but can also be applied during low-level passes. Initially, RTCC included the HLICM described in [3], which was implemented to work only as a high-level optimization.
In order to describe the HLICM algorithm we first need to define how to detect an invariant computation, then where we can move the code, and finally how to determine whether code is movable. These three procedures are defined according to [4] as follows:
**Definition 1:** The definition \(d: t \leftarrow a_1 \text{ op } a_2\) (where \(op\) is some operation) is loop-invariant within loop \(L\) if, for each source operand \(a_i\):
1. \(a_i\) is a constant,
2. or all the definitions of \(a_i\) that reach \(d\) are outside the loop,
3. or only one definition of \(a_i\) reaches \(d\), and that definition is loop-invariant.
**Definition 2:** The preheader basic block \(p\) of the loop \(L\) is the immediate dominator of the head basic block of the loop \(L\).
**Definition 3:** The definition \(d: t \leftarrow a_1 \text{ op } a_2\) can be hoisted to the loop preheader \(p\) if:
1. \(d\) dominates all loop exits at which \(t\) is live-out,
2. and there is only one definition of \(t\) in the loop,
3. and \(t\) is not live-out of the loop preheader \(p\).
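A direct transcription of Definitions 1 and 3 might look as follows. This is only a sketch: `reaching_defs`, `dominates`, `live_out` and the `loop` object are assumed to be provided by the analysis modules listed in Section II.

```python
def is_loop_invariant(d, loop, reaching_defs, invariant_set):
    """Definition 1 for d: t <- a1 op a2. reaching_defs(d, a) yields the
    definitions of operand a that reach d (from the data-dependency
    analysis); invariant_set holds definitions already marked invariant."""
    for a in d.operands:
        if a.is_constant:
            continue                                  # condition 1
        in_loop = [x for x in reaching_defs(d, a) if x.block in loop.blocks]
        if not in_loop:
            continue                                  # condition 2
        if len(in_loop) == 1 and in_loop[0] in invariant_set:
            continue                                  # condition 3
        return False
    return True

def can_hoist(d, loop, dominates, live_out, preheader):
    """Definition 3: is it legal to move d to the loop preheader?"""
    t = d.result
    for e in loop.exits:
        if t in live_out(e) and not dominates(d.block, e):
            return False                              # condition 1
    if sum(1 for op in loop.all_ops() if op.result == t) != 1:
        return False                                  # condition 2
    return t not in live_out(preheader)               # condition 3
```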
Fig. 1 shows a simple example with invariant code which can be detected on the HLIR. The output generated after the HLICM module is displayed in Fig. 2, which shows that the algorithm successfully detects such scenarios.


However, there are scenarios where HLICM does not work as expected. Fig. 3 shows a slightly modified example which is not covered by HLICM and will not be optimized although it could be. The reason is that global variables are not covered by the initial algorithm: if they are volatile or changed through their address, the optimization cannot determine whether they are invariant, due to the lack of information. In this particular example $t = \text{mem}[a]$ is invariant, because $a$ is changed only after the loop.
\begin{verbatim}
a = 0x101          // global scope
L1:
  ptr = &a
  i = 0
L2:
  if (i > 10) jmp L3
  t = mem[a]
  array[i] = t & b
  i++
  jmp L2
L3:
  *ptr = 10
\end{verbatim}
Fig. 3. ICM example with a global variable
Furthermore, Fig. 4 shows a slightly different situation where the high-level comparison ($i < 200$) cannot be hoisted due to a dependency on one of the source operands.
\begin{verbatim}
L1:
  i = 0
L2:
  if (i > 300) jmp L3
  a = 100
  if (i < 200)
    array[i] = a & b
  i++
  jmp L2
L3:
\end{verbatim}
Fig. 4. High level representation of ICM example with a comparison
\begin{verbatim}
L1:
  load 0, i
  load 300, t0                 // upper bound
L2:
  cmp i, t0
  jmp gt L3                    // gt - greater than
  load 100, a
  load 199, t1
  cmp i, t1
  and a, b, t2
  storec le t2, mem[array+i]   // le - less or equal
  add i, 1, i
  jmp L2
L3:
\end{verbatim}
Fig. 5. Low level representation of ICM example with a comparison
Observing Fig. 5, we can see the low-level representation of the example from Fig. 4. Focusing on the comparison and its low-level representation, we can see that there are additional opportunities for increasing code efficiency:
\begin{verbatim}
HLIR: if (i < 200)
LLIR: load 199, t1 // invariant
cmp i, t1
\end{verbatim}
The low-level load becomes another candidate operation for invariant code motion. The following subsection proposes improvements for these cases.
B. LLICM – Low level invariant code motion
Fig. 3 shows an example where a global variable is used in an invariant computation. In order to detect invariant code in such examples, the initial approach needs to be extended with information from the alias analysis module, which determines whether a variable's location has been accessed and whether its value has been changed through its address. Accordingly, we add the following extension to Definition 1:
5. if some source operand $a_n$ is a static-storage-duration variable, then $a_n$ must not be volatile, and its value must not be changed through its address before its usage.
By implementing this extension, LLICM can detect and safely hoist the load of variable $a$ outside of the loop in the given example, since its value is changed through its address only after the loop. However, if the assignment *ptr = 10 were placed inside the loop body, LLICM would not hoist $t = \text{mem}[a]$, since the alias analysis would report the change to variable $a$.
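The check added by point 5 might be sketched as follows, where `alias` stands for the alias-analysis module and all names are illustrative:

```python
def satisfies_extension_5(a, loop, alias):
    """Point 5 above, for a static-storage-duration (global) operand:
    reject volatile variables and any variable whose value may be
    modified through its address inside the loop."""
    if a.is_volatile:
        return False
    # alias.may_modify(a, loop) is assumed to answer whether any store
    # through a pointer possibly aliasing 'a' occurs within the loop.
    return not alias.may_modify(a, loop)
```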
To address the problem of not detecting hoistable code in Fig. 4, we simply need to run the LLICM algorithm on the low-level representation as well, just before instruction scheduling and resource allocation, in order to enable hoisting of low-level instructions that are invariant.
Clearly with both proposed improvements LLICM can further decrease the size of the loop body.
C. RPICM – Register pressure invariant code motion
Hoisting an invariant computation is not harmful if the resulting operand is not used within the loop. If that is not the case, the number of live operands grows with each motion, which can lead to increased register pressure. When the optimization is applied on the HLIR there is no information about the resources used, since allocation has not yet been performed. However, the modifications introduced for LLICM give us the opportunity to control when to perform the hoisting.
In order to do this we introduce the RPICM algorithm as a modified version of LLICM which clones the LLIR before hoisting, performs the code motion, and records which operands were hoisted. Afterwards, instruction scheduling and resource allocation are performed. If a spill occurs during resource allocation, the module checks whether the spilled operand interferes with a hoisted operand. If that is the case, the current LLIR is replaced with the pre-hoisting clone and the process is repeated without hoisting the offending operand. In this way we do not hoist operands that would otherwise introduce spills. Fig. 6 shows the algorithm.
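Since Fig. 6 is not reproduced here, the retry loop it describes can be sketched as follows (illustrative names; the real module operates on RTCC's LLIR):

```python
def rpicm(llir, candidates, hoist, schedule_and_allocate, interferes):
    """Sketch of the RPICM retry loop: hoist invariant operations, run
    scheduling and allocation, and back out any hoisted operand that
    interferes with a spilled one."""
    candidates = list(candidates)
    while True:
        snapshot = llir.clone()                # pre-hoisting copy of the LLIR
        hoisted = [c for c in candidates if hoist(llir, c)]
        spills = schedule_and_allocate(llir)   # returns spilled operands
        offenders = [h for h in hoisted
                     if any(interferes(h, s) for s in spills)]
        if not offenders:
            return llir                        # no spill is caused by hoisting
        llir = snapshot                        # roll back to the clone...
        for h in offenders:                    # ...and retry without offenders
            candidates.remove(h)
```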
IV. RESULTS
The described improvements were added to an existing port of the RTCC compiler for a custom DSP-class processor. Some important features of this DSP are: Harvard load/store architecture, VLIW, two memory banks, two arithmetic units allowing instruction-level parallelism, an address generation unit, and a limited number of data registers (4 general-purpose, 3 temporary and 4 accumulators), which makes it a good candidate for testing high register pressure.
In order to test the performance with and without the modifications to the invariant code motion optimization, we used the following tests and measured the number of cycles required for code execution:
- a group of tests that include loops from the DejaGnu test suite – 293 tests
- DSP benchmark projects, which include:
  - FFT algorithm
  - CDMA algorithm
  - Bi-quad filter
  - FIR filter
| DejaGnu – 293 tests | HLICM | LLICM | RPICM |
|---------------------|-------|-------|-------|
| No effect           | 249   | 181   | 189   |
| Worse code          | 16    | 29    | 16    |
| Improvement         | 28    | 83    | 88    |

**TABLE I** Number of DejaGnu tests in which applying ICM affected the generated code positively, negatively, or not at all.
| DejaGnu – 293 tests | HLICM vs LLICM | HLICM vs RPICM |
|---------------------|----------------|----------------|
| Worse code          | 4.44%          | 0.00%          |
| Improvement         | 18.77%         | 20.48%         |

**TABLE II** Overall efficiency reduction and improvement between the proposed techniques for the DejaGnu test suite.
| DSP applications | HLICM | LLICM | RPICM |
|------------------|-------|-------|-------|
| FFT              | 56587 | 55989 | 55057 |

**TABLE IV** Comparison of overall improvement between the proposed techniques for the tested DSP applications (% means faster code).
Table I shows comparison results between the initial implementation of ICM (HLICM) and the two newly proposed techniques, LLICM and RPICM, in terms of the number of DejaGnu tests that:
- showed no effect after ICM was applied,
- generated slower code after ICM was applied,
- generated faster code after ICM was applied.
We can see that the best performance is obtained with RPICM and the worst with the initial implementation of ICM. However, Table I also shows that LLICM introduces worse performance for 13 additional test cases. This happens because applying ICM on low-level instructions increases the opportunities for detecting and hoisting invariant code, which in turn raises register pressure and can cause spills. Table II emphasizes these results further by giving an overall comparison of performance reduction and improvement, relative to the total number of tests, between the proposed approaches. It can again be concluded that RPICM gives the best results and also fixes the issues introduced by the low-level variant of the optimization. On the other hand, Tables III and IV contain execution cycle-count comparisons for the DSP application tests between the ICM techniques, showing the overall gain in percent. Analyzing the displayed results, it can be concluded that the new techniques generate overall better results for the tested DSP applications.
The tables also show that even though RPICM is the best technique overall, it still does not solve the problem of spilling operands hoisted during high-level invariant code motion.
V. RELATED WORK
It is known that expression- or instruction-hoisting methods can de-optimize the translated code. This is especially pronounced on target architectures with a limited amount of resources, with optimizations like common sub-expression elimination or invariant code motion. Such code movements extend the liveness of operands and increase the number of interferences between them, raising register pressure (RP) and potentially causing spills. There have been many attempts to deal with this problem.
Some of them address the problem in the high-level passes that can cause high register pressure. Ma and Carr propose a predictive algorithm for register pressure on a loop in [5]. The algorithm works at a high level, before the unroll-and-jam loop optimization is applied, and manages to improve performance by reducing the number of potential spills.
On the other hand, changes to the low-level passes can also improve performance. Techniques like live-range splitting and rematerialization, presented in [6] and [7], are resource allocation techniques that deal better with high register pressure. A similar effect is achieved by the register-pressure-aware instruction scheduling proposed in [8]. Likewise, our approach modifies the low-level compilation, but changes neither the resource allocation nor the instruction scheduling algorithms. By providing feedback about hoisted operands, spill candidates and their interferences, and by allowing invariant code motion, instruction scheduling and resource allocation to be invoked multiple times, we achieve better results in high-register-pressure scenarios, as shown in the enclosed results.
VI. CONCLUSION
The results demonstrate that the improvements introduced to the invariant code motion optimization give overall much better performance than the initial implementation. The results for the proposed technique for controlling when to perform the optimization also show that, in cases of high register pressure, the compiler can successfully find the solution that gives the best performance in all test cases.
Since there are cases where the optimization introduces overhead in the generated code, the described implementation can still be improved. This is true for cases where invariant code motion happens on the HLIR; therefore one of the future improvements would be tracking the hoisted operands on the HLIR and applying an algorithm similar to the one described in this paper. However, that approach would probably have a larger impact on compilation time.
ACKNOWLEDGMENT
This research was partially funded by the Ministry of Education, Science and Technological Development of the Republic of Serbia under project No. TR-32031. This research was performed in cooperation with the RT-RK Institute for Computer Based Systems.
REFERENCES