---
[Fragment of a paper from the 12th EAMT conference, 22-23 September 2008, Hamburg, Germany. The record's metadata and the opening pages were not recovered; the text begins mid-sentence.]

[...] is involved in two links because it is the best source word for both "miembr[...]". The best alignment here would be {0-0, 1-2, 2-1, 3-3}. The cost is -log χ² (see Sect. 3.1).

2.1 Baseline Search

With Moore's search strategy, which will be referred to as baseline search, links of the example of Fig. 1 are arranged as depicted in Fig. 2 (left figure). Thus in the baseline search the possible links, sorted in function of their cost, are arranged one link per stack, together with the "empty" link set ∅. Baseline search always begins with the empty alignment (alignment stack 0). [Footnote 4: Melamed [3] also starts with the empty alignment and links are added from most to least probable.] This hypothesis is expanded with each link of link stack 1, forming two new hypotheses (the empty alignment and the alignment containing the link 1-2), which are copied into alignment stack 1. Each hypothesis of alignment stack i is expanded with each link of link stack i+1. Histogram and/or threshold pruning are applied to the alignment hypothesis stack to reduce complexity. The dashed line in alignment stack 2 illustrates the histogram pruning threshold for a beam size of 3.

[Fig. 2. Left: Baseline search: link-by-link search following word association score order [1]. Right: "source-word-score" search strategy.]
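For illustration, the baseline expansion can be sketched as follows (illustrative Python, not the paper's actual system; cost stands in for the weighted feature sum over association, distortion and unlinked-word penalties, and all names are hypothetical):

# Minimal sketch of the baseline (one-stack-per-link) beam search.
# `links` are sorted by word association cost; `cost` scores a hypothesis.
def baseline_search(links, cost, beam_size=3):
    hypotheses = [frozenset()]            # alignment stack 0: empty alignment
    for link in links:                    # one link stack per link
        expanded = []
        for hyp in hypotheses:
            expanded.append(hyp)          # keep the hypothesis unchanged
            expanded.append(hyp | {link}) # or introduce the new link
        # histogram pruning: keep only the `beam_size` cheapest hypotheses
        hypotheses = sorted(expanded, key=cost)[:beam_size]
    return min(hypotheses, key=cost)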
In our view, the main drawback of the baseline search strategy is that the final alignment depends on the order in which links are introduced. To understand this better, consider a very simple system with a word association feature, a distortion feature and an unlinked word penalty feature. Distortion costs are caused by crossings between links. Each time some unlinked word becomes linked, the unlinked word penalty decreases. When a hypothesis is expanded with a new link, if the word association cost for this link plus a possible distortion cost is smaller than a possible decrease in the unlinked word penalty, the hypothesis with the new link is better than the previous one. In the example of Fig. 2 (left figure), suppose that this was the case successively for links 1-2, 3-3 and 0-0, so that the best alignment hypothesis is {1-2, 3-3, 0-0}. Now if this hypothesis is expanded with link 2-2, the association cost is compensated by the decrease of the unlinked feature cost for "state", and the new best hypothesis will include link 2-2. Expanding now this last hypothesis with link 2-1, the unlinked feature gain for "pais" cannot compensate for the distortion feature cost (due to crossing with "member-miembr[o]") plus the association cost. Thus link 2-1 is not included in the final hypothesis. On the contrary, if we expanded the hypotheses with link 2-1 first, the double unlinked feature gain (for "pais" and "state") would compensate for the other costs, and link 2-1 would appear in the final hypothesis.

Thus in the previous case, a probable but incorrect link (2-2) introduced first prevented the correct link (2-1) from being in the final alignment, because of the unlinked feature. In other situations, this may occur with the distortion feature, the presence of the incorrect link causing a crossing with the correct one. Actually, in many cases, when introducing link 2-1, both the new hypothesis (with link 2-2) and the former one (without it) will be in the stack. However, when introducing a link, it can happen that all hypotheses which do not contain a previously introduced link have been pruned out. In this case all hypotheses would contain the link 2-2 when expanding hypotheses with link 2-1, and the problem described above would happen.

2.2 Proposed Improvements

To help overcome this problem, we perform successive iterations of the alignment algorithm. In the second one, we start from the final alignment of the first iteration instead of the empty alignment. Expanding a hypothesis with some link still means introducing this link in the alignment hypothesis if it is not present yet, but it also means removing it if it is already present. Thus alignment hypotheses now always contain a reasonable set of links for this sentence pair: the first iteration's final links at the start, which are then updated link by link during search. When a hypothesis is expanded with an incorrect link, this link is typically situated (considering the alignment matrix) apart from the rest of the links in the hypothesis, causing a distortion cost. If a hypothesis containing no link were expanded with this incorrect link, it would not be penalised by any distortion cost.

Another idea to alleviate the problem is to let links to the same word compete on a fair basis, considering them at the same time instead of successively in the alignment hypotheses. In this scheme, possible links are organised in one stack for each source (or target) word, as in Fig. 2 (right figure). This is a one-stack-per-word strategy, whereas the baseline search is a one-stack-per-link strategy. The links of each stack are used to expand the same hypotheses. Thus, in our example, expanding hypothesis {1-2, 3-3, 0-0}, 2-1 would have been preferred over 2-2.

In Fig. 2 (right figure), link stacks are sorted according to the cost of the best link in the stack. We will refer to this strategy as "source-word-score" (SWS) search.
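The one-stack-per-word expansion differs only in how links are grouped before expansion; an illustrative sketch under the same assumptions as before (the stack ordering by best-link cost is the SWS variant just described):

# One-stack-per-word sketch: group candidate links by source word so that
# alternatives such as 2-1 and 2-2 are weighed in the same expansion step.
from collections import defaultdict

def word_stack_search(links, cost, beam_size=3):
    stacks = defaultdict(list)
    for link in links:                    # link = (source_pos, target_pos)
        stacks[link[0]].append(link)
    # SWS ordering: sort stacks by the cost of their best (cheapest) link
    ordered = sorted(stacks.values(),
                     key=lambda s: min(cost(frozenset({l})) for l in s))
    hypotheses = [frozenset()]
    for stack in ordered:
        expanded = list(hypotheses)       # option: leave the word unlinked
        expanded += [h | {l} for h in hypotheses for l in stack]
        hypotheses = sorted(expanded, key=cost)[:beam_size]
    return min(hypotheses, key=cost)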
The total number of alignment hypotheses created during search is the same with both baseline and one-stack-per-word strategies, since the number of "possible links" [text missing in the source; the recovered text resumes in Sect. 3.1] system, this feature was substituted by a penalty proportional to model 1 NULL link probability, yielding a gain of 2 points in precision and 1 point in recall.

[Footnote 7: This result may be surprising at first sight. In fact, it makes sense. To take the same example as Moore [11], in our corpus, singletons appearing in each side of the same sentence pair constitute a very significant event. The IBM model 1 probability in this case is actually equal to 1, and the χ² score is also the best possible. Although no word can have a higher LLR score with a singleton than another singleton, the LLR score between more frequent words can be much higher. This makes a difference because the alignment hypotheses are expanded with the most probable links first. Thus compared to χ², the LLR score gives a relatively higher importance to links involving frequent words, which may be stop words, and a relatively lower importance to links involving less frequent words, which often are content words. Both effects produce noisier alignments.]

Table 1. Recall (Rs), Precision (Pp) and AER for various types of association scores (for stems) and search strategies. The values shown are the average and standard error (in parentheses) of three feature weights optimisations (from different starting points).

Line                        Rs          Pp          AER
Score used as association feature (baseline search, one iteration)
1  χ²                       62.4 (0.8)  86.7 (1.5)  27.1 (0.1)
2  LLR                      59.4 (0.1)  75.7 (0.5)  33.2 (0.3)
3  IBM1                     65.9 (0.7)  90.3 (1.4)  23.5 (0.3)
4  IBM1+UM                  67.1 (0.3)  92.5 (0.4)  21.9 (0.3)
Source-word-score (SWS) and source-word-position (SWP) searches
5  IBM1+UM, SWS             67.1 (0.2)  93.5 (0.5)  21.6 (0.0)
6  IBM1+UM, SWP             66.3 (0.5)  91.5 (0.4)  22.8 (0.1)
7  IBM1+UM, SWP 2 it.       66.7 (0.5)  93.2 (0.6)  21.9 (0.1)
8  IBM1+UM, SWP 3 it.       67.3 (0.4)  93.2 (0.4)  21.5 (0.1)

3.2 Search

The three beam-search strategies described in Sect. 2 were implemented with dynamic programming and are compared in Table 1 (lines 4, 5 and 6). In the "source-word-position" (SWP) search [text missing in the source] (SWS), in which links to the same words are compared fairly while keeping the idea of introducing the best links first. This strategy gains 1 point in precision over the baseline, without loss in recall.

In lines 1 to 6, only one iteration of the alignment algorithm was run. Lines 7 and 8 show the effect of running two and three iterations of the SWP search. The initial alignment is the best alignment obtained in the previous iteration. After three iterations, the SWP search achieves performance comparable to SWS after one iteration. SWS and baseline search AER results are actually only improved by 0.2 after the second iteration, and not improved by a third iteration.

4 Conclusions

Our results suggest that the log-likelihood ratio is not an adequate word association measure to be used in a discriminative word alignment system. We also observed that even the simplest IBM model probabilities allow a significant improvement of alignment quality with respect to word association measures. Finally, we compared three beam-search strategies. We showed that starting from the empty alignment is not the best choice, and that it is more flexible and accurate to let links to the same word compete together than to introduce them sequentially in the alignment hypotheses.

References

1. Moore, R.C.: A discriminative framework for bilingual word alignment. In: Proc. of Human Language Technology Conference. (2005) 81-88
2. Liu, Y., Liu, Q., Lin, S.: Log-linear models for word alignment. In: Proc. of the 43rd Annual Meeting of the Assoc. for Computational Linguistics. (2005) 459-466
3. Melamed, I.D.: Models of translational equivalence among words. Computational Linguistics 26(2) (2000) 221-249
4. Lambert, P., de Gispert, A., Banchs, R.E., Mariño, J.B.: Guidelines for word alignment evaluation and manual alignment. Language Resources and Evaluation 39(4) (2005) 267-285
5. Och, F.J., Ney, H.: A systematic comparison of various statistical alignment models. Computational Linguistics 29(1) (March 2003) 19-51
6. Cherry, C., Lin, D.: A probability model to improve word alignment. In: Proc. of 41st Annual Meeting of the Assoc. for Computational Linguistics. (2003) 88-95
7. Lambert, P., Banchs, R.E., Crego, J.M.: Discriminative alignment training without annotated data for machine translation. In: Proc. of the Human Language Technology Conference of the NAACL. (2007) 85-88
8. Gale, W.A., Church, K.W.: Identifying word correspondences in parallel texts. In: DARPA Speech and Natural Language Workshop. (1991)
9. Dunning, T.: Accurate methods for the statistics of surprise and coincidence. Computational Linguistics 19(1) (1993) 61-74
10. Brown, P.F., Della Pietra, S.A., Della Pietra, V.J., Mercer, R.L.: The mathematics of statistical machine translation: Parameter estimation. Computational Linguistics 19(2) (1993) 263-311
11. Moore, R.C.: On log-likelihood-ratios and the significance of rare events. In: Proc. of Conf. on Empirical Methods in Natural Language Processing. (2004) 333-340
---
Title: Deriving translation units using small additional corpora
Authors: Carlos A. Henríquez Q., José B. Mariño, Rafael E. Banchs
Venue: EAMT 2011
Forum: https://openreview.net/forum?id=hZzDZQrY4MN
---
---
Title: Where to Sell: Simulating Auctions From Learning Algorithms
Authors: Hamid Nazerzadeh, Renato Paes Leme, Afshin Rostamizadeh, Umar Syed
Venue: EC 2016
Forum: https://openreview.net/forum?id=uQzMEGaKy0q
Abstract: Ad exchange platforms connect online publishers and advertisers and facilitate the sale of billions of impressions every day. We study these environments from the perspective of a publisher who wants to find the profit-maximizing exchange in which to sell his inventory. Ideally, the publisher would run an auction among exchanges. However, this is not usually possible due to practical business considerations. Instead, the publisher must send each impression to only one of the exchanges, along with an asking price. We model the problem as a variation of the multi-armed bandits problem in which exchanges (arms) can behave strategically in order to maximize their own profit. We propose mechanisms that find the best exchange with sub-linear regret and have desirable incentive properties.
---
---
Title: ProofWatch: Watchlist Guidance for Large Theories in E
Authors: Zarathustra Goertzel, Jan Jakubuv, Stephan Schulz, Josef Urban
Venue: ITP 2018
Forum: https://openreview.net/forum?id=X00Gr5qqGQm
Abstract: Watchlist (also hint list) is a mechanism that allows related proofs to guide a proof search for a new conjecture. This mechanism has been used with the Otter and Prover9 theorem provers, both for interactive formalizations and for human-assisted proving of open conjectures in small theories. In this work we explore the use of watchlists in large theories coming from first-order translations of large ITP libraries, aiming at improving hammer-style automation by smarter internal guidance of the ATP systems. In particular, we (i) design watchlist-based clause evaluation heuristics inside the E ATP system, and (ii) develop new proof guiding algorithms that load many previous proofs inside the ATP and focus the proof search using a dynamically updated notion of proof matching. The methods are evaluated on a large set of problems coming from the Mizar library, showing significant improvement of E's standard portfolio of strategies, and also of the previous best set of strategies invented for Mizar by evolutionary methods.
---
---
Title: ProofWatch: Watchlist Guidance for Large Theories in E
Authors: Zarathustra Goertzel, Jan Jakubuv, Stephan Schulz, Josef Urban
Venue: CoRR 2018 (arXiv:1802.04007v2 [cs.AI], 19 May 2018)
PDF: http://arxiv.org/pdf/1802.04007v2
Forum: https://openreview.net/forum?id=5S_3RwSMh-l

Extracted paper content:

ProofWatch: Watchlist Guidance for Large Theories in E

Zarathustra Goertzel (1), Jan Jakubův (1), Stephan Schulz (2), and Josef Urban (1)
(1) Czech Technical University in Prague
(2) DHBW Stuttgart

Abstract. Watchlist (also hint list) is a mechanism that allows related proofs to guide a proof search for a new conjecture. This mechanism has been used with the Otter and Prover9 theorem provers, both for interactive formalizations and for human-assisted proving of open conjectures in small theories. In this work we explore the use of watchlists in large theories coming from first-order translations of large ITP libraries, aiming at improving hammer-style automation by smarter internal guidance of the ATP systems. In particular, we (i) design watchlist-based clause evaluation heuristics inside the E ATP system, and (ii) develop new proof guiding algorithms that load many previous proofs inside the ATP and focus the proof search using a dynamically updated notion of proof matching. The methods are evaluated on a large set of problems coming from the Mizar library, showing significant improvement of E's standard portfolio of strategies, and also of the previous best set of strategies invented for Mizar by evolutionary methods.

[Footnote: Supported by the AI4REASON ERC Consolidator grant number 649043, and by the Czech project AI&Reasoning CZ.02.1.01/0.0/0.0/15_003/0000466 and the European Regional Development Fund.]

1 Introduction: Hammers, Learning and Watchlists

Hammer-style automation tools connecting interactive theorem provers (ITPs) with automated theorem provers (ATPs) have recently led to a significant speed-up for formalization tasks [5]. An important component of such tools is premise selection [1]: choosing a small number of the most relevant facts that are given to the ATPs. Premise selection methods based on machine learning from many proofs available in the ITP libraries typically outperform manually specified heuristics [1,17,19,7,4,2].
Given the performance of such ATP-external guidance methods, learning-based internal proof search guidance methods have started to be explored, both for ATPs [36,18,15,23,8] and also in the context of tactical ITPs [10,12].

In this work we develop learning-based internal proof guidance methods for the E [30] ATP system and evaluate them on the large Mizar Mathematical Library [11]. The methods are based on the watchlist (also hint list) technique developed by Veroff [37], focusing proof search towards lemmas (hints) that were useful in related proofs. Watchlists have proved essential in the AIM project [21] done with Prover9 [25] for obtaining very long and advanced proofs of open conjectures. Problems in large ITP libraries however differ from one another much more than the AIM problems, making it more likely for unrelated watchlist lemmas to mislead the proof search. Also, Prover9 lacks a number of large-theory mechanisms and strategies developed recently for E [16,13,15].

Therefore, we first design watchlist-based clause evaluation heuristics for E that can be combined with other E strategies. Second, we complement the internal watchlist guidance by using external statistical machine learning to pre-select smaller numbers of watchlist clauses relevant for the current problem. Finally, we use the watchlist mechanism to develop new proof guiding algorithms that load many previous proofs inside the ATP and focus the search using a dynamically updated heuristic representation of proof search state based on matching the previous proofs.

The rest of the paper is structured as follows. Section 2 briefly summarizes the work of saturation-style ATPs such as E. Section 3 discusses heuristic representation of search state and its importance for learning-based proof guidance. We propose an abstract vectorial representation expressing similarity to other proofs as a suitable evolving characterization of saturation proof searches. We also propose a concrete implementation based on proof completion ratios tracked by the watchlist mechanism. Section 4 describes the standard (static) watchlist mechanism implemented in E and Section 5 introduces the new dynamic watchlist mechanisms and their use for guiding the proof search. Section 6 evaluates the static and dynamic watchlist guidance combined with learning-based pre-selection on the Mizar library. Section 7 shows several examples of nontrivial proofs obtained by the new methods, and Section 8 discusses related work and possible extensions.

2 Proof Search in Saturating First-Order Provers

The state of the art in first-order theorem proving is a saturating prover based on a combination of resolution/paramodulation and rewriting, usually implementing a variant of the superposition calculus [3]. In this model, the proof state is represented as a set of first-order clauses (created from the axioms and the negated conjecture), and the system systematically adds logical consequences to the state, trying to derive the empty clause and hence an explicit contradiction.

All current saturating first-order provers are based on variants of the given-clause algorithm. In this algorithm, the proof state is split into two subsets of clauses, the processed clauses P (initially empty) and the unprocessed clauses U.
On each iteration of the algorithm, the prover picks one unprocessed clause g (the so-called given clause), performs all inferences which are possible with g and all clauses in P as premises, and then moves g into P. The newly generated consequences are added to U. This maintains the core invariant that all inferences between clauses in P have been performed. Provers differ in how they integrate simplification and redundancy into the system, but all enforce the invariant that P is maximally simplified (by first simplifying g with clauses in P, then back-simplifying P with g) and that P contains neither tautologies nor subsumed clauses.

The core choice point of the given-clause algorithm is the selection of the next clause to process. If theoretical completeness is desired, this has to be fair, in the sense that no clause is delayed forever. In practice, clauses are ranked using one or more heuristic evaluation functions, and are picked in order of increasing evaluation (i.e. small values are good). The most frequent heuristics are based on symbol counting, i.e., the evaluation is the number of symbol occurrences in the clause, possibly weighted for different symbols or symbol types. Most provers also support interleaving a symbol-counting heuristic with a first-in-first-out (FIFO) heuristic. E supports the dynamic specification of an arbitrary number of differently parameterized priority queues that are processed in weighted round-robin fashion via a small domain-specific language for heuristics.

Previous work [28,31] has both shown that the choice of given clauses is critical for the success rate of a prover, but also that existing heuristics are still quite bad, i.e. they select a large majority of clauses not useful for a given proof. Positively formulated, there still is a huge potential for improvement.
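For illustration, the given-clause loop can be sketched as follows (schematic Python only, not E's implementation; real provers interleave simplification, indexing and several evaluation queues, and infer and evaluation are placeholders):

# Schematic given-clause loop. `evaluation` ranks clauses (small = good);
# `infer(g, processed)` returns all consequences of g with premises in P.
EMPTY_CLAUSE = "FALSE"   # stand-in for the derived contradiction

def given_clause_loop(axioms, negated_conjecture, infer, evaluation):
    processed = set()                                   # P
    unprocessed = set(axioms) | {negated_conjecture}    # U
    while unprocessed:
        g = min(unprocessed, key=evaluation)   # the core choice point
        unprocessed.remove(g)
        if g == EMPTY_CLAUSE:                  # explicit contradiction
            return "proof found"
        unprocessed |= set(infer(g, processed))
        processed.add(g)   # invariant: all P-internal inferences done
    return "saturated without finding a proof"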
3 Proof Search State in Learning Based Guidance

A good representation of the current state is crucial for learning-based guidance. This is quite clear in theorem proving and famously so in Go and Chess [32,33]. For example, in the TacticToe system [10] proofs are composed from pre-programmed HOL4 [34] tactics that are chosen by statistical learning based on similarity of the evolving goal state to the goal states from related proofs. Similarly, in the learning versions of leanCoP [26], (FE)MaLeCoP [36,18], the tableau extension steps are guided by a trained learner using similarity of the evolving tableau (the ATP proof search state) to many other tableaux from related proofs.

Such an intuitive and compact notion of proof search state is however hard to get when working with today's high-performance saturation-style ATPs such as E [30] and Vampire [22]. The above definition of saturation-style proof state (Section 2) as either one or two (processed/unprocessed) large sets of clauses is very unfocused. Existing learning-based guiding methods for E [15,23] practically ignore this. Instead, they use only the original conjecture and its features for selecting the relevant given clauses throughout the whole proof search.

This is obviously unsatisfactory, both when compared to the evolving search state in the case of tableau and tactical proving, and also when compared to the way humans select the next steps when they search for proofs. The proof search state in our mind is certainly an evolving concept based on the search done so far, not a fixed set of features extracted just from the conjecture.

3.1 Proof Search State Representation for Guiding Saturation

One of the motivations for the work presented here is to produce an intuitive, compact and evolving heuristic representation of proof search state in the context of learning-guided saturation proving. As usual, it should be a vector of (real-valued) features that are either manually designed or learned. In a high-level way, our proposed representation is a vector expressing an abstract similarity of the search state to (possibly many) previous related proofs. This can be implemented in different ways, using both statistical and symbolic methods and their combinations. An example and motivation come again from the work of Veroff, where a search is considered promising when the given clauses frequently match hints. The gaps between the hint matchings may correspond to the more brute-force bridges between the different proof ideas expressed by the hints.

Our first practical implementation, introduced in Section 5, is to load, upon search initialization, N related proofs P_i, and for each P_i keep track of the ratio of the clauses from P_i that have already been subsumed during the search. The subsumption checking uses E's watchlist mechanism (Section 4). The N-long vector p of such proof completion ratios is our heuristic representation of the proof search state, which is both compact and typically evolving, making it suitable for both hard-coded and learned clause selection heuristics.

In this work we start with fast hard-coded watchlist-style heuristics for focusing inferences on clauses that progress the more finished proofs (Section 5). However, training e.g. a statistical ENIGMA-style [15] clause evaluation model by adding p to the currently used ENIGMA features is a straightforward extension.
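Read literally, this state vector is just the per-proof completion ratio; an illustrative sketch, using two of the matched/total counts reported in Table 10 below as example values:

# Heuristic proof-state vector: one completion ratio per loaded proof.
# matched[i] = clauses of proof i already subsumed, sizes[i] = |proof i|.
def proof_state_vector(matched, sizes):
    return [m / s for m, s in zip(matched, sizes)]

# e.g. two loaded proofs with 56/77 and 42/96 clauses matched:
# proof_state_vector([56, 42], [77, 96]) -> [0.727..., 0.4375]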
4 Static Watchlist Guidance and its Implementation in E

E originally implemented a watchlist mechanism as a means to force direct, constructive proofs in first-order logic. For this application, the watchlist contains a number of goal clauses (corresponding to the hypotheses to be proven), and all newly generated and processed clauses are checked against the watchlist. If one of the watchlist clauses is subsumed by a new clause, the former is removed from the watchlist. The proof search is complete once all clauses from the watchlist have been removed. In contrast to the normal proof by contradiction, this mechanism is not complete. However, it is surprisingly effective in practice, and it produces a proof by forward reasoning.

It was quickly noted that the basic mechanism of the watchlist can also be used to implement a mechanism similar to the hints successfully used to guide Otter [24] (and its successor Prover9 [25]) in a semi-interactive manner [37]. Hints in this sense are intermediate results or lemmas expected to be useful in a proof. However, they are not provided as part of the logical premises, but have to be derived during the proof search. While the hints are specified when the prover is started, they are only used to guide the proof search: if a clause matches a hint, it is prioritized for processing. If all clauses needed for a proof are provided as hints, in theory the prover can be guided to prove a theorem without any search, i.e. it can replay a previous proof. A more general idea, explored in this paper, is to fill the watchlist with a large number of clauses useful in proofs of similar problems.

In E, the watchlist is loaded on start-up, and is stored in a feature vector index [29] that allows for efficient retrieval of subsumed (and subsuming) clauses. By default, watchlist clauses are simplified in the same way as processed clauses, i.e. they are kept in normal form with respect to clauses in P. This increases the chance that a new clause (which is always simplified) can match a similar watchlist clause. If used to control the proof search, subsumed clauses can optionally remain on the watchlist.

We have extended E's domain-specific language for search heuristics with two priority functions to access information about the relationship of clauses to the watchlist: the function PreferWatchlist gives higher rank to clauses that subsume at least one watchlist clause, and the dual function DeferWatchlist ranks them lower. Using the first, we have also defined four built-in heuristics that preferably process watchlist clauses. These include a pure watchlist heuristic, a simple interleaved watchlist function (picking 10 out of every eleven clauses from the watchlist, the last using FIFO), and a modification of a strong heuristic obtained from a genetic algorithm [27] that interleaves several different evaluation schemes and was modified to prefer watchlist clauses in two of its four sub-evaluation functions.

5 Dynamic Watchlist Guidance

In addition to the above mentioned static watchlist guidance, we propose and experiment with an alternative: dynamic watchlist guidance. With dynamic watchlist guidance, several watchlists, as opposed to a single watchlist, are loaded on start-up. Separate watchlists are supposed to group clauses which are more likely to appear together in a single proof. The easiest way to produce watchlists with this property is to collect previously proved problems and use their proofs as watchlists. This is our current implementation, i.e., each watchlist corresponds to a previous proof. During a proof search, we maintain for each watchlist its completion status, i.e. the number of clauses that were already encountered. The main idea behind our dynamic watchlist guidance is to prefer clauses which appear on watchlists that are closer to completion. Since watchlists now exactly correspond to previous refutational proofs, completion of any watchlist implies that the current proof search is finished.

5.1 Watchlist Proof Progress

Let watchlists W_1, ..., W_n be given for a proof search. For each watchlist W_i we keep a watchlist progress counter, denoted progress(W_i), which is initially set to 0. Whenever a clause C is generated during the proof search, we have to check whether C subsumes some clause from some watchlist W_i. When C subsumes a clause from W_i, we increase progress(W_i) by 1. The subsumed clause from W_i is then marked as encountered, and it is not considered in future watchlist subsumption checks. [Footnote 3: Alternatively, the subsumed watchlist clause D ∈ W_i can be considered for future subsumption checks, but the watchlist progress counter progress(W_i) should not be increased when D is subsumed again. This is because we want the progress counter to represent the number of different clauses from W_i encountered so far.] Note that a single generated clause C can subsume several clauses from one or more watchlists, hence several progress counters might be increased multiple times as a result of generating C.

5.2 Standard Dynamic Watchlist Relevance

The easiest way to use progress counters to guide given clause selection is to assign the (standard) dynamic watchlist relevance to each generated clause C, denoted relevance_0(C), as follows.
Whenever C is generated, we check it against all the watchlists for subsumption and we update the watchlist progress counters. Any clause C which does not subsume any watchlist clause is given relevance_0(C) = 0. When C subsumes some watchlist clause, its relevance is the maximum watchlist completion ratio over all the matched watchlists. Formally, let us write C ⊑ W_i when clause C subsumes some clause from watchlist W_i. For a clause C matching at least one watchlist, its relevance is computed as follows:

  relevance_0(C) = \max_{W \in \{W_i : C \sqsubseteq W_i\}} \frac{progress(W)}{|W|}

The assumption is that a watchlist W that is matched more is more relevant to the current proof search. In our current implementation, the relevance is computed at the time of generation of C and it is not updated afterwards. As future work, we propose to also update the relevance of all generated but not yet processed clauses from time to time, in order to reflect updates of the watchlist progress counters. Note that this is expensive, as the number of generated clauses is typically high. Suitable indexing could be used to lower this cost or even to do the update immediately just for the affected clauses.

To use the watchlist relevance in E, we extend E's domain-specific language for search heuristics with two priority functions, PreferWatchlistRelevant and DeferWatchlistRelevant. The first priority function ranks higher the clauses with higher watchlist relevance [Footnote 4: Technically, E's priority function returns an integer priority, and clauses with smaller values are preferred. Hence we compute the priority as 1000 * (1 - relevance_0(C)).], and the other function does the opposite. These priority functions can be used to build E's heuristics just like in the case of the static watchlist guidance. As a result, we can instruct E to process watchlist-relevant clauses in advance.
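A direct reading of Sections 5.1 and 5.2 as code (an illustrative sketch, not E's implementation; subsumption is abstracted into a subsumes predicate supplied by the caller):

# Sketch of watchlist progress tracking and standard dynamic relevance.
# watchlists: list of clause lists; subsumes(c, d): does c subsume d?
class DynamicWatchlists:
    def __init__(self, watchlists, subsumes):
        self.watchlists = [list(w) for w in watchlists]
        self.sizes = [len(w) for w in watchlists]
        self.progress = [0] * len(watchlists)
        self.subsumes = subsumes

    def relevance0(self, clause):
        ratios = []
        for i, w in enumerate(self.watchlists):
            hit = [d for d in w if self.subsumes(clause, d)]
            if hit:
                self.progress[i] += len(hit)
                for d in hit:           # mark as encountered: drop from
                    w.remove(d)         # future subsumption checks
                ratios.append(self.progress[i] / self.sizes[i])
        return max(ratios, default=0.0)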
5.3 Inherited Dynamic Watchlist Relevance

The standard watchlist relevance prioritizes only clauses subsuming watchlist clauses, but it behaves indifferently with respect to other clauses. In order to provide some guidance even for clauses which do not subsume any watchlist clause, we can examine the watchlist relevance of the parents of each generated clause, and prioritize clauses with watchlist-relevant parents. Let parents(C) denote the set of previously processed clauses from which C has been derived. Inherited dynamic watchlist relevance, denoted relevance_1, is a combination of the standard dynamic relevance with the average of the parents' relevances multiplied by a decay factor δ < 1:

  relevance_1(C) = relevance_0(C) + \delta \cdot \operatorname{avg}_{D \in parents(C)} relevance_1(D)

Clearly, the inherited relevance equals the standard relevance for the initial clauses with no parents. The decay factor δ determines the importance of the parents' watchlist relevances. [Footnote 5: In our experiments, we use δ = 0.1.] Note that the inherited relevances of parents(C) are already precomputed at the time of generating C, hence no recursive computation is necessary.

With the above relevance_1 we compute the average of the parents' inherited relevances, hence the inherited watchlist relevance accumulates the relevance of all the ancestors. As a result, relevance_1(C) is greater than 0 if and only if C has some ancestor which subsumed a watchlist clause at some point. This might have the undesirable effect that clauses unrelated to the watchlist are completely ignored during the proof search. In practice, however, it seems important to consider also watchlist-unrelated clauses to some degree, in order to prove new conjectures which do not appear on the input watchlist. Hence we introduce two threshold parameters α and β which reset the relevance to 0 as follows. Let length(C) denote the length of clause C, counting occurrences of symbols in C.

  relevance_2(C) = 0               if relevance_1(C) < α and relevance_1(C) / length(C) < β
  relevance_2(C) = relevance_1(C)  otherwise

Parameter α is a threshold on the inherited watchlist relevance, while β combines the relevance with the clause length. [Footnote 6: In our experiments, we use α = 0.03 and β = 0.009. These values have been found useful by a small grid search over a random sample of 500 problems.] As a result, shorter watchlist-unrelated clauses are preferred to longer (distantly) watchlist-related clauses.
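Continuing the sketch, the inherited and thresholded variants, with the experimentally chosen values δ = 0.1, α = 0.03 and β = 0.009 from Footnotes 5 and 6 (again illustrative code only):

# Inherited relevance: the parents' relevances are already stored when a
# clause is generated, so no recursion is needed (Section 5.3).
DELTA, ALPHA, BETA = 0.1, 0.03, 0.009   # values used in the paper

def relevance1(rel0, parent_rel1):
    # rel0: standard relevance of the clause; parent_rel1: list of the
    # previously computed inherited relevances of its parents
    avg = sum(parent_rel1) / len(parent_rel1) if parent_rel1 else 0.0
    return rel0 + DELTA * avg

def relevance2(rel1, clause_length):
    # reset to 0 for clauses that are only distantly watchlist-related,
    # unless they are short (clause_length counts symbol occurrences)
    if rel1 < ALPHA and rel1 / clause_length < BETA:
        return 0.0
    return rel1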
6 Experiments with Watchlist Guidance

For our experiments we construct watchlists from the proofs found by E on a benchmark of 57897 Mizar40 [19] problems in the MPTP dataset [35]. [Footnote 7: Precisely, we have used the small (bushy, re-proving) versions, but without ATP minimization. They can be found at http://grid01.ciirc.cvut.cz/~mptp/7.13.01_4.181.1147/MPTP2/problems_small_consist.tar.gz] [Footnote 8: Experimental results and code can be found at https://github.com/ai4reason/eprover-data/tree/master/ITP-18.] These initial proofs were found by an evolutionarily optimized [14] ensemble of 32 E strategies, each run for 5 s. These are our baseline strategies. Due to limited computational resources, we do most of the experiments with the top 5 strategies that (greedily) cover most solutions (top 5 greedy cover). These are strategies number 2, 8, 9, 26 and 28, henceforth called A, B, C, D, E. In 5 s (in parallel) they together solve 21122 problems. We also evaluate these five strategies in 10 seconds, jointly solving 21670 problems. The 21122 proofs yield over 100000 unique proof clauses that can be used for watchlist-based guidance in our experiments. We also use smaller datasets randomly sampled from the full set of 57897 problems to be able to explore more methods. All problems are run on the same hardware [Footnote 9: Intel(R) Xeon(R) CPU E5-2698 v3 @ 2.30GHz with 256G RAM.] and with the same memory limits.

Each E strategy is specified as a frequency-weighted combination of parameterized clause evaluation functions (CEFs) combined with a selection of inference rules. Below we show a simplified example strategy specifying the term ordering KBO and combining (with weights 2 and 4) two CEFs made up of the weight functions Clauseweight and FIFOWeight and the priority functions DeferSOS and PreferWatchlist:

-tKBO -H(2*Clauseweight(DeferSoS,20,9999,4),4*FIFOWeight(PreferWatchlist))

6.1 Watchlist Selection Methods

We have experimented with several methods for creation of static and dynamic watchlists. Typically we use only the proofs found by a particular baseline strategy to construct the watchlists used for testing the guided version of that strategy. Using all 100000+ proof clauses as a watchlist slows E down to 6 given clauses per second. This is comparable to the speed of Prover9 with similarly large watchlists, but there are indexing methods that could speed this up. We have run several smaller tests, but do not include this method in the evaluation due to limited computational resources. Instead, we select a smaller set of clauses. The methods are as follows:

(art) Use all proof clauses from theorems in the problem's Mizar article. [Footnote 10: Excluding the current theorem.] Such watchlist sizes range from 0 to 4000, which does not cause any significant slowdown of E.

(freq) Use high-frequency proof clauses for static watchlists, i.e., clauses that appear in many proofs.

(kNN-st) Use k-nearest-neighbor (k-NN) learning to suggest useful static watchlists for each problem, based on symbol and term-based features [20] of the conjecture. This is very similar to the standard use of k-NN and other learners for premise selection. In more detail, we use symbols, walks of length 2 on formula trees and common subterms (with variables and skolem symbols unified). Each proof is turned into a multi-label training example, where the labels are the (serially numbered) clauses used in the proof, and the features are extracted from the conjecture.

(kNN-dyn) Use k-NN in a similar way to suggest the most related proofs for dynamic watchlists (see the sketch after this list). This is done in two iterations.
(i) In the first iteration, only the conjecture-based similarity is used to select related problems and their proofs.
(ii) The second iteration then uses data mined from the proofs obtained with dynamic guidance in the first iteration. From each such proof P we create a training example associating P's conjecture features with the names of the proofs that matched (i.e., guided the inference of) the clauses needed in P. On this dataset we again train a k-NN learner, which recommends the most useful related proofs for guiding a particular conjecture.
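An illustrative sketch of the k-NN proof suggestion step, reducing similarity to conjecture feature overlap (the actual features are the symbols, term walks and subterms described above; all names here are schematic):

# Toy k-NN proof suggestion: rank solved problems by conjecture feature
# overlap and return the proofs of the k nearest ones as watchlists.
def suggest_watchlists(conjecture_features, solved, k=32):
    # solved: list of (features: set, proof_clauses: list) pairs
    def similarity(entry):
        features, _ = entry
        return len(conjecture_features & features)
    nearest = sorted(solved, key=similarity, reverse=True)[:k]
    return [proof for _, proof in nearest]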
6.2 Using Watchlists in E Strategies

As described in Section 4, watchlist subsumption defines the PreferWatchlist priority function that prioritizes clauses that subsume at least one watchlist clause. Below we describe several ways to use this priority function and the newly defined dynamic PreferWatchlistRelevant priority function and its relevance-inheriting modifications. Each of them can additionally take the "no-remove" option, to keep subsumed watchlist clauses in the watchlist, allowing repeated matching by different clauses. Preliminary testing has shown that just adding a single watchlist-based clause evaluation function (CEF) to the baseline CEFs [Footnote 11: Specifically, we tried adding Defaultweight(PreferWatchlist) and ConjectureRelativeSymbolWeight(PreferWatchlist) with frequencies 1, 2, 5, 10, 20 times that of the rest of the CEFs in the strategy.] is not as good as the methods defined below. In the rest of the paper we provide short names for the methods, such as prefA (baseline strategy A modified by the pref method described below).

1. evo: the default heuristic strategy (Section 4) evolved (genetically [27]) for static watchlist use.
2. pref: replace all priority functions in a baseline strategy with the PreferWatchlist priority function. The resulting strategies look as follows:

-H(2*Clauseweight(PreferWatchlist,20,9999,4),4*FIFOWeight(PreferWatchlist))

3. const: replace all priority functions in a baseline strategy with ConstPrio, which assigns the same priority to all clauses, so all ranking is done by the weight functions alone.
4. uwl: always prefer clauses that match the watchlist, but use the baseline strategy's priority function otherwise. [Footnote 12: uwl is implemented in E's source code as an option.]
5. ska: modify watchlist subsumption in E to treat all skolem symbols of the same arity as equal, thus widening the watchlist guidance. This can be used with any strategy. In this paper it is used with pref.
6. dyn: replace all priority functions in a baseline strategy with PreferWatchlistRelevant, which dynamically weights watchlist clauses (Section 5.2).
7. dyndec: add the relevance inheritance mechanisms to dyn (Section 5.3).

6.3 Evaluation

First we measure the slowdown caused by larger static watchlists on the best baseline strategy and a random sample of 10000 problems. The results are shown in Table 1. We see that the speed significantly degrades with watchlists of size 10000, while watchlists of around size 500 incur only a small performance penalty.

Table 1. Tests of the watchlist size influence (ordered by frequency) on a random sample of 10000 problems using the "no-remove" option and one static watchlist with strategy prefA. PPS is the average number of processed clauses per second, a measure of E's speed.

Size     10    100   256   512   1000  10000
proved   3275  3275  3287  3283  3248  2912
PPS      8935  9528  8661  7288  4807  575

Table 2 shows the 10 s evaluation of several static and dynamic methods on a random sample of 5000 problems using article-based watchlists (method art in Section 6.1). For comparison, E's auto strategy proves 1350 of the problems in 10 s and its auto-schedule proves 1629. Given 50 seconds, the auto-schedule proves 1744 problems, compared to our top 5 cover's 1964.

Table 2. Article-based watchlist benchmark. A top 5 greedy cover proves 1964 problems.

Strategy  baseline  const  pref  ska   dyn   evo   uwl
A         1238      1493   1503  1510  1500  1303  1247
B         1255      1296   1315  1330  1316  1300  1277
C         1075      1166   1205  1183  1201  1068  1097
D         1102      1133   1176  1190  1175  1330  1132
E         1138      1141   1141  1153  1139  1070  1139
total     1853      1910   1931  1933  1922  1659  1868

The first surprising result is that const significantly outperforms the baseline. This indicates that the old-style simple E priority functions may do more harm than good if they are allowed to override the more recent and sophisticated weight functions. The ska strategy performs best here and a variety of strategies provide better coverage. It is interesting to note that ska and pref overlap only on 1893 problems. The original evo strategy performs well, but lacks diversity.
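The "top N greedy cover" used throughout the evaluation corresponds to the standard greedy set-cover step, repeatedly picking the strategy that adds the most newly solved problems; a minimal illustrative sketch:

# Greedy cover: pick strategies that maximize newly solved problems.
# solved_by maps a strategy name to the set of problems it solves.
def greedy_cover(solved_by, n=5):
    covered, picked = set(), []
    for _ in range(n):
        remaining = [s for s in solved_by if s not in picked]
        best = max(remaining, key=lambda s: len(solved_by[s] - covered))
        picked.append(best)
        covered |= solved_by[best]
    return picked, covered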
Table 3 briefly evaluates k-NN selection of watchlist clauses (method kNN-st in Section 6.1) on a single strategy, prefA.

Table 3. Evaluation of kNN-st on prefA.

Watchlist size  16    64    256   1024  2048
Proved          1518  1531  1528  1532  1520

Next we use k-NN to suggest watchlist proofs [Footnote 13: All clauses in suggested proofs are used.] (method kNN-dyn.i) for pref and dyn. Table 4 evaluates the influence of the number of related proofs loaded for the dynamic strategies. Interestingly, pref outperforms dyn almost everywhere, but dyn's ensemble of strategies A-E generally performs best and the top 5 cover is better. We conclude that dyn's dynamic relevance weighting allows the strategies to diversify more.

Table 4. k-NN proof recommendation watchlists (kNN-dyn.i) for dyn and pref. Size is the number of proofs, averaging 40 clauses per proof. A top 5 greedy cover of dyn proves 1972 and pref proves 1959.

size  dynA  dynB  dynC  dynD  dynE  total
4     1531  1352  1235  1194  1165  1957
8     1543  1366  1253  1188  1170  1956
16    1529  1357  1224  1218  1185  1951
32    1546  1373  1240  1218  1188  1962
64    1535  1376  1216  1215  1166  1935
128   1506  1351  1195  1214  1147  1907
1024  1108  963   710   943   765   1404

size  prefA  prefB  prefC  prefD  prefE  total
4     1539   1369   1210   1220   1159   1944
8     1554   1385   1219   1240   1168   1941
16    1572   1405   1225   1254   1180   1952
32    1568   1412   1231   1271   1190   1958
64    1567   1402   1228   1262   1172   1952
128   1552   1388   1210   1248   1160   1934
1024  1195   1061   791    991    806    1501

Table 5 evaluates the top 5 greedy cover from Table 4 on the full Mizar dataset, already showing significant improvement over the 21670 proofs produced by the 5 baseline strategies. Based on proof data from a full run of the top 5 greedy cover in Table 5, new k-NN proof suggestions were made (method kNN-dyn.ii) and dyn's grid search re-run; see Table 6 and Table 7 for the k-NN round 2 results.

Table 5. k-NN round 1 greedy cover on the full dataset and proofs added by each successive strategy, for a total of 22579. dynA 32 means strategy dynA using 32 proof watchlists.

        dynA 32  dynC 8  dynD 16  dynE 4  dynB 64
added   17964    2531    1024     760     282
total   17964    14014   14294    13449   16175

Table 6. Problems proved by round 2 k-NN proof suggestions (kNN-dyn.ii). The top 5 greedy cover proves 1981 problems. dyn2A means dynA run on the 2nd iteration of k-NN suggestions.

size  dyn2A  dyn2B  dyn2C  dyn2D  dyn2E  total  round 1 total
4     1539   1368   1235   1209   1179   1961   1957
8     1554   1376   1253   1217   1183   1971   1956
16    1565   1382   1256   1221   1181   1972   1951
32    1557   1383   1252   1227   1182   1968   1962
64    1545   1385   1244   1222   1171   1963   1935
128   1531   1374   1221   1227   1171   1941   1907

Table 7. k-NN round 2 greedy cover on the full dataset and proofs added by each successive strategy, for a total of 22996.

        dyn2A 16  dyn2C 16  dyn2D 32  dyn2E 4  dyn2B 4
total   18583     14486     14720     13532    16244
added   18583     2553      1007      599      254

We also test the relevance-inheriting dynamic watchlist feature (dyndec), primarily to determine whether different proofs can be found. The results are shown in Table 8. This version adds 8 problems to the top 5 greedy cover of all the strategies run on the 5000-problem dataset, making it useful in a schedule despite lower performance alone. Table 9 shows this greedy cover, and then its evaluation on the full dataset. The 23192 problems proved by our new greedy cover are a 7% improvement over the top 5 baseline strategies.

Table 8. Problems proved by round 2 k-NN proof suggestions with dyndec. The top 5 greedy cover proves 1898 problems.

size  dyndec2A  dyndec2B  dyndec2C  dyndec2D  dyndec2E  total
4     1432      1354      1184      1203      1152      1885
16    1384      1316      1176      1221      1140      1846
32    1381      1309      1157      1209      1133      1820
128   1326      1295      1127      1172      1082      1769

Table 9. Top: cumulative sum of the 5000 test set greedy cover. The k-NN based dynamic watchlist methods dominate, improving by 2.1% over the baseline and article-based watchlist strategy greedy cover of 1964 (Table 2). Bottom: the greedy cover run on the full dataset, cumulative and total proved.

               total  dyn2A 16  dyn2C 16  dyndec2D 16  dyn2E 4  dyndec2A 128
added (5000)   2007   1565      230       97           68       47
added (full)   23192  18583     2553      1050         584      422
proved (full)  23192  18583     14486     14514        13532    15916

7 Examples

The Mizar theorem YELLOW5:36 [Footnote 14: http://grid01.ciirc.cvut.cz/~mptp/7.13.01_4.181.1147/html/yellow_5#T36] states De Morgan's laws for Boolean lattices:

theorem Th36: :: YELLOW_5:36
for L being non empty Boolean RelStr for a, b being Element of L
holds ( 'not' (a "∨" b) = ('not' a) "∧" ('not' b)
& 'not' (a "∧" b) = ('not' a) "∨" ('not' b) )

Using 32 related proofs results in 2220 clauses placed on the watchlists. The dynamically guided proof search takes 5218 (nontrivial) given clause loops done in 2 s, and the resulting ATP proof is 436 inferences long. There are 194 given clauses that match the watchlist during the proof search, and 120 (61.8%) of
them end up being part of the proof, i.e., 27.5% of the proof consists of steps guided by the watchlist mechanism. The proof search using the same settings but without the watchlist takes 6550 nontrivial given clause loops (25.5% more). The proof of the theorem WAYBEL1:85 [Footnote 15: http://grid01.ciirc.cvut.cz/~mptp/7.13.01_4.181.1147/html/waybel_1#T85] is considerably used for this guidance:

theorem :: WAYBEL_1:85
for H being non empty lower-bounded RelStr st H is Heyting holds
for a, b being Element of H holds 'not' (a "∧" b) >= ('not' a) "∨" ('not' b)

Note that this proof is done under the weaker assumptions of H being lower bounded and Heyting, rather than being Boolean. Yet, 62 (80.5%) of the 77 clauses from the proof of WAYBEL1:85 are eventually matched during the proof search, and 38 (49.4%) of these 77 clauses are used in the proof of YELLOW5:36. In Table 10 we show the final state of proof progress for the 32 loaded proofs after the last non-empty clause matched the watchlist. For each we show both the computed ratio and the number of matched and all clauses.

Table 10. Final state of the proof progress for the (serially numbered) 32 proofs loaded to guide the proof of YELLOW5:36. We show the computed ratio and the number of matched and all clauses.

 0  0.438  42/96     1  0.727  56/77     2  0.865  45/52     3  0.360   9/25
 4  0.750  51/68     5  0.259   7/27     6  0.805  62/77     7  0.302  73/242
 8  0.652  15/23     9  0.286   8/28    10  0.259   7/27    11  0.338  24/71
12  0.680  17/25    13  0.509  27/53    14  0.357  10/28    15  0.568  25/44
16  0.703  52/74    17  0.029   8/272   18  0.379  33/87    19  0.424  14/33
20  0.471  16/34    21  0.323  20/62    22  0.333   7/21    23  0.520  26/50
24  0.524  22/42    25  0.523  45/86    26  0.462   6/13    27  0.370  20/54
28  0.411  30/73    29  0.364  20/55    30  0.571  16/28    31  0.357  10/28

An example of a theorem that can be proved in 1.2 s with guidance but cannot be proved in 10 s with any unguided method is the following theorem BOOLEALG:62 [Footnote 16: http://grid01.ciirc.cvut.cz/~mptp/7.13.01_4.181.1147/html/boolealg#T62] about the symmetric difference in Boolean lattices:

for L being B_Lattice
for X, Y being Element of L holds (X \+\ Y) \+\ (X "∧" Y) = X "∨" Y

Using 32 related proofs results in 2768 clauses placed on the watchlists. The proof search then takes 4748 (nontrivial) given clause loops, and the watchlist-guided ATP proof is 633 inferences long. There are 613 given clauses that match the watchlist during the proof search, and 266 (43.4%) of them end up being part of the proof, i.e., 42% of the proof consists of steps guided by the watchlist
Among the theorems whose proofs are most useful for the guidance are the following theorems LATTICES:23^17, BOOLEALG:33^18 and BOOLEALG:54^19 on Boolean lattices:

theorem Th23: :: LATTICES:23
for L being B_Lattice
for a, b being Element of L holds (a "∧" b)` = a` "∨" b`

theorem Th33: :: BOOLEALG:33
for L being B_Lattice for X, Y being Element of L holds X \ (X "∧" Y) = X \ Y

theorem :: BOOLEALG:54
for L being B_Lattice for X, Y being Element of L
st X` "∨" Y` = X "∨" Y & X misses X` & Y misses Y`
holds X = Y` & Y = X`

Finally, we show several theorems^20-23 with nontrivial Mizar proofs and relatively long ATP proofs obtained with significant guidance. These theorems cannot be proved by any other method used in this work.

theorem :: BOOLEALG:68
for L being B_Lattice for X, Y being Element of L
holds (X \+\ Y)` = (X "∧" Y) "∨" ((X`) "∧" (Y`))

theorem :: CLOSURE1:21
for I being set for M being ManySortedSet of I
for P, R being MSSetOp of M st P is monotonic & R is monotonic
holds P ** R is monotonic

theorem :: BCIALG_4:44
for X being commutative BCK-Algebra_with_Condition(S)
for a, b, c being Element of X st Condition_S (a,b) c= Initial_section c holds
for x being Element of Condition_S (a,b) holds x <= c \ ((c \ a) \ b)

theorem :: XXREAL_3:67
for f, g being ext-real number holds (f * g)" = (f") * (g")

17 http://grid01.ciirc.cvut.cz/~mptp/7.13.01_4.181.1147/html/lattices#T23
18 http://grid01.ciirc.cvut.cz/~mptp/7.13.01_4.181.1147/html/boolealg#T33
19 http://grid01.ciirc.cvut.cz/~mptp/7.13.01_4.181.1147/html/boolealg#T54
20 http://grid01.ciirc.cvut.cz/~mptp/7.13.01_4.181.1147/html/boolealg#T68
21 http://grid01.ciirc.cvut.cz/~mptp/7.13.01_4.181.1147/html/closure1#T21
22 http://grid01.ciirc.cvut.cz/~mptp/7.13.01_4.181.1147/html/bcialg_4#T44
23 http://grid01.ciirc.cvut.cz/~mptp/7.13.01_4.181.1147/html/xxreal_3#T67

8 Related Work and Possible Extensions

The closest related work is the hint guidance in Otter and Prover9. Our focus is however on large ITP-style theories with large signatures and heterogeneous facts and proofs spanning various areas of mathematics. This motivates using machine learning for reducing the size of the static watchlists and the implementation of the dynamic watchlist mechanisms. Several implementations of internal proof search guidance using statistical learning have been mentioned in Sections 1 and 3. In both the tableau-based systems and the tactical ITP systems, the statistical learning guidance benefits from a compact and directly usable notion of proof state, which is not immediately available in saturation-style ATP.
By delegating the notion of similarity to subsumption, we are relying on fast, crisp and well-known symbolic ATP mechanisms. This has advantages as well as disadvantages. Compared to the ENIGMA [15] and neural [23] statistical guiding methods, the subsumption-based notion of clause similarity is not feature-based or learned. This similarity relation is crisp and sparser compared to the similarity relations induced by the statistical methods. The proof guidance is limited when no derived clauses subsume any of the loaded proof clauses. This can be countered by loading a high number of proofs and widening (or softening) the similarity relation in various approximate ways. On the other hand, subsumption is fast compared to the deep neural methods (see [23]) and enjoys clear guarantees of the underlying symbolic calculus.
For example, when all the (non-empty) clauses from a loaded related proof have been subsumed in the current proof search, it is clear that the current proof search has successfully finished.
A clear novelty is the focusing of the proof search towards the (possibly implausible) inferences needed for completing the loaded proofs. Existing statistical guiding methods will fail to notice such opportunities, and the static watchlist guidance has no way of distinguishing the watchlist matchers that lead faster to proof completion. In a way, this mechanism resembles the feedback obtained by Monte Carlo exploration, where a seemingly statistically unlikely decision can be made based on many rollouts and the averaging of their results. Instead, we rely here on a database of previous proofs, similar to previously played and finished games. The newly introduced heuristic proof search (proof progress) representation may however enable further experiments with Monte Carlo guidance.

8.1 Possible Extensions

Several extensions have already been discussed above. We list the most obvious.
More sophisticated progress metrics: The current proof-progress criterion may be too crude. Subsuming all the initial clauses of a related proof is unlikely until the empty clause is derived. In general, a large part of a related proof may not be needed once the right clauses in the "middle of the proof" are subsumed by the current proof search. A better proof-progress metric would compute the smallest number of proof clauses that are still needed to entail the contradiction. This is achievable, however more technically involved, also due to issues such as rewriting of the watchlist clauses during the current proof search.
Clause re-evaluation based on the evolving proof relevance: As more and more watchlist clauses are matched, the proof relevance of the clauses generated earlier should be updated to mirror the current state. This is in general expensive, so it could be done after each N given clause loops or after a significant number of watchlist matchings. An alternative is to add corresponding indexing mechanisms to the set of generated clauses, which will immediately reorder them in the evaluation queues based on the proof relevance updates.
More abstract/approximate matching: Instead of the strict notion of subsumption, more abstract or heuristic matching methods could be used. An interesting symbolic method to consider is matching modulo symbol alignments [9]. A number of approximate methods are already used by the above mentioned statistical guiding methods.
Adding statistical methods for clause guidance: Instead of using only hard-coded watchlist-style heuristics for focusing inferences, a statistical (e.g. ENIGMA-style) clause evaluation model could be trained by adding the vector of proof completion ratios to the currently used ENIGMA features.
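For this last extension, one conceivable realization is to concatenate the vector of proof completion ratios to the clause's feature vector before the learned model evaluates it. A hypothetical sketch (the function and its inputs are illustrative; this is not the actual ENIGMA interface):

    def clause_features_with_progress(clause_features, progress, proof_ids):
        """Append per-proof completion ratios to an ENIGMA-style feature vector.

        clause_features: numeric features of the clause (e.g., symbol counts).
        progress: object exposing ratio(pid), e.g., the watchlist bookkeeping
        sketched earlier.
        proof_ids: fixed ordering of the loaded proofs, so that the appended
        ratio vector has a stable meaning across training examples.
        """
        return list(clause_features) + [progress.ratio(pid) for pid in proof_ids]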
9 Conclusion

The portfolio of new proof guiding methods developed here significantly improves E's standard portfolio of strategies, and also the previous best set of strategies invented for Mizar by evolutionary methods. The best combination of five new strategies run in parallel for 10 seconds (a reasonable hammering time) will prove over 7% more Mizar problems than the previous best combination of five non-watchlist strategies. Improvement over E's standard portfolio is much higher. Even though we focus on developing the strongest portfolio rather than a single best method, it is clear that the best guided versions also significantly improve over their non-guided counterparts. This improvement for the best new strategy (dyn2A used with the 16 most relevant proofs) is 26.5% (= 18583/14693). These are relatively high improvements in automated theorem proving.
We have shown that the new dynamic methods based on the idea of proof completion ratios improve over the static watchlist guidance. We have also shown that, as usual with learning-based guidance, iterating the methods to produce more proofs leads to stronger methods in the next iteration. The first experiments with widening the watchlist-based guidance by relatively simple inheritance mechanisms seem quite promising, contributing many new proofs. A number of extensions and experiments with guiding saturation-style proving have been opened for future research. We believe that various extensions of the compact and evolving heuristic representation of saturation-style proof search as introduced here will turn out to be of great importance for further development of learning-based saturation provers.

10 Acknowledgments

We thank Bob Veroff for many enlightening explanations and discussions of the watchlist mechanisms in Otter and Prover9. His "industry-grade" projects that prove open and interesting mathematical conjectures with hints and proof sketches have been a great source of inspiration for this work.

References

1. J. Alama, T. Heskes, D. Kühlwein, E. Tsivtsivadze, and J. Urban. Premise selection for mathematics by corpus analysis and kernel methods. J. Autom. Reasoning, 52(2):191–213, 2014.
2. A. A. Alemi, F. Chollet, N. Eén, G. Irving, C. Szegedy, and J. Urban. DeepMath - deep sequence models for premise selection. In D. D. Lee, M. Sugiyama, U. V. Luxburg, I. Guyon, and R. Garnett, editors, Advances in Neural Information Processing Systems 29: Annual Conference on Neural Information Processing Systems 2016, December 5-10, 2016, Barcelona, Spain, pages 2235–2243, 2016.
3. L. Bachmair and H. Ganzinger. Rewrite-based equational theorem proving with selection and simplification. Journal of Logic and Computation, 3(4):217–247, 1994.
4. J. C. Blanchette, D. Greenaway, C. Kaliszyk, D. Kühlwein, and J. Urban. A learning-based fact selector for Isabelle/HOL. J. Autom. Reasoning, 57(3):219–244, 2016.
5. J. C. Blanchette, C. Kaliszyk, L. C. Paulson, and J. Urban. Hammering towards QED. J. Formalized Reasoning, 9(1):101–148, 2016.
6. T. Eiter and D. Sands, editors. LPAR-21, 21st International Conference on Logic for Programming, Artificial Intelligence and Reasoning, Maun, Botswana, May 7-12, 2017, volume 46 of EPiC Series in Computing. EasyChair, 2017.
7. M. Färber and C. Kaliszyk. Random forests for premise selection. In C. Lutz and S. Ranise, editors, Frontiers of Combining Systems - 10th International Symposium, FroCoS 2015, Wroclaw, Poland, September 21-24, 2015. Proceedings, volume 9322 of Lecture Notes in Computer Science, pages 325–340. Springer, 2015.
8. M. Färber, C. Kaliszyk, and J. Urban. Monte Carlo tableau proof search. In L. de Moura, editor, Automated Deduction - CADE 26 - 26th International Conference on Automated Deduction, Gothenburg, Sweden, August 6-11, 2017, Proceedings, volume 10395 of Lecture Notes in Computer Science, pages 563–579. Springer, 2017.
9. T. Gauthier and C. Kaliszyk. Matching concepts across HOL libraries. In S. M. Watt, J. H. Davenport, A. P. Sexton, P. Sojka, and J. Urban, editors, CICM'14, volume 8543 of LNCS, pages 267–281. Springer, 2014.
10. T. Gauthier, C. Kaliszyk, and J. Urban. TacticToe: Learning to reason with HOL4 tactics. In Eiter and Sands [6], pages 125–143.
11. A. Grabowski, A. Korniłowicz, and A. Naumowicz. Mizar in a nutshell. J. Formalized Reasoning, 3(2):153–245, 2010.
12. T. Gransden, N. Walkinshaw, and R. Raman. SEPIA: search for proofs using inferred automata. In Automated Deduction - CADE-25 - 25th International Conference on Automated Deduction, Berlin, Germany, August 1-7, 2015, Proceedings, pages 246–255, 2015.
13. J. Jakubuv and J. Urban. Extending E prover with similarity based clause selection strategies. In M. Kohlhase, M. Johansson, B. R. Miller, L. de Moura, and F. W. Tompa, editors, Intelligent Computer Mathematics - 9th International Conference, CICM 2016, Bialystok, Poland, July 25-29, 2016, Proceedings, volume 9791 of Lecture Notes in Computer Science, pages 151–156. Springer, 2016.
14. J. Jakubuv and J. Urban. BliStrTune: hierarchical invention of theorem proving strategies. In Y. Bertot and V. Vafeiadis, editors, Proceedings of the 6th ACM SIGPLAN Conference on Certified Programs and Proofs, CPP 2017, Paris, France, January 16-17, 2017, pages 43–52. ACM, 2017.
15. J. Jakubuv and J. Urban. ENIGMA: efficient learning-based inference guiding machine. In H. Geuvers, M. England, O. Hasan, F. Rabe, and O. Teschke, editors, Intelligent Computer Mathematics - 10th International Conference, CICM 2017, Edinburgh, UK, July 17-21, 2017, Proceedings, volume 10383 of Lecture Notes in Computer Science, pages 292–302. Springer, 2017.
16. C. Kaliszyk, S. Schulz, J. Urban, and J. Vyskocil. System description: E.T. 0.1. In A. P. Felty and A. Middeldorp, editors, Automated Deduction - CADE-25 - 25th International Conference on Automated Deduction, Berlin, Germany, August 1-7, 2015, Proceedings, volume 9195 of Lecture Notes in Computer Science, pages 389–398. Springer, 2015.
17. C. Kaliszyk and J. Urban. Learning-assisted automated reasoning with Flyspeck. J. Autom. Reasoning, 53(2):173–213, 2014.
18. C. Kaliszyk and J. Urban. FEMaLeCoP: Fairly efficient machine learning connection prover. In M. Davis, A. Fehnker, A. McIver, and A. Voronkov, editors, Logic for Programming, Artificial Intelligence, and Reasoning - 20th International Conference, LPAR-20 2015, Suva, Fiji, November 24-28, 2015, Proceedings, volume 9450 of Lecture Notes in Computer Science, pages 88–96. Springer, 2015.
19. C. Kaliszyk and J. Urban. MizAR 40 for Mizar 40. J. Autom. Reasoning, 55(3):245–256, 2015.
20. C. Kaliszyk, J. Urban, and J. Vyskočil. Efficient semantic features for automated reasoning over large theories. In Q. Yang and M. Wooldridge, editors, IJCAI'15, pages 3084–3090. AAAI Press, 2015.
21. M. K. Kinyon, R. Veroff, and P. Vojtěchovský. Loops with abelian inner mapping groups: An application of automated deduction. In M. P. Bonacina and M. E. Stickel, editors, Automated Reasoning and Mathematics - Essays in Memory of William W. McCune, volume 7788 of LNCS, pages 151–164. Springer, 2013.
22. L. Kovács and A. Voronkov. First-order theorem proving and Vampire. In N. Sharygina and H. Veith, editors, CAV, volume 8044 of LNCS, pages 1–35. Springer, 2013.
23. S. M. Loos, G. Irving, C. Szegedy, and C. Kaliszyk.
Deep network guided proof search. In Eiter and Sands [6], pages 85–105.
24. W. McCune and L. Wos. Otter: The CADE-13 competition incarnations. Journal of Automated Reasoning, 18(2):211–220, 1997. Special Issue on the CADE-13 ATP System Competition.
25. W. W. McCune. Prover9 and Mace4. http://www.cs.unm.edu/~mccune/prover9/, 2005–2010. (accessed 2016-03-29).
26. J. Otten and W. Bibel. leanCoP: lean connection-based theorem proving. J. Symb. Comput., 36(1-2):139–161, 2003.
27. S. Schäfer and S. Schulz. Breeding theorem proving heuristics with genetic algorithms. In G. Gottlob, G. Sutcliffe, and A. Voronkov, editors, Global Conference on Artificial Intelligence, GCAI 2015, Tbilisi, Georgia, October 16-19, 2015, volume 36 of EPiC Series in Computing, pages 263–274. EasyChair, 2015.
28. S. Schulz. Learning search control knowledge for equational theorem proving. In F. Baader, G. Brewka, and T. Eiter, editors, Proc. of the Joint German/Austrian Conference on Artificial Intelligence (KI-2001), volume 2174 of LNAI, pages 320–334. Springer, 2001.
29. S. Schulz. Simple and efficient clause subsumption with feature vector indexing. In M. P. Bonacina and M. E. Stickel, editors, Automated Reasoning and Mathematics: Essays in Memory of William W. McCune, volume 7788 of LNAI, pages 45–67. Springer, 2013.
30. S. Schulz. System description: E 1.8. In K. L. McMillan, A. Middeldorp, and A. Voronkov, editors, LPAR, volume 8312 of LNCS, pages 735–743. Springer, 2013.
31. S. Schulz and M. Möhrmann. Performance of clause selection heuristics for saturation-based theorem proving. In N. Olivetti and A. Tiwari, editors, Proc. of the 8th IJCAR, Coimbra, volume 9706 of LNAI, pages 330–345. Springer, 2016.
32. D. Silver, A. Huang, C. J. Maddison, A. Guez, L. Sifre, G. van den Driessche, J. Schrittwieser, I. Antonoglou, V. Panneershelvam, M. Lanctot, S. Dieleman, D. Grewe, J. Nham, N. Kalchbrenner, I. Sutskever, T. P. Lillicrap, M. Leach, K. Kavukcuoglu, T. Graepel, and D. Hassabis. Mastering the game of Go with deep neural networks and tree search. Nature, 529(7587):484–489, 2016.
33. D. Silver, T. Hubert, J. Schrittwieser, I. Antonoglou, M. Lai, A. Guez, M. Lanctot, L. Sifre, D. Kumaran, T. Graepel, T. P. Lillicrap, K. Simonyan, and D. Hassabis. Mastering chess and shogi by self-play with a general reinforcement learning algorithm. CoRR, abs/1712.01815, 2017.
34. K. Slind and M. Norrish. A brief overview of HOL4. In O. A. Mohamed, C. A. Muñoz, and S. Tahar, editors, Theorem Proving in Higher Order Logics, 21st International Conference, TPHOLs 2008, Montreal, Canada, August 18-21, 2008. Proceedings, volume 5170 of LNCS, pages 28–32. Springer, 2008.
35. J. Urban. MPTP 0.2: Design, implementation, and initial experiments. J. Autom. Reasoning, 37(1-2):21–43, 2006.
36. J. Urban, J. Vyskočil, and P. Štěpánek. MaLeCoP: Machine learning connection prover. In K. Brünnler and G. Metcalfe, editors, TABLEAUX, volume 6793 of LNCS, pages 263–277. Springer, 2011.
37. R. Veroff. Using hints to increase the effectiveness of an automated reasoning program: Case studies. Journal of Automated Reasoning, 16(3):223–239, 1996.
| {"metadata": {"id": "0fpw5bOU_82", "year": null, "venue": "EC 2018", "pdf_link": null, "forum_link": "https://openreview.net/forum?id=0fpw5bOU_82", "arxiv_id": null, "doi": null}, "paper": {"title": "Prophet Secretary: Surpassing the 1-1/e Barrier", "authors": ["Yossi Azar", "Ashish Chiplunkar", "Haim Kaplan"], "abstract": "In the Prophet Secretary problem, samples from a known set of probability distributions arrive one by one in a uniformly random order, and an algorithm must irrevocably pick one of the samples as soon as it arrives. The goal is to maximize the expected value of the sample picked relative to the expected maximum of the distributions. This is one of the most simple and fundamental problems in online decision making that models the process selling one item to a sequence of costumers. For a closely related problem called the Prophet Inequality where the order of the random variables is adversarial, it is known that one can achieve in expectation 1/2 of the expected maximum, and no better ratio is possible. For the Prophet Secretary problem, that is, when the variables arrive in a random order, Esfandiari et al. (2015) showed that one can actually get 1-1/e of the maximum. The 1-1/e bound was recently extended to more general settings by Ehsani et al. (2018). Given these results, one might be tempted to believe that 1-1/e is the correct bound. We show that this is not the case by providing an algorithm for the Prophet Secretary problem that beats the 1-1/e bound and achieves 1-1/e+1/400 times the expected maximum. We also prove a hardness result on the performance of algorithms under a natural restriction which we call deterministic distribution-insensitivity.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "lGKwu4FWyC", "year": null, "venue": "IEEE Trans. Pattern Anal. Mach. Intell. 2023", "pdf_link": "https://ieeexplore.ieee.org/iel7/34/10036240/09816125.pdf", "forum_link": "https://openreview.net/forum?id=lGKwu4FWyC", "arxiv_id": null, "doi": null}, "paper": {"title": "E$^{3}$3Outlier: a Self-Supervised Framework for Unsupervised Deep Outlier Detection", "authors": ["Siqi Wang", "Yijie Zeng", "Guang Yu", "Zhen Cheng", "Xinwang Liu", "Sihang Zhou", "En Zhu", "Marius Kloft", "Jianping Yin", "Qing Liao"], "abstract": "Existing unsupervised outlier detection (OD) solutions face a grave challenge with surging visual data like images. Although deep neural networks (DNNs) prove successful for visual data, deep OD remains difficult due to OD's unsupervised nature. This paper proposes a novel framework named <italic xmlns:mml=\ xmlns:xlink=\>E<inline-formula><tex-math notation=\>$^{3}$</tex-math><alternatives><mml:math><mml:msup><mml:mrow/><mml:mn>3</mml:mn></mml:msup></mml:math><inline-graphic xlink:href=\/></alternatives></inline-formula>Outlier</i> that can perform <bold xmlns:mml=\ xmlns:xlink=\>e</b> ffective and <bold xmlns:mml=\ xmlns:xlink=\>e</b> nd-to- <bold xmlns:mml=\ xmlns:xlink=\>e</b> nd deep outlier removal. Its core idea is to introduce <italic xmlns:mml=\ xmlns:xlink=\>self-supervision</i> into deep OD. Specifically, our major solution is to adopt a discriminative learning paradigm that creates multiple pseudo classes from given unlabeled data by various data operations, which enables us to apply prevalent discriminative DNNs (e.g., ResNet) to the unsupervised OD problem. Then, with theoretical and empirical demonstration, we argue that inlier priority, a property that encourages DNN to prioritize inliers during self-supervised learning, makes it possible to perform end-to-end OD. Meanwhile, unlike frequently-used outlierness measures (e.g., density, proximity) in previous OD methods, we explore network uncertainty and validate it as a highly effective outlierness measure, while two practical score refinement strategies are also designed to improve OD performance. Finally, in addition to the discriminative learning paradigm above, we also explore the solutions that exploit other learning paradigms (i.e., generative learning and contrastive learning) to introduce self-supervision for <italic xmlns:mml=\ xmlns:xlink=\>E<inline-formula><tex-math notation=\>$^{3}$</tex-math><alternatives><mml:math><mml:msup><mml:mrow/><mml:mn>3</mml:mn></mml:msup></mml:math><inline-graphic xlink:href=\/></alternatives></inline-formula>Outlier</i> . Such extendibility not only brings further performance gain on relatively difficult datasets, but also enables <italic xmlns:mml=\ xmlns:xlink=\>E<inline-formula><tex-math notation=\>$^{3}$</tex-math><alternatives><mml:math><mml:msup><mml:mrow/><mml:mn>3</mml:mn></mml:msup></mml:math><inline-graphic xlink:href=\/></alternatives></inline-formula>Outlier</i> to be applied to other OD applications like video abnormal event detection. Extensive experiments demonstrate that <italic xmlns:mml=\ xmlns:xlink=\>E<inline-formula><tex-math notation=\>$^{3}$</tex-math><alternatives><mml:math><mml:msup><mml:mrow/><mml:mn>3</mml:mn></mml:msup></mml:math><inline-graphic xlink:href=\/></alternatives></inline-formula>Outlier</i> can considerably outperform state-of-the-art counterparts by 10%-30% AUROC. 
Demo codes are available at https://github.com/demonzyj56/E3Outlier.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []}
{"metadata": {"id": "iNQUeB_cFq6", "year": null, "venue": "Bulletin of the EATCS 2013", "pdf_link": "http://eatcs.org/beatcs/index.php/beatcs/article/download/206/200", "forum_link": "https://openreview.net/forum?id=iNQUeB_cFq6", "arxiv_id": null, "doi": null}, "paper": {"title": "When is A=B?", "authors": ["Anja Gruenheid", "Donald Kossmann", "Besmira Nushi"], "abstract": "Most database operations such as sorting, grouping and computing joins are based on comparisons between two values. Traditional algorithms assume that machines do not make mistakes. This assumption holds in traditional computing environments; however, it does not hold in several new emerging computing environments. In this write-up, we argue the need for new resilient algorithms that take into account that the result of a comparison might be wrong. The goal is to design algorithms that have low cost (make few comparisons) yet produce high-quality results in the presence of errors.", "keywords": [], "raw_extracted_content": "
The Logic in Computer Science Column
by Yuri Gurevich
Microsoft Research
One Microsoft Way, Redmond WA 98052, USA
gurevich@microsoft.com

When is A=B?*

Anja Gruenheid (ETH Zürich, ganja@inf.ethz.ch), Donald Kossmann (ETH Zürich, donaldk@ethz.ch), Besmira Nushi (ETH Zürich, nushib@inf.ethz.ch)

Abstract. Most database operations such as sorting, grouping and computing joins are based on comparisons between two values. Traditional algorithms assume that machines do not make mistakes. This assumption holds in traditional computing environments; however, it does not hold in several new emerging computing environments. In this write-up, we argue the need for new resilient algorithms that take into account that the result of a comparison might be wrong. The goal is to design algorithms that have low cost (make few comparisons) yet produce high-quality results in the presence of errors.

* This write-up is based on a talk given at the University of Washington and Microsoft Research, Redmond, in August 2013. The slides of that talk are online at http://systems.ethz.ch/talks.

1 Introduction

When we think about computers, we typically assume that they are dumb and make no mistakes. Our software methodology, complexity theory, and algorithmic design are based on these two assumptions. What happens if we drop one of these assumptions? What happens if computers start making mistakes occasionally; even simple mistakes such as getting a comparison between two integers wrong? Will we need new algorithms or are the existing algorithms good enough?
There are a number of research trends that make it worthwhile to think about error-prone computers. The first trend is the emergence of crowdsourcing and the development of hybrid systems that involve machines and humans to compute tasks that neither machines nor humans are capable of computing alone. [4] gives an overview of such systems. Since these systems rely on human input, some of the computations carried out by these systems may be error-prone, and algorithms that are designed for these systems need to take this fact into account.
A second trend is the development of new, low-energy processors that trade power for accuracy. That is, these processors might occasionally get an operation wrong in exchange for much lower power consumption. Examples for such designs are [1].
Third, with the advent of Big Data technologies, we are automating an increasing number of tasks based on previous experience. Recommendation systems such as those deployed by Amazon as part of their online shop improve with an increasing amount of data.
Putting it differently, these systems might make poor recommendations if only little data is available.
Based on these observations, we believe that it is worthwhile to revisit existing algorithms and start thinking about how to design algorithms for computer systems that occasionally do make errors. It turns out that an algorithm that is optimal in the traditional (error-free) computational model may perform poorly in the presence of error. As an example, this paper reports on some simple observations that we made when studying QuickSort. Furthermore, this paper reports on some observations on how to group objects in a robust way if the machine occasionally misclassifies two objects. These two examples indicate that we might have to rethink complexity theory and algorithm design. As of now, the results of this paper are anecdotal, and we have not yet been able to develop a new theory. The main purpose of this paper is to raise the issue.
The remainder of this paper is organized as follows: Section 2 studies sorting. Section 3 gives an example of how errors impact algorithms for grouping or clustering objects. Section 4 contains conclusions and related work.

2 Example 1: QuickSort

To show how the presence of errors may impact algorithm design, let us start with a discussion of QuickSort. [7] gives a more general discussion of sorting algorithms in the presence of errors. Here and in the remainder of this paper, we assume a computational model in which the computer system might make an error when executing a comparison; however, the logic of the algorithm is executed correctly. Furthermore, we assume that comparisons are the most expensive operation. This computational model matches nicely hybrid systems in which comparisons are crowdsourced (e.g., [10]).
QuickSort is generally perceived as one of the best algorithms for sorting. However, what makes QuickSort great for traditional, error-free computing scenarios hurts QuickSort in the presence of mistakes. The following example shows why. The task is to sort the following sequence of numbers:

7, 24, 2, 13, 51

Let us assume that QuickSort chooses 7 as the pivot element of the first partitioning phase, and let us furthermore assume that the machine gets all comparisons right in the first partitioning, except for the comparison 7 < 51. As a consequence, the result of the first iteration of QuickSort are the following two partitions:

2, 51
24, 13

Even if the machine is perfect and makes no further mistakes, the best possible outcome to sort the sequence of numbers is:

2, 51, 7, 13, 24

The key observation is that one wrong comparison (misclassifying 7 < 51) resulted in three errors in the final result (misclassifying 13 < 51 and 24 < 51 in addition to 7 < 51). The reason is that the QuickSort algorithm aggressively exploits the transitivity of the < relation, so that errors propagate. There are many different notions of error, and the most appropriate definition depends on the utility function of the application. We use the number of misclassified comparisons in the final result here and in [7] because it is easy to formalize and it is a metric that is highly relevant for many applications that involve sorting or ranking data.
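The propagation above is easy to reproduce. A minimal sketch that runs QuickSort with an injected comparison oracle erring exactly once, on 7 < 51 (the pivot choice and partitioning conventions here are illustrative):

    def quicksort(seq, less_than):
        # First element as pivot; `less_than` is the possibly faulty comparison.
        if len(seq) <= 1:
            return seq
        pivot, rest = seq[0], seq[1:]
        left, right = [], []
        for x in rest:
            # One comparison per element, as in the example above.
            (right if less_than(pivot, x) else left).append(x)
        return quicksort(left, less_than) + [pivot] + quicksort(right, less_than)

    def faulty_less_than(a, b):
        if (a, b) == (7, 51):
            return False  # the single wrong comparison from the example
        return a < b

    print(quicksort([7, 24, 2, 13, 51], faulty_less_than))  # [2, 51, 7, 13, 24]

The single wrong comparison yields exactly the three misclassified pairs counted above.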
That is, recomputing 7 <51 several times and then do a majority vote or\naccept based on a theshold. That will increase the number of comparisons by a\nTransactionId Customer Purchase\n1 Jane $ 1000\n2 Bob $ 500\n3 Jane $ 100\n4 Jane $ 50\nTable 1: Example Transactions\nconstant factor (i.e., the number of times each comparison is made) so that Quick-\nSort continues to be in the O(n\u0003log(n)) complexity class. The problem is that even\nwith a high number of attempts, the probability of a misclassification is not zero.\nSo, we can never expect perfection with QuickSort. Also, the impact of a wrong\ncomparison grows with the size of the sequence in our particular error model that\ncounts the misclassifications in the final result: In the worst case, it is n=2 with\nnthe length of the sequence. The question then is how to best invest additional\ncomparisons and whether new algorithms are more appropriate than traditional\nalgorithms to achieve high quality for lower cost. [7], for instance, shows that\niteratively running BubbleSort might be a better a way to invest additional com-\nputation for better quality. That is, do an intial sorting with QuickSort and then\nrun BubbleSort once or several times on the result to improve the quality of the\nresult, thereby exploiting that BubbleSort has O(n) complexity if the data is sorted\nalready and the a \u000bects of wrong comparisons are always local in BubbleSort.\n3 Example 2: Grouping\n3.1 Vote Graphs\nAs a second example of how the cost /quality trade-o \u000bof error-prone computer\nsystems impacts algorithm design, consider the list of transactions of Table 1.\nThe task is to compute the total purchase of each customer; i.e., $ 1150 for Jane\nand $ 500 for Bob. With SQL, this task can be specified using a simple GROUP\nBY clause. Depending on the number of customers, the number of transactions,\nand the skew in the distribution of transactions to customers, modern database sys-\ntems choose one of three alternative ways to compute this grouping of transactions\nby customer: sorting, hashing, or nested-loops. For the purpose of this example,\nwe will use the nested-loop variant and discuss alternative ways to compare the\ncustomer fields of two transactions in order to decide whether they belong to the\nsame customer. Note that hashing and sorting are often more e \u000ecient variants, but\nthey su \u000ber from the same kind of error propagation as the QuickSort algorithm in\nthe previous section.\n(a) No Conflict (b) With Conflict\nFigure 1: Example V ote Graphs for Table 1\nSimilarly to Section 2, we assume that the comparison of two customer names\nis the only error-prone and costly operation. Thus, the goal is to minimize the\nnumber of such comparisons and minimize the impact of mistakes made when\ncomputing these comparisons. Figure 1 illustrates one possible approach to do\nthat. It depicts two example Vote Graphs . Such V ote Graphs capture the results of\nall comparisons carried out between the customer names of the four transactions.\nThe nodes of a V ote Graph are transactions. Edges of a V ote Graph represent\nthe results of comparing the customer names of two transactions. 
3 Example 2: Grouping

3.1 Vote Graphs

As a second example of how the cost/quality trade-off of error-prone computer systems impacts algorithm design, consider the list of transactions of Table 1. The task is to compute the total purchase of each customer; i.e., $1150 for Jane and $500 for Bob. With SQL, this task can be specified using a simple GROUP BY clause.

TransactionId  Customer  Purchase
1              Jane      $ 1000
2              Bob       $ 500
3              Jane      $ 100
4              Jane      $ 50

Table 1: Example Transactions

Depending on the number of customers, the number of transactions, and the skew in the distribution of transactions to customers, modern database systems choose one of three alternative ways to compute this grouping of transactions by customer: sorting, hashing, or nested-loops. For the purpose of this example, we will use the nested-loop variant and discuss alternative ways to compare the customer fields of two transactions in order to decide whether they belong to the same customer. Note that hashing and sorting are often more efficient variants, but they suffer from the same kind of error propagation as the QuickSort algorithm in the previous section.
Similarly to Section 2, we assume that the comparison of two customer names is the only error-prone and costly operation. Thus, the goal is to minimize the number of such comparisons and to minimize the impact of mistakes made when computing these comparisons. Figure 1 illustrates one possible approach to do that. It depicts two example Vote Graphs. Such Vote Graphs capture the results of all comparisons carried out between the customer names of the four transactions. The nodes of a Vote Graph are transactions. Edges of a Vote Graph represent the results of comparing the customer names of two transactions. The weight of an edge indicates how often the comparison returned that result; the sign of the weight of an edge indicates the result of the comparison (true or false).

Figure 1: Example Vote Graphs for Table 1. (a) No Conflict; (b) With Conflict.

The Vote Graph of Figure 1a, for instance, indicates that we compared the customer names of Transactions 1 and 2 (i.e., "Jane=Bob") three times and all three times the answer was "false" (which happens to be the correct answer in this example). Furthermore, it shows that all seven comparisons between the customer names of Transactions 1 and 3 were positive (which happens to be correct, too, in this example).

3.2 Decision Functions

If minimizing comparisons between two customer names is our main objective (e.g., because they need to be crowdsourced or need to be executed repeatedly on an error-prone machine), then it makes sense to exploit the transitivity of the = relation. So, if the grouping algorithm asks whether Transactions 1 and 4 belong to the same customer in Figure 1a, the answer is true and can be inferred from Figure 1a without actually looking at the customer names of these transactions.
Transitivity and anti-transitivity can be applied in a straightforward way in the example of Figure 1a. The situation becomes trickier in Figure 1b because in that Vote Graph there is a conflicting edge: the negative edge between Transactions 1 and 4 conflicts with the positive edges "1-3" and "3-4".
In the presence of error-prone computations, conflicts in the Vote Graph are inevitable. Therefore, it is important to tolerate these errors and make decisions even in conflict situations. In the example of Figure 1b, it is evident that the system should conclude that the same customer carried out Transactions 1 and 4, because the weight of the edges "1-3" and "3-4" is much higher than the weight of the negative edge "1-4". In general, we propose the use of a decision function that, given a Vote Graph, determines whether two nodes are the same, not the same, or if additional comparisons are needed in order to make the decision.
There are many decision functions conceivable, and [8] contains a more detailed discussion of which properties a decision function should have. For instance, a decision function that always says that two nodes are the same is obviously not good because it will result in poor quality. Likewise, a decision function that always says "I do not know" is not good because it will result in high cost, as it would induce additional comparisons. For the discussion in this paper, let us consider a decision function that is inspired by work on combining scoring functions [5] and that we call the MinMax function.
The MinMax function considers all positive and negative paths between two nodes. A positive path is a path that involves only edges with weight greater than 0. A negative path is a path that has exactly one negative edge. Paths with more than one negative edge are ignored because neither equality nor inequality can be inferred from them. For each path, the MinMax function computes a score: for a positive path, the score is the minimum of the weights of the edges of the path. For a negative path, the score is defined as the minimum of the absolute weights of the edges (i.e., the weight of the only negative edge is multiplied by -1 for this purpose). The intuition behind this scoring function is that a path is as strong as its weakest link.
Another way to interpret the minimum is that it implements a conjunction (i.e., ∧) along the path, thereby interpreting each edge as a predicate. Continuing the example of Figure 1b, the score of the positive path '1-3-4' is 5, while the score for the negative path '1-4' is 1.
After computing the scores for all positive and negative paths, the MinMax decision function aggregates these scores into a single positive score, pScore, and a single negative score, nScore. pScore is the maximum of the scores of all positive paths. If there is no positive path, then pScore = 0. Analogously, nScore is the maximum of the scores of all negative paths. If there is no negative path, then nScore = 0. These values represent the maximum impact that a positive respectively negative path can have within an entity.
Finally, the MinMax function uses a threshold q in order to form a final decision based on the positive and negative scores; e.g., q = 3. That is, if the positive score is 3 or more higher than the negative score, then the MinMax function decides that the two nodes are the same. More formally, the decision part of MinMax is defined as follows:

f(r_1, r_2) = \begin{cases} \text{Yes}, & pScore(r_1, r_2) - nScore(r_1, r_2) \geq q \\ \text{No}, & nScore(r_1, r_2) - pScore(r_1, r_2) \geq q \\ \text{Do-not-know}, & \text{otherwise} \end{cases}
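A minimal sketch of MinMax as defined above, enumerating simple paths naively (the implementation proposed in [8] is considerably more efficient); the edge weights are chosen to match the scores discussed for Figure 1b, with the 1-2 edge taken from Figure 1a:

    def minmax_decide(edges, a, b, q=3):
        # edges: dict mapping frozenset({x, y}) -> signed vote weight.
        graph = {}
        for pair, w in edges.items():
            x, y = tuple(pair)
            graph.setdefault(x, []).append((y, w))
            graph.setdefault(y, []).append((x, w))
        p_score = n_score = 0

        def dfs(node, seen, score, negatives):
            nonlocal p_score, n_score
            if node == b:
                if negatives == 0:
                    p_score = max(p_score, score)   # positive path
                else:
                    n_score = max(n_score, score)   # path with one negative edge
                return
            for nxt, w in graph.get(node, []):
                if nxt in seen or negatives + (w < 0) > 1:
                    continue  # skip cycles and paths with >1 negative edge
                dfs(nxt, seen | {nxt}, min(score, abs(w)), negatives + (w < 0))

        dfs(a, {a}, float("inf"), 0)
        if p_score - n_score >= q:
            return "Yes"
        if n_score - p_score >= q:
            return "No"
        return "Do-not-know"

    # Figure 1b: 1-3 carries +7 votes, 3-4 carries +5, 1-4 carries -1 (1-2: -3).
    edges = {frozenset({1, 3}): 7, frozenset({3, 4}): 5,
             frozenset({1, 4}): -1, frozenset({1, 2}): -3}
    print(minmax_decide(edges, 1, 4))  # pScore=5 via 1-3-4, nScore=1 -> "Yes"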
Figure 2: Interesting MinMax Example

3.3 Observations

[8] contains a full discussion of this grouping/clustering use case under uncertainty, with a series of experiments. The important observation and conclusion of [8] is that maintaining a Vote Graph and doing inference with the MinMax function is much better than doing pairwise comparisons, in terms of both quality and cost, in order to compute any database operation that is based on equality (e.g., joins, grouping, or clustering). In terms of cost, it is better because of its inference capability; in terms of quality, it is better because it detects inconsistencies and tries to keep the whole graph consistent. The designers of traditional database systems would never consider keeping such a Vote Graph because in traditional computing environments it is always cheaper (and as reliable) to recompute a comparison than to infer its result from a Vote Graph.
[8] discusses some of the properties of the MinMax decision function. It turns out that it is not transitive, and an example can be seen in Figure 2 with a threshold of 3. In that example, MinMax rules that "X=Y" (pScore=3, nScore=0) and "Y=Z" (pScore=5, nScore=2), but it rules that "X=Z" is unknown (pScore=3, nScore=2). There are many conceivable decision functions, many of which indeed are transitive. For instance, it would be possible to define a decision function by applying the MinCuts algorithm on every instance of the Vote Graph (i.e., after computing every comparison). This decision function would indeed be transitive, but its implementation would have high computational cost. [8] proposes the MinMax function because it can be implemented in a highly efficient way.
For the purpose of designing good and robust algorithms for error-prone computer systems, however, we would like to make another important, somewhat surprising observation. Going back to Figure 2 and using the MinMax function, the best way to conclude that "X=Z" is not by comparing "X=Z" directly. Doing so would require, in the best case, five calls to the comparison function. Instead, investing into the "Y=Z" edge is more promising: in the best case, two comparisons that confirm that indeed "Y=Z" are sufficient to finally conclude with the MinMax function that "X=Z".

4 Conclusion and Related Work

The two examples showed some phenomena that may occur if computer systems make mistakes. The examples show that an optimal algorithm for the traditional (error-free) computing model might result in poor quality when run on error-prone computer systems. It is an open question what the optimal algorithms to sort a sequence of numbers and to group/cluster objects in the presence of errors are. The main message that we would like to illustrate with these examples is that error should be part of the equation. That is, we need to do two things:

- We need to design algorithms that scale (with the problem size) and tolerate errors. (Traditional algorithms were designed only to scale.)
- We need to optimize for both cost and quality. (Traditional algorithms were designed to minimize cost only.)

In other words, algorithm designers face two kinds of optimizations:

- Given a problem (e.g., sorting), a problem instance (e.g., 1000 integers), an error model (e.g., 1% of the comparisons are wrong uniformly) and a budget (e.g., 1 million comparisons), maximize the quality of the result.
- Given a problem, a problem instance, an error model, and quality requirements, minimize the cost.

At the moment, we do not even have good abstractions to characterize computational error and result quality.
The examples used in this paper were derived from typical database operators (i.e., sorting, joins, and grouping). Recently, there have been a number of papers in the database community that studied how to enhance databases with crowdsourcing, a special form of uncertain computation; e.g., [9, 11, 6, 3] to name just a few. It turns out that the topic of error-prone computing has been studied in other communities as well, and not only in the context of crowdsourcing. For instance, Busse and Buhmann studied the information gain of a comparison in alternative sorting algorithms [2]. Schulze developed a method to carry out elections, called the Schulze method, which is similar to the MinMax decision function [12]. Furthermore, designers of distributed systems have been developing fault-tolerant algorithms for decades. The fact that several communities are looking into fault-tolerant computation makes it even more important to develop a theory that incorporates error and result quality in algorithm design and complexity.

References

[1] L. Avinash, K. K. Muntimadugu, C. C. Enz, R. M. Karp, K. V. Palem, and C. Piguet. Algorithmic methodologies for ultra-efficient inexact architectures for sustaining technology scaling. In J. Feo, P. Faraboschi, and O. Villa, editors, Conf. Computing Frontiers, pages 3–12. ACM, 2012.
[2] L. M. Busse, M. H. Chehreghani, and J. M. Buhmann. The information content in sorting algorithms. In ISIT, pages 2746–2750. IEEE, 2012.
[3] S. B. Davidson, S. Khanna, T. Milo, and S. Roy. Using the crowd for top-k and group-by queries. In W.-C. Tan, G. Guerrini, B. Catania, and A. Gounaris, editors, ICDT, pages 225–236. ACM, 2013.
[4] A. Doan, R. Ramakrishnan, and A. Y. Halevy. Crowdsourcing systems on the world-wide web. Commun. ACM, 54(4):86–96, 2011.
[5] R. Fagin and E. L. Wimmers. A formula for incorporating weights into scoring rules. Theor. Comput. Sci., 239(2):309–338, 2000.
[6] M. J. Franklin, D. Kossmann, T. Kraska, S. Ramesh, and R. Xin. CrowdDB: answering queries with crowdsourcing. In Sellis et al. [13], pages 61–72.
[7] A. Gruenheid and D. Kossmann. Cost and quality trade-offs in crowdsourcing. In R. Cheng, A. D. Sarma, S. Maniu, and P. Senellart, editors, DBCrowd, volume 1025 of CEUR Workshop Proceedings, pages 43–46. CEUR-WS.org, 2013.
[8] A. Gruenheid, D. Kossmann, S. Ramesh, and F. Widmer. Crowdsourcing entity resolution: When is A=B? Technical Report No. 785, Department of Computer Science, ETH Zurich, Sep 2012.
[9] A. Marcus, E. Wu, D. R. Karger, S. Madden, and R. C. Miller. Demonstration of Qurk: a query processor for human operators. In Sellis et al. [13], pages 1315–1318.
[10] A. Marcus, E. Wu, D. R. Karger, S. Madden, and R. C. Miller. Human-powered sorts and joins. PVLDB, 5(1):13–24, 2011.
[11] H. Park, R. Pang, A. G. Parameswaran, H. Garcia-Molina, N. Polyzotis, and J. Widom. Deco: A system for declarative crowdsourcing. PVLDB, 5(12):1990–1993, 2012.
[12] M. Schulze. A new monotonic, clone-independent, reversal symmetric, and Condorcet-consistent single-winner election method. Social Choice and Welfare, 36(2):267–303, 2011.
[13] T. K. Sellis, R. J. Miller, A. Kementsietsidis, and Y. Velegrakis, editors. Proceedings of the ACM SIGMOD International Conference on Management of Data, SIGMOD 2011, Athens, Greece, June 12-16, 2011. ACM, 2011.
", "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []}
{"metadata": {"id": "6g6Y0DGvRk", "year": null, "venue": "EAIS 2020", "pdf_link": null, "forum_link": "https://openreview.net/forum?id=6g6Y0DGvRk", "arxiv_id": null, "doi": null}, "paper": {"title": "GUapp: A Conversational Agent for Job Recommendation for the Italian Public Administration", "authors": ["Vito Bellini", "Giovanni Maria Biancofiore", "Tommaso Di Noia", "Eugenio Di Sciascio", "Fedelucio Narducci", "Claudio Pomo"], "abstract": null, "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []}
{"metadata": {"id": "vXvw32grMPe", "year": null, "venue": "EAIS 2018", "pdf_link": "https://ieeexplore.ieee.org/iel7/8388184/8397169/08397177.pdf", "forum_link": "https://openreview.net/forum?id=vXvw32grMPe", "arxiv_id": null, "doi": null}, "paper": {"title": "Deep reinforcement learning for frontal view person shooting using drones", "authors": ["Nikolaos Passalis", "Anastasios Tefas"], "abstract": "Unmanned Aerial Vehicles (UAVs), also known as drones, are increasingly used for a wide variety of novel tasks, including drone-based cinematography. However, flying drones in such settings requires the coordination of several people, increasing the cost of using drones for aerial cinematography and limiting the shooting flexibility by putting a significant cognitive load on the director and drone/camera operators. To overcome some of these limitations, this paper proposes a deep reinforcement learning (RL) method for performing autonomous frontal view shooting. To this end, a realistic simulation environment is developed, which ensures that the learned agent can be directly deployed on a drone. Then, a deep RL algorithm, tailored to the needs of the specific application, is derived building upon the well-known deep Q-learning approach. The effectiveness of the proposed technique is experimentally demonstrated using several quantitative and qualitative experiments.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []}
{"metadata": {"id": "yvi2w_wGTOY", "year": null, "venue": "EAIS 2012", "pdf_link": "https://ieeexplore.ieee.org/iel5/6225463/6232786/06232798.pdf", "forum_link": "https://openreview.net/forum?id=yvi2w_wGTOY", "arxiv_id": null, "doi": null}, "paper": {"title": "Online learning with kernels in classification and regression", "authors": ["Guoqi Li", "Guangshe Zhao"], "abstract": "New optimization models and algorithms for online learning with kernels (OLK) in classification and regression are proposed in a Reproducing Kernel Hilbert Space (RKHS) by solving a constrained optimization model. The 'forgetting' factor in the model makes it possible for the memory requirement of the algorithm to stay bounded as the learning process continues. The applications of the proposed OLK algorithms in classification and regression show their effectiveness in comparison with state-of-the-art algorithms.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []}
{"metadata": {"id": "0KlhaP9dy-y", "year": null, "venue": "EAIS 2015", "pdf_link": "https://ieeexplore.ieee.org/iel7/7361817/7368765/07368781.pdf", "forum_link": "https://openreview.net/forum?id=0KlhaP9dy-y", "arxiv_id": null, "doi": null}, "paper": {"title": "An entropy-based method for estimating demographic trends", "authors": ["Guang-She Zhao", "Yi Xu", "Guoqi Li", "Zhao-Xu Yang"], "abstract": "In this paper, an entropy-based method is proposed to forecast the demographical changes of countries. We formulate the estimation of future demographical profiles as a constrained optimization problem, anchored on the empirically validated assumption that the entropy of the age distribution is increasing in time. The procedure of the proposed method involves three stages, namely: 1) prediction of the age distribution of a country's population based on an 'age-structured population model'; 2) estimation of the age distribution of each individual household size with an entropy-based formulation based on an 'individual household size model'; and 3) estimation of the number of each household size based on a 'total household size model'. The last stage is achieved by projecting the age distribution of the country's population (obtained in stage 1) onto the age distributions of individual household sizes (obtained in stage 2). The effectiveness of the proposed method is demonstrated by feeding real world data, and it is general and versatile enough to be extended to other time dependent demographic variables.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []}
{"metadata": {"id": "DsrhyzVWRFl", "year": null, "venue": "EAIS 2020", "pdf_link": "https://ieeexplore.ieee.org/iel7/9119729/9122692/09122753.pdf", "forum_link": "https://openreview.net/forum?id=DsrhyzVWRFl", "arxiv_id": null, "doi": null}, "paper": {"title": "Deep Learning-Based Adaptive Image Compression System for a Real-World Scenario", "authors": ["Vito Walter Anelli", "Yashar Deldjoo", "Tommaso Di Noia", "Daniele Malitesta"], "abstract": "Deep Learning-based (DL) image compression has shown prominent results compared to standard image compression techniques like JPEG, JPEG2000, BPG and WebP. Nevertheless, neither DL nor standard techniques can generally cope with critical real-world scenarios with stringent performance constraints. In order to explore the nature of this gap, we first introduce an industrial scenario, which contemplates real-time compression of high-resolution images, with strict requirements on a number of quality-performance indicators, namely: the output image quality, the hardware, and the compression complexity. Next, we propose a DL-based image compression model, i.e. a Convolutional Residual Autoencoder (CRAE). In particular, CRAE integrates some structural benefits of a deep neural network, including the PReLU activation function and sub-pixel convolution, which have proven to be especially suitable for image compression tasks. We analyze the performance of the proposed CRAE approach by adopting two types of processing: (i) global and (ii) patch-based processing of image data. To test the models, we exploit a dataset of high-resolution images provided by the MERMEC company, consisting of consecutive images of the railway track captured by a machine vision system called V-CUBE. Furthermore, the company provided strict compression requirements that needed to be met by the developed system. Preliminary results of an ongoing study indicate that the proposed image compression system can meet the requirements by MERMEC with reasonable performance, with a mild advantage observed for full-based CRAE. The obtained outcomes suggest that CRAE can adapt to the specific structure of the given dataset and extract the salient recurrent patterns inside an image. In summary, this line of research represents the core of the future plug-and-play DL architecture for constrained image compression.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []}
{"metadata": {"id": "RkMV-wVUGC", "year": null, "venue": "EAIS 2022", "pdf_link": "https://ieeexplore.ieee.org/iel7/9787685/9787686/09787753.pdf", "forum_link": "https://openreview.net/forum?id=RkMV-wVUGC", "arxiv_id": null, "doi": null}, "paper": {"title": "Online Monitoring of Stance from Tweets: The case of Green Pass in Italy", "authors": ["Alessandro Bondielli", "Giuseppe Cancello Tortora", "Pietro Ducange", "Armando Macri", "Francesco Marcelloni", "Alessandro Renda"], "abstract": "Stance detection on social media has attracted a lot of attention in the last few years, as opinionated posts are an invaluable source of information which can possibly be exploited in dedicated systems. This is especially true in the case of particularly polarizing topics for which there is no clear consensus among the population. In this paper, we focus on one of these topics, namely the EU digital COVID certificate (also known as Green Pass), with the objective of uncovering the stance towards it in a specific time period for the Italian Twitter community. To this aim, we first tested some classifiers for determining the most suitable one in terms of performance and complexity for the stance detection problem under consideration. Then, we compared several approaches aimed at counteracting the occurrence of concept drift, i.e., that phenomenon for which the characteristics of the dataset vary over time, possibly resulting in a degradation of classification accuracy. Our experimental analysis suggests that updating the classifier during the stance monitoring campaign is crucial for maintaining a satisfactory level of performance. Finally, we deployed our system to monitor the stance on the topic of Green Pass expressed in tweets published from July to December 2021 and to obtain insights about its evolution.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []}
{"metadata": {"id": "g3KmAoqLFp79", "year": null, "venue": "EAIS 2013", "pdf_link": "https://ieeexplore.ieee.org/iel7/6588691/6604096/06604112.pdf", "forum_link": "https://openreview.net/forum?id=g3KmAoqLFp79", "arxiv_id": null, "doi": null}, "paper": {"title": "Online identification of complex multi-input-multi-output system based on generic evolving neuro-fuzzy inference system", "authors": ["Mahardhika Pratama", "Sreenatha G. Anavatti", "Matthew A. Garratt", "Edwin Lughofer"], "abstract": "Nowadays, unmanned aerial vehicles (UAVs) play a noteworthy role in miscellaneous defence and civilian operations. A major facet in the UAV control system is an identification phase feeding valid and up-to-date information on the system dynamics in order to generate proper adaptive control actions to handle various UAV maneuvers. The UAV, however, constitutes a complex system possessing highly non-linear properties. Moreover, the learning environment in modeling the UAV's dynamics varies over time and demands an online learning scheme, encouraging a fully adaptive and evolving algorithm with a mild computational load to settle the task. In contrast, the contemporaneous literature scrutinizing the identification of UAV dynamics still relies on offline or batched learning procedures. Evolving neuro-fuzzy systems (ENFS), whose landmarks are a flexible rule base and usability in time-critical applications, offer a promising impetus in the UAV research field, and in particular from its identification standpoint. The principal cornerstone is that an ENFS can commence its learning mechanism from scratch with an empty rule base and very limited expert knowledge. Nonetheless, it can perform automatic knowledge building from streaming data without catastrophically forgetting previously valid knowledge, which is akin to the autonomous mental development of the human brain. This paper elaborates the identification of a rotary wing UAV based on our incipient ENFS algorithm termed generic evolving neuro-fuzzy system (GENEFIS). In summary, our algorithm can not only trace the footprint of the UAV dynamics but also ameliorate the performance of existing ENFS in terms of predictive quality and resultant rule base burden.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []}
| {"metadata": {"id": "cehY-TOeNV", "year": null, "venue": "EAIS 2017", "pdf_link": "https://ieeexplore.ieee.org/iel7/7947643/7954814/07954836.pdf", "forum_link": "https://openreview.net/forum?id=cehY-TOeNV", "arxiv_id": null, "doi": null}, "paper": {"title": "Incremental rule splitting in generalized evolving fuzzy regression models", "authors": ["Edwin Lughofer", "Mahardhika Pratama", "Igor Skrjanc"], "abstract": "We propose an incremental rule splitting concept for generalized fuzzy rules in evolving fuzzy regression models in order to properly react on gradual drifts and to compensate inappropriate settings of rule evolution parameters; both occurrences may lead to oversized rules with untypically large local errors, which also usually affects the global model error. The generalized rules are directly defined in the multi-dimensional feature space through a kernel function, and thus allowing any rotated orientation of their shapes. Our splitting condition is based 1.) on the local error of rules measured in terms of a weighted contribution to the whole model error and 2.) on the size of the rules measured in terms of its volume. Thereby, we use the concept of statistical process control for automatic thresholding, in order to omit two extra parameters. The splitting technique relies on the eigendecompisition of the rule covariance matrix by adequately manipulating the largest eigenvector and eigenvalues in order to retrieve the new centers and contours of the two split rules. Thus, splitting is performed along the main principal component direction of a rule. The splitting concepts are integrated in the generalized smart evolving learning engine (Gen-Smart-EFS) and successfully tested on two real-world application scenarios, engine test benches and rolling mills, the latter including a real-occurring gradual drift (whose position in the data is known). Results show clearly improved error trend lines over time when splitting is applied: reduction of the error by about one third (rolling mills) and one half (engine test benches). In case of rolling mills, three rule splits right after the gradual drift starts were essential for this significant improvement.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "clB2C2MVQUF", "year": null, "venue": "EAIS 2018", "pdf_link": "https://ieeexplore.ieee.org/iel7/8388184/8397169/08397186.pdf", "forum_link": "https://openreview.net/forum?id=clB2C2MVQUF", "arxiv_id": null, "doi": null}, "paper": {"title": "Evolving time-series based prediction models for quality criteria in a multi-stage production process", "authors": ["Edwin Lughofer", "Robert Pollak", "Pauline Meyer-Heye", "Helmut Zörrer", "Christian Eitzinger", "Jasmin Lehner", "Thomas Radauer", "Mahardhika Pratama"], "abstract": "We address the problem of predicting product quality for a latter stage in a production process already at an early stage. Thereby, the idea is to use time-series of process values, recorded during the on-line production process of the early stage and containing possible system dynamics and variations according to parameter settings or different environmental conditions, as input to predict the final quality criteria in the latter stage. We apply a non-linear partial least squares (PLS) variant for reducing the high input dimensionality of time-series batch-process problems, by combining PLS with generalized Takagi-Sugeno fuzzy systems, a new extended variant of classical TS fuzzy system (thus termed as PLS-Fuzzy). This combination opens the possibility to resolve non-linearities in the PLS score space without requiring extra pre-tuning parameters (as is the case in other non-linear PLS variants). The models are trained by an evolving and iterative vector quantization approach to find the optimal number of rules and their ideal positioning and shape, combined with a fuzzily weighted version of elastic net regu-larization for robust estimation of the consequent parameters. The adaptation algorithm of the models (termed as IPLS-GEFS) includes an on-the-fly evolving rule learning concept (GEFS), a forgetting strategy with dynamically varying forgetting factor in case of drifts (to increase flexibility by outweighing older samples) as well as a new variant for an incremental single-pass update of the latent variable space (IPLS). The latter can be seen as a new variant for incremental dimension reduction and subspace update and is necessary when the covariance characteristics between input and output changes. Results on a real-world data set from microfluidic chip production show a comparable performance of PLS-Fuzzy with random forests, extreme learning machines and deep learning with MLP neural networks, achieving low prediction errors (below 10%) with low model complexity. Updating the models with new on-line data — only achievable with our method, as the others are batch off-line methods (with mostly slow re-training phases) — decreased the model errors, at most when including incremental latent variable space update.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "5HPVzt2IXR", "year": null, "venue": "EAIS 2020", "pdf_link": "https://ieeexplore.ieee.org/iel7/9119729/9122692/09122772.pdf", "forum_link": "https://openreview.net/forum?id=5HPVzt2IXR", "arxiv_id": null, "doi": null}, "paper": {"title": "Online Sequential Ensembling of Fuzzy Systems", "authors": ["Edwin Lughofer", "Mahardhika Pratama"], "abstract": "Evolving fuzzy systems (EFS) have enjoyed a wide attraction in the community to handle learning from data streams in an incremental, single-pass and transparent manner. The main concentration so far lied in the development of approaches for single EFS models. Forgetting mechanisms have been used to increase their flexibility, especially to adapt quickly to changing situations such as drifting data distributions. These, however, require a forgetting factor steering the degree of timely outweighing older learned concepts. Furthermore, as being pure supervised incremental methods, they typically assume that target reference values are immediately available without any delays. In this paper, we propose a new concept of learning fuzzy systems from data streams, which we call sequential ensembling. It is able to model the recent dependencies in streams on a chunk-wise basis: for each new incoming chunk, a new fuzzy model is trained from scratch and added to the ensemble (of fuzzy systems trained before). The point is that a new chunk can be used for establishing a new fuzzy model as soon as the target values are available. This induces i.) flexibility for respecting the actual system delay in receiving target values (by setting the chunksize adequately) and ii.) fast drift handling possibilities. The latter are realized with specific prediction techniques for new data chunks based on the sequential ensemble members trained so far over time, for which we propose four different variants. These include specific spatial and timely uncertainty concepts. Finally, in order to cope with large-scale and (theoretically) infinite data streams within a reasonable amount of prediction time, we demonstrate a concept for pruning past ensemble members. The results based on two data streams show significantly improved performance compared to single EFS models in terms of a better convergence of the accumulated chunk-wise ahead prediction error trends over time. This is especially true in the case of abrupt and gradual drifts appearing in the target concept, where the sequential ensemble (especially due to recent weak members) is able to react more flexibly and quickly than (more heavy) single EFS models. In the case of input space drifts and new operating conditions, the more advanced prediction schemes, which include uncertainty weighing concepts, can significantly outperform standard averaging over all members' outputs.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
| {"metadata": {"id": "4_n1n-K9xq", "year": null, "venue": "EAIS 2020", "pdf_link": "https://ieeexplore.ieee.org/iel7/9119729/9122692/09122752.pdf", "forum_link": "https://openreview.net/forum?id=4_n1n-K9xq", "arxiv_id": null, "doi": null}, "paper": {"title": "Scalable Teacher-Forcing Networks under Spark Environments for Large-Scale Streaming Problems", "authors": ["Choiru Za'in", "Andri Ashfahani", "Mahardhika Pratama", "Edwin Lughofer", "Eric Pardede"], "abstract": "Large-scale data streams remain an open issue in the existing literature. They feature a never-ending information flow, mostly going beyond the capacity of a single processing node. Nonetheless, algorithmic development of large-scale streaming algorithms under distributed platforms faces a major challenge due to the scalability issue. The network complexity grows exponentially with the increase of data batches, leading to an accuracy loss if the model fusion phase is not properly designed. A large-scale streaming algorithm, namely the Scalable Teacher Forcing Network (ScatterNet), is proposed here. ScatterNet has an elastic structure to handle concept drift on the local scale within a data batch or on the global scale across batches. It is built upon the teacher forcing concept, providing a short-term memory aptitude. ScatterNet features a data-free model fusion approach which consists of a zero-shot merging mechanism and online model selection. Our numerical study demonstrates a moderate improvement of prediction accuracy by ScatterNet while gaining a competitive advantage in terms of execution time compared to its counterparts.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "L6KmftF7OGP", "year": null, "venue": "EAIS 2020", "pdf_link": "https://ieeexplore.ieee.org/iel7/9119729/9122692/09122698.pdf", "forum_link": "https://openreview.net/forum?id=L6KmftF7OGP", "arxiv_id": null, "doi": null}, "paper": {"title": "Emotions Understanding Model from Spoken Language using Deep Neural Networks and Mel-Frequency Cepstral Coefficients", "authors": ["Marco Giuseppe de Pinto", "Marco Polignano", "Pasquale Lops", "Giovanni Semeraro"], "abstract": "The ability to understand people through spoken language is a skill that many human beings take for granted. On the contrary, the same task is not as easy for machines, as consequences of a large number of variables which vary the speaking sound wave while people are talking to each other. A sub-task of speeches understanding is about the detection of the emotions elicited by the speaker while talking, and this is the main focus of our contribution. In particular, we are presenting a classification model of emotions elicited by speeches based on deep neural networks (CNNs). For the purpose, we focused on the audio recordings available in the Ryerson Audio-Visual Database of Emotional Speech and Song (RAVDESS) dataset. The model has been trained to classify eight different emotions (neutral, calm, happy, sad, angry, fearful, disgust, surprise) which correspond to the ones proposed by Ekman plus the neutral and calm ones. We considered as evaluation metric the F1 score, obtaining a weighted average of 0.91 on the test set and the best performances on the \"Angry\" class with a score of 0.95. Our worst results have been observed for the sad class with a score of 0.87 that is nevertheless better than the state-of-the-art. In order to support future development and the replicability of results, the source code of the proposed model is available on the following GitHub repository: https://github.com/marcogdepinto/Emotion-Classification-Ravdess.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "Actw4Lc3lZZ", "year": null, "venue": "EAIS 2020", "pdf_link": "https://ieeexplore.ieee.org/iel7/9119729/9122692/09122760.pdf", "forum_link": "https://openreview.net/forum?id=Actw4Lc3lZZ", "arxiv_id": null, "doi": null}, "paper": {"title": "Training in a Virtual Learning Environment: A Process Mining Approach", "authors": ["Annalisa Appice", "Pasquale Ardimento", "Donato Malerba", "Giuseppe Modugno", "Diego Marra", "Marco Mottola"], "abstract": "Upgrading the workers' skills through adequate training is highly demanded in modern companies seeking for overall productivity and competitiveness. Currently the spread of technology-based training has fostered the development of new training approaches appositely designed for workers. In this paper, we introduce a new training platform that uses a virtual learning environment that combines virtual reality (VR) and 360 degree technologies. In particular, we focus on the problem of predicting the outcome of the workers' training based on how the workers have behaved in the virtual learning environment during their training session. To this aim, we formulate a process mining methodology that combines features engineering and classification algorithms. The effectiveness of the proposed methodology has been validated against a real use case.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "puxg1Dp6DD", "year": null, "venue": "EAIS 2020", "pdf_link": "https://ieeexplore.ieee.org/iel7/9119729/9122692/09122749.pdf", "forum_link": "https://openreview.net/forum?id=puxg1Dp6DD", "arxiv_id": null, "doi": null}, "paper": {"title": "Saliency Detection for Hyperspectral Images via Sparse-Non Negative-Matrix-Factorization and novel Distance Measures*", "authors": ["Antonella Falini", "Graziano Castellano", "Cristiano Tamborrino", "Francesca Mazzia", "Rosa Maria Mininni", "Annalisa Appice", "Donato Malerba"], "abstract": "Saliency detection is a very active area in computer vision. When hyperspectral images are analyzed, a big amount of data need to be processed. Hence, dimensionality reduction techniques are used to highlight salient pixels allowing us to neglect redundant features. We propose a bottom-up approach based on two main ingredients: sparse non negative matrix factorization (SNMF) and spatial and spectral distances between the input image and the reconstructed one. In particular, we use both well known and novel distance functions. The method is validated on both hyperspectral and multispectral images.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "ig8SrPL-_H", "year": null, "venue": "EAIS 2020", "pdf_link": "https://ieeexplore.ieee.org/iel7/9119729/9122692/09122780.pdf", "forum_link": "https://openreview.net/forum?id=ig8SrPL-_H", "arxiv_id": null, "doi": null}, "paper": {"title": "Analyzing Dynamic Social Media Data via Random Projection - A New Challenge for Stream Classifiers", "authors": ["Moritz Heusinger", "Christoph Raab", "Frank-Michael Schleif"], "abstract": "In recent years social media became an important part of everyday life for many people. A big challenge of social media is, to find posts, which are interesting for the user. Many social networks like Twitter handle this problem with so called hashtags. A user can label his own Tweet (post) with a hashtag, while other users can search for posts containing a specified hashtag. But what about finding posts which are not labeled by the creator? We provide a way of completing hashtags for unlabeled posts using classification on a novel real world Twitter data stream. New posts will be created every second, thus this context fits perfectly for non-stationary data analysis. Our goal is to show, how labels (hashtags) of social media posts can be predicted by streaming classifiers. In particular we employ Random Projection (RP) as a preprocessing step in calculating streaming models. Also we provide a novel real world data set for streaming analysis called NSDQ with a comprehensive data description. We show that this dataset is a real challenge for stateof-the-art stream classifiers. While RP has been widely used and evaluated in stationary data analysis scenarios, non-stationary environments are not well analyzed. In this paper we provide a use case of RP on real world streaming data, especially on NSDQ dataset. We discuss why RP can be used in this scenario and how it can handle stream specific situations like concept drift. We also provide experiments with RP on streaming data, using state-of-the-art streaming classifiers like Adaptive Random Forest and concept drift detectors.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "ZHr7b3VDyeO", "year": null, "venue": "EAIS 2022", "pdf_link": "https://ieeexplore.ieee.org/iel7/9787685/9787686/09787693.pdf", "forum_link": "https://openreview.net/forum?id=ZHr7b3VDyeO", "arxiv_id": null, "doi": null}, "paper": {"title": "Adaptive Classification of Occluded Facial Expressions of Affective States", "authors": ["Jordan Vice", "Masood Mehmood Khan", "Iain Murray", "Svetlana N. Yanushkevich"], "abstract": "Internationally, the recent pandemic caused severe social changes forcing people to adopt new practices in their daily lives. One of these changes requires people to wear masks in public spaces to mitigate the spread of viral diseases. Affective state assessment (ASA) systems that rely on facial expression analysis become impaired and less effective due to the presence of visual occlusions caused by wearing masks. Therefore, ASA systems need to be future-proofed and equipped with adaptive technologies to be able to analyze and assess occluded facial expressions, particularly in the presence of masks. This paper presents an adaptive approach for classifying occluded facial expressions when human faces are partially covered with masks. We deployed an unsupervised, cosine similarity-based clustering approach exploiting the continuous nature of the extended Cohn-Kanade (CK+) dataset. The cosine similarity-based clustering resulted in twenty-one micro-expression clusters that describe minor variations of human facial expressions. Linear discriminant analysis was used to project all clusters onto lower-dimensional discriminant feature spaces, allowing for binary occlusion classification and the dynamic assessment of affective states. During the validation stage, we observed 100% accuracy when classifying faces with features extracted from the lower part of the occluded faces (occlusion detection). We observed 76.11% facial expression classification accuracy when features were gathered from the uncovered full-faces and 73.63% classification accuracy when classifying upper-facial expressions - applied when the lower part of the face is occluded. The presented system promises an improvement to visual inspection systems through an adaptive occlusion detection and facial expression classification framework.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "njK_zKi6Q_d", "year": null, "venue": "EAIS 2022", "pdf_link": "https://ieeexplore.ieee.org/iel7/9787685/9787686/09787730.pdf", "forum_link": "https://openreview.net/forum?id=njK_zKi6Q_d", "arxiv_id": null, "doi": null}, "paper": {"title": "Dynamic Hybrid Learning for Improving Facial Expression Classifier Reliability", "authors": ["Jordan Vice", "Masood Mehmood Khan", "Tele Tan", "Svetlana N. Yanushkevich"], "abstract": "Independent, discrete models like Paul Ekman’s six basic emotions model are widely used in affective state assessment (ASA) and facial expression classification. However, the continuous and dynamic nature of human expressions often needs to be considered for accurately assessing facial expressions of affective states. This paper investigates how mutual information-carrying continuous models can be extracted and used in continuous and dynamic facial expression classification systems for improving the efficacy and reliability of ASA systems. A novel, hybrid learning model that projects continuous data onto a multidimensional hyperplane is proposed. Through cosine similarity-based clustering (unsupervised) and classification (supervised) processes, our hybrid approach allows us to transform seven, discrete facial expression models into twenty-one facial expression models that include micro-expressions. The proposed continuous, dynamic classifier was able to achieve greater than 73% accuracy when experimented with Random Forest, Support Vector Machine (SVM) and Neural Network classification architectures. The presented system was validated using the Ryerson Audio-Visual Database of Emotional Speech and Song (RAVDESS) and the extended Cohn-Kanade (CK+) dataset.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "-3rktfgwN2", "year": null, "venue": "EAIS 2017", "pdf_link": "https://ieeexplore.ieee.org/iel7/7947643/7954814/07954842.pdf", "forum_link": "https://openreview.net/forum?id=-3rktfgwN2", "arxiv_id": null, "doi": null}, "paper": {"title": "Estimation of moving agents density in 2D space based on LSTM neural network", "authors": ["Marsela Polic", "Ziad Salem", "Karlo Griparic", "Stjepan Bogdan", "Thomas Schmickl"], "abstract": "As a part of ASSISIbf project, with a final goal of forming a collective adaptive bio-hybrid society of animals and robots, an artificial neural network based on LSTM architecture was designed and trained for bee density estimation. During experiments, the bees are placed inside a plastic arena covered with wax, where they interact with and adapt to specialized static robotic units, CASUs, designed specially for this project. In order to interact with honeybees, the CASUs require the capability i) to produce and perceive the stimuli, i.e., environmental cues, that are relevant to honeybee behaviour, and ii) to sense the honeybees presence. The second requirement is implemented through 6 proximity sensors mounted on the upper part of CASU. In this paper we present estimation of honeybees (moving agents) density in 2D space (experimental arena) that is based on LSTM neural network. When compared to previous work done in this field, experiments demonstrate satisfactory results in estimating sizes of bee groups placed in the arena within a larger scope of outputs. Two different approaches were tested: regression and classification, with classification yielding higher accuracy.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "vPjTJbEdej", "year": null, "venue": "EAIS 2020", "pdf_link": "https://ieeexplore.ieee.org/iel7/9119729/9122692/09122758.pdf", "forum_link": "https://openreview.net/forum?id=vPjTJbEdej", "arxiv_id": null, "doi": null}, "paper": {"title": "Evaluation of Cognitive Impairment in Pediatric Multiple Sclerosis with Machine Learning: An Exploratory Study of miRNA Expressions", "authors": ["Gabriella Casalino", "Gennaro Vessio", "Arianna Consiglio"], "abstract": "Multiple Sclerosis (MS) is a demyelinating autoimmune disease that usually affects young adults; however, recently some symptoms of cognitive impairment have been recognized as early signs of MS onset in pediatric patients (PedMS). The underlying relationships between these two conditions, as well as their molecular markers, have not been fully understood yet. In this work, we analyze microRNAs (miRNAs) expression profiles of PedMS patients with machine learning algorithms in order to create effective models able to detect the presence of cognitive impairment. In particular, we compare three different classification algorithms, fed with features automatically selected by a feature selection strategy. Experimental results show that linear support vector machines achieved the best performance. Moreover, we discuss the importance of ten of the most discriminant automatically selected miRNAs. A graphical analysis of these features highlights the relationships among miRNAs and the two classes the patients belongs to.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "lWjku5ZFFJY", "year": null, "venue": "EAIS 2020", "pdf_link": "https://ieeexplore.ieee.org/iel7/9119729/9122692/09122777.pdf", "forum_link": "https://openreview.net/forum?id=lWjku5ZFFJY", "arxiv_id": null, "doi": null}, "paper": {"title": "Exploiting Categorization of Online News for Profiling City Areas", "authors": ["Alessandro Bondielli", "Pietro Ducange", "Francesco Marcelloni"], "abstract": "Profiling city areas, in terms of citizens' behaviour and commercial and social activities, is an interesting issue in the context of smart cities, especially considering a real-time streaming context. Several methods have been proposed in the literature, exploiting different data sources. In this paper, we propose an approach to perform profiling of city areas based on articles of local online newspapers, by exploiting information regarding the text as well as metadata such as geo-localization and tags. In particular, we use tags associated with each article for identifying macro-categories through clustering analysis on tags embeddings. Further, we employ a text categorization model based on SVM to label online a new article, represented as Bag-of-Words, with one of such categories. The categorization approach has been integrated into a framework recently proposed by the authors for profiling city areas exploiting different web sources of data: the online newspapers are monitored continuously, thus producing a news stream to be analysed. We show experiments performed on the city of Rome, considering data from 2014 to 2018. We discuss the results obtained by adopting different classifiers and present that the best classifier, namely an SVM, can achieve an accuracy and an f1-score up to 93% and 79%, respectively.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
| {"metadata": {"id": "51nB9fYocR", "year": null, "venue": "EAIS 2017", "pdf_link": "https://ieeexplore.ieee.org/iel7/7947643/7954814/07954837.pdf", "forum_link": "https://openreview.net/forum?id=51nB9fYocR", "arxiv_id": null, "doi": null}, "paper": {"title": "Self-evolving kernel recursive least squares algorithm for control and prediction", "authors": ["Zhao-Xu Yang", "Hai-Jun Rong", "Guang-She Zhao", "Jing Yang"], "abstract": "This paper presents a self-evolving kernel recursive least squares (KRLS) algorithm which implements the modelling of unknown nonlinear systems in reproducing kernel Hilbert spaces (RKHS). The prime motivation of this development is a reformulation of the well-known KRLS algorithm, which inevitably increases the computational complexity in cases where data arrive sequentially. The self-evolving KRLS algorithm utilizes the measurement of kernel evaluation and the adaptive approximation error to determine a learning system with a structure of suitable size, involving recruiting and dimension reduction of the kernel vector during the adaptive learning phase without predefining them. This self-evolving procedure allows the algorithm to operate online, often in real time, reducing the computational time and improving the learning performance. The algorithm is finally utilized in applications of online adaptive control and time series prediction, where the system is described as an unknown function by a Nonlinear AutoRegressive with eXogenous inputs (NARX) model. Simulation results from an inverted pendulum system and the Time Series Data Library demonstrate the satisfactory performance of the proposed self-evolving KRLS algorithm.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
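The "kernel evaluation" recruitment criterion mentioned above can be illustrated with a deliberately simplified stand-in: an online kernel model that adds a new RBF center only when the input is dissimilar from all stored centers, and otherwise adapts its weights by a gradient step. This is a kernel-LMS simplification under assumed parameter names, not the paper's recursive least squares update.

```python
import numpy as np

def rbf(x, c, gamma=1.0):
    return np.exp(-gamma * np.sum((x - c) ** 2))

class GrowingKernelModel:
    """Online kernel regression with a novelty-based growing dictionary."""

    def __init__(self, nu=0.5, lr=0.2):
        self.centers, self.w, self.nu, self.lr = [], [], nu, lr

    def predict(self, x):
        return sum(wi * rbf(x, ci) for wi, ci in zip(self.w, self.centers))

    def update(self, x, y):
        err = y - self.predict(x)
        sims = [rbf(x, c) for c in self.centers]
        if not sims or max(sims) < self.nu:   # novel region: recruit a center
            self.centers.append(np.asarray(x, float))
            self.w.append(self.lr * err)
        else:                                 # familiar region: adapt weights
            self.w = [wi + self.lr * err * si
                      for wi, si in zip(self.w, sims)]
```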
| {"metadata": {"id": "sHtW2_nGhi", "year": null, "venue": "EAIS 2011", "pdf_link": "https://ieeexplore.ieee.org/iel5/5936949/5945904/05945914.pdf", "forum_link": "https://openreview.net/forum?id=sHtW2_nGhi", "arxiv_id": null, "doi": null}, "paper": {"title": "A Simplified Structure Evolving Method for Fuzzy System structure learning", "authors": ["Di Wang", "Xiao-Jun Zeng", "John A. Keane"], "abstract": "This paper proposes a Simplified Structure Evolving Method (SSEM) for fuzzy systems, which improves our previous work, the Structure Evolving Learning Method for Fuzzy Systems (SELM [1]). SSEM keeps all the advantages of SELM [1] and improves it by starting with the simplest fuzzy rule set with only one fuzzy rule (instead of the 2^n fuzzy rules in SELM) as the starting point. By doing this, SSEM is able to select the most efficient partitions as well as the most efficient attributes for system identification. This improvement makes fuzzy systems applicable to high-dimensional problems. Benchmark examples with high-dimensional inputs are given to illustrate the advantages of the proposed algorithm.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "mtZq5xmr5De", "year": null, "venue": "EAIS 2020", "pdf_link": "https://ieeexplore.ieee.org/iel7/9119729/9122692/09122706.pdf", "forum_link": "https://openreview.net/forum?id=mtZq5xmr5De", "arxiv_id": null, "doi": null}, "paper": {"title": "Double Deep Q Network with In-Parallel Experience Generator", "authors": ["Vincenzo Dentamaro", "Donato Impedovo", "Giuseppe Pirlo", "Giacomo Abbattista", "Vincenzo Gattulli"], "abstract": "In this paper, an in-parallel, greedy experience generator (briefly IPE, In-Parallel Experiences) is crafted and added to the Double Deep Q-Learning (DDQN) algorithm. The algorithm perturbs the weights of the online network; as a result, the network, trying to recover from the perturbed weights, escapes from local minima. DDQN with IPE takes about twice as long to compute as plain DDQN, but even though it slows down learning in terms of wall-clock time, the solution converges faster in terms of the number of epochs.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
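The core trick, perturbing the online network's weights so that recovering from the noise drives training out of local minima, is easy to sketch in PyTorch. The noise scale, the every-250-steps schedule, and the tiny model are illustrative assumptions; the paper's in-parallel experience generation itself is not reproduced here.

```python
import torch

@torch.no_grad()
def perturb_weights(model: torch.nn.Module, sigma: float = 0.01) -> None:
    """Add small Gaussian noise to all parameters of the online network."""
    for p in model.parameters():
        p.add_(sigma * torch.randn_like(p))

# illustrative usage inside a DDQN training loop:
online_net = torch.nn.Sequential(torch.nn.Linear(4, 32), torch.nn.ReLU(),
                                 torch.nn.Linear(32, 2))
for step in range(1000):
    # ... ordinary DDQN gradient update on online_net would go here ...
    if step > 0 and step % 250 == 0:   # assumed perturbation schedule
        perturb_weights(online_net)
```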
| {"metadata": {"id": "D8Nt4vgAlta", "year": null, "venue": "EAIS 2020", "pdf_link": "https://ieeexplore.ieee.org/iel7/9119729/9122692/09122751.pdf", "forum_link": "https://openreview.net/forum?id=D8Nt4vgAlta", "arxiv_id": null, "doi": null}, "paper": {"title": "Vertex Feature Classification (VFC)", "authors": ["Vincenzo Dentamaro", "Donato Impedovo", "Giuseppe Pirlo", "Alessandro Massaro"], "abstract": "In this paper a new algorithm for multi-class classification is presented. The algorithm, named Vertex Feature Classification (VFC), maps input data sets into an ad-hoc built space in order to perform geometric classification. More precisely, each class is first associated with a specific vertex of a polytope computed in the feature space. Subsequently, pattern classification is performed according to the geometric arrangement of patterns in a higher-dimensional feature space. The experimental results, carried out on datasets from the UCI Machine Learning Repository, demonstrate that the accuracy of the new algorithm is comparable with KNN, VDA and SVM, with little or no training phase. An important aspect of this algorithm is its training time, which often takes only a few milliseconds. Furthermore, the algorithm is robust and computationally efficient.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "OtT949P0k8B", "year": null, "venue": "EAIS 2018", "pdf_link": "https://ieeexplore.ieee.org/iel7/8388184/8397169/08397178.pdf", "forum_link": "https://openreview.net/forum?id=OtT949P0k8B", "arxiv_id": null, "doi": null}, "paper": {"title": "An adaptable deep learning system for optical character verification in retail food packaging", "authors": ["Fabio De Sousa Ribeiro", "Francesco Calivá", "Mark Swainson", "Kjartan Gudmundsson", "Georgios Leontidis", "Stefanos D. Kollias"], "abstract": "Retail food packages contain various types of information such as the food name, ingredients list and use-by dates. Such information is critical to ensure proper distribution of products to the market and eliminate health risks due to erroneous mislabelling, which is considerably detrimental to consumers and suppliers alike. In this paper, an adaptable deep-learning-based system is proposed and tested across various possible scenarios: a) for the identification of blurry images and/or missing information in food packaging photos, captured during the validation process in supply chains; b) for deep neural network adaptation, achieved through a novel methodology that utilises facets of the same convolutional neural network architecture. Latent variables were extracted from different datasets and used as input to a k-means clustering and k-nearest neighbour classification algorithm, to compute a new set of centroids which better adapts to the target dataset's distribution. Furthermore, visualisation and analysis of network adaptation provide insight into how higher accuracy was achieved when compared to the original deep neural network. The proposed system performed very well in the conducted experiments, showing that it can be deployed in real-world supply chains for automating the verification process, cutting down costs and eliminating errors that could prove risky for public health.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "Gh0Is0LvGCD", "year": null, "venue": "EAIS 2015", "pdf_link": "https://ieeexplore.ieee.org/iel7/7361817/7368765/07368774.pdf", "forum_link": "https://openreview.net/forum?id=Gh0Is0LvGCD", "arxiv_id": null, "doi": null}, "paper": {"title": "Preface", "authors": ["Moamar Sayed Mouchaweh", "Anthony Fleury", "Plamen P. Angelov", "Edwin Lughofer", "José Antonio Iglesias"], "abstract": "Today world is changing very fast and the volume of automatically generated data is constantly increasing over time. Moreover, the rapid technological developments have led to a significant growth in system complexity as well as of the influence of its interaction with the surrounding environments. In these conditions, building models describing the behaviour of a system requires to extract useful information from data streams of unbounded size, arriving at high steady rate and may evolve overtime.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "qrde3igfD9jN", "year": null, "venue": "EAIS 2015", "pdf_link": "https://ieeexplore.ieee.org/iel7/7361817/7368765/07368802.pdf", "forum_link": "https://openreview.net/forum?id=qrde3igfD9jN", "arxiv_id": null, "doi": null}, "paper": {"title": "Drift detection in data stream classification without fully labelled instances", "authors": ["Edwin Lughofer", "Eva Weigl", "Wolfgang Heidl", "Christian Eitzinger", "Thomas Radauer"], "abstract": "Drift detection is an important issue in classification-based stream mining in order to be able to inform the operators in case of unintended changes in the system. Usually, current detection approaches rely on the assumption to have fully supervised labeled streams available, which is often a quite unrealistic scenario in on-line real-world applications. We propose two ways to improve economy and applicability of drift detection: 1.) a semi-supervised approach employing single-pass active learning filters for selecting the most interesting samples for supervising the performance of classifiers and 2.) a fully unsupervised approach based on the overlap degree of classifier's output certainty distributions. Both variants rely on a modified version of the Page-Hinkley test, where a fading factor is introduced to outweigh older samples, making it more flexible to detect successive drift occurrences in a stream. The approaches are compared with the fully supervised variant (SoA) on two real-world on-line applications: the semi-supervised approach is able to detect three real-occurring drifts in these streams with an even lower than resp. the same delay as the supervised variant of about 200 (versus 300) resp. 70 samples, and this by requiring only 20% labelled samples.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
| {"metadata": {"id": "BkJT8R6XE2n", "year": null, "venue": "EAIS 2013", "pdf_link": "https://ieeexplore.ieee.org/iel7/6588691/6604096/06604103.pdf", "forum_link": "https://openreview.net/forum?id=BkJT8R6XE2n", "arxiv_id": null, "doi": null}, "paper": {"title": "eVQ-AM: An extended dynamic version of evolving vector quantization", "authors": ["Edwin Lughofer"], "abstract": "In this paper, we present a new dynamically evolving clustering approach which extends conventional evolving Vector Quantization (eVQ), successfully applied before as a fast learning engine for evolving cluster models, classifiers and evolving fuzzy systems in various real-world applications. The first extension concerns the ability to extract ellipsoidal prototype-based clusters in arbitrary position, thus increasing the flexibility to model any orientation/rotation of local data clouds. The second extension includes a single-pass merging strategy in order to resolve unnecessary overlaps or to dynamically compensate inappropriately chosen learning parameters (which may lead to over-clustering effects). The new approach, termed eVQ-AM (eVQ for Arbitrary ellipsoids with Merging functionality), is compared with conventional eVQ and other incremental and batch clustering methods on two-dimensional as well as high-dimensional streaming clustering problems showing evolving behavior in terms of adding/joining clusters as well as feature range expansions. The comparison includes a sensitivity analysis on the learning parameters and observations of the finally achieved cluster partition qualities.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "ipIm54-kbE8F", "year": null, "venue": "EAIS 2013", "pdf_link": "https://ieeexplore.ieee.org/iel7/6588691/6604096/06604099.pdf", "forum_link": "https://openreview.net/forum?id=ipIm54-kbE8F", "arxiv_id": null, "doi": null}, "paper": {"title": "Resolving global and local drifts in data stream regression using evolving rule-based models", "authors": ["Ammar Shaker", "Edwin Lughofer"], "abstract": "In this paper, we present new concepts for dealing with drifts in data streams during the run of on-line modeling processes for regression problems in the context of evolving fuzzy systems. As opposed to the nominal case based on conventional life-long learning, drifts require a specific treatment during the modeling phase, as they refer to changes in the underlying data distribution or target concepts, which make older learned concepts obsolete. Our approach comes with three new stages for appropriate drift handling: 1.) drifts are not only detected, but also quantified with a new extended version of the Page-Hinkley test, which overcomes some instabilities during downtrends of the indicator; 2.) based on the current intensity quantification of the drift, the necessary degree of forgetting (weak to strong) is extracted (adaptive forgetting); 3.) the latter is achieved by two variants, a.) a single forgetting factor value, accounting for global drifts, and b.) a forgetting factor vector with different entries for separate regions of the feature space, accounting for local drifts. Forgetting factors are integrated into the learning scheme of both the antecedent and consequent parts of the evolving fuzzy systems. The new approach is evaluated on high-dimensional data streams, where the results show that 1.) our adaptive forgetting strategy outperforms the usage of fixed forgetting factors throughout the learning process and 2.) forgetting in local regions may improve on forgetting in global ones when drifts appear locally.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
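Forgetting in the consequent (regression) part of such models is classically realized by exponential forgetting in recursive least squares, where a factor lam below 1 outweighs older samples; the adaptive variant in this paper would steer lam according to drift intensity (and per region, for local drifts). A textbook RLS-with-forgetting sketch, not the paper's full scheme:

```python
import numpy as np

class ForgettingRLS:
    """Recursive least squares with exponential forgetting factor lam."""

    def __init__(self, dim, lam=0.99):
        self.w = np.zeros(dim)        # consequent parameter vector
        self.P = 1e3 * np.eye(dim)    # inverse-covariance-like matrix
        self.lam = lam                # lam < 1: older samples fade out

    def update(self, x, y):
        Px = self.P @ x
        g = Px / (self.lam + x @ Px)               # gain vector
        self.w += g * (y - x @ self.w)             # parameter update
        self.P = (self.P - np.outer(g, Px)) / self.lam
```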
| {"metadata": {"id": "Ono7E-v8tIA", "year": null, "venue": "EAIS 2012", "pdf_link": "https://ieeexplore.ieee.org/iel5/6225463/6232786/06232795.pdf", "forum_link": "https://openreview.net/forum?id=Ono7E-v8tIA", "arxiv_id": null, "doi": null}, "paper": {"title": "On-line active learning based on enhanced reliability concepts", "authors": ["Edwin Lughofer"], "abstract": "In this paper, we present a new methodology for conducting active learning in a single-pass on-line learning context, thus reducing the annotation effort for operators by selecting the most informative samples, i.e. those helping incremental, evolving classifiers most to improve their own predictive performance. Our approach is based on certainty-based sample selection in connection with a version-space reduction approach. Therefore, two new concepts regarding a classifier's reliability in its predictions are investigated and developed in connection with evolving fuzzy classifiers: conflict and ignorance. Conflict models the extent to which a new query point lies in the conflicting region between two or more classes. Ignorance represents the extent to which the new query point appears in an unexplored region of the feature space. The results based on real-world streaming classification data show a stable, high predictive quality of our approach, despite the fact that the requested number of class labels is decreased by up to 90%.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "raix3CvPd_K", "year": null, "venue": "EAIS 2015", "pdf_link": "https://ieeexplore.ieee.org/iel7/7361817/7368765/07368805.pdf", "forum_link": "https://openreview.net/forum?id=raix3CvPd_K", "arxiv_id": null, "doi": null}, "paper": {"title": "A case study on collective intelligence based on energy flow", "authors": ["Kaveh Hassani", "Aliakbar Asgari", "Won-Sook Lee"], "abstract": "In this paper, we propose a stochastic scheme for modeling a multi-species prey-predator artificial ecosystem in order to investigate the influence of energy flow on ecosystem lifetime and stability. Inhabitants of this environment are a few species of herbivore and carnivore birds. In this model, collective behavior emerges in terms of flocking, breeding, competing, resting, hunting, escaping, seeking, and foraging. Ecosystem is defined as a combination of prey and predator species with inter-competition among species within the same level of the food chain, and intra-competition among those belonging to different levels of the food chain. Some energy variables are also introduced as functions of behaviors to model the energy within the ecosystem. Experimental results of 11,000 simulations analyzed by Cox univariate analysis and hazard function suggest that only five corresponding energy variables out of eight aforementioned behaviors influence the ecosystem lifetime. Also, results of survival analysis show that among pairwise interactions between energy factors, only two interactions affect the system lifetime, including interaction between flocking and seeking energies, and interaction between flocking and hunting energies. These results match the observations of real life birds, which use flocking behavior for flexible movements, efficient foraging, social learning, and reducing predation risks.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "08i1HM0_Wx4", "year": null, "venue": "EAIS 2015", "pdf_link": "https://ieeexplore.ieee.org/iel7/7361817/7368765/07368804.pdf", "forum_link": "https://openreview.net/forum?id=08i1HM0_Wx4", "arxiv_id": null, "doi": null}, "paper": {"title": "Adaptive animation generation using web content mining", "authors": ["Kaveh Hassani", "Won-Sook Lee"], "abstract": "Creating 3D animation is a labor-intensive and time-consuming process requiring designers to learn and utilize a complex combination of menus, dialog boxes, buttons and manipulation interfaces for a given stand-alone animation design software. On the other hand, conceptual simplicity and naturalness of visualizing imaginations from lingual descriptions motivates researchers for developing automatic animation generation systems using natural language interfaces. In this research, we introduce an interactive and adaptive animation generation system that utilizes data-driven techniques to extract the required common-sense and domain-specific knowledge from web. This system is capable of creating 3D animation based on user's lingual commands. It uses the user interactions as a relevance feedback to learn the implicit design knowledge, correct the extracted knowledge, and manipulate the dynamics of the virtual world in an active and incremental manner. Moreover, system is designed based on a multi-agent methodology which provides it with distributed processing capabilities and cross-platform characteristics. In this paper, we will focus on information retrieval agent which is responsible for extracting numeric data utilized in object attributes, spatiotemporal relations, and environment dynamics using web mining techniques.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
| {"metadata": {"id": "uXjiUmY8xPc", "year": null, "venue": "EAIS 2012", "pdf_link": "https://ieeexplore.ieee.org/iel5/6225463/6232786/06232812.pdf", "forum_link": "https://openreview.net/forum?id=uXjiUmY8xPc", "arxiv_id": null, "doi": null}, "paper": {"title": "Evolving activity recognition from sensor streams", "authors": ["José Antonio Iglesias", "Francisco Javier Ordóñez", "Agapito Ledezma", "Paula de Toledo", "Araceli Sanchis"], "abstract": "Recognizing people's activity automatically is an important task that needs to be tackled in order to face other, more complex tasks such as action prediction, remote health monitoring, or interventions. Recent research on activity recognition has demonstrated that many different activities can be recognized. In most of these studies, the activities are predefined in advance as static models over time. However, how people perform a specific activity changes continuously. In this paper we present an approach for classifying different activities from sensor readings based on Evolving Fuzzy Systems (EFS). Thus, the model that describes an activity evolves according to the changes observed in how that activity is performed. This approach has been successfully tested on a real-world domain using binary sensor data streams.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "GS_Ln_8jIJD", "year": null, "venue": "EAIS 2015", "pdf_link": "https://ieeexplore.ieee.org/iel7/7361817/7368765/07368805.pdf", "forum_link": "https://openreview.net/forum?id=GS_Ln_8jIJD", "arxiv_id": null, "doi": null}, "paper": {"title": "A case study on collective intelligence based on energy flow", "authors": ["Kaveh Hassani", "Aliakbar Asgari", "Won-Sook Lee"], "abstract": "In this paper, we propose a stochastic scheme for modeling a multi-species prey-predator artificial ecosystem in order to investigate the influence of energy flow on ecosystem lifetime and stability. Inhabitants of this environment are a few species of herbivore and carnivore birds. In this model, collective behavior emerges in terms of flocking, breeding, competing, resting, hunting, escaping, seeking, and foraging. Ecosystem is defined as a combination of prey and predator species with inter-competition among species within the same level of the food chain, and intra-competition among those belonging to different levels of the food chain. Some energy variables are also introduced as functions of behaviors to model the energy within the ecosystem. Experimental results of 11,000 simulations analyzed by Cox univariate analysis and hazard function suggest that only five corresponding energy variables out of eight aforementioned behaviors influence the ecosystem lifetime. Also, results of survival analysis show that among pairwise interactions between energy factors, only two interactions affect the system lifetime, including interaction between flocking and seeking energies, and interaction between flocking and hunting energies. These results match the observations of real life birds, which use flocking behavior for flexible movements, efficient foraging, social learning, and reducing predation risks.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "pHa132zP9Su", "year": null, "venue": "EAIS 2015", "pdf_link": "https://ieeexplore.ieee.org/iel7/7361817/7368765/07368804.pdf", "forum_link": "https://openreview.net/forum?id=pHa132zP9Su", "arxiv_id": null, "doi": null}, "paper": {"title": "Adaptive animation generation using web content mining", "authors": ["Kaveh Hassani", "Won-Sook Lee"], "abstract": "Creating 3D animation is a labor-intensive and time-consuming process requiring designers to learn and utilize a complex combination of menus, dialog boxes, buttons and manipulation interfaces for a given stand-alone animation design software. On the other hand, conceptual simplicity and naturalness of visualizing imaginations from lingual descriptions motivates researchers for developing automatic animation generation systems using natural language interfaces. In this research, we introduce an interactive and adaptive animation generation system that utilizes data-driven techniques to extract the required common-sense and domain-specific knowledge from web. This system is capable of creating 3D animation based on user's lingual commands. It uses the user interactions as a relevance feedback to learn the implicit design knowledge, correct the extracted knowledge, and manipulate the dynamics of the virtual world in an active and incremental manner. Moreover, system is designed based on a multi-agent methodology which provides it with distributed processing capabilities and cross-platform characteristics. In this paper, we will focus on information retrieval agent which is responsible for extracting numeric data utilized in object attributes, spatiotemporal relations, and environment dynamics using web mining techniques.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
| {"metadata": {"id": "rRf45ynBG1c", "year": null, "venue": "EAIS 2011", "pdf_link": "https://ieeexplore.ieee.org/iel5/5936949/5945904/05945909.pdf", "forum_link": "https://openreview.net/forum?id=rRf45ynBG1c", "arxiv_id": null, "doi": null}, "paper": {"title": "Using a map-based encoding to evolve plastic neural networks", "authors": ["Paul Tonelli", "Jean-Baptiste Mouret"], "abstract": "Many controllers for complex agents have been successfully generated by automatically designing artificial neural networks with evolutionary algorithms. However, typical evolved neural networks are not able to adapt themselves online, making them unable to perform tasks that require online adaptation. Nature solved this problem in animals with plastic nervous systems. Inspired by neuroscience models of plastic neural networks, the present contribution proposes to use a combination of Hebbian learning, neuro-modulation and a generative map-based encoding. We applied the proposed approach to a problem from operant conditioning (a Skinner box), in which numerous different association rules can be learned. Results show that the map-based encoding scaled up better than a classic direct encoding on this task. Evolving neural networks using a map-based generative encoding also leads to networks that work with most rule sets, even when the evolution is done on a small subset of all possible cases. Such a generative encoding therefore appears as a key to improving the generalization abilities of evolved adaptive neural networks.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "PNroBN_gwR", "year": null, "venue": "EAIS 2022", "pdf_link": "https://ieeexplore.ieee.org/iel7/9787685/9787686/09787735.pdf", "forum_link": "https://openreview.net/forum?id=PNroBN_gwR", "arxiv_id": null, "doi": null}, "paper": {"title": "Collision-Free Navigation using Evolutionary Symmetrical Neural Networks", "authors": ["Hesham M. Eraqi", "Mena Nagiub", "Peter Sidra"], "abstract": "Collision avoidance systems play a vital role in reducing the number of vehicle accidents and saving human lives. This paper extends previous work on using evolutionary neural networks for reactive collision avoidance. We propose a new method we call symmetrical neural networks. The method improves the model's performance by enforcing constraints between the network weights, which reduces the model optimization search space and hence learns more accurate control of the vehicle steering for improved maneuvering. The training and validation processes are carried out using a simulation environment; the codebase is publicly available. Extensive experiments are conducted to analyze the proposed method and evaluate its performance. The method is tested in several simulated driving scenarios. In addition, we analyze the effect of the rangefinder sensor resolution and noise on the overall goal of reactive collision avoidance. Finally, we test the generalization of the proposed method. The results are encouraging; the proposed method improves the model's learning curve on the training scenarios and the generalization to new test scenarios. Using constrained weights significantly reduces the number of generations required for the Genetic Algorithm optimization.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "ObIremyIAVx", "year": null, "venue": "EANN 2022", "pdf_link": null, "forum_link": "https://openreview.net/forum?id=ObIremyIAVx", "arxiv_id": null, "doi": null}, "paper": {"title": "A Robust, Quantization-Aware Training Method for Photonic Neural Networks", "authors": ["A. Oikonomou", "Manos Kirtas", "Nikos Passalis", "George Mourgias-Alexandris", "Miltiadis Moralis-Pegios", "Nikos Pleros", "Anastasios Tefas"], "abstract": "The computationally demanding nature of Deep Learning (DL) has fueled research on neuromorphics due to their potential to provide high-speed and low-energy hardware accelerators. To this end, neuromorphic photonics are increasingly gaining attention, since they can operate at very high frequencies with very low energy consumption. However, they also introduce new challenges in DL training and deployment. In this paper, we propose a novel training method that is able to compensate for quantization noise, which is profoundly present in photonic hardware due to analog-to-digital (ADC) and digital-to-analog (DAC) conversions, targeting photonic neural networks (PNNs) which employ easily saturated activation functions. The proposed method takes quantization into account during training, leading to significant performance improvements during the inference phase. We conduct evaluation experiments on both image classification and time-series analysis tasks, employing a wide range of existing photonic neuromorphic architectures. The evaluation experiments demonstrate the effectiveness of the proposed method when low-bit-resolution photonic architectures are used, as well as its generalization ability.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "OVXT98ukS8", "year": null, "venue": "EANN 2017", "pdf_link": null, "forum_link": "https://openreview.net/forum?id=OVXT98ukS8", "arxiv_id": null, "doi": null}, "paper": {"title": "Improving Face Pose Estimation Using Long-Term Temporal Averaging for Stochastic Optimization", "authors": ["Nikolaos Passalis", "Anastasios Tefas"], "abstract": "Among the most crucial components of an intelligent system capable of assisting drone-based cinematography is estimating the pose of the main actors. However, training deep CNNs towards this task is not straightforward, mainly due to the noisy nature of the data and instabilities that occur during the learning process, significantly slowing down the development of such systems. In this work we propose a temporal averaging technique that is capable of stabilizing as well as speeding up the convergence of stochastic optimization techniques for neural network training. We use two face pose estimation datasets to experimentally verify that the proposed method can improve both the convergence of training algorithms and the accuracy of pose estimation. This also reduces the risk of stopping the training process when a bad descent step was taken and the learning rate was not appropriately set, ensuring that the network will perform well at any point of the training process.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| {"metadata": {"id": "6BjNumo16Rb", "year": null, "venue": "EANN 2021", "pdf_link": null, "forum_link": "https://openreview.net/forum?id=6BjNumo16Rb", "arxiv_id": null, "doi": null}, "paper": {"title": "Predicting Stock Price Movement Using Financial News Sentiment", "authors": ["Jiaying Gong", "Bradley Paye", "Gregory Kadlec", "Hoda Eldardiry"], "abstract": "A central question in financial economics concerns the degree of informational efficiency. Violations of informational efficiency represent capital misallocations and potentially profitable trading opportunities. Market efficiency analyses have evolved to incorporate increasingly rich public information and innovative statistical methods to analyze this information. We propose an Automatic Crawling and Prediction System (ACPS) to 1) automatically crawl online media, 2) extract useful information from a rich set of financial news, and 3) predict future stock price movements. ACPS consists of a feature selection pipeline to select an optimal set of predictive features and a sentiment analysis model to measure sentence-level news sentiment. Generated features and news sentiment data are further processed via an ensemble model based on several machine learning and deep learning algorithms to generate forecasts. Results demonstrate the robustness of our proposed model in predicting the directional movement of daily stock prices. Specifically, the model consistently outperforms existing methods on single-stock prediction, and it performs well across all S&P 500 stocks. Our results indicate the potential value of rich text analysis and ensemble learning methods in a real-time trading context.", "keywords": [], "raw_extracted_content": null, "main_paper_content": null}, "review": {"decision": "Unknown", "reviews": []}, "citation_count": 0, "normalized_citation_count": 0, "cited_papers": [], "citing_papers": []} |
| metadataiduotiJIrOaghyearvenueEANN 2022pdf_linkforum_linkhttps://openreview.net/forum?id=uotiJIrOagharxiv_iddoipapertitlePredicting Seriousness of Injury in a Traffic Accident: A New Imbalanced Dataset and BenchmarkauthorsPaschalis LagiasGeorge D. MagoulasYlli PriftiAlessandro ProvettiabstractThe paper introduces a new dataset to assess the performance of machine learning algorithms in the prediction of the seriousness of injury in a traffic accident. The dataset is created by aggregating publicly available datasets from the UK Department for Transport, which are drastically imbalanced with missing attributes sometimes approaching 50% of the overall data dimensionality. The paper presents the data analysis pipeline starting from the publicly available data of road traffic accidents and ending with predictors of possible injuries and their degree of severity. It addresses the huge incompleteness of public data with a MissForest model. The paper also introduces two baseline approaches to create injury predictors: a supervised artificial neural network and a reinforcement learning model. The dataset can potentially stimulate diverse aspects of machine learning research on imbalanced datasets and the two approaches can be used as baseline references when researchers test more advanced learning algorithms in this area.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
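The MissForest-style imputation mentioned in this abstract can be approximated with scikit-learn's IterativeImputer wrapped around random-forest regressors: each feature with gaps is regressed on the others, iterating to convergence. The toy matrix below merely stands in for the accident records.

```python
import numpy as np
from sklearn.experimental import enable_iterative_imputer  # noqa: F401
from sklearn.impute import IterativeImputer
from sklearn.ensemble import RandomForestRegressor

# Toy data with missing entries, standing in for the aggregated accident records.
X = np.array([[1.0, 2.0, np.nan],
              [3.0, np.nan, 6.0],
              [7.0, 8.0, 9.0],
              [np.nan, 5.0, 4.0]])

# Random-forest round-robin regression approximates the MissForest procedure.
imputer = IterativeImputer(
    estimator=RandomForestRegressor(n_estimators=50),
    max_iter=10,
    random_state=0,
)
print(imputer.fit_transform(X))    # completed matrix with imputed values
```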
| metadataidWQxLJMkdgvyearvenueEANN 2012pdf_linkforum_linkhttps://openreview.net/forum?id=WQxLJMkdgvarxiv_iddoipapertitleKnowledge Clustering Using a Neural Network in a Course on Medical-Surgical NursingauthorsJosé Luis Fernández AlemánChrisina JayneAna Belén Sánchez GarcíaJuan Manuel Carrillo de GeaJosé Ambrosio Toval ÁlvarezabstractThis paper presents a neural network-based intelligent data analysis for knowledge clustering in an undergraduate nursing course. An MCQ (Multiple Choice Question) test was performed to evaluate medical-surgical nursing knowledge in a second-year course. A total of 23 pattern groups were created from the answers of 208 students. The collected data were used to provide customized feedback that guides students towards a greater understanding of particular concepts. The pattern groupings can be integrated with an online MCQ system for training purposes.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
| metadataidgpE8rXRMAr9yearvenueEANN 2020pdf_linkforum_linkhttps://openreview.net/forum?id=gpE8rXRMAr9arxiv_iddoipapertitleDetection of Shocking Images as One-Class Classification Using Convolutional and Siamese Neural NetworksauthorsPavel GulyaevAndrey FilchenkovabstractAutomatic detection of not-safe-for-work content is a serious challenge for social media due to the overwhelming growth of uploaded images, GIFs and videos. This paper focuses on the automatic detection of shocking images by convolutional neural networks. We consider correct recognition of the shocking class to be more important than that of the non-shocking one. Binary classification by a convolutional network that is trained during operation is used as a baseline solution. However, this solution has two drawbacks: the network highlights incorrect features of non-shocking images (an effectively infinite class) and tends to forget rare subclasses of shocking images, which is unacceptable. To eliminate the first drawback, we approach the problem as one-class classification, keeping in mind that a “non-shocking” image can be defined only via contradiction with a shocking one. This method is based on sparse autoencoders built on top of a pretrained convolutional neural network and is not trained during operation. To eliminate the second drawback, we memorize the feature vectors of images that were incorrectly classified during operation. During prediction, a trained siamese network is used to search for similar images in this database. When the combined model makes an incorrect prediction, the image vectors are added to the database and the siamese network is trained on them. This method minimizes the number of errors on rare subclasses identified only during the operation phase of the model.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
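The one-class component can be sketched as a sparse autoencoder trained on features from a pretrained CNN, with reconstruction error as the decision score. The 512-dimensional feature size, layer widths, and sparsity weight below are assumptions, not the paper's settings.

```python
import torch
import torch.nn as nn

# Sparse autoencoder over pretrained-CNN feature vectors (512 dims assumed).
encoder = nn.Sequential(nn.Linear(512, 64), nn.ReLU())
decoder = nn.Linear(64, 512)
opt = torch.optim.Adam(
    list(encoder.parameters()) + list(decoder.parameters()), lr=1e-3)

def train_step(feats, l1_weight=1e-4):
    """One step on features of the defined (shocking) class only."""
    code = encoder(feats)
    loss = nn.functional.mse_loss(decoder(code), feats) \
        + l1_weight * code.abs().mean()      # L1 penalty keeps the code sparse
    opt.zero_grad(); loss.backward(); opt.step()
    return loss.item()

def anomaly_score(feats):
    # High reconstruction error => unlike the training class; threshold this.
    with torch.no_grad():
        return ((decoder(encoder(feats)) - feats) ** 2).mean(dim=1)
```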
| metadataide4sZVqnNZQZyearvenueEANN 2020pdf_linkforum_linkhttps://openreview.net/forum?id=e4sZVqnNZQZarxiv_iddoipapertitleEvaluating the Transferability of Personalised Exercise Recognition ModelsauthorsAnjana WijekoonNirmalie WiratungaabstractExercise Recognition (ExR) is relevant in many high-impact domains, from healthcare to recreational activities to sports sciences. Like Human Activity Recognition (HAR), ExR faces many challenges when deployed in the real world. For instance, typical lab performances of Machine Learning (ML) models are hard to replicate, due to differences in personal nuances, traits and ambulatory rhythms. Thus, the effective transferability of a trained ExR model depends on its ability to adapt and personalise to a new user or user group. This calls for new experimental design strategies that are person-aware and able to organise train and test data differently from standard ML practice. Specifically, we look at person-agnostic and person-aware methods of train-test data creation, and compare them in a comparative study of personalised ExR model transfer to identify best practices. Our findings show ExR to be a far more challenging personalisation problem than other HAR tasks, and confirm the utility of metric learning algorithms for personalised model transfer.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
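The person-aware split the abstract contrasts with standard practice can be sketched with scikit-learn's group-based splitters, which guarantee no subject contributes samples to both train and test sets. The data shapes and subject count below are placeholders.

```python
import numpy as np
from sklearn.model_selection import GroupShuffleSplit

X = np.random.rand(100, 8)                # placeholder sensor features
y = np.random.randint(0, 5, 100)          # exercise labels
subjects = np.random.randint(0, 10, 100)  # person ID for each sample

# Person-aware split: whole subjects are held out, so the test score
# reflects transfer of the model to people never seen during training.
splitter = GroupShuffleSplit(n_splits=1, test_size=0.3, random_state=0)
train_idx, test_idx = next(splitter.split(X, y, groups=subjects))
assert not set(subjects[train_idx]) & set(subjects[test_idx])
```

A person-agnostic split, by contrast, would shuffle samples freely, letting each person's data leak into both sides and inflating the apparent accuracy.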
| metadataidNTsNgox-IrkyearvenueEANN 2017pdf_linkforum_linkhttps://openreview.net/forum?id=NTsNgox-Irkarxiv_iddoipapertitleA Genetic Algorithm for Discovering Linguistic Communities in Spatiosocial Tensors with an Application to Trilingual LuxemburgauthorsGeorgios DrakopoulosFotini StathopoulouGiannis TzimasMichael ParaskevasPhivos MylonasSpyros SioutasabstractMultimodal social networks are omnipresent in Web 2.0, with virtually every human communication action taking place there. Nonetheless, language remains by far the main premise upon which such communicative acts unfold. Thus, it is essential to discover language communities, especially in social data stemming from historically multilingual countries such as Luxemburg. An adjacency tensor is especially suitable for representing such spatiosocial data. However, because of its potentially large size, heuristics should be developed for locating community structure efficiently. Linguistic structure discovery has a plethora of applications including digital marketing and online political campaigns, especially in cases of prolonged and intense cross-linguistic contact. This conference paper presents TENSOR-G, a flexible genetic algorithm for approximate tensor clustering, along with two alternative fitness functions derived from language variation or diffusion properties. The Kruskal tensor decomposition serves as a benchmark, and the results obtained from a set of trilingual Luxemburgian tweets are analyzed with linguistic criteria.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
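A genetic algorithm for community assignment can be sketched as below, evolving node-to-community labelings against an illustrative within-community density objective on a single slice of the adjacency tensor. TENSOR-G's actual fitness functions (derived from language variation and diffusion) are not reproduced here; every constant is a placeholder.

```python
import numpy as np

rng = np.random.default_rng(0)
n_nodes, n_comms, pop_size = 30, 3, 40
adj = rng.random((n_nodes, n_nodes))   # stand-in for one adjacency-tensor slice

def fitness(assign):
    # Illustrative objective: reward dense within-community connectivity.
    return sum(adj[np.ix_(m, m)].mean()
               for k in range(n_comms)
               for m in [np.flatnonzero(assign == k)]
               if len(m) > 1)

pop = rng.integers(0, n_comms, size=(pop_size, n_nodes))
for gen in range(100):
    scores = np.array([fitness(p) for p in pop])
    parents = pop[np.argsort(scores)[-pop_size // 2:]]   # keep the fittest half
    children = parents.copy()
    cuts = rng.integers(1, n_nodes, size=len(children))  # one-point crossover
    for child, other, cut in zip(children, parents[::-1], cuts):
        child[cut:] = other[cut:]
    mutate = rng.random(children.shape) < 0.02           # random mutation
    children[mutate] = rng.integers(0, n_comms, size=mutate.sum())
    pop = np.vstack([parents, children])
```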
| metadataidiKHJd73FvHyearvenueEANN 2021pdf_linkforum_linkhttps://openreview.net/forum?id=iKHJd73FvHarxiv_iddoipapertitleA Novel CNN-LSTM Hybrid Architecture for the Recognition of Human ActivitiesauthorsSofia Stylianou-NikolaidouIoannis VernikosEirini MatheEvaggelos SpyrouPhivos MylonasabstractThe problem of human activity recognition (HAR), which has several applications, has been increasingly attracting the efforts of the research community. In this paper we propose a multi-modal approach addressing the task of video-based HAR. Our approach uses three modalities, i.e., raw RGB video data, depth sequences and 3D skeletal motion data. The latter are transformed into a 2D image representation in the spectral domain. In order to extract spatio-temporal features from the available data, we propose a novel hybrid deep neural network architecture that combines a Convolutional Neural Network (CNN) and a Long Short-Term Memory (LSTM) network. We focus on the tasks of recognition of activities of daily living (ADLs) and medical conditions, and we evaluate our approach using two challenging datasets.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
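The CNN-LSTM hybrid pattern can be sketched as a per-frame CNN whose features feed a recurrent layer. All layer sizes, input shapes, and the class count below are illustrative assumptions, not the paper's architecture.

```python
import torch
import torch.nn as nn

class CNNLSTM(nn.Module):
    """Per-frame CNN features fed to an LSTM; sizes are illustrative."""
    def __init__(self, n_classes=10):
        super().__init__()
        self.cnn = nn.Sequential(
            nn.Conv2d(3, 16, 3, padding=1), nn.ReLU(), nn.MaxPool2d(2),
            nn.Conv2d(16, 32, 3, padding=1), nn.ReLU(), nn.AdaptiveAvgPool2d(1),
        )
        self.lstm = nn.LSTM(input_size=32, hidden_size=64, batch_first=True)
        self.head = nn.Linear(64, n_classes)

    def forward(self, x):                 # x: (batch, time, 3, H, W)
        b, t = x.shape[:2]
        feats = self.cnn(x.flatten(0, 1)).flatten(1)   # (b*t, 32)
        out, _ = self.lstm(feats.view(b, t, -1))       # (b, t, 64)
        return self.head(out[:, -1])      # classify from the last time step

logits = CNNLSTM()(torch.rand(2, 8, 3, 64, 64))        # (2, 10)
```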
| metadataidNYAZqTVj-9yearvenueEANN 2019pdf_linkforum_linkhttps://openreview.net/forum?id=NYAZqTVj-9arxiv_iddoipapertitleRecognizing Human Actions Using 3D Skeletal Information and CNNsauthorsAntonios PapadakisEirini MatheIoannis VernikosApostolos ManiatisEvaggelos SpyrouPhivos MylonasabstractIn this paper we present an approach for the recognition of human actions, targeting activities of daily living (ADLs). Skeletal information is used to create images capturing the motion of joints in 3D space. These images are then transformed to the spectral domain using four well-known image transforms. A deep Convolutional Neural Network is trained on those images. Our approach is thoroughly evaluated using a well-known, publicly available and challenging dataset, for a set of actions that resemble common ADLs, covering both cross-view and cross-subject cases.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
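One way such a spectral-domain image can be produced, assuming a joints-by-time trajectory matrix, is shown below. The joint count, frame count, and choice of a 2D DFT are assumptions; the paper uses four transforms that are not specified in this abstract.

```python
import numpy as np

# Hypothetical skeletal clip: 25 joints x 3 coordinates over 64 frames,
# flattened into a 75 x 64 "trajectory image" (rows: coordinates, cols: time).
motion = np.random.rand(75, 64)

# One possible spectral representation: log-scaled magnitude of the 2D DFT,
# shifted so low frequencies sit in the centre, quantized to an 8-bit image.
spectrum = np.abs(np.fft.fftshift(np.fft.fft2(motion)))
log_spec = np.log1p(spectrum)
image = (255 * log_spec / log_spec.max()).astype(np.uint8)  # CNN input
```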
| metadataid_N1tJPJd0DyearvenueEANN 2022pdf_linkforum_linkhttps://openreview.net/forum?id=_N1tJPJd0Darxiv_iddoipapertitleHuman Activity Recognition Under Partial OcclusionauthorsIoannis-Aris KostisEirini MatheEvaggelos SpyrouPhivos MylonasabstractOne of the major challenges in Human Activity Recognition (HAR) using cameras is occlusion of one or more body parts. However, this problem is often underestimated in contemporary research works, wherein training and evaluation are based on datasets shot under laboratory conditions, i.e., without any kind of occlusion. In this work we propose an approach for HAR in the presence of partial occlusion, i.e., in the case of up to two occluded body parts. We solve this problem using regression, performed by a deep neural network. That is, given an occluded sample, we attempt to reconstruct the missing information regarding the motion of the occluded part(s). We evaluate our approach using a publicly available human motion dataset. Our experimental results indicate a significant increase in performance compared to a baseline approach, wherein a network that has been trained using non-occluded samples is evaluated using occluded samples. To the best of our knowledge, this is the first research work that tackles the problem of HAR under occlusion as a regression problem.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
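The reconstruction-by-regression idea can be sketched as a network trained to predict the coordinates of occluded joints from the visible ones. The 75-dimensional pose vector, random masking scheme, and layer sizes are assumptions for illustration only.

```python
import torch
import torch.nn as nn

# Regression sketch: reconstruct occluded joint coordinates from visible ones.
net = nn.Sequential(nn.Linear(75, 256), nn.ReLU(), nn.Linear(256, 75))
opt = torch.optim.Adam(net.parameters(), lr=1e-3)

full_pose = torch.rand(32, 75)              # ground-truth poses (batch of 32)
mask = (torch.rand(32, 75) > 0.3).float()   # 1 = visible joint, 0 = occluded
occluded = full_pose * mask                 # zero out the "missing" parts

pred = net(occluded)
# Penalize error only where the input was occluded and must be reconstructed.
loss = nn.functional.mse_loss(pred * (1 - mask), full_pose * (1 - mask))
opt.zero_grad(); loss.backward(); opt.step()   # one training step
```

At inference, the reconstructed joints can be spliced back into the pose before it is passed to a recognition model trained on non-occluded data.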
| metadataidJfELsCjNNR2yearvenueEANN 2012pdf_linkforum_linkhttps://openreview.net/forum?id=JfELsCjNNR2arxiv_iddoipapertitleApplying Kernel Methods on Protein Complexes Detection ProblemauthorsCharalampos N. MoschopoulosGriet LaenenGeorge D. KritikosYves MoreauabstractIn recent years, various methodologies have made it possible to detect large parts of the protein interaction networks of various organisms. However, these networks contain highly noisy data, degrading the quality of the information they carry. Various weighting schemes have been applied in order to eliminate noise from interaction data and help bioinformaticians extract valuable information, such as the detection of protein complexes. In this contribution, we propose adding an extra step to these weighting schemes, using kernel methods to better assess the reliability of each pairwise interaction. Our experimental results show that kernel methods clearly help eliminate noise, producing improved results on the protein complexes detection problem.keywordsraw_extracted_contentmain_paper_contentreviewdecisionUnknownreviewscitation_countnormalized_citation_countcited_papersciting_papers |
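One simple instance of kernel-based edge reweighting, offered purely as a hedged sketch, scores each interaction by the RBF-kernel similarity of the two proteins' interaction profiles (rows of the adjacency matrix). The paper's actual kernel choice is not given in this abstract; the toy network and gamma value are assumptions.

```python
import numpy as np

# Toy symmetric protein-protein interaction (PPI) network.
adj = (np.random.rand(50, 50) > 0.9).astype(float)
adj = np.maximum(adj, adj.T)

def edge_weight(i, j, gamma=0.1):
    # RBF kernel on interaction profiles: high when the two proteins'
    # neighbourhoods agree, suggesting the interaction is reliable.
    d2 = np.sum((adj[i] - adj[j]) ** 2)
    return np.exp(-gamma * d2)

rows, cols = np.nonzero(np.triu(adj, 1))          # one entry per edge
weights = np.array([edge_weight(i, j) for i, j in zip(rows, cols)])
```

These weights could then be fed to any downstream complex-detection method in place of the raw binary interactions.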