corpus_id
stringlengths
7
12
paper_id
stringlengths
9
16
title
stringlengths
1
261
abstract
stringlengths
70
4.02k
source
stringclasses
1 value
bibtex
stringlengths
208
20.9k
citation_key
stringlengths
6
100
arxiv-4201
0806.4858
On stars and Steiner stars II
<|reference_start|>On stars and Steiner stars II: A {\em Steiner star} for a set $P$ of $n$ points in $\RR^d$ connects an arbitrary center point to all points of $P$, while a {\em star} connects a point $p\in P$ to the remaining $n-1$ points of $P$. All connections are realized by straight line segments. Fekete and Meijer showed that the minimum star is at most $\sqrt{2}$ times longer than the minimum Steiner star for any finite point configuration in $\RR^d$. The maximum ratio between them, over all finite point configurations in $\RR^d$, is called the {\em star Steiner ratio} in $\RR^d$. It is conjectured that this ratio is $4/\pi = 1.2732...$ in the plane and $4/3=1.3333...$ in three dimensions. Here we give upper bounds of 1.3631 in the plane, and 1.3833 in 3-space, thereby substantially improving recent upper bounds of 1.3999, and $\sqrt{2}-10^{-4}$, respectively. Our results also imply improved bounds on the maximum ratios between the minimum star and the maximum matching in two and three dimensions.<|reference_end|>
arxiv
@article{dumitrescu2008on, title={On stars and Steiner stars. II}, author={Adrian Dumitrescu and Csaba D. T{\'o}th and Guangwu Xu}, journal={arXiv preprint arXiv:0806.4858}, year={2008}, archivePrefix={arXiv}, eprint={0806.4858}, primaryClass={cs.CG} }
dumitrescu2008on
arxiv-4202
0806.4859
Termination of lambda-calculus with the extra Call-By-Value rule known as assoc
<|reference_start|>Termination of lambda-calculus with the extra Call-By-Value rule known as assoc: In this paper we prove that any lambda-term that is strongly normalising for beta-reduction is also strongly normalising for beta,assoc-reduction. assoc is a call-by-value rule that has been used in works by Moggi, Joachimsky, Espirito Santo and others. The result has often been justified with incomplete or incorrect proofs. Here we give one in full details.<|reference_end|>
arxiv
@article{lengrand2008termination, title={Termination of lambda-calculus with the extra Call-By-Value rule known as assoc}, author={St{\'e}phane Lengrand (LIX)}, journal={arXiv preprint arXiv:0806.4859}, year={2008}, archivePrefix={arXiv}, eprint={0806.4859}, primaryClass={cs.LO} }
lengrand2008termination
arxiv-4203
0806.4874
Myopic Coding in Multiterminal Networks
<|reference_start|>Myopic Coding in Multiterminal Networks: This paper investigates the interplay between cooperation and achievable rates in multi-terminal networks. Cooperation refers to the process of nodes working together to relay data toward the destination. There is an inherent tradeoff between achievable information transmission rates and the level of cooperation, which is determined by how many nodes are involved and how the nodes encode/decode the data. We illustrate this trade-off by studying information-theoretic decode-forward based coding strategies for data transmission in multi-terminal networks. Decode-forward strategies are usually discussed in the context of omniscient coding, in which all nodes in the network fully cooperate with each other, both in encoding and decoding. In this paper, we investigate myopic coding, in which each node cooperates with only a few neighboring nodes. We show that achievable rates of myopic decode-forward can be as large as that of omniscient decode-forward in the low SNR regime. We also show that when each node has only a few cooperating neighbors, adding one node into the cooperation increases the transmission rate significantly. Furthermore, we show that myopic decode-forward can achieve non-zero rates as the network size grows without bound.<|reference_end|>
arxiv
@article{ong2008myopic, title={Myopic Coding in Multiterminal Networks}, author={Lawrence Ong and Mehul Motani}, journal={IEEE Transactions on Information Theory, Vol. 54, No. 7, pp. 3295-3314, Jul. 2008}, year={2008}, doi={10.1109/TIT.2008.924675}, archivePrefix={arXiv}, eprint={0806.4874}, primaryClass={cs.IT math.IT} }
ong2008myopic
arxiv-4204
0806.4899
A Dynamic Programming Approach To Length-Limited Huffman Coding
<|reference_start|>A Dynamic Programming Approach To Length-Limited Huffman Coding: The ``state-of-the-art'' in Length Limited Huffman Coding algorithms is the $\Theta(ND)$-time, $\Theta(N)$-space one of Hirschberg and Larmore, where $D\le N$ is the length restriction on the code. This is a very clever, very problem specific, technique. In this note we show that there is a simple Dynamic-Programming (DP) method that solves the problem with the same time and space bounds. The fact that there was an $\Theta(ND)$ time DP algorithm was previously known; it is a straightforward DP with the Monge property (which permits an order of magnitude speedup). It was not interesting, though, because it also required $\Theta(ND)$ space. The main result of this paper is the technique developed for reducing the space. It is quite simple and applicable to many other problems modeled by DPs with the Monge property. We illustrate this with examples from web-proxy design and wireless mobile paging.<|reference_end|>
arxiv
@article{golin2008a, title={A Dynamic Programming Approach To Length-Limited Huffman Coding}, author={Mordecai Golin and Yan Zhang}, journal={arXiv preprint arXiv:0806.4899}, year={2008}, archivePrefix={arXiv}, eprint={0806.4899}, primaryClass={cs.DS cs.IT math.IT} }
golin2008a
arxiv-4205
0806.4920
Conception et Evaluation de XQuery dans une architecture de m\'ediation "Tout-XML"
<|reference_start|>Conception et Evaluation de XQuery dans une architecture de m\'ediation "Tout-XML": XML has emerged as the leading language for representing and exchanging data not only on the Web, but also in general in the enterprise. XQuery is emerging as the standard query language for XML. Thus, tools are required to mediate between XML queries and heterogeneous data sources to integrate data in XML. This paper presents the XMedia mediator, a unique tool for integrating and querying disparate heterogeneous information as unified XML views. It describes the mediator architecture and focuses on the unique distributed query processing technology implemented in this component. Query evaluation is based on an original XML algebra simply extending classical operators to process tuples of tree elements. Further, we present a set of performance evaluation on a relational benchmark, which leads to discuss possible performance enhancements.<|reference_end|>
arxiv
@article{dang-ngoc2008conception, title={Conception et Evaluation de XQuery dans une architecture de m\'ediation "Tout-XML"}, author={Tuyet-Tram Dang-Ngoc (PRISM) and Georges Gardarin (PRISM)}, journal={Revue ISI (Integration de syst\`emes d'information) : Num\'ero sp\'ecial sur les Bases de Donn\'ees Semi-structur\'ees 8, 5-6 (2003) 11-25}, year={2008}, archivePrefix={arXiv}, eprint={0806.4920}, primaryClass={cs.DB} }
dang-ngoc2008conception
arxiv-4206
0806.4921
Interpr\'etation vague des contraintes structurelles pour la RI dans des corpus de documents XML - \'Evaluation d'une m\'ethode approch\'ee de RI structur\'ee
<|reference_start|>Interpr\'etation vague des contraintes structurelles pour la RI dans des corpus de documents XML - \'Evaluation d'une m\'ethode approch\'ee de RI structur\'ee: We propose specific data structures designed to the indexing and retrieval of information elements in heterogeneous XML data bases. The indexing scheme is well suited to the management of various contextual searches, expressed either at a structural level or at an information content level. The approximate search mechanisms are based on a modified Levenshtein editing distance and information fusion heuristics. The implementation described highlights the mixing of structured information presented as field/value instances and free text elements. The retrieval performances of the proposed approach are evaluated within the INEX 2005 evaluation campaign. The evaluation results rank the proposed approach among the best evaluated XML IR systems for the VVCAS task.<|reference_end|>
arxiv
@article{popovici2008interpretation, title={Interpr\'etation vague des contraintes structurelles pour la RI dans des corpus de documents XML - \'Evaluation d'une m\'ethode approch\'ee de RI structur\'ee}, author={Eugen Popovici (VALORIA) and Gilbas M{\'e}nier (VALORIA) and Pierre-Fran{\c{c}}ois Marteau (VALORIA)}, journal={Document num\'erique 10, 1 (2007) 63--88}, year={2008}, doi={10.3166/dn.10.63-88}, archivePrefix={arXiv}, eprint={0806.4921}, primaryClass={cs.IR} }
popovici2008interpretation
arxiv-4207
0806.4956
Game Refinement Relations and Metrics
<|reference_start|>Game Refinement Relations and Metrics: We consider two-player games played over finite state spaces for an infinite number of rounds. At each state, the players simultaneously choose moves; the moves determine a successor state. It is often advantageous for players to choose probability distributions over moves, rather than single moves. Given a goal, for example, reach a target state, the question of winning is thus a probabilistic one: what is the maximal probability of winning from a given state? On these game structures, two fundamental notions are those of equivalences and metrics. Given a set of winning conditions, two states are equivalent if the players can win the same games with the same probability from both states. Metrics provide a bound on the difference in the probabilities of winning across states, capturing a quantitative notion of state similarity. We introduce equivalences and metrics for two-player game structures, and we show that they characterize the difference in probability of winning games whose goals are expressed in the quantitative mu-calculus. The quantitative mu-calculus can express a large set of goals, including reachability, safety, and omega-regular properties. Thus, we claim that our relations and metrics provide the canonical extensions to games, of the classical notion of bisimulation for transition systems. We develop our results both for equivalences and metrics, which generalize bisimulation, and for asymmetrical versions, which generalize simulation.<|reference_end|>
arxiv
@article{dealfaro2008game, title={Game Refinement Relations and Metrics}, author={Luca de Alfaro and Rupak Majumdar and Vishwanath Raman and Mari{\"e}lle Stoelinga}, journal={Logical Methods in Computer Science, Volume 4, Issue 3 (September 11, 2008) lmcs:781}, year={2008}, doi={10.2168/LMCS-4(3:7)2008}, archivePrefix={arXiv}, eprint={0806.4956}, primaryClass={cs.LO} }
dealfaro2008game
arxiv-4208
0806.4958
Deterministic Designs with Deterministic Guarantees: Toeplitz Compressed Sensing Matrices, Sequence Designs and System Identification
<|reference_start|>Deterministic Designs with Deterministic Guarantees: Toeplitz Compressed Sensing Matrices, Sequence Designs and System Identification: In this paper we present a new family of discrete sequences having "random like" uniformly decaying auto-correlation properties. The new class of infinite length sequences are higher order chirps constructed using irrational numbers. Exploiting results from the theory of continued fractions and diophantine approximations, we show that the class of sequences so formed has the property that the worst-case auto-correlation coefficients for every finite length sequence decays at a polynomial rate. These sequences display doppler immunity as well. We also show that Toeplitz matrices formed from such sequences satisfy restricted-isometry-property (RIP), a concept that has played a central role recently in Compressed Sensing applications. Compressed sensing has conventionally dealt with sensing matrices with arbitrary components. Nevertheless, such arbitrary sensing matrices are not appropriate for linear system identification and one must employ Toeplitz structured sensing matrices. Linear system identification plays a central role in a wide variety of applications such as channel estimation for multipath wireless systems as well as control system applications. Toeplitz matrices are also desirable on account of their filtering structure, which allows for fast implementation together with reduced storage requirements.<|reference_end|>
arxiv
@article{saligrama2008deterministic, title={Deterministic Designs with Deterministic Guarantees: Toeplitz Compressed Sensing Matrices, Sequence Designs and System Identification}, author={Venkatesh Saligrama}, journal={arXiv preprint arXiv:0806.4958}, year={2008}, archivePrefix={arXiv}, eprint={0806.4958}, primaryClass={cs.IT math.IT} }
saligrama2008deterministic
arxiv-4209
0806.4979
Bounds on Codes Based on Graph Theory
<|reference_start|>Bounds on Codes Based on Graph Theory: Let $A_q(n,d)$ be the maximum order (maximum number of codewords) of a $q$-ary code of length $n$ and Hamming distance at least $d$. And let $A(n,d,w)$ that of a binary code of constant weight $w$. Building on results from algebraic graph theory and Erd\H{o}s-ko-Rado like theorems in extremal combinatorics, we show how several known bounds on $A_q(n,d)$ and $A(n,d,w)$ can be easily obtained in a single framework. For instance, both the Hamming and Singleton bounds can derived as an application of a property relating the clique number and the independence number of vertex transitive graphs. Using the same techniques, we also derive some new bounds and present some additional applications.<|reference_end|>
arxiv
@article{rouayheb2008bounds, title={Bounds on Codes Based on Graph Theory}, author={Salim Y. El Rouayheb and C. N. Georghiades and E. Soljanin and A. Sprintson}, journal={arXiv preprint arXiv:0806.4979}, year={2008}, archivePrefix={arXiv}, eprint={0806.4979}, primaryClass={cs.IT math.IT} }
rouayheb2008bounds
arxiv-4210
0807.0007
Finding Large Clique Minors is Hard
<|reference_start|>Finding Large Clique Minors is Hard: We prove that it is NP-complete, given a graph G and a parameter h, to determine whether G contains a complete graph K_h as a minor.<|reference_end|>
arxiv
@article{eppstein2008finding, title={Finding Large Clique Minors is Hard}, author={David Eppstein}, journal={J. Graph Algorithms and Applications 13(2):197-204, 2009}, year={2008}, archivePrefix={arXiv}, eprint={0807.0007}, primaryClass={cs.DM} }
eppstein2008finding
arxiv-4211
0807.0023
Automatic Metadata Generation using Associative Networks
<|reference_start|>Automatic Metadata Generation using Associative Networks: In spite of its tremendous value, metadata is generally sparse and incomplete, thereby hampering the effectiveness of digital information services. Many of the existing mechanisms for the automated creation of metadata rely primarily on content analysis which can be costly and inefficient. The automatic metadata generation system proposed in this article leverages resource relationships generated from existing metadata as a medium for propagation from metadata-rich to metadata-poor resources. Because of its independence from content analysis, it can be applied to a wide variety of resource media types and is shown to be computationally inexpensive. The proposed method operates through two distinct phases. Occurrence and co-occurrence algorithms first generate an associative network of repository resources leveraging existing repository metadata. Second, using the associative network as a substrate, metadata associated with metadata-rich resources is propagated to metadata-poor resources by means of a discrete-form spreading activation algorithm. This article discusses the general framework for building associative networks, an algorithm for disseminating metadata through such networks, and the results of an experiment and validation of the proposed method using a standard bibliographic dataset.<|reference_end|>
arxiv
@article{rodriguez2008automatic, title={Automatic Metadata Generation using Associative Networks}, author={Marko A. Rodriguez and Johan Bollen and Herbert Van de Sompel}, journal={ACM Transactions on Information Systems, volume 27, number 2, pages 1-20, ISSN: 1046-8188, ACM Press, February 2009}, year={2008}, doi={10.1145/1462198.1462199}, number={LA-UR-06-3445}, archivePrefix={arXiv}, eprint={0807.0023}, primaryClass={cs.IR cs.DL} }
rodriguez2008automatic
arxiv-4212
0807.0038
A Novel Mathematical Model for the Unique Shortest Path Routing Problem
<|reference_start|>A Novel Mathematical Model for the Unique Shortest Path Routing Problem: Link weights are the principal parameters of shortest path routing protocols, the most commonly used protocols for IP networks. The problem of optimally setting link weights for unique shortest path routing is addressed. Due to the complexity of the constraints involved, there exist challenges to formulate the problem properly, so that a solution algorithm may be developed which could prove to be more efficient than those already in existence. In this paper, a novel complete formulation with a polynomial number of constraints is first introduced and then mathematically proved to be correct. It is further illustrated that the formulation has advantages over a prior one in terms of both constraint structure and model size for a proposed decomposition method to solve the problem.<|reference_end|>
arxiv
@article{zhang2008a, title={A Novel Mathematical Model for the Unique Shortest Path Routing Problem}, author={Changyong Zhang}, journal={arXiv preprint arXiv:0807.0038}, year={2008}, archivePrefix={arXiv}, eprint={0807.0038}, primaryClass={math.OC cs.DS} }
zhang2008a
arxiv-4213
0807.0042
A Simple Converse Proof and a Unified Capacity Formula for Channels with Input Constraints
<|reference_start|>A Simple Converse Proof and a Unified Capacity Formula for Channels with Input Constraints: Given the single-letter capacity formula and the converse proof of a channel without constraints, we provide a simple approach to extend the results for the same channel but with constraints. The resulting capacity formula is the minimum of a Lagrange dual function. It gives an unified formula in the sense that it works regardless whether the problem is convex. If the problem is non-convex, we show that the capacity can be larger than the formula obtained by the naive approach of imposing constraints on the maximization in the capacity formula of the case without the constraints. The extension on the converse proof is simply by adding a term involving the Lagrange multiplier and the constraints. The rest of the proof does not need to be changed. We name the proof method the Lagrangian Converse Proof. In contrast, traditional approaches need to construct a better input distribution for convex problems or need to introduce a time sharing variable for non-convex problems. We illustrate the Lagrangian Converse Proof for three channels, the classic discrete time memoryless channel, the channel with non-causal channel-state information at the transmitter, the channel with limited channel-state feedback. The extension to the rate distortion theory is also provided.<|reference_end|>
arxiv
@article{liu2008a, title={A Simple Converse Proof and a Unified Capacity Formula for Channels with Input Constraints}, author={Youjian Liu}, journal={arXiv preprint arXiv:0807.0042}, year={2008}, archivePrefix={arXiv}, eprint={0807.0042}, primaryClass={cs.IT math.IT} }
liu2008a
arxiv-4214
0807.0070
Quantitative Paradigm of Software Reliability as Content Relevance
<|reference_start|>Quantitative Paradigm of Software Reliability as Content Relevance: This paper presents a quantitative approach to software reliability and content relevance definitions validated by the systems' potential reliability law.Thus it is argued for the unified math nature or quantitative paradigm of software reliability and content relevance.<|reference_end|>
arxiv
@article{arkhipkin2008quantitative, title={Quantitative Paradigm of Software Reliability as Content Relevance}, author={Yuri Arkhipkin}, journal={arXiv preprint arXiv:0807.0070}, year={2008}, archivePrefix={arXiv}, eprint={0807.0070}, primaryClass={cs.SE cs.IR} }
arkhipkin2008quantitative
arxiv-4215
0807.0087
Path lengths in tree-child time consistent hybridization networks
<|reference_start|>Path lengths in tree-child time consistent hybridization networks: Hybridization networks are representations of evolutionary histories that allow for the inclusion of reticulate events like recombinations, hybridizations, or lateral gene transfers. The recent growth in the number of hybridization network reconstruction algorithms has led to an increasing interest in the definition of metrics for their comparison that can be used to assess the accuracy or robustness of these methods. In this paper we establish some basic results that make it possible the generalization to tree-child time consistent (TCTC) hybridization networks of some of the oldest known metrics for phylogenetic trees: those based on the comparison of the vectors of path lengths between leaves. More specifically, we associate to each hybridization network a suitably defined vector of `splitted' path lengths between its leaves, and we prove that if two TCTC hybridization networks have the same such vectors, then they must be isomorphic. Thus, comparing these vectors by means of a metric for real-valued vectors defines a metric for TCTC hybridization networks. We also consider the case of fully resolved hybridization networks, where we prove that simpler, `non-splitted' vectors can be used.<|reference_end|>
arxiv
@article{cardona2008path, title={Path lengths in tree-child time consistent hybridization networks}, author={Gabriel Cardona and Merce Llabres and Francesc Rossello and Gabriel Valiente}, journal={arXiv preprint arXiv:0807.0087}, year={2008}, archivePrefix={arXiv}, eprint={0807.0087}, primaryClass={q-bio.PE cs.CE cs.DM q-bio.QM} }
cardona2008path
arxiv-4216
0807.0093
Graph Kernels
<|reference_start|>Graph Kernels: We present a unified framework to study graph kernels, special cases of which include the random walk graph kernel \citep{GaeFlaWro03,BorOngSchVisetal05}, marginalized graph kernel \citep{KasTsuIno03,KasTsuIno04,MahUedAkuPeretal04}, and geometric kernel on graphs \citep{Gaertner02}. Through extensions of linear algebra to Reproducing Kernel Hilbert Spaces (RKHS) and reduction to a Sylvester equation, we construct an algorithm that improves the time complexity of kernel computation from $O(n^6)$ to $O(n^3)$. When the graphs are sparse, conjugate gradient solvers or fixed-point iterations bring our algorithm into the sub-cubic domain. Experiments on graphs from bioinformatics and other application domains show that it is often more than a thousand times faster than previous approaches. We then explore connections between diffusion kernels \citep{KonLaf02}, regularization on graphs \citep{SmoKon03}, and graph kernels, and use these connections to propose new graph kernels. Finally, we show that rational kernels \citep{CorHafMoh02,CorHafMoh03,CorHafMoh04} when specialized to graphs reduce to the random walk graph kernel.<|reference_end|>
arxiv
@article{vishwanathan2008graph, title={Graph Kernels}, author={S.V.N. Vishwanathan and Karsten M. Borgwardt and Imre Risi Kondor and Nicol N. Schraudolph}, journal={Journal of Machine Learning Research 11 (Apr): 1201-1242, 2010}, year={2008}, archivePrefix={arXiv}, eprint={0807.0093}, primaryClass={cs.LG} }
vishwanathan2008graph
arxiv-4217
0807.0140
The Dynamics of Probabilistic Population Protocols
<|reference_start|>The Dynamics of Probabilistic Population Protocols: We study here the dynamics (and stability) of Probabilistic Population Protocols, via the differential equations approach. We provide a quite general model and we show that it includes the model of Angluin et. al. in the case of very large populations. For the general model we give a sufficient condition for stability that can be checked in polynomial time. We also study two interesting subcases: (a) protocols whose specifications (in our terms) are configuration independent. We show that they are always stable and that their eventual subpopulation percentages are actually a Markov Chain stationary distribution. (b) protocols that have dynamics resembling virus spread. We show that their dynamics are actually similar to the well-known Replicator Dynamics of Evolutionary Games. We also provide a sufficient condition for stability in this case.<|reference_end|>
arxiv
@article{chatzigiannakis2008the, title={The Dynamics of Probabilistic Population Protocols}, author={Ioannis Chatzigiannakis and Paul G. Spirakis}, journal={arXiv preprint arXiv:0807.0140}, year={2008}, archivePrefix={arXiv}, eprint={0807.0140}, primaryClass={cs.DC cs.GT} }
chatzigiannakis2008the
arxiv-4218
0807.0161
Increase of Software Safety
<|reference_start|>Increase of Software Safety: New model of software safety is offered. Distribution of mistakes in program on stages of life cycle is researched. Study of ways of increase of reliability of software at help simulation program is leaded.<|reference_end|>
arxiv
@article{khandjian2008increase, title={Increase of Software Safety}, author={Arkadiy Khandjian}, journal={arXiv preprint arXiv:0807.0161}, year={2008}, archivePrefix={arXiv}, eprint={0807.0161}, primaryClass={cs.SE} }
khandjian2008increase
arxiv-4219
0807.0199
Quadratic Forms and Space-Time Block Codes from Generalized Quaternion and Biquaternion Algebras
<|reference_start|>Quadratic Forms and Space-Time Block Codes from Generalized Quaternion and Biquaternion Algebras: In the context of space-time block codes (STBCs), the theory of generalized quaternion and biquaternion algebras (i.e., tensor products of two quaternion algebras) over arbitrary base fields is presented, as well as quadratic form theoretic criteria to check if such algebras are division algebras. For base fields relevant to STBCs, these criteria are exploited, via Springer's theorem, to construct several explicit infinite families of (bi-)quaternion division algebras. These are used to obtain new $2\x 2$ and $4\x 4$ STBCs.<|reference_end|>
arxiv
@article{unger2008quadratic, title={Quadratic Forms and Space-Time Block Codes from Generalized Quaternion and Biquaternion Algebras}, author={Thomas Unger and Nadya Markin}, journal={IEEE Trans. Inform. Theory 57 (2011), no. 9, 6148-6156}, year={2008}, doi={10.1109/TIT.2011.2161909}, archivePrefix={arXiv}, eprint={0807.0199}, primaryClass={cs.IT math.IT} }
unger2008quadratic
arxiv-4220
0807.0204
Diversity Multiplexing Tradeoff of Asynchronous Cooperative Relay Networks
<|reference_start|>Diversity Multiplexing Tradeoff of Asynchronous Cooperative Relay Networks: The assumption of nodes in a cooperative communication relay network operating in synchronous fashion is often unrealistic. In the present paper, we consider two different models of asynchronous operation in cooperative-diversity networks experiencing slow fading and examine the corresponding diversity-multiplexing tradeoffs (DMT). For both models, we propose protocols and distributed space-time codes that asymptotically achieve the transmit diversity bound for all multiplexing gains and for any number of relays.<|reference_end|>
arxiv
@article{krishnakumar2008diversity, title={Diversity Multiplexing Tradeoff of Asynchronous Cooperative Relay Networks}, author={R. N. Krishnakumar and N. Naveen and P. Vijay Kumar}, journal={arXiv preprint arXiv:0807.0204}, year={2008}, archivePrefix={arXiv}, eprint={0807.0204}, primaryClass={cs.IT math.IT} }
krishnakumar2008diversity
arxiv-4221
0807.0222
Range Medians
<|reference_start|>Range Medians: We study a generalization of the classical median finding problem to batched query case: given an array of unsorted $n$ items and $k$ (not necessarily disjoint) intervals in the array, the goal is to determine the median in {\em each} of the intervals in the array. We give an algorithm that uses $O(n\log n + k\log k \log n)$ comparisons and show a lower bound of $\Omega(n\log k)$ comparisons for this problem. This is optimal for $k=O(n/\log n)$.<|reference_end|>
arxiv
@article{har-peled2008range, title={Range Medians}, author={Sariel Har-Peled and S. Muthukrishnan}, journal={arXiv preprint arXiv:0807.0222}, year={2008}, archivePrefix={arXiv}, eprint={0807.0222}, primaryClass={cs.DS cs.OH} }
har-peled2008range
arxiv-4222
0807.0245
Full Diversity Codes for MISO Systems Equipped with Linear or ML Detectors
<|reference_start|>Full Diversity Codes for MISO Systems Equipped with Linear or ML Detectors: In this paper, a general criterion for space time block codes (STBC) to achieve full-diversity with a linear receiver is proposed for a wireless communication system having multiple transmitter and single receiver antennas (MISO). Particularly, the STBC with Toeplitz structure satisfies this criterion and therefore, enables full-diversity. Further examination of this Toeplitz STBC reveals the following important properties: a) The symbol transmission rate can be made to approach unity. b) Applying the Toeplitz code to any signalling scheme having nonzero distance between the nearest constellation points results in a non-vanishing determinant. In addition, if QAM is used as the signalling scheme, then for independent MISO flat fading channels, the Toeplitz codes is proved to approach the optimal diversity-vs-multiplexing tradeoff with a ZF receiver when the number of channel uses is large. This is, so far, the first non-orthogonal STBC shown to achieve the optimal tradeoff for such a receiver. On the other hand, when ML detection is employed in a MISO system, the Toeplitz STBC achieves the maximum coding gain for independent channels. When the channel fading coefficients are correlated, the inherent transmission matrix in the Toeplitz STBC can be designed to minimize the average worst case pair-wise error probability.<|reference_end|>
arxiv
@article{liu2008full, title={Full Diversity Codes for MISO Systems Equipped with Linear or ML Detectors}, author={Jing Liu and Jian-Kang Zhang and Kon Max Wong}, journal={arXiv preprint arXiv:0807.0245}, year={2008}, doi={10.1109/TIT.2008.928992}, archivePrefix={arXiv}, eprint={0807.0245}, primaryClass={cs.IT math.IT} }
liu2008full
arxiv-4223
0807.0311
About the creation of a parallel bilingual corpora of web-publications
<|reference_start|>About the creation of a parallel bilingual corpora of web-publications: The algorithm of the creation texts parallel corpora was presented. The algorithm is based on the use of "key words" in text documents, and on the means of their automated translation. Key words were singled out by means of using Russian and Ukrainian morphological dictionaries, as well as dictionaries of the translation of nouns for the Russian and Ukrainianlanguages. Besides, to calculate the weights of the terms in the documents, empiric-statistic rules were used. The algorithm under consideration was realized in the form of a program complex, integrated into the content-monitoring InfoStream system. As a result, a parallel bilingual corpora of web-publications containing about 30 thousand documents, was created<|reference_end|>
arxiv
@article{lande2008about, title={About the creation of a parallel bilingual corpora of web-publications}, author={D.V. Lande and V.V. Zhygalo}, journal={arXiv preprint arXiv:0807.0311}, year={2008}, archivePrefix={arXiv}, eprint={0807.0311}, primaryClass={cs.CL} }
lande2008about
arxiv-4224
0807.0336
Hardness of embedding simplicial complexes in $\R^d$
<|reference_start|>Hardness of embedding simplicial complexes in $\R^d$: Let EMBED(k,d) be the following algorithmic problem: Given a finite simplicial complex K of dimension at most k, does there exist a (piecewise linear) embedding of K into R^d? Known results easily imply polynomiality of EMBED(k,2) (k=1,2; the case k=1, d=2 is graph planarity) and of EMBED(k,2k) for all k>2 (even if k is not considered fixed). We observe that the celebrated result of Novikov on the algorithmic unsolvability of recognizing the 5-sphere implies that EMBED(d,d) and EMBED(d-1,d) are undecidable for each d>4. Our main result is NP-hardness of EMBED(2,4) and, more generally, of EMBED(k,d) for all k,d with d>3 and d\geq k \geq (2d-2)/3. These dimensions fall outside the so-called metastable range of a theorem of Haefliger and Weber, which characterizes embeddability using the deleted product obstruction. Our reductions are based on examples, due to Segal, Spie\.z, Freedman, Krushkal, Teichner, and Skopenkov, showing that outside the metastable range the deleted product obstruction is not sufficient to characterize embeddability.<|reference_end|>
arxiv
@article{matoušek2008hardness, title={Hardness of embedding simplicial complexes in $\R^d$}, author={Ji\v{r}\'i Matou\v{s}ek and Martin Tancer and Uli Wagner}, journal={arXiv preprint arXiv:0807.0336}, year={2008}, archivePrefix={arXiv}, eprint={0807.0336}, primaryClass={cs.CG math.GT} }
matoušek2008hardness
arxiv-4225
0807.0337
Unveiling the mystery of visual information processing in human brain
<|reference_start|>Unveiling the mystery of visual information processing in human brain: It is generally accepted that human vision is an extremely powerful information processing system that facilitates our interaction with the surrounding world. However, despite extended and extensive research efforts, which encompass many exploration fields, the underlying fundamentals and operational principles of visual information processing in human brain remain unknown. We still are unable to figure out where and how along the path from eyes to the cortex the sensory input perceived by the retina is converted into a meaningful object representation, which can be consciously manipulated by the brain. Studying the vast literature considering the various aspects of brain information processing, I was surprised to learn that the respected scholarly discussion is totally indifferent to the basic keynote question: "What is information?" in general or "What is visual information?" in particular. In the old days, it was assumed that any scientific research approach has first to define its basic departure points. Why was it overlooked in brain information processing research remains a conundrum. In this paper, I am trying to find a remedy for this bizarre situation. I propose an uncommon definition of "information", which can be derived from Kolmogorov's Complexity Theory and Chaitin's notion of Algorithmic Information. Embracing this new definition leads to an inevitable revision of traditional dogmas that shape the state of the art of brain information processing research. I hope this revision would better serve the challenging goal of human visual information processing modeling.<|reference_end|>
arxiv
@article{diamant2008unveiling, title={Unveiling the mystery of visual information processing in human brain}, author={Emanuel Diamant}, journal={arXiv preprint arXiv:0807.0337}, year={2008}, archivePrefix={arXiv}, eprint={0807.0337}, primaryClass={cs.AI cs.IR cs.IT math.IT q-bio.NC} }
diamant2008unveiling
arxiv-4226
0807.0425
Wireless Broadcast with Network Coding in Mobile Ad-Hoc Networks: DRAGONCAST
<|reference_start|>Wireless Broadcast with Network Coding in Mobile Ad-Hoc Networks: DRAGONCAST: Network coding is a recently proposed method for transmitting data, which has been shown to have potential to improve wireless network performance. We study network coding for one specific case of multicast, broadcasting, from one source to all nodes of the network. We use network coding as a loss tolerant, energy-efficient, method for broadcast. Our emphasis is on mobile networks. Our contribution is the proposal of DRAGONCAST, a protocol to perform network coding in such a dynamically evolving environment. It is based on three building blocks: a method to permit real-time decoding of network coding, a method to adjust the network coding transmission rates, and a method for ensuring the termination of the broadcast. The performance and behavior of the method are explored experimentally by simulations; they illustrate the excellent performance of the protocol.<|reference_end|>
arxiv
@article{cho2008wireless, title={Wireless Broadcast with Network Coding in Mobile Ad-Hoc Networks: DRAGONCAST}, author={Song Yean Cho (INRIA Rocquencourt, LIX) and C\'edric Adjih (INRIA Rocquencourt)}, journal={arXiv preprint arXiv:0807.0425}, year={2008}, archivePrefix={arXiv}, eprint={0807.0425}, primaryClass={cs.NI} }
cho2008wireless
arxiv-4227
0807.0462
On the complexity of finding a sun in a graph
<|reference_start|>On the complexity of finding a sun in a graph: The sun is the graph obtained from a cycle of length even and at least six by adding edges to make the even-indexed vertices pairwise adjacent. Suns play an important role in the study of strongly chordal graphs. A graph is chordal if it does not contain an induced cycle of length at least four. A graph is strongly chordal if it is chordal and every even cycle has a chord joining vertices whose distance on the cycle is odd. Farber proved that a graph is strongly chordal if and only if it is chordal and contains no induced suns. There are well known polynomial-time algorithms for recognizing a sun in a chordal graph. Recently, polynomial-time algorithms for finding a sun for a larger class of graphs, the so-called HHD-free graphs, have been discovered. In this paper, we prove the problem of deciding whether an arbitrary graph contains a sun in NP-complete.<|reference_end|>
arxiv
@article{hoang2008on, title={On the complexity of finding a sun in a graph}, author={Chinh T. Hoang}, journal={arXiv preprint arXiv:0807.0462}, year={2008}, archivePrefix={arXiv}, eprint={0807.0462}, primaryClass={cs.DM} }
hoang2008on
arxiv-4228
0807.0476
Research report: State complexity of operations on two-way quantum finite automata
<|reference_start|>Research report: State complexity of operations on two-way quantum finite automata: This paper deals with the size complexity of minimal {\it two-way quantum finite automata} (2qfa's) necessary for operations to perform on all inputs of each fixed length. Such a complexity measure, known as state complexity of operations, is useful in measuring how much information is necessary to convert languages. We focus on intersection, union, reversals, and catenation operations and show some upper bounds of state complexity of operations on 2qfa's. Also, we present a number of non-regular languages and prove that these languages can be accepted by 2qfa's with one-sided error probabilities within linear time. Notably, these examples show that our bounds obtained for these operations are not tight, and therefore worth improving. We give an instance to show that the upper bound of the state number for the simulation of one-way deterministic finite automata by two-way reversible finite automata is not tight in general.<|reference_end|>
arxiv
@article{qiu2008research, title={Research report: State complexity of operations on two-way quantum finite automata}, author={Daowen Qiu}, journal={arXiv preprint arXiv:0807.0476}, year={2008}, archivePrefix={arXiv}, eprint={0807.0476}, primaryClass={cs.DM cs.CC} }
qiu2008research
arxiv-4229
0807.0484
Improved bounds and new techniques for Davenport-Schinzel sequences and their generalizations
<|reference_start|>Improved bounds and new techniques for Davenport-Schinzel sequences and their generalizations: Let lambda_s(n) denote the maximum length of a Davenport-Schinzel sequence of order s on n symbols. For s=3 it is known that lambda_3(n) = Theta(n alpha(n)) (Hart and Sharir, 1986). For general s>=4 there are almost-tight upper and lower bounds, both of the form n * 2^poly(alpha(n)) (Agarwal, Sharir, and Shor, 1989). Our first result is an improvement of the upper-bound technique of Agarwal et al. We obtain improved upper bounds for s>=6, which are tight for even s up to lower-order terms in the exponent. More importantly, we also present a new technique for deriving upper bounds for lambda_s(n). With this new technique we: (1) re-derive the upper bound of lambda_3(n) <= 2n alpha(n) + O(n sqrt alpha(n)) (first shown by Klazar, 1999); (2) re-derive our own new upper bounds for general s; and (3) obtain improved upper bounds for the generalized Davenport-Schinzel sequences considered by Adamec, Klazar, and Valtr (1992). Regarding lower bounds, we show that lambda_3(n) >= 2n alpha(n) - O(n), and therefore, the coefficient 2 is tight. We also present a simpler version of the construction of Agarwal, Sharir, and Shor that achieves the known lower bounds for even s>=4.<|reference_end|>
arxiv
@article{nivasch2008improved, title={Improved bounds and new techniques for Davenport-Schinzel sequences and their generalizations}, author={Gabriel Nivasch}, journal={Journal of the ACM, 57, article 17, 44 pages, 2010}, year={2008}, doi={10.1145/1706591.1706597}, archivePrefix={arXiv}, eprint={0807.0484}, primaryClass={cs.DM cs.CG} }
nivasch2008improved
arxiv-4230
0807.0517
Modeling belief systems with scale-free networks
<|reference_start|>Modeling belief systems with scale-free networks: Evolution of belief systems has always been in focus of cognitive research. In this paper we delineate a new model describing belief systems as a network of statements considered true. Testing the model a small number of parameters enabled us to reproduce a variety of well-known mechanisms ranging from opinion changes to development of psychological problems. The self-organizing opinion structure showed a scale-free degree distribution. The novelty of our work lies in applying a convenient set of definitions allowing us to depict opinion network dynamics in a highly favorable way, which resulted in a scale-free belief network. As an additional benefit, we listed several conjectural consequences in a number of areas related to thinking and reasoning.<|reference_end|>
arxiv
@article{antal2008modeling, title={Modeling belief systems with scale-free networks}, author={Miklos Antal, Laszlo Balogh}, journal={arXiv preprint arXiv:0807.0517}, year={2008}, archivePrefix={arXiv}, eprint={0807.0517}, primaryClass={cs.AI physics.soc-ph} }
antal2008modeling
arxiv-4231
0807.0552
Decomposition of Multiple Coverings into More Parts
<|reference_start|>Decomposition of Multiple Coverings into More Parts: We prove that for every centrally symmetric convex polygon Q, there exists a constant alpha such that any alpha*k-fold covering of the plane by translates of Q can be decomposed into k coverings. This improves on a quadratic upper bound proved by Pach and Toth (SoCG'07). The question is motivated by a sensor network problem, in which a region has to be monitored by sensors with limited battery lifetime.<|reference_end|>
arxiv
@article{aloupis2008decomposition, title={Decomposition of Multiple Coverings into More Parts}, author={G. Aloupis and J. Cardinal and S. Collette and S. Langerman and D. Orden and P. Ramos}, journal={Discrete and Computational Geometry 44:3 (2010), 706-723}, year={2008}, doi={10.1007/s00454-009-9238-3}, archivePrefix={arXiv}, eprint={0807.0552}, primaryClass={cs.CG} }
aloupis2008decomposition
arxiv-4232
0807.0564
Linear-Programming Receivers
<|reference_start|>Linear-Programming Receivers: It is shown that any communication system which admits a sum-product (SP) receiver also admits a corresponding linear-programming (LP) receiver. The two receivers have a relationship defined by the local structure of the underlying graphical model, and are inhibited by the same phenomenon, which we call 'pseudoconfigurations'. This concept is a generalization of the concept of 'pseudocodewords' for linear codes. It is proved that the LP receiver has the 'optimum certificate' property, and that the receiver output is the lowest cost pseudoconfiguration. Equivalence of graph-cover pseudoconfigurations and linear-programming pseudoconfigurations is also proved. While the LP receiver is generally more complex than the corresponding SP receiver, the LP receiver and its associated pseudoconfiguration structure provide an analytic tool for the analysis of SP receivers. As an example application, we show how the LP design technique may be applied to the problem of joint equalization and decoding.<|reference_end|>
arxiv
@article{flanagan2008linear-programming, title={Linear-Programming Receivers}, author={Mark F. Flanagan}, journal={arXiv preprint arXiv:0807.0564}, year={2008}, archivePrefix={arXiv}, eprint={0807.0564}, primaryClass={cs.IT math.IT} }
flanagan2008linear-programming
arxiv-4233
0807.0565
Music, Complexity, Information
<|reference_start|>Music, Complexity, Information: These are the preparatory notes for a Science & Music essay, "Playing by numbers", appeared in Nature 453 (2008) 988-989.<|reference_end|>
arxiv
@article{zanette2008music, title={Music, Complexity, Information}, author={Damian H. Zanette}, journal={arXiv preprint arXiv:0807.0565}, year={2008}, archivePrefix={arXiv}, eprint={0807.0565}, primaryClass={physics.soc-ph cs.CL} }
zanette2008music
arxiv-4234
0807.0595
Nonstandard linear recurring sequence subgroups in finite fields and automorphisms of cyclic codes
<|reference_start|>Nonstandard linear recurring sequence subgroups in finite fields and automorphisms of cyclic codes: Let $q=p^r$ be a prime power, and let $f(x)=x^m-\gs_{m-1}x^{m-1}- >...-\gs_1x-\gs_0$ be an irreducible polynomial over the finite field $\GF(q)$ of size $q$. A zero $\xi$ of $f$ is called {\em nonstandard (of degree $m$) over $\GF(q)$} if the recurrence relation $u_m=\gs_{m-1}u_{m-1} + ... + \gs_1u_1+\gs_0u_0$ with characteristic polynomial $f$ can generate the powers of $\xi$ in a nontrivial way, that is, with $u_0=1$ and $f(u_1)\neq 0$. In 2003, Brison and Nogueira asked for a characterisation of all nonstandard cases in the case $m=2$, and solved this problem for $q$ a prime, and later for $q=p^r$ with $r\leq4$. In this paper, we first show that classifying nonstandard finite field elements is equivalent to classifying those cyclic codes over $\GF(q)$ generated by a single zero that posses extra permutation automorphisms. Apart from two sporadic examples of degree 11 over $\GF(2)$ and of degree 5 over $\GF(3)$, related to the Golay codes, there exist two classes of examples of nonstandard finite field elements. One of these classes (type I) involves irreducible polynomials $f$ of the form $f(x)=x^m-f_0$, and is well-understood. The other class (type II) can be obtained from a primitive element in some subfield by a process that we call extension and lifting. We will use the known classification of the subgroups of $\PGL(2,q)$ in combination with a recent result by Brison and Nogueira to show that a nonstandard element of degree two over $\GF(q)$ necessarily is of type I or type II, thus solving completely the classification problem for the case $m=2$.<|reference_end|>
arxiv
@article{hollmann2008nonstandard, title={Nonstandard linear recurring sequence subgroups in finite fields and automorphisms of cyclic codes}, author={Henk D.L. Hollmann}, journal={arXiv preprint arXiv:0807.0595}, year={2008}, number={Philips Research manuscript number PR-MS 29.450}, archivePrefix={arXiv}, eprint={0807.0595}, primaryClass={cs.IT cs.DM math.CO math.IT} }
hollmann2008nonstandard
arxiv-4235
0807.0610
Lightweight Security for Network Coding
<|reference_start|>Lightweight Security for Network Coding: Under the emerging network coding paradigm, intermediate nodes in the network are allowed not only to store and forward packets but also to process and mix different data flows. We propose a low-complexity cryptographic scheme that exploits the inherent security provided by random linear network coding and offers the advantage of reduced overhead in comparison to traditional end-to-end encryption of the entire data. Confidentiality is achieved by protecting (or "locking") the source coefficients required to decode the encoded data, without preventing intermediate nodes from running their standard network coding operations. Our scheme can be easily combined with existing techniques that counter active attacks.<|reference_end|>
arxiv
@article{vilela2008lightweight, title={Lightweight Security for Network Coding}, author={Joao P. Vilela, Luisa Lima, Joao Barros}, journal={arXiv preprint arXiv:0807.0610}, year={2008}, doi={10.1109/ICC.2008.336}, archivePrefix={arXiv}, eprint={0807.0610}, primaryClass={cs.CR cs.NI} }
vilela2008lightweight
arxiv-4236
0807.0626
Asymptotic Mean Time To Failure and Higher Moments for Large, Recursive Networks
<|reference_start|>Asymptotic Mean Time To Failure and Higher Moments for Large, Recursive Networks: This paper deals with asymptotic expressions of the Mean Time To Failure (MTTF) and higher moments for large, recursive, and non-repairable systems in the context of two-terminal reliability. Our aim is to extend the well-known results of the series and parallel cases. We first consider several exactly solvable configurations of identical components with exponential failure-time distribution functions to illustrate different (logarithmic or power-law) behaviors as the size of the system, indexed by an integer n, increases. The general case is then addressed: it provides a simple interpretation of the origin of the power-law exponent and an efficient asymptotic expression for the total reliability of large, recursive systems. Finally, we assess the influence of the non-exponential character of the component reliability on the n-dependence of the MTTF.<|reference_end|>
arxiv
@article{tanguy2008asymptotic, title={Asymptotic Mean Time To Failure and Higher Moments for Large, Recursive Networks}, author={Christian Tanguy}, journal={arXiv preprint arXiv:0807.0626}, year={2008}, archivePrefix={arXiv}, eprint={0807.0626}, primaryClass={cs.PF} }
tanguy2008asymptotic
arxiv-4237
0807.0627
Belief decision support and reject for textured images characterization
<|reference_start|>Belief decision support and reject for textured images characterization: The textured images' classification assumes to consider the images in terms of area with the same texture. In uncertain environment, it could be better to take an imprecise decision or to reject the area corresponding to an unlearning class. Moreover, on the areas that are the classification units, we can have more than one texture. These considerations allows us to develop a belief decision model permitting to reject an area as unlearning and to decide on unions and intersections of learning classes. The proposed approach finds all its justification in an application of seabed characterization from sonar images, which contributes to an illustration.<|reference_end|>
arxiv
@article{martin2008belief, title={Belief decision support and reject for textured images characterization}, author={Arnaud Martin (E3I2)}, journal={International Conference on Information Fusion, Lens : France (2008)}, year={2008}, archivePrefix={arXiv}, eprint={0807.0627}, primaryClass={cs.AI} }
martin2008belief
arxiv-4238
0807.0629
Exact two-terminal reliability of some directed networks
<|reference_start|>Exact two-terminal reliability of some directed networks: The calculation of network reliability in a probabilistic context has long been an issue of practical and academic importance. Conventional approaches (determination of bounds, sums of disjoint products algorithms, Monte Carlo evaluations, studies of the reliability polynomials, etc.) only provide approximations when the network's size increases, even when nodes do not fail and all edges have the same reliability p. We consider here a directed, generic graph of arbitrary size mimicking real-life long-haul communication networks, and give the exact, analytical solution for the two-terminal reliability. This solution involves a product of transfer matrices, in which individual reliabilities of edges and nodes are taken into account. The special case of identical edge and node reliabilities (p and rho, respectively) is addressed. We consider a case study based on a commonly-used configuration, and assess the influence of the edges being directed (or not) on various measures of network performance. While the two-terminal reliability, the failure frequency and the failure rate of the connection are quite similar, the locations of complex zeros of the two-terminal reliability polynomials exhibit strong differences, and various structure transitions at specific values of rho. The present work could be extended to provide a catalog of exactly solvable networks in terms of reliability, which could be useful as building blocks for new and improved bounds, as well as benchmarks, in the general case.<|reference_end|>
arxiv
@article{tanguy2008exact, title={Exact two-terminal reliability of some directed networks}, author={Christian Tanguy}, journal={Proceedings of the 6th International Workshop on the Design of Reliable Communication, La Rochelle : France (2007)}, year={2008}, archivePrefix={arXiv}, eprint={0807.0629}, primaryClass={cs.PF} }
tanguy2008exact
arxiv-4239
0807.0644
Greedy D-Approximation Algorithm for Covering with Arbitrary Constraints and Submodular Cost
<|reference_start|>Greedy D-Approximation Algorithm for Covering with Arbitrary Constraints and Submodular Cost: This paper describes a simple greedy D-approximation algorithm for any covering problem whose objective function is submodular and non-decreasing, and whose feasible region can be expressed as the intersection of arbitrary (closed upwards) covering constraints, each of which constrains at most D variables of the problem. (A simple example is Vertex Cover, with D = 2.) The algorithm generalizes previous approximation algorithms for fundamental covering problems and online paging and caching problems.<|reference_end|>
arxiv
@article{koufogiannakis2008greedy, title={Greedy D-Approximation Algorithm for Covering with Arbitrary Constraints and Submodular Cost}, author={Christos Koufogiannakis and Neal E. Young}, journal={Algorithmica 66(1):113-152 (2013)}, year={2008}, doi={10.1007/978-3-642-02927-1_53}, archivePrefix={arXiv}, eprint={0807.0644}, primaryClass={cs.DS cs.DC} }
koufogiannakis2008greedy
arxiv-4240
0807.0661
Collaborative Virtual Queue: Fair Management of Congested Departure Operations and Benefit Analysis
<|reference_start|>Collaborative Virtual Queue: Fair Management of Congested Departure Operations and Benefit Analysis: Due to the stochastic nature of departure operations, working at full capacity makes major US airports very sensitive to uncertainties. Consequently, airport ground operations face critically congested taxiways and long runway queues. In this report, we show how improved management of departure operations from the ready-to-push-back time to the wheels-off time can potentially yield significant benefits to airlines and air traffic services. We develop a Collaborative Virtual Queue to enable better optimization capabilities during congested situations while taking into account the laissez-faire competitive environment. Results are evaluated using a departure system model, validated using current statistics and previous studies. First, the Collaborative Virtual Queue enables keeping aircraft away from runway queues, which increases wheels-off time predictability. Second, holding aircraft enables last-minute intra-airline flight switching. This creates new optimization capabilities for airlines i.e. it gives airlines the flexibility to prioritize their flight sequence in real-time. These capabilities are illustrated by the trade-off between minimizing the average passenger waiting time and minimizing the level of unfairness between aircraft of the same airline. For instance, airlines could choose to decrease by up to 15% their average passenger waiting time by prioritizing heavy planes over small planes when the taxiway system is congested.<|reference_end|>
arxiv
@article{burgain2008collaborative, title={Collaborative Virtual Queue: Fair Management of Congested Departure Operations and Benefit Analysis}, author={Pierrick Burgain, Eric Feron, John-Paul Clarke}, journal={arXiv preprint arXiv:0807.0661}, year={2008}, archivePrefix={arXiv}, eprint={0807.0661}, primaryClass={cs.OH} }
burgain2008collaborative
arxiv-4241
0807.0672
Algorithmic Problem Complexity
<|reference_start|>Algorithmic Problem Complexity: People solve different problems and know that some of them are simple, some are complex and some insoluble. The main goal of this work is to develop a mathematical theory of algorithmic complexity for problems. This theory is aimed at determination of computer abilities in solving different problems and estimation of resources that computers need to do this. Here we build the part of this theory related to static measures of algorithms. At first, we consider problems for finite words and study algorithmic complexity of such problems, building optimal complexity measures. Then we consider problems for such infinite objects as functions and study algorithmic complexity of these problems, also building optimal complexity measures. In the second part of the work, complexity of algorithmic problems, such as the halting problem for Turing machines, is measured by the classes of automata that are necessary to solve this problem. To classify different problems with respect to their complexity, inductive Turing machines, which extend possibilities of Turing machines, are used. A hierarchy of inductive Turing machines generates an inductive hierarchy of algorithmic problems. Here we specifically consider algorithmic problems related to Turing machines and inductive Turing machines, and find a place for these problems in the inductive hierarchy of algorithmic problems.<|reference_end|>
arxiv
@article{burgin2008algorithmic, title={Algorithmic Problem Complexity}, author={Mark Burgin}, journal={arXiv preprint arXiv:0807.0672}, year={2008}, archivePrefix={arXiv}, eprint={0807.0672}, primaryClass={cs.CC cs.IT math.IT} }
burgin2008algorithmic
arxiv-4242
0807.0704
Knowledge bases over algebraic models Some notes about informational equivalence
<|reference_start|>Knowledge bases over algebraic models Some notes about informational equivalence: The recent advances in knowledge base research and the growing importance of effective knowledge management raised an important question of knowledge base equivalence verification. This problem has not been stated earlier, at least in a way that allows speaking about algorithms for verification of informational equivalence, because the informal definition of knowledge bases makes formal solution of this problem impossible. In this paper we provide an implementable formal algorithm for knowledge base equivalence verification based on the formal definition of knowledge base proposed by Plotkin B. and Plotkin T., and study some important properties of automorphic equivalence of models. We also describe the concept of equivalence and formulate the criterion for the equivalence of knowledge bases defined over finite models. Further we define multi-models and automorphic equivalence of models and multi-models, that is generalization of automorphic equivalence of algebras.<|reference_end|>
arxiv
@article{marina2008knowledge, title={Knowledge bases over algebraic models. Some notes about informational equivalence}, author={Knyazhansky, Marina and Plotkin, Tatjana}, journal={arXiv preprint arXiv:0807.0704}, year={2008}, archivePrefix={arXiv}, eprint={0807.0704}, primaryClass={cs.LO} }
marina2008knowledge
arxiv-4243
0807.0718
The Parikh functions of sparse context-free languages are quasi-polynomials
<|reference_start|>The Parikh functions of sparse context-free languages are quasi-polynomials: We prove that the Parikh map of a bounded context-free language is a box spline. Moreover we prove that in this case, such a function is rational.<|reference_end|>
arxiv
@article{d'alessandro2008the, title={The Parikh functions of sparse context-free languages are quasi-polynomials}, author={Flavio D'Alessandro, Benedetto Intrigila, Stefano Varricchio}, journal={arXiv preprint arXiv:0807.0718}, year={2008}, archivePrefix={arXiv}, eprint={0807.0718}, primaryClass={cs.DM} }
d'alessandro2008the
arxiv-4244
0807.0799
An Improved Robust Fuzzy Extractor
<|reference_start|>An Improved Robust Fuzzy Extractor: We consider the problem of building robust fuzzy extractors, which allow two parties holding similar random variables W, W' to agree on a secret key R in the presence of an active adversary. Robust fuzzy extractors were defined by Dodis et al. in Crypto 2006 to be noninteractive, i.e., only one message P, which can be modified by an unbounded adversary, can pass from one party to the other. This allows them to be used by a single party at different points in time (e.g., for key recovery or biometric authentication), but also presents an additional challenge: what if R is used, and thus possibly observed by the adversary, before the adversary has a chance to modify P. Fuzzy extractors secure against such a strong attack are called post-application robust. We construct a fuzzy extractor with post-application robustness that extracts a shared secret key of up to (2m-n)/2 bits (depending on error-tolerance and security parameters), where n is the bit-length and m is the entropy of W. The previously best known result, also of Dodis et al., extracted up to (2m-n)/3 bits (depending on the same parameters).<|reference_end|>
arxiv
@article{kanukurthi2008an, title={An Improved Robust Fuzzy Extractor}, author={Bhavana Kanukurthi and Leonid Reyzin}, journal={arXiv preprint arXiv:0807.0799}, year={2008}, archivePrefix={arXiv}, eprint={0807.0799}, primaryClass={cs.CR} }
kanukurthi2008an
arxiv-4245
0807.0807
Shortest Paths Avoiding Forbidden Subpaths
<|reference_start|>Shortest Paths Avoiding Forbidden Subpaths: In this paper we study a variant of the shortest path problem in graphs: given a weighted graph G and vertices s and t, and given a set X of forbidden paths in G, find a shortest s-t path P such that no path in X is a subpath of P. Path P is allowed to repeat vertices and edges. We call each path in X an exception, and our desired path a shortest exception-avoiding path. We formulate a new version of the problem where the algorithm has no a priori knowledge of X, and finds out about an exception x in X only when a path containing x fails. This situation arises in computing shortest paths in optical networks. We give an algorithm that finds a shortest exception avoiding path in time polynomial in |G| and |X|. The main idea is to run Dijkstra's algorithm incrementally after replicating vertices when an exception is discovered.<|reference_end|>
arxiv
@article{ahmed2008shortest, title={Shortest Paths Avoiding Forbidden Subpaths}, author={Mustaq Ahmed, Anna Lubiw}, journal={Proceedings of the 26th International Symposium on Theoretical Aspects of Computer Science (STACS), Freiburg, Germany, 2009, pp. 63-74}, year={2008}, archivePrefix={arXiv}, eprint={0807.0807}, primaryClass={cs.DM cs.DS} }
ahmed2008shortest
arxiv-4246
0807.0821
On Wiretap Networks II
<|reference_start|>On Wiretap Networks II: We consider the problem of securing a multicast network against a wiretapper that can intercept the packets on a limited number of arbitrary network links of his choice. We assume that the network implements network coding techniques to simultaneously deliver all the packets available at the source to all the destinations. We show how this problem can be looked at as a network generalization of the Ozarow-Wyner Wiretap Channel of type II. In particular, we show that network security can be achieved by using the Ozarow-Wyner approach of coset coding at the source on top of the implemented network code. This way, we quickly and transparently recover some of the results available in the literature on secure network coding for wiretapped networks. We also derive new bounds on the required secure code alphabet size and an algorithm for code construction.<|reference_end|>
arxiv
@article{rouayheb2008on, title={On Wiretap Networks II}, author={Salim El Rouayheb, Emina Soljanin}, journal={arXiv preprint arXiv:0807.0821}, year={2008}, archivePrefix={arXiv}, eprint={0807.0821}, primaryClass={cs.IT math.IT} }
rouayheb2008on
arxiv-4247
0807.0868
On the Capacity of Pairwise Collaborative Networks
<|reference_start|>On the Capacity of Pairwise Collaborative Networks: We derive expressions for the achievable rate region of a collaborative coding scheme in a two-transmitter, two-receiver Pairwise Collaborative Network (PCN) where one transmitter and receiver pair, namely relay pair, assists the other pair, namely the source pair, by partially decoding and forwarding the transmitted message to the intended receiver. The relay pair provides such assistance while handling a private message. We assume that users can use the past channel outputs and can transmit and receive at the same time and in the same frequency band. In this collaborative scheme, the transmitter of the source pair splits its information into two independent parts. Ironically, the relay pair employs the decode and forward coding to assist the source pair in delivering a part of its message and re-encodes the decoded message along with private message, which is intended to the receiver of the relay pair, and broadcasts the results. The receiver of the relay pair decodes both messages, retrieves the private message, re-encodes and transmits the decoded massage to the intended destination. We also characterize the achievable rate region for Gaussian PCN. Finally, we provide numerical results to study the rate trade off for the involved pairs. Numerical result shows that the collaboration offers gain when the channel gain between the users of the relay pair are strong. It also shows that if the channel conditions between transmitters or between the receivers of the relay and source pairs are poor, such a collaboration is not beneficial.<|reference_end|>
arxiv
@article{astaneh2008on, title={On the Capacity of Pairwise Collaborative Networks}, author={Saeed A. Astaneh and Saeed Gazor and Hamid Behroozi}, journal={arXiv preprint arXiv:0807.0868}, year={2008}, archivePrefix={arXiv}, eprint={0807.0868}, primaryClass={cs.IT math.IT} }
astaneh2008on
arxiv-4248
0807.0887
On Cycles in AS Relationships
<|reference_start|>On Cycles in AS Relationships: Several users of our AS relationship inference data (http://www.caida.org/data/active/as-relationships/), released with cs/0604017, asked us why it contained AS relationship cycles, e.g., cases where AS A is a provider of AS B, B is a provider of C, and C is a provider of A, or other cycle types. Having been answering these questions in private communications, we have eventually decided to write down our answers here for future reference.<|reference_end|>
arxiv
@article{dimitropoulos2008on, title={On Cycles in AS Relationships}, author={Xenofontas Dimitropoulos and M. Angeles Serrano and Dmitri Krioukov}, journal={ACM SIGCOMM Computer Communication Review (CCR), v.38, n.3, p.103-104, 2008}, year={2008}, doi={10.1145/1384609.1384624}, archivePrefix={arXiv}, eprint={0807.0887}, primaryClass={cs.NI} }
dimitropoulos2008on
arxiv-4249
0807.0908
The Correspondence Analysis Platform for Uncovering Deep Structure in Data and Information
<|reference_start|>The Correspondence Analysis Platform for Uncovering Deep Structure in Data and Information: We study two aspects of information semantics: (i) the collection of all relationships, (ii) tracking and spotting anomaly and change. The first is implemented by endowing all relevant information spaces with a Euclidean metric in a common projected space. The second is modelled by an induced ultrametric. A very general way to achieve a Euclidean embedding of different information spaces based on cross-tabulation counts (and from other input data formats) is provided by Correspondence Analysis. From there, the induced ultrametric that we are particularly interested in takes a sequential - e.g. temporal - ordering of the data into account. We employ such a perspective to look at narrative, "the flow of thought and the flow of language" (Chafe). In application to policy decision making, we show how we can focus analysis in a small number of dimensions.<|reference_end|>
arxiv
@article{murtagh2008the, title={The Correspondence Analysis Platform for Uncovering Deep Structure in Data and Information}, author={Fionn Murtagh}, journal={Computer Journal, 53 (3), 304-315, 2010}, year={2008}, doi={10.1093/comjnl/bxn045}, archivePrefix={arXiv}, eprint={0807.0908}, primaryClass={cs.AI} }
murtagh2008the
arxiv-4250
0807.0928
Bloomier Filters: A second look
<|reference_start|>Bloomier Filters: A second look: A Bloom filter is a space efficient structure for storing static sets, where the space efficiency is gained at the expense of a small probability of false-positives. A Bloomier filter generalizes a Bloom filter to compactly store a function with a static support. In this article we give a simple construction of a Bloomier filter. The construction is linear in space and requires constant time to evaluate. The creation of our Bloomier filter takes linear time which is faster than the existing construction. We show how one can improve the space utilization further at the cost of increasing the time for creating the data structure.<|reference_end|>
arxiv
@article{charles2008bloomier, title={Bloomier Filters: A second look}, author={Denis Charles and Kumar Chellapilla}, journal={arXiv preprint arXiv:0807.0928}, year={2008}, archivePrefix={arXiv}, eprint={0807.0928}, primaryClass={cs.DS} }
charles2008bloomier
arxiv-4251
0807.0942
Secrecy via Sources and Channels
<|reference_start|>Secrecy via Sources and Channels: Alice and Bob want to share a secret key and to communicate an independent message, both of which they desire to be kept secret from an eavesdropper Eve. We study this problem of secret communication and secret key generation when two resources are available -- correlated sources at Alice, Bob, and Eve, and a noisy broadcast channel from Alice to Bob and Eve which is independent of the sources. We are interested in characterizing the fundamental trade-off between the rates of the secret message and secret key. We present an achievable solution and prove its optimality for the parallel channels and sources case when each sub-channel and source component satisfies a degradation order (either in favor of the legitimate receiver or the eavesdropper). This includes the case of jointly Gaussian sources and an additive Gaussian channel, for which the secrecy region is evaluated.<|reference_end|>
arxiv
@article{prabhakaran2008secrecy, title={Secrecy via Sources and Channels}, author={Vinod M. Prabhakaran and Krishnan Eswaran and Kannan Ramchandran}, journal={arXiv preprint arXiv:0807.0942}, year={2008}, doi={10.1109/TIT.2012.2208579}, archivePrefix={arXiv}, eprint={0807.0942}, primaryClass={cs.IT math.IT} }
prabhakaran2008secrecy
arxiv-4252
0807.0967
Astrophysics in SCoPE
<|reference_start|>Astrophysics in SCoPE: S.Co.P.E. is one of the four projects funded by the Italian Government in order to provide Southern Italy with a distributed computing infrastructure for fundamental science. Beside being aimed at building the infrastructure, S.Co.P.E. is also actively pursuing research in several areas among which astrophysics and observational cosmology. We shortly summarize the most significant results obtained in the first two years of the project and related to the development of middleware and Data Mining tools for the Virtual Observatory.<|reference_end|>
arxiv
@article{brescia2008astrophysics, title={Astrophysics in S.Co.P.E}, author={M. Brescia and S. Cavuoti and G. D'Angelo and R. D'Abrusco and C. Donalek and N. Deniskina and O. Laurino and G. Longo}, journal={arXiv preprint arXiv:0807.0967}, year={2008}, archivePrefix={arXiv}, eprint={0807.0967}, primaryClass={astro-ph cs.DL} }
brescia2008astrophysics
arxiv-4253
0807.0992
Random XML sampling the Boltzmann way
<|reference_start|>Random XML sampling the Boltzmann way: In this article we present the prototype of a framework capable of producing, with linear complexity, uniformly random XML documents with respect to a given RELAX NG grammar. The generation relies on powerful combinatorial methods together with numerical and symbolic resolution of polynomial systems.<|reference_end|>
arxiv
@article{darrasse2008random, title={Random XML sampling the Boltzmann way}, author={Alexis Darrasse (LIP6)}, journal={arXiv preprint arXiv:0807.0992}, year={2008}, archivePrefix={arXiv}, eprint={0807.0992}, primaryClass={cs.OH} }
darrasse2008random
arxiv-4254
0807.0993
WCET analysis of multi-level set-associative instruction caches
<|reference_start|>WCET analysis of multi-level set-associative instruction caches: With the advent of increasingly complex hardware in real-time embedded systems (processors with performance enhancing features such as pipelines, cache hierarchy, multiple cores), many processors now have a set-associative L2 cache. Thus, there is a need for considering cache hierarchies when validating the temporal behavior of real-time systems, in particular when estimating tasks' worst-case execution times (WCETs). To the best of our knowledge, there is only one approach for WCET estimation for systems with cache hierarchies [Mueller, 1997], which turns out to be unsafe for set-associative caches. In this paper, we highlight the conditions under which the approach described in [Mueller, 1997] is unsafe. A safe static instruction cache analysis method is then presented. Contrary to [Mueller, 1997] our method supports set-associative and fully associative caches. The proposed method is experimented on medium-size and large programs. We show that the method is most of the time tight. We further show that in all cases WCET estimations are much tighter when considering the cache hierarchy than when considering only the L1 cache. An evaluation of the analysis time is conducted, demonstrating that analysing the cache hierarchy has a reasonable computation time.<|reference_end|>
arxiv
@article{hardy2008wcet, title={WCET analysis of multi-level set-associative instruction caches}, author={Damien Hardy (IRISA) and Isabelle Puaut (IRISA)}, journal={arXiv preprint arXiv:0807.0993}, year={2008}, number={RR-6574}, archivePrefix={arXiv}, eprint={0807.0993}, primaryClass={cs.PF} }
hardy2008wcet
arxiv-4255
0807.1005
Catching Up Faster by Switching Sooner: A Prequential Solution to the AIC-BIC Dilemma
<|reference_start|>Catching Up Faster by Switching Sooner: A Prequential Solution to the AIC-BIC Dilemma: Bayesian model averaging, model selection and its approximations such as BIC are generally statistically consistent, but sometimes achieve slower rates of convergence than other methods such as AIC and leave-one-out cross-validation. On the other hand, these other methods can be inconsistent. We identify the "catch-up phenomenon" as a novel explanation for the slow convergence of Bayesian methods. Based on this analysis we define the switch distribution, a modification of the Bayesian marginal distribution. We show that, under broad conditions, model selection and prediction based on the switch distribution is both consistent and achieves optimal convergence rates, thereby resolving the AIC-BIC dilemma. The method is practical; we give an efficient implementation. The switch distribution has a data compression interpretation, and can thus be viewed as a "prequential" or MDL method; yet it is different from the MDL methods that are usually considered in the literature. We compare the switch distribution to Bayes factor model selection and leave-one-out cross-validation.<|reference_end|>
arxiv
@article{vanerven2008catching, title={Catching Up Faster by Switching Sooner: A Prequential Solution to the AIC-BIC Dilemma}, author={Tim van Erven and Peter Grunwald and Steven de Rooij}, journal={arXiv preprint arXiv:0807.1005}, year={2008}, archivePrefix={arXiv}, eprint={0807.1005}, primaryClass={math.ST cs.IT cs.LG math.IT stat.ME stat.ML stat.TH} }
vanerven2008catching
arxiv-4256
0807.1016
A General Framework for Sound and Complete Floyd-Hoare Logics
<|reference_start|>A General Framework for Sound and Complete Floyd-Hoare Logics: This paper presents an abstraction of Hoare logic to traced symmetric monoidal categories, a very general framework for the theory of systems. Our abstraction is based on a traced monoidal functor from an arbitrary traced monoidal category into the category of pre-orders and monotone relations. We give several examples of how our theory generalises usual Hoare logics (partial correctness of while programs, partial correctness of pointer programs), and provide some case studies on how it can be used to develop new Hoare logics (run-time analysis of while programs and stream circuits).<|reference_end|>
arxiv
@article{arthan2008a, title={A General Framework for Sound and Complete Floyd-Hoare Logics}, author={Rob Arthan and Ursula Martin and Erik A. Mathiesen and Paulo Oliva}, journal={ACM Transactions on Computational Logic, 11(1), 2009}, year={2008}, doi={10.1145/1614431.1614438}, archivePrefix={arXiv}, eprint={0807.1016}, primaryClass={cs.LO cs.OH} }
arthan2008a
arxiv-4257
0807.1139
Algorithms for Secretary Problems on Graphs and Hypergraphs
<|reference_start|>Algorithms for Secretary Problems on Graphs and Hypergraphs: We examine several online matching problems, with applications to Internet advertising reservation systems. Consider an edge-weighted bipartite graph G, with partite sets L, R. We develop an 8-competitive algorithm for the following secretary problem: Initially given R, and the size of L, the algorithm receives the vertices of L sequentially, in a random order. When a vertex l \in L is seen, all edges incident to l are revealed, together with their weights. The algorithm must immediately either match l to an available vertex of R, or decide that l will remain unmatched. Dimitrov and Plaxton show a 16-competitive algorithm for the transversal matroid secretary problem, which is the special case with weights on vertices, not edges. (Equivalently, one may assume that for each l \in L, the weights on all edges incident to l are identical.) We use a similar algorithm, but simplify and improve the analysis to obtain a better competitive ratio for the more general problem. Perhaps of more interest is the fact that our analysis is easily extended to obtain competitive algorithms for similar problems, such as to find disjoint sets of edges in hypergraphs where edges arrive online. We also introduce secretary problems with adversarially chosen groups. Finally, we give a 2e-competitive algorithm for the secretary problem on graphic matroids, where, with edges appearing online, the goal is to find a maximum-weight acyclic subgraph of a given graph.<|reference_end|>
arxiv
@article{korula2008algorithms, title={Algorithms for Secretary Problems on Graphs and Hypergraphs}, author={Nitish Korula and Martin Pal}, journal={arXiv preprint arXiv:0807.1139}, year={2008}, archivePrefix={arXiv}, eprint={0807.1139}, primaryClass={cs.DS} }
korula2008algorithms
arxiv-4258
0807.1153
CSI: A Paradigm for Behavior-oriented Delivery Services in Mobile Human Networks
<|reference_start|>CSI: A Paradigm for Behavior-oriented Delivery Services in Mobile Human Networks: We propose behavior-oriented services as a new paradigm of communication in mobile human networks. Our study is motivated by the tight user-network coupling in future mobile societies. In such a paradigm, messages are sent to inferred behavioral profiles, instead of explicit IDs. Our paper provides a systematic framework in providing such services. First, user behavioral profiles are constructed based on traces collected from two large wireless networks, and their spatio-temporal stability is analyzed. The implicit relationship discovered between mobile users could be utilized to provide a service for message delivery and discovery in various network environments. As an example application, we provide a detailed design of such a service in challenged opportunistic network architecture, named CSI. We provide a fully distributed solution using behavioral profile space gradients and small world structures. Our analysis shows that user behavioral profiles are surprisingly stable, i.e., the similarity of the behavioral profile of a user to its future behavioral profile is above 0.8 for two days and 0.75 for one week, and remains above 0.6 for five weeks. The correlation coefficient of the similarity metrics between a user pair at different time instants is above 0.7 for four days, 0.62 for a week, and remains above 0.5 for two weeks. Leveraging such a stability in user behaviors, the CSI service achieves delivery rate very close to the delay-optimal strategy (above 94%), with minimal overhead (less than 84% of the optimal). We believe that this new paradigm will act as an enabler of multiple new services in mobile societies, and is potentially applicable in server-based, heterogeneous or infrastructure-less wireless environments.<|reference_end|>
arxiv
@article{hsu2008csi:, title={CSI: A Paradigm for Behavior-oriented Delivery Services in Mobile Human Networks}, author={Wei-jen Hsu and Debojyoti Dutta and Ahmed Helmy}, journal={arXiv preprint arXiv:0807.1153}, year={2008}, archivePrefix={arXiv}, eprint={0807.1153}, primaryClass={cs.NI} }
hsu2008csi:
arxiv-4259
0807.1158
Path Gain Algebraic Formulation for the Scalar Linear Network Coding Problem
<|reference_start|>Path Gain Algebraic Formulation for the Scalar Linear Network Coding Problem: In the algebraic view, the solution to a network coding problem is seen as a variety specified by a system of polynomial equations typically derived by using edge-to-edge gains as variables. The output from each sink is equated to its demand to obtain polynomial equations. In this work, we propose a method to derive the polynomial equations using source-to-sink path gains as the variables. In the path gain formulation, we show that linear and quadratic equations suffice; therefore, network coding becomes equivalent to a system of polynomial equations of maximum degree 2. We present algorithms for generating the equations in the path gains and for converting path gain solutions to edge-to-edge gain solutions. Because of the low degree, simplification is readily possible for the system of equations obtained using path gains. Using small-sized network coding problems, we show that the path gain approach results in simpler equations and determines solvability of the problem in certain cases. On a larger network (with 87 nodes and 161 edges), we show how the path gain approach continues to provide deterministic solutions to some network coding problems.<|reference_end|>
arxiv
@article{subramanian2008path, title={Path Gain Algebraic Formulation for the Scalar Linear Network Coding Problem}, author={Abhay T. Subramanian and Andrew Thangaraj}, journal={arXiv preprint arXiv:0807.1158}, year={2008}, doi={10.1109/TIT.2010.2054270}, archivePrefix={arXiv}, eprint={0807.1158}, primaryClass={cs.IT math.IT} }
subramanian2008path
arxiv-4260
0807.1160
Session Initiation Protocol (SIP) Server Overload Control: Design and Evaluation
<|reference_start|>Session Initiation Protocol (SIP) Server Overload Control: Design and Evaluation: A Session Initiation Protocol (SIP) server may be overloaded by emergency-induced call volume, ``American Idol'' style flash crowd effects or denial of service attacks. The SIP server overload problem is interesting especially because the costs of serving or rejecting a SIP session can be similar. For this reason, the built-in SIP overload control mechanism based on generating rejection messages cannot prevent the server from entering congestion collapse under heavy load. The SIP overload problem calls for a pushback control solution in which the potentially overloaded receiving server may notify its upstream sending servers to have them send only the amount of load within the receiving server's processing capacity. The pushback framework can be achieved by either a rate-based feedback or a window-based feedback. The centerpiece of the feedback mechanism is the algorithm used to generate load regulation information. We propose three new window-based feedback algorithms and evaluate them together with two existing rate-based feedback algorithms. We compare the different algorithms in terms of the number of tuning parameters and performance under both steady and variable load. Furthermore, we identify two categories of fairness requirements for SIP overload control, namely, user-centric and provider-centric fairness. With the introduction of a new double-feed SIP overload control architecture, we show how the algorithms can meet those fairness criteria.<|reference_end|>
arxiv
@article{shen2008session, title={Session Initiation Protocol (SIP) Server Overload Control: Design and Evaluation}, author={Charles Shen and Henning Schulzrinne and Erich Nahum}, journal={arXiv preprint arXiv:0807.1160}, year={2008}, archivePrefix={arXiv}, eprint={0807.1160}, primaryClass={cs.NI cs.PF} }
shen2008session
arxiv-4261
0807.1162
Measurement and Evaluation of ENUM Server Performance
<|reference_start|>Measurement and Evaluation of ENUM Server Performance: ENUM is a DNS-based protocol standard for mapping E.164 telephone numbers to Internet Uniform Resource Identifiers (URIs). It places unique requirements on the existing DNS infrastructure, such as data scalability, query throughput, response time, and database update rates. This paper measures and evaluates the performance of existing name server implementation as ENUM servers. We compared PowerDNS (PDNS), BIND and Navitas. Results show that BIND is not suitable for ENUM due to its poor scaling property. Both PDNS and Navitas can serve ENUM. However, Navitas turns out to be highly optimized and clearly outperforms PDNS in all aspects we have tested. We also instrumented the PDNS server to identify its performance bottleneck and investigated ways to improve it.<|reference_end|>
arxiv
@article{shen2008measurement, title={Measurement and Evaluation of ENUM Server Performance}, author={Charles Shen and Henning Schulzrinne}, journal={Proceedings of IEEE ICC 2007 p. 1967-1972}, year={2008}, archivePrefix={arXiv}, eprint={0807.1162}, primaryClass={cs.PF cs.NI} }
shen2008measurement
arxiv-4262
0807.1165
Timed Parity Games: Complexity and Robustness
<|reference_start|>Timed Parity Games: Complexity and Robustness: We consider two-player games played in real time on game structures with clocks and parity objectives. The games are concurrent in that at each turn, both players independently propose a time delay and an action, and the action with the shorter delay is chosen. To prevent a player from winning by blocking time, we restrict each player to strategies that ensure that the player cannot be responsible for causing a zeno run. First, we present an efficient reduction of these games to turn-based (i.e., nonconcurrent) finite-state (i.e., untimed) parity games. The states of the resulting game are pairs of clock regions of the original game. Our reduction improves the best known complexity for solving timed parity games. Moreover, the rich class of algorithms for classical parity games can now be applied to timed parity games. Second, we consider two restricted classes of strategies for the player that represents the controller in a real-time synthesis problem, namely, limit-robust and bounded-robust strategies. Using a limit-robust strategy, the controller cannot choose an exact real-valued time delay but must allow for some nonzero jitter in each of its actions. If there is a given lower bound on the jitter, then the strategy is bounded-robust. We show that exact strategies are more powerful than limit-robust strategies, which are more powerful than bounded-robust strategies for any bound. For both kinds of robust strategies, we present efficient reductions to standard timed automaton games. These reductions provide algorithms for the synthesis of robust real-time controllers.<|reference_end|>
arxiv
@article{chatterjee2008timed, title={Timed Parity Games: Complexity and Robustness}, author={Krishnendu Chatterjee and Thomas A. Henzinger and Vinayak Prabhu}, journal={arXiv preprint arXiv:0807.1165}, year={2008}, archivePrefix={arXiv}, eprint={0807.1165}, primaryClass={cs.LO cs.GT} }
chatterjee2008timed
arxiv-4263
0807.1169
A VoIP Privacy Mechanism and its Application in VoIP Peering for Voice Service Provider Topology and Identity Hiding
<|reference_start|>A VoIP Privacy Mechanism and its Application in VoIP Peering for Voice Service Provider Topology and Identity Hiding: Voice Service Providers (VSPs) participating in VoIP peering frequently want to withhold their identity and related privacy-sensitive information from other parties during the VoIP communication. A number of existing documents on VoIP privacy exist, but most of them focus on end user privacy. By summarizing and extending existing work, we present a unified privacy mechanism for both VoIP users and service providers. We also show a case study on how VSPs can use this mechanism for identity and topology hiding in VoIP peering.<|reference_end|>
arxiv
@article{shen2008a, title={A VoIP Privacy Mechanism and its Application in VoIP Peering for Voice Service Provider Topology and Identity Hiding}, author={Charles Shen and Henning Schulzrinne}, journal={arXiv preprint arXiv:0807.1169}, year={2008}, number={cucs-039-06}, archivePrefix={arXiv}, eprint={0807.1169}, primaryClass={cs.NI} }
shen2008a
arxiv-4264
0807.1173
A Counterexample Guided Abstraction-Refinement Framework for Markov Decision Processes
<|reference_start|>A Counterexample Guided Abstraction-Refinement Framework for Markov Decision Processes: The main challenge in using abstractions effectively, is to construct a suitable abstraction for the system being verified. One approach that tries to address this problem is that of {\it counterexample guided abstraction-refinement (CEGAR)}, wherein one starts with a coarse abstraction of the system, and progressively refines it, based on invalid counterexamples seen in prior model checking runs, until either an abstraction proves the correctness of the system or a valid counterexample is generated. While CEGAR has been successfully used in verifying non-probabilistic systems automatically, CEGAR has not been applied in the context of probabilistic systems. The main issues that need to be tackled in order to extend the approach to probabilistic systems is a suitable notion of ``counterexample'', algorithms to generate counterexamples, check their validity, and then automatically refine an abstraction based on an invalid counterexample. In this paper, we address these issues, and present a CEGAR framework for Markov Decision Processes.<|reference_end|>
arxiv
@article{chadha2008a, title={A Counterexample Guided Abstraction-Refinement Framework for Markov Decision Processes}, author={Rohit Chadha and Mahesh Viswanthan}, journal={arXiv preprint arXiv:0807.1173}, year={2008}, archivePrefix={arXiv}, eprint={0807.1173}, primaryClass={cs.SE cs.LO} }
chadha2008a
arxiv-4265
0807.1182
Random drift versus selection in academic vocabulary: an evolutionary analysis of published keywords
<|reference_start|>Random drift versus selection in academic vocabulary: an evolutionary analysis of published keywords: The evolution of vocabulary in academic publishing is characterized via keyword frequencies recorded the ISI Web of Science citations database. In four distinct case-studies, evolutionary analysis of keyword frequency change through time is compared to a model of random copying used as the null hypothesis, such that selection may be identified against it. The case studies from the physical sciences indicate greater selection in keyword choice than in the social sciences. Similar evolutionary analyses can be applied to a wide range of phenomena; wherever the popularity of multiple items through time has been recorded, as with web searches, or sales of popular music and books, for example.<|reference_end|>
arxiv
@article{bentley2008random, title={Random drift versus selection in academic vocabulary: an evolutionary analysis of published keywords}, author={R. Alexander Bentley}, journal={PLoS ONE, 3 (2008) e3057}, year={2008}, doi={10.1371/journal.pone.0003057}, archivePrefix={arXiv}, eprint={0807.1182}, primaryClass={physics.soc-ph cs.DL} }
bentley2008random
arxiv-4266
0807.1211
Flux: FunctionaL Updates for XML (extended report)
<|reference_start|>Flux: FunctionaL Updates for XML (extended report): XML database query languages have been studied extensively, but XML database updates have received relatively little attention, and pose many challenges to language design. We are developing an XML update language called Flux, which stands for FunctionaL Updates for XML, drawing upon ideas from functional programming languages. In prior work, we have introduced a core language for Flux with a clear operational semantics and a sound, decidable static type system based on regular expression types. Our initial proposal had several limitations. First, it lacked support for recursive types or update procedures. Second, although a high-level source language can easily be translated to the core language, it is difficult to propagate meaningful type errors from the core language back to the source. Third, certain updates are well-formed yet contain path errors, or ``dead'' subexpressions which never do any useful work. It would be useful to detect path errors, since they often represent errors or optimization opportunities. In this paper, we address all three limitations. Specifically, we present an improved, sound type system that handles recursion. We also formalize a source update language and give a translation to the core language that preserves and reflects typability. We also develop a path-error analysis (a form of dead-code analysis) for updates.<|reference_end|>
arxiv
@article{cheney2008flux:, title={Flux: FunctionaL Updates for XML (extended report)}, author={James Cheney}, journal={arXiv preprint arXiv:0807.1211}, year={2008}, archivePrefix={arXiv}, eprint={0807.1211}, primaryClass={cs.PL cs.DB} }
cheney2008flux:
arxiv-4267
0807.1221
Line Transversals of Convex Polyhedra in $\reals^3$
<|reference_start|>Line Transversals of Convex Polyhedra in $\reals^3$: We establish a bound of $O(n^2k^{1+\eps})$, for any $\eps>0$, on the combinatorial complexity of the set $\T$ of line transversals of a collection $\P$ of $k$ convex polyhedra in $\reals^3$ with a total of $n$ facets, and present a randomized algorithm which computes the boundary of $\T$ in comparable expected time. Thus, when $k\ll n$, the new bounds on the complexity (and construction cost) of $\T$ improve upon the previously best known bounds, which are nearly cubic in $n$. To obtain the above result, we study the set $\TL$ of line transversals which emanate from a fixed line $\ell_0$, establish an almost tight bound of $O(nk^{1+\eps})$ on the complexity of $\TL$, and provide a randomized algorithm which computes $\TL$ in comparable expected time. Slightly improved combinatorial bounds for the complexity of $\TL$, and comparable improvements in the cost of constructing this set, are established for two special cases, both assuming that the polyhedra of $\P$ are pairwise disjoint: the case where $\ell_0$ is disjoint from the polyhedra of $\P$, and the case where the polyhedra of $\P$ are unbounded in a direction parallel to $\ell_0$.<|reference_end|>
arxiv
@article{kaplan2008line, title={Line Transversals of Convex Polyhedra in $\reals^3$}, author={Haim Kaplan and Natan Rubin and Micha Sharir}, journal={arXiv preprint arXiv:0807.1221}, year={2008}, archivePrefix={arXiv}, eprint={0807.1221}, primaryClass={cs.CG} }
kaplan2008line
arxiv-4268
0807.1228
Restricted Mobility Improves Delay-Throughput Trade-offs in Mobile Ad-Hoc Networks
<|reference_start|>Restricted Mobility Improves Delay-Throughput Trade-offs in Mobile Ad-Hoc Networks: In this paper, we analyze asymptotic delay-throughput trade-offs in mobile ad-hoc networks comprising heterogeneous nodes with restricted mobility. We show that node spatial heterogeneity has the ability to drastically improve upon existing scaling laws established under the assumption that nodes are identical and uniformly visit the entire network area. In particular, we consider the situation in which each node moves around its own home-point according to a restricted mobility process which results into a spatial stationary distribution that decays as a power law of exponent delta with the distance from the home-point. For such restricted mobility model, we propose a novel class of scheduling and routing schemes, which significantly outperforms all delay-throughput results previously obtained in the case of identical nodes. In particular, for delta = 2 it is possible to achieve almost constant delay and almost constant per-node throughput (except for a poly-logarithmic factor) as the number of nodes increases, even without resorting to sophisticated coding or signal processing techniques.<|reference_end|>
arxiv
@article{garetto2008restricted, title={Restricted Mobility Improves Delay-Throughput Trade-offs in Mobile Ad-Hoc Networks}, author={Michele Garetto and Emilio Leonardi}, journal={arXiv preprint arXiv:0807.1228}, year={2008}, archivePrefix={arXiv}, eprint={0807.1228}, primaryClass={cs.PF cs.NI} }
garetto2008restricted
arxiv-4269
0807.1253
Informed Traders
<|reference_start|>Informed Traders: An asymmetric information model is introduced for the situation in which there is a small agent who is more susceptible to the flow of information in the market than the general market participant, and who tries to implement strategies based on the additional information. In this model market participants have access to a stream of noisy information concerning the future return of an asset, whereas the informed trader has access to a further information source which is obscured by an additional noise that may be correlated with the market noise. The informed trader uses the extraneous information source to seek statistical arbitrage opportunities, while at the same time accommodating the additional risk. The amount of information available to the general market participant concerning the asset return is measured by the mutual information of the asset price and the associated cash flow. The worth of the additional information source is then measured in terms of the difference of mutual information between the general market participant and the informed trader. This difference is shown to be nonnegative when the signal-to-noise ratio of the information flow is known in advance. Explicit trading strategies leading to statistical arbitrage opportunities, taking advantage of the additional information, are constructed, illustrating how excess information can be translated into profit.<|reference_end|>
arxiv
@article{brody2008informed, title={Informed Traders}, author={Dorje C. Brody and Mark H. A. Davis and Robyn L. Friedman and Lane P. Hughston}, journal={Proceedings of the Royal Society London A465, 1103-1122 (2009)}, year={2008}, doi={10.1098/rspa.2008.0465}, archivePrefix={arXiv}, eprint={0807.1253}, primaryClass={q-fin.TR cs.IT math.IT math.PR} }
brody2008informed
arxiv-4270
0807.1267
Optimal Direct Sum and Privacy Trade-off Results for Quantum and Classical Communication Complexity
<|reference_start|>Optimal Direct Sum and Privacy Trade-off Results for Quantum and Classical Communication Complexity: We show optimal Direct Sum result for the one-way entanglement-assisted quantum communication complexity for any relation f subset of X x Y x Z. We show: Q^{1,pub}(f^m) = Omega(m Q^{1,pub}(f)), where Q^{1,pub}(f), represents the one-way entanglement-assisted quantum communication complexity of f with error at most 1/3 and f^m represents m-copies of f. Similarly for the one-way public-coin classical communication complexity we show: R^{1,pub}(f^m) = Omega(m R^{1,pub}(f)), where R^{1,pub}(f), represents the one-way public-coin classical communication complexity of f with error at most 1/3. We show similar optimal Direct Sum results for the Simultaneous Message Passing quantum and classical models. For two-way protocols we present optimal Privacy Trade-off results leading to a Weak Direct Sum result for such protocols. We show our Direct Sum and Privacy Trade-off results via message compression arguments which also imply a new round elimination lemma in quantum communication. This allows us to extend classical lower bounds on the cell probe complexity of some data structure problems, e.g. Approximate Nearest Neighbor Searching on the Hamming cube {0,1}^n and Predecessor Search to the quantum setting. In a separate result we show that Newman's technique of reducing the number of public-coins in a classical protocol cannot be lifted to the quantum setting. We do this by defining a general notion of black-box reduction of prior entanglement that subsumes Newman's technique. We prove that such a black-box reduction is impossible for quantum protocols. In the final result in the theme of message compression, we provide an upper bound on the problem of Exact Remote State Preparation.<|reference_end|>
arxiv
@article{jain2008optimal, title={Optimal Direct Sum and Privacy Trade-off Results for Quantum and Classical Communication Complexity}, author={Rahul Jain and Pranab Sen and Jaikumar Radhakrishnan}, journal={arXiv preprint arXiv:0807.1267}, year={2008}, archivePrefix={arXiv}, eprint={0807.1267}, primaryClass={cs.DC cs.IT math.IT} }
jain2008optimal
arxiv-4271
0807.1277
Randomized greedy algorithms for independent sets and matchings in regular graphs: Exact results and finite girth corrections
<|reference_start|>Randomized greedy algorithms for independent sets and matchings in regular graphs: Exact results and finite girth corrections: We derive new results for the performance of a simple greedy algorithm for finding large independent sets and matchings in constant degree regular graphs. We show that for $r$-regular graphs with $n$ nodes and girth at least $g$, the algorithm finds an independent set of expected cardinality $f(r)n - O\big(\frac{(r-1)^{\frac{g}{2}}}{\frac{g}{2}!} n\big)$, where $f(r)$ is a function which we explicitly compute. A similar result is established for matchings. Our results imply improved bounds for the size of the largest independent set in these graphs, and provide the first results of this type for matchings. As an implication we show that the greedy algorithm returns a nearly perfect matching when both the degree $r$ and girth $g$ are large. Furthermore, we show that the cardinality of independent sets and matchings produced by the greedy algorithm in \emph{arbitrary} bounded degree graphs is concentrated around the mean. Finally, we analyze the performance of the greedy algorithm for the case of random i.i.d. weighted independent sets and matchings, and obtain a remarkably simple expression for the limiting expected values produced by the algorithm. In fact, all the other results are obtained as straightforward corollaries from the results for the weighted case.<|reference_end|>
arxiv
@article{gamarnik2008randomized, title={Randomized greedy algorithms for independent sets and matchings in regular graphs: Exact results and finite girth corrections}, author={David Gamarnik and David Goldberg}, journal={arXiv preprint arXiv:0807.1277}, year={2008}, archivePrefix={arXiv}, eprint={0807.1277}, primaryClass={cs.DM cs.DS} }
gamarnik2008randomized
arxiv-4272
0807.1282
Satisfiability of Almost Disjoint CNF Formulas
<|reference_start|>Satisfiability of Almost Disjoint CNF Formulas: We call a CNF formula linear if any two clauses have at most one variable in common. Let m(k) be the largest integer m such that any linear k-CNF formula with <= m clauses is satisfiable. We show that 4^k / (4e^2k^3) <= m(k) < ln(2) k^4 4^k. More generally, a (k,d)-CSP is a constraint satisfaction problem in conjunctive normal form where each variable can take on one of d values, and each constraint contains k variables and forbids exacty one of the d^k possible assignments to these variables. Call a (k,d)-CSP l-disjoint if no two distinct constraints have l or more variables in common. Let m_l(k,d) denote the largest integer m such that any l-disjoint (k,d)-CSP with at most m constraints is satisfiable. We show that 1/k (d^k/(ed^(l-1)k))^(1+1/(l-1))<= m_l(k,d) < c (k^2/l ln(d) d^k)^(1+1/(l-1)). for some constant c. This means for constant l, upper and lower bound differ only in a polynomial factor in d and k.<|reference_end|>
arxiv
@article{scheder2008satisfiability, title={Satisfiability of Almost Disjoint CNF Formulas}, author={Dominik Scheder}, journal={arXiv preprint arXiv:0807.1282}, year={2008}, archivePrefix={arXiv}, eprint={0807.1282}, primaryClass={cs.DM} }
scheder2008satisfiability
arxiv-4273
0807.1297
General Auction Mechanism for Search Advertising
<|reference_start|>General Auction Mechanism for Search Advertising: In sponsored search, a number of advertising slots is available on a search results page, and have to be allocated among a set of advertisers competing to display an ad on the page. This gives rise to a bipartite matching market that is typically cleared by the way of an automated auction. Several auction mechanisms have been proposed, with variants of the Generalized Second Price (GSP) being widely used in practice. A rich body of work on bipartite matching markets builds upon the stable marriage model of Gale and Shapley and the assignment model of Shapley and Shubik. We apply insights from this line of research into the structure of stable outcomes and their incentive properties to advertising auctions. We model advertising auctions in terms of an assignment model with linear utilities, extended with bidder and item specific maximum and minimum prices. Auction mechanisms like the commonly used GSP or the well-known Vickrey-Clarke-Groves (VCG) are interpreted as simply computing a \emph{bidder-optimal stable matching} in this model, for a suitably defined set of bidder preferences. In our model, the existence of a stable matching is guaranteed, and under a non-degeneracy assumption a bidder-optimal stable matching exists as well. We give an algorithm to find such matching in polynomial time, and use it to design truthful mechanism that generalizes GSP, is truthful for profit-maximizing bidders, implements features like bidder-specific minimum prices and position-specific bids, and works for rich mixtures of bidders and preferences.<|reference_end|>
arxiv
@article{aggarwal2008general, title={General Auction Mechanism for Search Advertising}, author={Gagan Aggarwal and S. Muthukrishnan and David Pal and Martin Pal}, journal={arXiv preprint arXiv:0807.1297}, year={2008}, archivePrefix={arXiv}, eprint={0807.1297}, primaryClass={cs.GT} }
aggarwal2008general
arxiv-4274
0807.1313
On the Tradeoffs of Implementing Randomized Network Coding in Multicast Networks
<|reference_start|>On the Tradeoffs of Implementing Randomized Network Coding in Multicast Networks: Randomized network coding (RNC) greatly reduces the complexity of implementing network coding in large-scale, heterogeneous networks. This paper examines two tradeoffs in applying RNC: The first studies how the performance of RNC varies with a node's randomizing capabilities. Specifically, a limited randomized network coding (L-RNC) scheme - in which intermediate nodes perform randomized encoding based on only a limited number of random coefficients - is proposed and its performance bounds are analyzed. Such a L-RNC approach is applicable to networks in which nodes have either limited computation/storage capacity or have ambiguity about downstream edge connectivity (e.g., as in ad hoc sensor networks). A second tradeoff studied here examines the relationship between the reliability and the capacity gains of generalized RNC, i.e., how the outage probability of RNC relates to the transmission rate at the source node. This tradeoff reveals that significant reductions in outage probability are possible when the source transmits deliberately and only slightly below network capacity. This approach provides an effective means to improve the feasibility probability of RNC when the size of the finite field is fixed.<|reference_end|>
arxiv
@article{chen2008on, title={On the Tradeoffs of Implementing Randomized Network Coding in Multicast Networks}, author={Yingda Chen and Shalinee Kishore}, journal={arXiv preprint arXiv:0807.1313}, year={2008}, archivePrefix={arXiv}, eprint={0807.1313}, primaryClass={cs.IT math.IT} }
chen2008on
arxiv-4275
0807.1372
Communication over Finite-Field Matrix Channels
<|reference_start|>Communication over Finite-Field Matrix Channels: This paper is motivated by the problem of error control in network coding when errors are introduced in a random fashion (rather than chosen by an adversary). An additive-multiplicative matrix channel is considered as a model for random network coding. The model assumes that n packets of length m are transmitted over the network, and up to t erroneous packets are randomly chosen and injected into the network. Upper and lower bounds on capacity are obtained for any channel parameters, and asymptotic expressions are provided in the limit of large field or matrix size. A simple coding scheme is presented that achieves capacity in both limiting cases. The scheme has decoding complexity O(n^2 m) and a probability of error that decreases exponentially both in the packet length and in the field size in bits. Extensions of these results for coherent network coding are also presented.<|reference_end|>
arxiv
@article{silva2008communication, title={Communication over Finite-Field Matrix Channels}, author={Danilo Silva and Frank R. Kschischang and Ralf K{\"o}tter}, journal={IEEE Transactions on Information Theory, vol. 56, no. 3, pp. 1296-1305, Mar. 2010}, year={2008}, doi={10.1109/TIT.2009.2039167}, archivePrefix={arXiv}, eprint={0807.1372}, primaryClass={cs.IT math.IT} }
silva2008communication
arxiv-4276
0807.1412
Quantum Query Complexity of Multilinear Identity Testing
<|reference_start|>Quantum Query Complexity of Multilinear Identity Testing: Motivated by the quantum algorithm in \cite{MN05} for testing commutativity of black-box groups, we study the following problem: Given a black-box finite ring $R=\angle{r_1,...,r_k}$ where $\{r_1,r_2,...,r_k\}$ is an additive generating set for $R$ and a multilinear polynomial $f(x_1,...,x_m)$ over $R$ also accessed as a black-box function $f:R^m\to R$ (where we allow the indeterminates $x_1,...,x_m$ to be commuting or noncommuting), we study the problem of testing if $f$ is an \emph{identity} for the ring $R$. More precisely, the problem is to test if $f(a_1,a_2,...,a_m)=0$ for all $a_i\in R$. We give a quantum algorithm with query complexity $O(m(1+\alpha)^{m/2} k^{\frac{m}{m+1}})$ assuming $k\geq (1+1/\alpha)^{m+1}$. Towards a lower bound, we also discuss a reduction from a version of $m$-collision to this problem. We also observe a randomized test with query complexity $4^mmk$ and constant success probability and a deterministic test with $k^m$ query complexity.<|reference_end|>
arxiv
@article{arvind2008quantum, title={Quantum Query Complexity of Multilinear Identity Testing}, author={V. Arvind and Partha Mukhopadhyay}, journal={arXiv preprint arXiv:0807.1412}, year={2008}, archivePrefix={arXiv}, eprint={0807.1412}, primaryClass={cs.CC} }
arvind2008quantum
arxiv-4277
0807.1458
Theory of Rumour Spreading in Complex Social Networks
<|reference_start|>Theory of Rumour Spreading in Complex Social Networks: We introduce a general stochastic model for the spread of rumours, and derive mean-field equations that describe the dynamics of the model on complex social networks (in particular those mediated by the Internet). We use analytical and numerical solutions of these equations to examine the threshold behavior and dynamics of the model on several models of such networks: random graphs, uncorrelated scale-free networks and scale-free networks with assortative degree correlations. We show that in both homogeneous networks and random graphs the model exhibits a critical threshold in the rumour spreading rate below which a rumour cannot propagate in the system. In the case of scale-free networks, on the other hand, this threshold becomes vanishingly small in the limit of infinite system size. We find that the initial rate at which a rumour spreads is much higher in scale-free networks than in random graphs, and that the rate at which the spreading proceeds on scale-free networks is further increased when assortative degree correlations are introduced. The impact of degree correlations on the final fraction of nodes that ever hears a rumour, however, depends on the interplay between network topology and the rumour spreading rate. Our results show that scale-free social networks are prone to the spreading of rumours, just as they are to the spreading of infections. They are relevant to the spreading dynamics of chain emails, viral advertising and large-scale information dissemination algorithms on the Internet.<|reference_end|>
arxiv
@article{nekovee2008theory, title={Theory of Rumour Spreading in Complex Social Networks}, author={Maziar Nekovee and Y. Moreno and G. Bianconi and M. Marsili}, journal={Physica A, Vol 374, 457 (2007)}, year={2008}, doi={10.1016/j.physa.2006.07.017}, archivePrefix={arXiv}, eprint={0807.1458}, primaryClass={physics.soc-ph cond-mat.stat-mech cs.CR physics.bio-ph} }
nekovee2008theory
arxiv-4278
0807.1475
Simulations of Large-scale WiFi-based Wireless Networks: Interdisciplinary Challenges and Applications
<|reference_start|>Simulations of Large-scale WiFi-based Wireless Networks: Interdisciplinary Challenges and Applications: Wireless Fidelity (WiFi) is the fastest growing wireless technology to date. In addition to providing wire-free connectivity to the Internet WiFi technology also enables mobile devices to connect directly to each other and form highly dynamic wireless adhoc networks. Such distributed networks can be used to perform cooperative communication tasks such as data routing and information dissemination in the absence of a fixed infrastructure. Furthermore, adhoc grids composed of wirelessly networked portable devices are emerging as a new paradigm in grid computing. In this paper we review computational and algorithmic challenges of high-fidelity simulations of such WiFi-based wireless communication and computing networks, including scalable topology maintenance, mobility modelling, parallelisation and synchronisation. We explore similarities and differences between the simulations of these networks and simulations of interacting many-particle systems, such as molecular dynamics (MD) simulations. We show how the cell linked-list algorithm which we have adapted from our MD simulations can be used to greatly improve the computational performance of wireless network simulators in the presence of mobility, and illustrate with an example from our simulation studies of worm attacks on mobile wireless adhoc networks.<|reference_end|>
arxiv
@article{nekovee2008simulations, title={Simulations of Large-scale WiFi-based Wireless Networks: Interdisciplinary Challenges and Applications}, author={Maziar Nekovee}, journal={arXiv preprint arXiv:0807.1475}, year={2008}, doi={10.1016/j.future.2008.05.007}, archivePrefix={arXiv}, eprint={0807.1475}, primaryClass={cs.CE cs.DC} }
nekovee2008simulations
arxiv-4279
0807.1494
Algorithm Selection as a Bandit Problem with Unbounded Losses
<|reference_start|>Algorithm Selection as a Bandit Problem with Unbounded Losses: Algorithm selection is typically based on models of algorithm performance, learned during a separate offline training sequence, which can be prohibitively expensive. In recent work, we adopted an online approach, in which a performance model is iteratively updated and used to guide selection on a sequence of problem instances. The resulting exploration-exploitation trade-off was represented as a bandit problem with expert advice, using an existing solver for this game, but this required the setting of an arbitrary bound on algorithm runtimes, thus invalidating the optimal regret of the solver. In this paper, we propose a simpler framework for representing algorithm selection as a bandit problem, with partial information, and an unknown bound on losses. We adapt an existing solver to this game, proving a bound on its expected regret, which holds also for the resulting algorithm selection technique. We present preliminary experiments with a set of SAT solvers on a mixed SAT-UNSAT benchmark.<|reference_end|>
arxiv
@article{gagliolo2008algorithm, title={Algorithm Selection as a Bandit Problem with Unbounded Losses}, author={Matteo Gagliolo and Juergen Schmidhuber}, journal={arXiv preprint arXiv:0807.1494}, year={2008}, doi={10.1007/978-3-642-13800-3_7}, number={IDSIA-07-08}, archivePrefix={arXiv}, eprint={0807.1494}, primaryClass={cs.AI cs.GT cs.LG} }
gagliolo2008algorithm
arxiv-4280
0807.1496
Expanders via Random Spanning Trees
<|reference_start|>Expanders via Random Spanning Trees: Motivated by the problem of routing reliably and scalably in a graph, we introduce the notion of a splicer, the union of spanning trees of a graph. We prove that for any bounded-degree n-vertex graph, the union of two random spanning trees approximates the expansion of every cut of the graph to within a factor of O(log n). For the random graph G_{n,p}, for p> c log{n}/n, two spanning trees give an expander. This is suggested by the case of the complete graph, where we prove that two random spanning trees give an expander. The construction of the splicer is elementary -- each spanning tree can be produced independently using an algorithm by Aldous and Broder: a random walk in the graph with edges leading to previously unvisited vertices included in the tree. A second important application of splicers is to graph sparsification where the goal is to approximate every cut (and more generally the quadratic form of the Laplacian) using only a small subgraph of the original graph. Benczur-Karger as well as Spielman-Srivastava have shown sparsifiers with O(n log n/eps^2)$ edges that achieve approximation within factors 1+eps and 1-eps. Their methods, based on independent sampling of edges, need Omega(n log n) edges to get any approximation (else the subgraph could be disconnected) and leave open the question of linear-size sparsifiers. Splicers address this question for random graphs by providing sparsifiers of size O(n) that approximate every cut to within a factor of O(log n).<|reference_end|>
arxiv
@article{goyal2008expanders, title={Expanders via Random Spanning Trees}, author={Navin Goyal and Luis Rademacher and Santosh Vempala}, journal={arXiv preprint arXiv:0807.1496}, year={2008}, archivePrefix={arXiv}, eprint={0807.1496}, primaryClass={cs.DM cs.NI} }
goyal2008expanders
arxiv-4281
0807.1513
A First-Order Non-Homogeneous Markov Model for the Response of Spiking Neurons Stimulated by Small Phase-Continuous Signals
<|reference_start|>A First-Order Non-Homogeneous Markov Model for the Response of Spiking Neurons Stimulated by Small Phase-Continuous Signals: We present a first-order non-homogeneous Markov model for the interspike-interval density of a continuously stimulated spiking neuron. The model allows the conditional interspike-interval density and the stationary interspike-interval density to be expressed as products of two separate functions, one of which describes only the neuron characteristics, and the other of which describes only the signal characteristics. This allows the use of this model to predict the response when the underlying neuron model is not known or well determined. The approximation shows particularly clearly that signal autocorrelations and cross-correlations arise as natural features of the interspike-interval density, and are particularly clear for small signals and moderate noise. We show that this model simplifies the design of spiking neuron cross-correlation systems, and describe a four-neuron mutual inhibition network that generates a cross-correlation output for two input signals.<|reference_end|>
arxiv
@article{tapson2008a, title={A First-Order Non-Homogeneous Markov Model for the Response of Spiking Neurons Stimulated by Small Phase-Continuous Signals}, author={J. Tapson and C. Jin and A. van Schaik and R. Etienne-Cummings}, journal={Neural Computation Volume 21 Issue 6 Pages 1554-1588 Year 2009}, year={2008}, archivePrefix={arXiv}, eprint={0807.1513}, primaryClass={q-bio.NC cs.NE} }
tapson2008a
arxiv-4282
0807.1523
Mean asymptotic behaviour of radix-rational sequences and dilation equations (Extended version)
<|reference_start|>Mean asymptotic behaviour of radix-rational sequences and dilation equations (Extended version): The generating series of a radix-rational sequence is a rational formal power series from formal language theory viewed through a fixed radix numeration system. For each radix-rational sequence with complex values we provide an asymptotic expansion for the sequence of its Ces\`aro means. The precision of the asymptotic expansion depends on the joint spectral radius of the linear representation of the sequence; the coefficients are obtained through some dilation equations. The proofs are based on elementary linear algebra.<|reference_end|>
arxiv
@article{dumas2008mean, title={Mean asymptotic behaviour of radix-rational sequences and dilation equations (Extended version)}, author={Philippe Dumas (INRIA Rocquencourt)}, journal={arXiv preprint arXiv:0807.1523}, year={2008}, archivePrefix={arXiv}, eprint={0807.1523}, primaryClass={cs.DM math.CO} }
dumas2008mean
arxiv-4283
0807.1524
Inductive and Coinductive Components of Corecursive Functions in Coq
<|reference_start|>Inductive and Coinductive Components of Corecursive Functions in Coq: In Constructive Type Theory, recursive and corecursive definitions are subject to syntactic restrictions which guarantee termination for recursive functions and productivity for corecursive functions. However, many terminating and productive functions do not pass the syntactic tests. Bove proposed in her thesis an elegant reformulation of the method of accessibility predicates that widens the range of terminative recursive functions formalisable in Constructive Type Theory. In this paper, we pursue the same goal for productive corecursive functions. Notably, our method of formalisation of coinductive definitions of productive functions in Coq requires not only the use of ad-hoc predicates, but also a systematic algorithm that separates the inductive and coinductive parts of functions.<|reference_end|>
arxiv
@article{bertot2008inductive, title={Inductive and Coinductive Components of Corecursive Functions in Coq}, author={Yves Bertot (INRIA Sophia Antipolis), Ekaterina Komendantskaya (INRIA Sophia Antipolis)}, journal={arXiv preprint arXiv:0807.1524}, year={2008}, archivePrefix={arXiv}, eprint={0807.1524}, primaryClass={cs.LO} }
bertot2008inductive
arxiv-4284
0807.1543
On the Capacity of MIMO Interference Channels
<|reference_start|>On the Capacity of MIMO Interference Channels: The capacity region of a multiple-input-multiple-output interference channel (MIMO IC) where the channel matrices are square and invertible is studied. The capacity region for strong interference is established where the definition of strong interference parallels that of scalar channels. Moreover, the sum-rate capacity for Z interference, noisy interference, and mixed interference is established. These results generalize known results for the scalar Gaussian IC.<|reference_end|>
arxiv
@article{shang2008on, title={On the Capacity of MIMO Interference Channels}, author={Xiaohu Shang and Biao Chen and Gerhard Kramer and H. Vincent Poor}, journal={arXiv preprint arXiv:0807.1543}, year={2008}, archivePrefix={arXiv}, eprint={0807.1543}, primaryClass={cs.IT math.IT} }
shang2008on
arxiv-4285
0807.1550
Discernment of Hubs and Clusters in Socioeconomic Networks
<|reference_start|>Discernment of Hubs and Clusters in Socioeconomic Networks: Interest in the analysis of networks has grown rapidly in the new millennium. Consequently, we promote renewed attention to a certain methodological approach introduced in 1974. Over the succeeding decade, this two-stage--double-standardization and hierarchical clustering (single-linkage-like)--procedure was applied to a wide variety of weighted, directed networks of a socioeconomic nature, frequently revealing the presence of ``hubs''. These were, typically--in the numerous instances studied of migration flows between geographic subdivisions within nations--``cosmopolitan/non-provincial'' areas, a prototypical example being the French capital, Paris. Such locations emit and absorb people broadly across their respective nations. Additionally, the two-stage procedure--which ``might very well be the most successful application of cluster analysis'' (R. C. Dubes, 1985)--detected many (physically or socially) isolated, functional groups (regions) of areas, such as the southern islands, Shikoku and Kyushu, of Japan, the Italian islands of Sardinia and Sicily, and the New England region of the United States. Further, we discuss a (complementary) approach developed in 1976, in which the max-flow/min-cut theorem was applied to raw/non-standardized (interindustry, as well as migration) flows.<|reference_end|>
arxiv
@article{slater2008discernment, title={Discernment of Hubs and Clusters in Socioeconomic Networks}, author={Paul B. Slater}, journal={arXiv preprint arXiv:0807.1550}, year={2008}, archivePrefix={arXiv}, eprint={0807.1550}, primaryClass={physics.soc-ph cs.SI physics.data-an stat.AP} }
slater2008discernment
arxiv-4286
0807.1560
Scientific Paper Summarization Using Citation Summary Networks
<|reference_start|>Scientific Paper Summarization Using Citation Summary Networks: Quickly moving to a new area of research is painful for researchers due to the vast amount of scientific literature in each field of study. One possible way to overcome this problem is to summarize a scientific topic. In this paper, we propose a model of summarizing a single article, which can be further used to summarize an entire topic. Our model is based on analyzing others' viewpoint of the target article's contributions and the study of its citation summary network using a clustering approach.<|reference_end|>
arxiv
@article{qazvinian2008scientific, title={Scientific Paper Summarization Using Citation Summary Networks}, author={Vahed Qazvinian and Dragomir R. Radev}, journal={arXiv preprint arXiv:0807.1560}, year={2008}, archivePrefix={arXiv}, eprint={0807.1560}, primaryClass={cs.IR cs.CL} }
qazvinian2008scientific
arxiv-4287
0807.1603
A Radar for the Internet
<|reference_start|>A Radar for the Internet: In contrast with most internet topology measurement research, our concern here is not to obtain a map as complete and precise as possible of the whole internet. Instead, we claim that each machine's view of this topology, which we call ego-centered view, is an object worth of study in itself. We design and implement an ego-centered measurement tool, and perform radar-like measurements consisting of repeated measurements of such views of the internet topology. We conduct long-term (several weeks) and high-speed (one round every few minutes) measurements of this kind from more than one hundred monitors, and we provide the obtained data. We also show that these data may be used to detect events in the dynamics of internet topology.<|reference_end|>
arxiv
@article{latapy2008a, title={A Radar for the Internet}, author={Matthieu Latapy and Clemence Magnien and Frederic Ouedraogo}, journal={Proceedings of ADN'08: 1st International Workshop on Analysis of Dynamic Networks, in conjunction with IEEE ICDM 2008}, year={2008}, doi={10.1109/ICDMW.2008.121}, archivePrefix={arXiv}, eprint={0807.1603}, primaryClass={cs.NI} }
latapy2008a
arxiv-4288
0807.1667
Quasi-Mandelbrot sets for perturbed complex analytic maps: visual patterns
<|reference_start|>Quasi-Mandelbrot sets for perturbed complex analytic maps: visual patterns: We consider perturbations of the complex quadratic map $ z \to z^2 +c$ and corresponding changes in their quasi-Mandelbrot sets. Depending on particular perturbation, visual forms of quasi-Mandelbrot set changes either sharply (when the perturbation reaches some critical value) or continuously. In the latter case we have a smooth transition from the classical form of the set to some forms, constructed from mostly linear structures, as it is typical for two-dimensional real number dynamics. Two examples of continuous evolution of the quasi-Mandelbrot set are described.<|reference_end|>
arxiv
@article{toporensky2008quasi-mandelbrot, title={Quasi-Mandelbrot sets for perturbed complex analytic maps: visual patterns}, author={A. V. Toporensky}, journal={arXiv preprint arXiv:0807.1667}, year={2008}, archivePrefix={arXiv}, eprint={0807.1667}, primaryClass={cs.GR} }
toporensky2008quasi-mandelbrot
arxiv-4289
0807.1669
Coinductive Formal Reasoning in Exact Real Arithmetic
<|reference_start|>Coinductive Formal Reasoning in Exact Real Arithmetic: In this article we present a method for formally proving the correctness of the lazy algorithms for computing homographic and quadratic transformations -- of which field operations are special cases-- on a representation of real numbers by coinductive streams. The algorithms work on coinductive stream of M\"{o}bius maps and form the basis of the Edalat--Potts exact real arithmetic. We use the machinery of the Coq proof assistant for the coinductive types to present the formalisation. The formalised algorithms are only partially productive, i.e., they do not output provably infinite streams for all possible inputs. We show how to deal with this partiality in the presence of syntactic restrictions posed by the constructive type theory of Coq. Furthermore we show that the type theoretic techniques that we develop are compatible with the semantics of the algorithms as continuous maps on real numbers. The resulting Coq formalisation is available for public download.<|reference_end|>
arxiv
@article{niqui2008coinductive, title={Coinductive Formal Reasoning in Exact Real Arithmetic}, author={Milad Niqui}, journal={Logical Methods in Computer Science, Volume 4, Issue 3 (September 10, 2008) lmcs:953}, year={2008}, doi={10.2168/LMCS-4(3:6)2008}, archivePrefix={arXiv}, eprint={0807.1669}, primaryClass={cs.LO} }
niqui2008coinductive
arxiv-4290
0807.1720
Resource Allocation Strategies for In-Network Stream Processing
<|reference_start|>Resource Allocation Strategies for In-Network Stream Processing: In this paper we consider the operator mapping problem for in-network stream processing applications. In-network stream processing consists in applying a tree of operators in steady-state to multiple data objects that are continually updated at various locations on a network. Examples of in-network stream processing include the processing of data in a sensor network, or of continuous queries on distributed relational databases. We study the operator mapping problem in a ``constructive'' scenario, i.e., a scenario in which one builds a platform dedicated to the application buy purchasing processing servers with various costs and capabilities. The objective is to minimize the cost of the platform while ensuring that the application achieves a minimum steady-state throughput. The first contribution of this paper is the formalization of a set of relevant operator-placement problems as linear programs, and a proof that even simple versions of the problem are NP-complete. Our second contribution is the design of several polynomial time heuristics, which are evaluated via extensive simulations and compared to theoretical bounds for optimal solutions.<|reference_end|>
arxiv
@article{benoit2008resource, title={Resource Allocation Strategies for In-Network Stream Processing}, author={Anne Benoit (LIP), Henri Casanova, Veronika Rehn-Sonigo (LIP), Yves Robert (LIP)}, journal={arXiv preprint arXiv:0807.1720}, year={2008}, archivePrefix={arXiv}, eprint={0807.1720}, primaryClass={cs.DC} }
benoit2008resource
arxiv-4291
0807.1734
Faster Sequential Search with a Two-Pass Dynamic-Time-Warping Lower Bound
<|reference_start|>Faster Sequential Search with a Two-Pass Dynamic-Time-Warping Lower Bound: The Dynamic Time Warping (DTW) is a popular similarity measure between time series. The DTW fails to satisfy the triangle inequality and its computation requires quadratic time. Hence, to find closest neighbors quickly, we use bounding techniques. We can avoid most DTW computations with an inexpensive lower bound (LB_Keogh). We compare LB_Keogh with a tighter lower bound (LB_Improved). We find that LB_Improved-based search is faster for sequential search. As an example, our approach is 3 times faster over random-walk and shape time series. We also review some of the mathematical properties of the DTW. We derive a tight triangle inequality for the DTW. We show that the DTW becomes the l_1 distance when time series are separated by a constant.<|reference_end|>
arxiv
@article{lemire2008faster, title={Faster Sequential Search with a Two-Pass Dynamic-Time-Warping Lower Bound}, author={Daniel Lemire}, journal={arXiv preprint arXiv:0807.1734}, year={2008}, archivePrefix={arXiv}, eprint={0807.1734}, primaryClass={cs.DB} }
lemire2008faster
arxiv-4292
0807.1753
The cost of probabilistic gathering in oblivious robot networks
<|reference_start|>The cost of probabilistic gathering in oblivious robot networks: In this paper we address the complexity issues of two agreement problems in oblivious robot networks namely gathering and scattering. These abstractions are fundamental coordination problems in cooperative mobile robotics. Moreover, their oblivious characteristics make them appealing for self-stabilization since they are self-stabilizing with no extra-cost. Given a set of robots with arbitrary initial location and no initial agreement on a global coordinate system, gathering requires that all robots reach the exact same but not predetermined location while scattering aims at scattering robots such that no two robots share the same location. Both deterministic gathering and scattering have been proved impossible under arbitrary schedulers therefore probabilistic solutions have been recently proposed. The contribution of this paper is twofold. First, we propose a detailed complexity analysis of the existent probabilistic gathering algorithms in both fault-free and fault-prone environments. We consider both crash and byzantine-prone environments. Moreover, using Markov chains tools and additional assumptions on the environment we prove that the gathering convergence time can be reduced from O(n^2) (the best known tight bound) to O(nln(n)). Additionally, we prove that in crash-prone environments gathering is achieved in O(nln(n)+2f). Second, using the same technique we prove that the best known scattering strategy converges in fault-free systems in O(n) (which is one to optimal) while in crash-prone environments it needs O(n-f). Finally, we conclude the paper with a discussion related to different strategies to gather oblivious robots.<|reference_end|>
arxiv
@article{clement2008the, title={The cost of probabilistic gathering in oblivious robot networks}, author={Julien Clement (1), Xavier Defago (2), Maria Gradinariu Potop-Butucaru (2) and Stephane Messika (1) ((1) LRI, Universite Paris 11, France (2) JAIST, Japon, (3) LIP6, Universite Paris 6)}, journal={arXiv preprint arXiv:0807.1753}, year={2008}, archivePrefix={arXiv}, eprint={0807.1753}, primaryClass={cs.DC cs.CC} }
clement2008the
arxiv-4293
0807.1765
Archer: A Community Distributed Computing Infrastructure for Computer Architecture Research and Education
<|reference_start|>Archer: A Community Distributed Computing Infrastructure for Computer Architecture Research and Education: This paper introduces Archer, a community-based computing resource for computer architecture research and education. The Archer infrastructure integrates virtualization and batch scheduling middleware to deliver high-throughput computing resources aggregated from resources distributed across wide-area networks and owned by different participating entities in a seamless manner. The paper discusses the motivations leading to the design of Archer, describes its core middleware components, and presents an analysis of the functionality and performance of a prototype wide-area deployment running a representative computer architecture simulation workload.<|reference_end|>
arxiv
@article{figueiredo2008archer:, title={Archer: A Community Distributed Computing Infrastructure for Computer Architecture Research and Education}, author={Renato Figueiredo, P. Oscar Boykin, Jose A. B. Fortes, Tao Li, Jie-Kwon Peir, David Wolinsky, Lizy John, David Kaeli, David Lilja, Sally McKee, Gokhan Memik, Alain Roy, Gary Tyson}, journal={arXiv preprint arXiv:0807.1765}, year={2008}, doi={10.1007/978-3-642-03354-4_7}, archivePrefix={arXiv}, eprint={0807.1765}, primaryClass={cs.AR} }
figueiredo2008archer:
arxiv-4294
0807.1773
Spatial Interference Cancellation for Multi-Antenna Mobile Ad Hoc Networks
<|reference_start|>Spatial Interference Cancellation for Multi-Antenna Mobile Ad Hoc Networks: Interference between nodes is a critical impairment in mobile ad hoc networks (MANETs). This paper studies the role of multiple antennas in mitigating such interference. Specifically, a network is studied in which receivers apply zero-forcing beamforming to cancel the strongest interferers. Assuming a network with Poisson distributed transmitters and independent Rayleigh fading channels, the transmission capacity is derived, which gives the maximum number of successful transmissions per unit area. Mathematical tools from stochastic geometry are applied to obtain the asymptotic transmission capacity scaling and characterize the impact of inaccurate channel state information (CSI). It is shown that, if each node cancels L interferers, the transmission capacity decreases as the outage probability to the power of 1/(L+1) as the outage probability vanishes. For fixed outage probability, as L grows, the transmission capacity increases as L to the power of (1-2/alpha) where alpha is the path-loss exponent. Moreover, CSI inaccuracy is shown to have no effect on the transmission capacity scaling as the outage probability vanishes, provided that the CSI training sequence has an appropriate length, which we derived. Numerical results suggest that canceling merely one interferer by each node increases the transmission capacity by an order of magnitude or more, even when the CSI is imperfect.<|reference_end|>
arxiv
@article{huang2008spatial, title={Spatial Interference Cancellation for Multi-Antenna Mobile Ad Hoc Networks}, author={Kaibin Huang, Jeffrey G. Andrews, Dongning Guo, Robert W. Heath, Jr., and Randall A. Berry}, journal={arXiv preprint arXiv:0807.1773}, year={2008}, archivePrefix={arXiv}, eprint={0807.1773}, primaryClass={cs.IT math.IT} }
huang2008spatial
arxiv-4295
0807.1775
Towards Black-Box Accountable Authority IBE with Short Ciphertexts and Private Keys
<|reference_start|>Towards Black-Box Accountable Authority IBE with Short Ciphertexts and Private Keys: At Crypto'07, Goyal introduced the concept of Accountable Authority Identity-Based Encryption as a convenient tool to reduce the amount of trust in authorities in Identity-Based Encryption. In this model, if the Private Key Generator (PKG) maliciously re-distributes users' decryption keys, it runs the risk of being caught and prosecuted. Goyal proposed two constructions: the first one is efficient but can only trace well-formed decryption keys to their source; the second one allows tracing obfuscated decryption boxes in a model (called weak black-box model) where cheating authorities have no decryption oracle. The latter scheme is unfortunately far less efficient in terms of decryption cost and ciphertext size. In this work, we propose a new construction that combines the efficiency of Goyal's first proposal with a very simple weak black-box tracing mechanism. Our scheme is described in the selective-ID model but readily extends to meet all security properties in the adaptive-ID sense, which is not known to be true for prior black-box schemes.<|reference_end|>
arxiv
@article{libert2008towards, title={Towards Black-Box Accountable Authority IBE with Short Ciphertexts and Private Keys}, author={Beno{\^\i}t Libert and Damien Vergnaud}, journal={arXiv preprint arXiv:0807.1775}, year={2008}, archivePrefix={arXiv}, eprint={0807.1775}, primaryClass={cs.CR} }
libert2008towards
arxiv-4296
0807.1891
Online Scheduling to Minimize the Maximum Delay Factor
<|reference_start|>Online Scheduling to Minimize the Maximum Delay Factor: In this paper two scheduling models are addressed. First is the standard model (unicast) where requests (or jobs) are independent. The other is the broadcast model where broadcasting a page can satisfy multiple outstanding requests for that page. We consider online scheduling of requests when they have deadlines. Unlike previous models, which mainly consider the objective of maximizing throughput while respecting deadlines, here we focus on scheduling all the given requests with the goal of minimizing the maximum {\em delay factor}. We prove strong lower bounds on the achievable competitive ratios for delay factor scheduling even with unit-time requests. For the unicast model we give algorithms that are $(1 + \eps)$-speed $O({1 \over \eps})$-competitive in both the single machine and multiple machine settings. In the broadcast model we give an algorithm for similar-sized pages that is $(2+ \eps)$-speed $O({1 \over \eps^2})$-competitive. For arbitrary page sizes we give an algorithm that is $(4+\eps)$-speed $O({1 \over \eps^2})$-competitive.<|reference_end|>
arxiv
@article{chekuri2008online, title={Online Scheduling to Minimize the Maximum Delay Factor}, author={Chandra Chekuri and Benjamin Moseley}, journal={arXiv preprint arXiv:0807.1891}, year={2008}, archivePrefix={arXiv}, eprint={0807.1891}, primaryClass={cs.DS} }
chekuri2008online
arxiv-4297
0807.1906
Extension of Inagaki General Weighted Operators and A New Fusion Rule Class of Proportional Redistribution of Intersection Masses
<|reference_start|>Extension of Inagaki General Weighted Operators and A New Fusion Rule Class of Proportional Redistribution of Intersection Masses: In this paper we extend Inagaki Weighted Operators fusion rule (WO) in information fusion by doing redistribution of not only the conflicting mass, but also of masses of non-empty intersections, that we call Double Weighted Operators (DWO). Then we propose a new fusion rule Class of Proportional Redistribution of Intersection Masses (CPRIM), which generates many interesting particular fusion rules in information fusion. Both formulas are presented for any number of sources of information. An application and comparison with other fusion rules are given in the last section.<|reference_end|>
arxiv
@article{smarandache2008extension, title={Extension of Inagaki General Weighted Operators and A New Fusion Rule Class of Proportional Redistribution of Intersection Masses}, author={Florentin Smarandache}, journal={International Journal of Artificial Intelligence, Vol. 3, No. A09, 79-85, 2009}, year={2008}, archivePrefix={arXiv}, eprint={0807.1906}, primaryClass={cs.AI} }
smarandache2008extension
arxiv-4298
0807.1919
The Johnson-Lindenstrauss lemma almost characterizes Hilbert space, but not quite
<|reference_start|>The Johnson-Lindenstrauss lemma almost characterizes Hilbert space, but not quite: Let $X$ be a normed space that satisfies the Johnson-Lindenstrauss lemma (J-L lemma, in short) in the sense that for any integer $n$ and any $x_1,\ldots,x_n\in X$ there exists a linear mapping $L:X\to F$, where $F\subseteq X$ is a linear subspace of dimension $O(\log n)$, such that $\|x_i-x_j\|\le\|L(x_i)-L(x_j)\|\le O(1)\cdot\|x_i-x_j\|$ for all $i,j\in \{1,\ldots, n\}$. We show that this implies that $X$ is almost Euclidean in the following sense: Every $n$-dimensional subspace of $X$ embeds into Hilbert space with distortion $2^{2^{O(\log^*n)}}$. On the other hand, we show that there exists a normed space $Y$ which satisfies the J-L lemma, but for every $n$ there exists an $n$-dimensional subspace $E_n\subseteq Y$ whose Euclidean distortion is at least $2^{\Omega(\alpha(n))}$, where $\alpha$ is the inverse Ackermann function.<|reference_end|>
arxiv
@article{johnson2008the, title={The Johnson-Lindenstrauss lemma almost characterizes Hilbert space, but not quite}, author={William B. Johnson and Assaf Naor}, journal={arXiv preprint arXiv:0807.1919}, year={2008}, archivePrefix={arXiv}, eprint={0807.1919}, primaryClass={math.FA cs.CG math.MG} }
johnson2008the
arxiv-4299
0807.1949
Virtual Transmission Method, A New Distributed Algorithm to Solve Sparse Linear System
<|reference_start|>Virtual Transmission Method, A New Distributed Algorithm to Solve Sparse Linear System: In this paper, we propose a new parallel algorithm which could work naturally on the parallel computer with arbitrary number of processors. This algorithm is named Virtual Transmission Method (VTM). Its physical background is the lossless transmission line and microwave network. The basic idea of VTM is to insert lossless transmission lines into the sparse linear system to achieve distributed computing. VTM is proved to be convergent to solve SPD linear system. Preconditioning method and performance model are presented. Numerical experiments show that VTM is efficient, accurate and stable. Accompanied with VTM, we bring in a new technique to partition the symmetric linear system, which is named Generalized Node & Branch Tearing (GNBT). It is based on Kirchhoff's Current Law from circuit theory. We proved that GNBT is feasible to partition any SPD linear system.<|reference_end|>
arxiv
@article{wei2008virtual, title={Virtual Transmission Method, A New Distributed Algorithm to Solve Sparse Linear System}, author={Fei Wei, Huazhong Yang}, journal={arXiv preprint arXiv:0807.1949}, year={2008}, doi={10.1109/NCM.2008.160}, archivePrefix={arXiv}, eprint={0807.1949}, primaryClass={math.NA cs.DC} }
wei2008virtual
arxiv-4300
0807.1997
Multi-Instance Learning by Treating Instances As Non-IID Samples
<|reference_start|>Multi-Instance Learning by Treating Instances As Non-IID Samples: Multi-instance learning attempts to learn from a training set consisting of labeled bags each containing many unlabeled instances. Previous studies typically treat the instances in the bags as independently and identically distributed. However, the instances in a bag are rarely independent, and therefore a better performance can be expected if the instances are treated in a non-i.i.d. way that exploits the relations among instances. In this paper, we propose a simple yet effective multi-instance learning method, which regards each bag as a graph and uses a specific kernel to distinguish the graphs by considering the features of the nodes as well as the features of the edges that convey some relations among instances. The effectiveness of the proposed method is validated by experiments.<|reference_end|>
arxiv
@article{zhou2008multi-instance, title={Multi-Instance Learning by Treating Instances As Non-I.I.D. Samples}, author={Zhi-Hua Zhou, Yu-Yin Sun, Yu-Feng Li}, journal={arXiv preprint arXiv:0807.1997}, year={2008}, archivePrefix={arXiv}, eprint={0807.1997}, primaryClass={cs.LG cs.AI} }
zhou2008multi-instance