@article{12442,
title = {Special Issue on Video Analysis on Resource-Limited Systems},
journal = {IEEE Transactions on Circuits and Systems for Video Technology},
volume = {21},
year = {2011},
month = oct,
pages = {1349--1352},
abstract = {The 17 papers in this special issue focus on resource-limited systems.},
keywords = {computational complexity, Image Enhancement, Special issues and sections, Video compression},
issn = {1051-8215},
doi = {10.1109/TCSVT.2011.2165795},
author = {Chellappa, Rama and Cavallaro, A. and Wu, Y. and Shan, C. and Fu, Y. and Pulli, K.}
}
@inproceedings{17633,
title = {New Constructive Aspects of the {Lov{\'a}sz} Local Lemma},
booktitle = {2010 51st Annual IEEE Symposium on Foundations of Computer Science (FOCS)},
year = {2010},
month = oct,
pages = {397--406},
publisher = {IEEE},
organization = {IEEE},
abstract = {The Lov{\'a}sz Local Lemma (LLL) is a powerful tool that gives sufficient conditions for avoiding all of a given set of ``bad'' events, with positive probability. A series of results have provided algorithms to efficiently construct structures whose existence is non-constructively guaranteed by the LLL, culminating in the recent breakthrough of Moser \& Tardos. We show that the output distribution of the Moser-Tardos algorithm well-approximates the \emph{conditional LLL-distribution} -- the distribution obtained by conditioning on all bad events being avoided. We show how a known bound on the probabilities of events in this distribution can be used for further probabilistic analysis and give new constructive and non-constructive results. We also show that when an LLL application provides a small amount of slack, the number of resamplings of the Moser-Tardos algorithm is nearly linear in the number of underlying independent variables (not events!), and can thus be used to give efficient constructions in cases where the underlying proof applies the LLL to super-polynomially many events. Even in cases where finding a bad event that holds is computationally hard, we show that applying the algorithm to avoid a polynomial-sized ``core'' subset of bad events leads to a desired outcome with high probability. We demonstrate this idea on several applications. We give the first constant-factor approximation algorithm for the Santa Claus problem by making an LLL-based proof of Feige constructive. We provide Monte Carlo algorithms for acyclic edge coloring, non-repetitive graph colorings, and Ramsey-type graphs. In all these applications the algorithm falls directly out of the non-constructive LLL-based proof.
Our algorithms are very simple, often provide better bounds than previous algorithms, and are in several cases the first efficient algorithms known. As a second type of application we consider settings beyond the critical dependency threshold of the LLL: avoiding all bad events is impossible in these cases. As the first (even non-constructive) result of this kind, we show that by sampling from the LLL-distribution of a selected smaller core, we can avoid a fraction of bad events that is higher than the expectation. MAX $k$-SAT is an example of this.},
keywords = {acyclic edge coloring, Algorithm design and analysis, Approximation algorithms, Approximation methods, computational complexity, Computer science, constant factor approximation algorithm, graph colouring, Linearity, Lovasz Local Lemma, MAX k-SAT, Monte Carlo Algorithm, Monte Carlo methods, Moser-Tardos algorithm, nonrepetitive graph coloring, output distribution, polynomial sized core subset, Polynomials, Probabilistic Method, probabilistic analysis, probabilistic logic, probability, Ramsey type graph, Sampling methods, Santa Claus Problem},
isbn = {978-1-4244-8525-3},
doi = {10.1109/FOCS.2010.45},
author = {Haeupler, B. and Saha, B. and Srinivasan, Aravind}
}
@inproceedings{15495,
title = {Towards Dynamic Adaptive Automated Test Generation for Graphical User Interfaces},
booktitle = {Software Testing, Verification and Validation Workshops, 2009. ICSTW {\textquoteright}09. International Conference on},
year = {2009},
month = apr,
pages = {263--266},
abstract = {Graphical user interfaces (GUIs) present an enormous number of potential event sequences to users. During testing it is necessary to cover this space, however the complexity of modern GUIs has made this an increasingly difficult task. Our past work has demonstrated that it is important to incorporate ``context'' into GUI test cases, in terms of event combinations, event sequence length, and by considering all possible starting and ending positions for each event. Despite the use of our most refined modeling techniques, many of the generated test cases remain unexecutable. In this paper, we posit that due to the dynamic state-based nature of GUIs, it is important to incorporate feedback from the execution of tests into test case generation algorithms. We propose the use of an evolutionary algorithm to generate test suites with fewer unexecutable test cases and higher event interaction coverage.},
keywords = {adaptive automated test generation, computational complexity, event sequence length, evolutionary algorithm, evolutionary computation, graphical user interface, Graphical user interfaces, GUI test case, program testing},
doi = {10.1109/ICSTW.2009.26},
author = {Xun Yuan and Cohen, M. B. and Memon, Atif M.}
}
@inproceedings{13369,
title = {Flow Algorithms for Parallel Query Optimization},
booktitle = {IEEE 24th International Conference on Data Engineering, 2008. ICDE 2008},
year = {2008},
month = apr,
pages = {754--763},
publisher = {IEEE},
organization = {IEEE},
abstract = {We address the problem of minimizing the response time of a multi-way join query using pipelined (inter-operator) parallelism, in a parallel or a distributed environment. We observe that in order to fully exploit the parallelism in the system, we must consider a new class of ``interleaving'' plans, where multiple query plans are used simultaneously to minimize the response time of a query (or to maximize the tuple-throughput of the system). We cast the query planning problem in this environment as a ``flow maximization problem'', and present polynomial-time algorithms that (statically) find the optimal set of plans to use for a given query, for a large class of multi-way join queries. Our proposed algorithms also naturally extend to query optimization over web services. Finally we present an extensive experimental evaluation that demonstrates both the need to consider such plans in parallel query processing and the effectiveness of our algorithms.},
keywords = {Casting, computational complexity, Cost function, Databases, Delay, distributed environment, Educational institutions, flow maximization algorithm, Interleaved codes, interoperator parallelism, minimisation, multiway join query response time minimization problem, parallel database, Parallel databases, parallel query optimization, Partitioning algorithms, pipeline processing, pipelined parallelism, polynomial-time algorithm, query planning problem, Query processing, Web service, Web services},
isbn = {978-1-4244-1836-7},
doi = {10.1109/ICDE.2008.4497484},
author = {Deshpande, Amol and Hellerstein, L.}
}
@inproceedings{14563,
title = {Algorithmic graph minor theory: Decomposition, approximation, and coloring},
booktitle = {Foundations of Computer Science, 2005. FOCS 2005. 46th Annual IEEE Symposium on},
year = {2005},
month = oct,
pages = {637--646},
abstract = {At the core of the seminal graph minor theory of Robertson and Seymour is a powerful structural theorem capturing the structure of graphs excluding a fixed minor. This result is used throughout graph theory and graph algorithms, but is existential. We develop a polynomial-time algorithm using topological graph theory to decompose a graph into the structure guaranteed by the theorem: a clique-sum of pieces almost-embeddable into bounded-genus surfaces. This result has many applications. In particular we show applications to developing many approximation algorithms, including a 2-approximation to graph coloring, constant-factor approximations to treewidth and the largest grid minor, combinatorial polylogarithmic approximation to half-integral multicommodity flow, subexponential fixed-parameter algorithms, and PTASs for many minimization and maximization problems, on graphs excluding a fixed minor.},
keywords = {algorithmic graph minor theory, approximation algorithm, combinatorial polylogarithmic approximation, computational complexity, constant-factor approximation, graph algorithm, graph coloring, graph colouring, half-integral multicommodity flow, largest grid minor, maximization problem, minimization problem, polynomial-time algorithm, subexponential fixed-parameter algorithm, topological graph theory, treewidth},
doi = {10.1109/SFCS.2005.14},
author = {Demaine, E. D. and Hajiaghayi, Mohammad T. and Kawarabayashi, K.}
}
@inproceedings{12388,
title = {{CASPER}: an integrated energy-driven approach for task graph scheduling on distributed embedded systems},
booktitle = {Application-Specific Systems, Architecture Processors, 2005. ASAP 2005. 16th IEEE International Conference on},
year = {2005},
month = jul,
pages = {191--197},
abstract = {For multiprocessor embedded systems, the dynamic voltage scaling (DVS) technique can be applied to scheduled applications for energy reduction. DVS utilizes slack in the schedule to slow down processes and save energy. Therefore, it is generally believed that the maximal energy saving is achieved on a schedule with the minimum makespan (maximal slack). Most current approaches treat task assignment, scheduling, and DVS separately. In this paper, we present a framework called CASPER (combined assignment, scheduling, and power-management) that challenges this common belief by integrating task scheduling and DVS under a single iterative optimization loop via genetic algorithm. We have conducted extensive experiments to validate the energy efficiency of CASPER. For homogeneous multiprocessor systems (in which all processors are of the same type), we consider a recently proposed slack distribution algorithm (PDP-SPM) by S. Hua and G. Qu (2005): applying PDP-SPM on the schedule with the minimal makespan gives an average of 53.8\% energy saving; CASPER finds schedules with slightly larger makespan but a 57.3\% energy saving, a 7.8\% improvement. For heterogeneous systems, we consider the power variation DVS (PV-DVS) algorithm by Schmitz et al. (2004), CASPER improves its energy efficiency by 8.2\%. Finally, our results also show that the proposed single loop CASPER framework saves 23.3\% more energy over GMA+EE-GLSA by Schmitz et al. (2002), the only other known integrated approach with a nested loop that combines scheduling and power management in the inner loop but leaves assignment in the outer loop.},
keywords = {CASPER, combined-assignment-scheduling-and-power-management, computational complexity, distributed system, dynamic voltage scaling, embedded systems, energy reduction, genetic algorithm, Genetic algorithms, graph theory, homogeneous multiprocessor system, maximal energy saving, Multiprocessing systems, multiprocessor embedded system, optimization loop, power management, power variation, Processor scheduling, slack distribution algorithm, task assignment, task graph scheduling},
doi = {10.1109/ASAP.2005.23},
author = {Kianzad, V. and Bhattacharyya, Shuvra S. and Qu, Gang}
}
@inproceedings{16944,
title = {A Rank-by-Feature Framework for Unsupervised Multidimensional Data Exploration Using Low Dimensional Projections},
booktitle = {IEEE Symposium on Information Visualization, 2004. INFOVIS 2004},
year = {2004},
month = oct,
pages = {65--72},
publisher = {IEEE},
organization = {IEEE},
abstract = {Exploratory analysis of multidimensional data sets is challenging because of the difficulty in comprehending more than three dimensions. Two fundamental statistical principles for the exploratory analysis are (1) to examine each dimension first and then find relationships among dimensions, and (2) to try graphical displays first and then find numerical summaries (D. S. Moore, 1999). We implement these principles in a novel conceptual framework called the rank-by-feature framework. In the framework, users can choose a ranking criterion interesting to them and sort 1D or 2D axis-parallel projections according to the criterion. We introduce the rank-by-feature prism that is a color-coded lower-triangular matrix that guides users to desired features. Statistical graphs (histogram, boxplot, and scatterplot) and information visualization techniques (overview, coordination, and dynamic query) are combined to help users effectively traverse 1D and 2D axis-parallel projections, and finally to help them interactively find interesting features},
keywords = {axis-parallel projections, boxplot, color-coded lower-triangular matrix, computational complexity, computational geometry, Computer displays, Computer science, Computer vision, Data analysis, data mining, data visualisation, Data visualization, Displays, dynamic query, Educational institutions, exploratory data analysis, feature detection, feature detection/selection, Feature extraction, feature selection, graph theory, graphical displays, histogram, Information Visualization, interactive systems, Laboratories, Multidimensional systems, Principal component analysis, rank-by-feature prism, scatterplot, statistical analysis, statistical graphics, statistical graphs, unsupervised multidimensional data exploration, very large databases},
isbn = {0-7803-8779-3},
doi = {10.1109/INFVIS.2004.3},
author = {Seo, J. and Shneiderman, Ben}
}
@inproceedings{17583,
title = {Dependent rounding in bipartite graphs},
booktitle = {The 43rd Annual IEEE Symposium on Foundations of Computer Science, 2002. Proceedings},
year = {2002},
month = nov,
pages = {323--332},
publisher = {IEEE},
organization = {IEEE},
abstract = {We combine the pipage rounding technique of Ageev \& Sviridenko with a recent rounding method developed by Srinivasan (2001), to develop a new randomized rounding approach for fractional vectors defined on the edge-sets of bipartite graphs. We show various ways of combining this technique with other ideas, leading to the following applications: richer random-graph models for graphs with a given degree-sequence; improved approximation algorithms for: (i) throughput-maximization in broadcast scheduling, (ii) delay-minimization in broadcast scheduling, and (iii) capacitated vertex cover; fair scheduling of jobs on unrelated parallel machines. A useful feature of our method is that it lets us prove certain (probabilistic) per-user fairness properties.},
keywords = {Application software, Approximation algorithms, bipartite graph, bipartite graphs, broadcast channels, broadcast scheduling, Broadcasting, capacitated vertex cover, Character generation, computational complexity, Computer science, Delay, edge-sets, Educational institutions, fair scheduling, fractional vectors, graph theory, per-user fairness properties, pipage rounding technique, Processor scheduling, Random variables, random-graph models, randomized rounding approach, rounding method, scheduling, Scheduling algorithm, telecommunication computing, unrelated parallel machines},
isbn = {0-7695-1822-2},
doi = {10.1109/SFCS.2002.1181955},
author = {Gandhi, R. and Khuller, Samir and Parthasarathy, S. and Srinivasan, Aravind}
}
@inproceedings{18666,
title = {Building dependable distributed applications using {AQuA}},
booktitle = {Proceedings of the 4th IEEE International Symposium on High-Assurance Systems Engineering (HASE)},
year = {1999},
month = nov,
pages = {189--196},
abstract = {Building dependable distributed systems using ad hoc methods is a challenging task. Without proper support, an application programmer must face the daunting requirement of having to provide fault tolerance at the application level, in addition to dealing with the complexities of the distributed application itself. This approach requires a deep knowledge of fault tolerance on the part of the application designer, and has a high implementation cost. What is needed is a systematic approach to providing dependability to distributed applications. Proteus, part of the AQuA architecture, fills this need and provides facilities to make a standard distributed CORBA application dependable, with minimal changes to an application. Furthermore, it permits applications to specify, either directly or via the Quality Objects (QuO) infrastructure, the level of dependability they expect of a remote object, and will attempt to configure the system to achieve the requested dependability level. Our previous papers have focused on the architecture and implementation of Proteus. This paper describes how to construct dependable applications using the AQuA architecture, by describing the interface that a programmer is presented with and the graphical monitoring facilities that it provides},
keywords = {ad hoc methods, application programmer, AQuA, complexities, computational complexity, dependable distributed applications, distributed CORBA application, distributed object management, Fault tolerance, fault tolerant computing, proteus},
doi = {10.1109/HASE.1999.809494},
author = {Ren, J. and Cukier, Michel and Rubel, P. and Sanders, W. H. and Bakken, D. E. and Karr, D. A.}
}
@inproceedings{14621,
title = {Hardness of flip-cut problems from optical mapping [{DNA} molecules application]},
booktitle = {Compression and Complexity of Sequences 1997. Proceedings},
year = {1997},
month = jun,
pages = {275--284},
publisher = {IEEE},
organization = {IEEE},
abstract = {Optical mapping is a new technology for constructing restriction maps. Associated computational problems include aligning multiple partial restriction maps into a single ``consensus'' restriction map, and determining the correct orientation of each molecule, which was formalized as the exclusive binary flip cut (EBFC) problem by Muthukrishnan and Parida (see Proc. of the First ACM Conference on Computational Molecular Biology (RECOMB), Santa Fe, p.209-19, 1997). Here, the authors prove that the EBFC problem, as well as a number of its variants, are NP-complete. They also identify another problem formalized as binary shift cut (BSC) problem motivated by the fact that there might be missing fragments at the beginnings and/or the ends of the molecules, and prove it to be NP-complete. Therefore, they do not have efficient, that is, polynomial time solutions unless P=NP},
keywords = {binary shift cut, Biochemistry, Bioinformatics, biological techniques, Biology, biomedical optical imaging, combinatorial mathematics, combinatorial problem, computational complexity, computational problems, DNA, DNA molecule, exclusive binary flip cut, flip-cut problem hardness, Genetic engineering, Genomics, MATHEMATICS, molecular biology, molecular biophysics, molecule orientation, multiple partial restriction maps, NP-complete problem, optical mapping, polynomial time solutions, Polynomials, sequences},
isbn = {0-8186-8132-2},
doi = {10.1109/SEQUEN.1997.666922},
author = {Dancik, V. and Hannenhalli, Sridhar and Muthukrishnan, S.}
}
@inproceedings{17607,
title = {Improved approximations for edge-disjoint paths, unsplittable flow, and related routing problems},
booktitle = {38th Annual Symposium on Foundations of Computer Science, 1997. Proceedings},
year = {1997},
month = oct,
pages = {416--425},
publisher = {IEEE},
organization = {IEEE},
abstract = {We present improved approximation algorithms for a family of problems involving edge-disjoint paths and unsplittable flow, and for some related routing problems. The central theme of all our algorithms is the underlying multi-commodity flow relaxation},
keywords = {Approximation algorithms, Bandwidth, Channel allocation, computational complexity, Computer science, edge-disjoint paths, graph theory, High speed integrated circuits, IEL, Image motion analysis, Information systems, multi-commodity flow relaxation, Multiprocessor interconnection networks, network routing, Optical fiber networks, Routing, routing problems, unsplittable flow},
isbn = {0-8186-8197-7},
doi = {10.1109/SFCS.1997.646130},
author = {Srinivasan, Aravind}
}
@inproceedings{12799,
title = {Efficient model checking via the equational {$\mu$}-calculus},
booktitle = {Eleventh Annual IEEE Symposium on Logic in Computer Science, 1996. LICS {\textquoteright}96. Proceedings},
year = {1996},
month = jul,
pages = {304--312},
publisher = {IEEE},
organization = {IEEE},
abstract = {This paper studies the use of an equational variant of the modal $\mu$-calculus as a unified framework for efficient temporal logic model checking. In particular we show how an expressive temporal logic, CTL*, may be efficiently translated into the $\mu$-calculus. Using this translation, one may then employ $\mu$-calculus model-checking techniques, including on-the-fly procedures, BDD-based algorithms and compositional model-checking approaches, to determine if systems satisfy formulas in CTL*},
keywords = {Automata, BDD-based algorithms, Boolean functions, Calculus, compositional model-checking approaches, computational complexity, Computer science, CTL*, Data structures, Encoding, equational variant, equational $\mu$-calculus, Equations, expressive temporal logic, Logic, Maintenance, modal $\mu$-calculus, Model checking, on-the-fly procedures, Surges, temporal logic, temporal logic model checking, unified framework},
isbn = {0-8186-7463-6},
doi = {10.1109/LICS.1996.561358},
author = {Bhat, G. and Cleaveland, Rance}
}
@inproceedings{12787,
title = {Efficient on-the-fly model checking for {CTL*}},
booktitle = {Tenth Annual IEEE Symposium on Logic in Computer Science, 1995. LICS {\textquoteright}95. Proceedings},
year = {1995},
month = jun,
pages = {388--397},
publisher = {IEEE},
organization = {IEEE},
abstract = {This paper gives an on-the-fly algorithm for determining whether a finite-state system satisfies a formula in the temporal logic CTL*. The time complexity of our algorithm matches that of the best existing ``global algorithm'' for model checking in this logic, and it performs as well as the best known global algorithms for the sublogics CTL and LTL. In contrast with these approaches, however, our routine constructs the state space of the system under consideration in a need-driven fashion and will therefore perform better in practice},
keywords = {Algorithm design and analysis, Automata, computational complexity, Computer science, CTL, Encoding, finite automata, finite-state system, global algorithm, Logic, LTL, on-the-fly model checking, Performance analysis, Safety, State-space methods, sublogic, temporal logic, time complexity},
isbn = {0-8186-7050-9},
doi = {10.1109/LICS.1995.523273},
author = {Bhat, G. and Cleaveland, Rance and Grumberg, O.}
}
@inproceedings{17660,
title = {Splitters and near-optimal derandomization},
booktitle = {36th Annual Symposium on Foundations of Computer Science, 1995. Proceedings},
year = {1995},
month = oct,
pages = {182--191},
publisher = {IEEE},
organization = {IEEE},
abstract = {We present a fairly general method for finding deterministic constructions obeying what we call k-restrictions; this yields structures of size not much larger than the probabilistic bound. The structures constructed by our method include (n,k)-universal sets (a collection of binary vectors of length n such that for any subset of size k of the indices, all $2^k$ configurations appear) and families of perfect hash functions. The near-optimal constructions of these objects imply the very efficient derandomization of algorithms in learning, of fixed-subgraph finding algorithms, and of near optimal $\Sigma\Pi\Sigma$ threshold formulae. In addition, they derandomize the reduction showing the hardness of approximation of set cover. They also yield deterministic constructions for a local-coloring protocol, and for exhaustive testing of circuits},
keywords = {Boosting, Circuit testing, computational complexity, computational linguistics, Computer science, Contracts, derandomization, deterministic constructions, Educational institutions, Engineering profession, exhaustive testing, fairly general method, fixed-subgraph finding algorithms, hardness of approximation, Information systems, k-restrictions, learning, local-coloring protocol, MATHEMATICS, near-optimal constructions, near-optimal derandomization, Parallel algorithms, probabilistic bound, probability, Protocols, randomised algorithms, Set cover, splitters},
isbn = {0-8186-7183-1},
doi = {10.1109/SFCS.1995.492475},
author = {Naor, M. and Schulman, L. J. and Srinivasan, Aravind}
}
@inproceedings{17577,
title = {Computing with very weak random sources},
booktitle = {35th Annual Symposium on Foundations of Computer Science, 1994. Proceedings},
year = {1994},
month = nov,
pages = {264--275},
publisher = {IEEE},
organization = {IEEE},
abstract = {For any fixed $\varepsilon > 0$, we show how to simulate RP algorithms in time $n^{O(\log n)}$ using the output of a $\delta$-source with min-entropy $R^{\varepsilon}$. Such a weak random source is asked once for $R^{\varepsilon}$ bits; it outputs an $R$-bit string such that any string has probability at most $2^{-R^{\varepsilon}}$. If $\varepsilon > 1 - 1/(k+1)$, our BPP simulations take time $n^{O(\log^{(k)} n)}$ ($\log^{(k)}$ is the logarithm iterated $k$ times). We also give a polynomial-time BPP simulation using Chor-Goldreich sources of min-entropy $R^{\Omega(1)}$, which is optimal. We present applications to time-space tradeoffs, expander constructions, and the hardness of approximation. Also of interest is our randomness-efficient Leftover Hash Lemma, found independently by Goldreich and Wigderson},
keywords = {Application software, BPP simulations, Chor-Goldreich sources, computational complexity, Computational modeling, Computer science, Computer simulation, cryptography, distributed algorithms, expander constructions, hardness, MATHEMATICS, min-entropy, Physics computing, Polynomials, probability, R-bit string, randomness-efficient Leftover Hash Lemma, RP algorithms simulation, Testing, time-space tradeoffs, very weak random sources},
isbn = {0-8186-6580-7},
doi = {10.1109/SFCS.1994.365688},
author = {Srinivasan, Aravind and Zuckerman, D.}
}
@inproceedings{14579,
title = {A {SIMD} solution to the sequence comparison problem on the {MGAP}},
booktitle = {International Conference on Application Specific Array Processors, 1994. Proceedings},
year = {1994},
month = aug,
pages = {336--345},
publisher = {IEEE},
organization = {IEEE},
abstract = {Molecular biologists frequently compare an unknown biosequence with a set of other known biosequences to find the sequence which is maximally similar, with the hope that what is true of one sequence, either physically or functionally, could be true of its analogue. Even though efficient dynamic programming algorithms exist for the problem, when the size of the database is large, the time required is quite long, even for moderate length sequences. In this paper, we present an efficient pipelined SIMD solution to the sequence alignment problem on the Micro-Grain Array Processor (MGAP), a fine-grained massively parallel array of processors with nearest-neighbor connections. The algorithm compares $K$ sequences of length $O(M)$ with the actual sequence of length $N$, in $O(M+N+K)$ time with $O(MN)$ processors, which is AT-optimal. The implementation on the MGAP computes at the rate of about 0.1 million comparisons per second for sequences of length 128},
keywords = {AT-optimal algorithm, Biological information theory, biology computing, biosequence comparison problem, computational complexity, Computer science, Costs, database size, Databases, DNA computing, dynamic programming, dynamic programming algorithms, fine-grained massively parallel processor array, Genetics, Heuristic algorithms, maximally similar sequence, MGAP parallel computer, Micro-Grain Array Processor, Military computing, molecular biology, molecular biophysics, Nearest neighbor searches, nearest-neighbor connections, Parallel algorithms, pipeline processing, pipelined SIMD solution, sequence alignment problem, sequences},
isbn = {0-8186-6517-3},
doi = {10.1109/ASAP.1994.331791},
author = {Borah, M. and Bajwa, R. S. and Hannenhalli, Sridhar and Irwin, M. J.}
}
@inproceedings{14576,
title = {A distributed algorithm for ear decomposition},
booktitle = {Fifth International Conference on Computing and Information, 1993. Proceedings ICCI {\textquoteright}93},
year = {1993},
month = may,
pages = {180--184},
publisher = {IEEE},
organization = {IEEE},
abstract = {A distributed algorithm for finding an ear decomposition of an asynchronous communication network with $n$ nodes and $m$ links is presented. At the completion of the algorithm either the ears are correctly labeled or the nodes are informed that there exists no ear decomposition. First we present a novel algorithm to check the existence of an ear decomposition which uses $O(m)$ messages. We also present two other algorithms, one which is time-optimal and the other which is message-optimal to determine the actual ears and their corresponding numbers after determining the existence of an ear decomposition},
keywords = {Asynchronous communication, asynchronous communication network, Automata, Communication networks, computational complexity, Computer networks, Computer science, decomposition graph, distributed algorithm, distributed algorithms, Distributed computing, Ear, ear decomposition, graph theory, message-optimal, network decomposition, sorting, Testing, time-optimal},
isbn = {0-8186-4212-2},
doi = {10.1109/ICCI.1993.315382},
author = {Hannenhalli, Sridhar and Perumalla, K. and Chandrasekharan, N. and Sridhar, R.}
}
@article{14997,
title = {On the difficulty of {Manhattan} channel routing},
journal = {Information Processing Letters},
volume = {44},
year = {1992},
month = dec,
pages = {281--284},
abstract = {We show that channel routing in the Manhattan model remains difficult even when all nets are single-sided. Given a set of $n$ single-sided nets, we consider the problem of determining the minimum number of tracks required to obtain a dogleg-free routing. In addition to showing that the decision version of the problem is NP-complete, we show that there are problems requiring at least $d + \Omega(n)$ tracks, where $d$ is the density. This existential lower bound does not follow from any of the known lower bounds in the literature.},
keywords = {combinatorial problems, computational complexity, lower bounds, VLSI channel routing},
issn = {0020-0190},
doi = {10.1016/0020-0190(92)90214-G},
url = {http://www.sciencedirect.com/science/article/pii/002001909290214G},
author = {Greenberg, Ronald and JaJa, Joseph F. and Krishnamurthy, Sridhar}
}
@inproceedings{14587,
title = {Efficient algorithms for computing matching and chromatic polynomials on series-parallel graphs},
booktitle = {Fourth International Conference on Computing and Information, 1992. Proceedings. ICCI {\textquoteright}92},
year = {1992},
month = may,
pages = {42--45},
publisher = {IEEE},
organization = {IEEE},
abstract = {The authors present efficient algorithms for computing the matching polynomial and chromatic polynomial of a series-parallel graph in $O(n^3)$ and $O(n^2)$ time respectively. Their algorithm for computing the matching polynomial generalizes an existing result from Lov{\'a}sz, Plummer (1986) and the chromatic polynomial algorithm improves the result given by Hunt, Ravi, Stearn (1988) from $O(n^4)$ time},
keywords = {chromatic polynomials, computational complexity, Computer science, graph colouring, graph theory, matching polynomial, Polynomials, series-parallel graphs, Terminology, Tree data structures, Tree graphs},
isbn = {0-8186-2812-X},
doi = {10.1109/ICCI.1992.227709},
author = {Chandrasekharan, N. and Hannenhalli, Sridhar}
}
@inproceedings{18145,
title = {Towards a theory of nearly constant time parallel algorithms},
booktitle = {Foundations of Computer Science, 1991. Proceedings., 32nd Annual Symposium on},
year = {1991},
month = oct,
pages = {698--710},
abstract = {It is demonstrated that randomization is an extremely powerful tool for designing very fast and efficient parallel algorithms. Specifically, a running time of $O(\lg^* n)$ (nearly-constant), with high probability, is achieved using $n / \lg^* n$ (optimal speedup) processors for a wide range of fundamental problems. Also given is a constant time algorithm which, using $n$ processors, approximates the sum of $n$ positive numbers to within an error which is smaller than the sum by an order of magnitude. A variety of known and new techniques are used. New techniques, which are of independent interest, include estimation of the size of a set in constant time for several settings, and ways for deriving superfast optimal algorithms from superfast nonoptimal ones},
keywords = {computational complexity, Estimation, nearly constant time parallel algorithms, Parallel algorithms, positive numbers, randomization, running time, superfast optimal algorithms},
doi = {10.1109/SFCS.1991.185438},
author = {Gil, J. and Matias, Y. and Vishkin, Uzi}
}