peer-reviewed-conf.bib

@comment{{This file has been generated by bib2bib 1.92}}
@comment{{Command line: bib2bib -ob peer-reviewed-conf.bib -oc /dev/null -c 'type : "peer-reviewed-conference"' publications.bib}}
@inproceedings{azimi-etal:icml2012,
  type = {peer-reviewed-conference},
  author = {Javad Azimi and Alan Fern and Xiaoli Z. Fern and Glencora Borradaile and Brent Heeringa},
  booktitle = {Proceedings of the 29th International Conference on Machine Learning (ICML)},
  year = 2012,
  title = {\url{./icml2012.pdf}{Batch Active Learning via Coordinated Matching}},
  abstract = {We propose a novel batch active learning method that leverages the availability of high-quality and efficient sequential active-learning policies by approximating their behavior when applied for $k$ steps. Specifically, our algorithm uses Monte-Carlo simulation to estimate the distribution of unlabeled examples selected by a sequential policy over $k$ steps. The algorithm then selects the $k$ examples that best match this distribution, leading to a combinatorial optimization problem that we term ``bounded coordinated matching.'' While we show this problem is NP-hard, we give an efficient greedy solution, which inherits approximation bounds from supermodular minimization theory. Experiments on eight benchmark datasets show that the proposed approach is highly effective.}
}
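@comment{{A minimal Python sketch of the two-phase idea in azimi-etal:icml2012:
Monte-Carlo simulation estimates how often a sequential active-learning policy
would select each unlabeled example over k steps, and a batch is then chosen
against that empirical distribution.  The policy interface and the simple
top-k matching rule are assumptions for illustration; the paper's ``bounded
coordinated matching'' objective is a richer combinatorial problem that the
abstract shows to be NP-hard.

from collections import Counter

def estimate_selection_distribution(policy, unlabeled, k, trials=100):
    # Empirical frequency with which the (possibly randomized) sequential
    # policy selects each unlabeled example during its first k steps.
    counts = Counter()
    for _ in range(trials):
        pool = list(unlabeled)
        for _ in range(k):
            x = policy(pool)   # hypothetical: returns one example from pool
            counts[x] += 1
            pool.remove(x)
    total = k * trials
    return dict((x, c / total) for x, c in counts.items())

def greedy_batch(unlabeled, policy, k, trials=100):
    # Crude matching step: take the k examples carrying the most simulated
    # mass.  This flattens the combinatorial structure the paper studies,
    # but conveys the overall shape of the method.
    dist = estimate_selection_distribution(policy, unlabeled, k, trials)
    return sorted(dist, key=dist.get, reverse=True)[:k]
}}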
@inproceedings{berardi-et-al:cccg2011,
  type = {peer-reviewed-conference},
  author = {Matthew Berardi and Brent Heeringa and Justin Malestein and Louis Theran},
  booktitle = {Proceedings of the 23rd Annual Canadian Conference on Computational Geometry (CCCG)},
  year = 2011,
  title = {\url{./cccg.pdf}{Rigid Components in Fixed-Lattice and Cone Frameworks}},
  abstract = {We study the fundamental algorithmic rigidity problems for generic frameworks
periodic with respect to a fixed lattice or a finite-order rotation in the plane.
For fixed-lattice frameworks we give an $O(n^2)$ algorithm for deciding generic rigidity and
an $O(n^3)$ algorithm for computing rigid components.  If the order of rotation
is part of the input, we give an $O(n^4)$ algorithm for deciding rigidity; in the case
where the rotation's order is $3$, a more specialized algorithm solves all the
fundamental algorithmic rigidity problems in $O(n^2)$ time.}
}
@inproceedings{byers-etal:analco2011,
  type = {peer-reviewed-conference},
  author = {John Byers and \url{http://www.cs.williams.edu/~heeringa}{Brent Heeringa} and Michael Mitzenmacher and Giorgos Zervas},
  booktitle = {Proceedings of the 8th Annual Workshop on Analytic Algorithmics and Combinatorics (ANALCO)},
  title = {\url{./heap.pdf}{Heapable Sequences and Subsequences}},
  year = 2011,
  abstract = { Let us call a sequence of numbers heapable if they can
                  be sequentially inserted to form a binary tree with
                  the heap property, where each insertion subsequent
                  to the first occurs at a previously placed
                  number. In this paper we consider a variety of
                  problems related to heapable sequences and
                  subsequences that do not appear to have been studied
                  previously. Our motivation for introducing these
                  concepts is two-fold. First, such problems
                  correspond to natural extensions of the well-known
                  secretary problem for hiring an organization with a
                  hierarchical structure. Second, from a purely
                  combinatorial perspective, our problems are
                  interesting variations on similar longest increasing
                  subsequence problems, a problem paradigm that has
                  led to many deep mathematical connections.  
   
                  We provide several basic results. We obtain an
                  efficient algorithm for determining the heapability
                  of a sequence, and also prove that the
                  question of whether a sequence can be arranged in a
                  complete binary heap is NP-hard. Regarding
                  subsequences we show that, with high probability,
                  the longest heapable subsequence of a random
                  permutation of $n$ numbers has length $(1-o(1))n$, and
                  a subsequence of length $(1-o(1))n$ can in fact be
                  found online with high probability. We similarly
                  show that for a random permutation a subsequence
                  that yields a perfect heap of size $\alpha n$ for a constant
                  $\alpha$ can be found with high probability. Our work
                  highlights the interesting structure underlying this
                  class of subsequence problems, and we leave many
                  further interesting variations open for future work.
                   }
}
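@comment{{A minimal Python sketch of one natural greedy test for heapability,
using the definition in byers-etal:analco2011 above: each new number must
attach below a previously placed number that is no larger (min-heap order),
and every placed number opens two child slots.  The greedy rule of consuming
the largest open slot that is still at most x (keeping the more flexible
small slots for later) is justified by a standard exchange argument, but it
is an assumption of this sketch, not necessarily the paper's exact algorithm.

import bisect

def is_heapable(seq):
    if not seq:
        return True
    slots = []                         # sorted labels of open child slots
    root, rest = seq[0], seq[1:]
    slots.extend([root, root])         # the root opens two slots
    for x in rest:
        i = bisect.bisect_right(slots, x)   # slots[:i] have labels <= x
        if i == 0:
            return False               # no placed number <= x has a free slot
        slots.pop(i - 1)               # greedily use the largest feasible slot
        bisect.insort(slots, x)        # placing x opens two slots labeled x
        bisect.insort(slots, x)
    return True

# E.g. is_heapable([1, 2, 3]) is True, while is_heapable([3, 2, 1]) is False:
# neither 2 nor 1 can sit below the root 3 in min-heap order.
}}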
@inproceedings{gerbush-heeringa:ciaa2010,
  type = {peer-reviewed-conference},
  author = {Michael Gerbush and \url{http://www.cs.williams.edu/~heeringa}{Brent Heeringa}},
  booktitle = {Proceedings of the 15th International Conference on Implementation and Application of Automata (CIAA)},
  title = {\url{./ciaa.pdf}{Approximating Minimum Reset Sequences}},
  year = 2010,
  abstract = {We consider the problem of finding minimum reset
                  sequences in synchronizing automata.  The well-known
                  Cerny conjecture states that {\em every $n$-state
                  synchronizing automaton has a reset sequence with
                  length at most $(n-1)^{2}$}.  While this conjecture
                  gives an upper bound on the length of every reset
                  sequence, it does not directly address the problem
                  of finding the {\em shortest} reset sequence.  We
                  call this the \mrs (\MRS) problem.  We give an
                  $O(kmn^{k}+n^{4}/k)$-time $\lceil \frac{n-1}{k-1}
                  \rceil$-approximation for the \MRS problem for any
                  $k \geq 2$.  We also show that our analysis is
                  tight.  When $k=2$ our algorithm reduces to
                  Eppstein's algorithm and yields an
                  $(n-1)$-approximation.  When $k=n$ our algorithm is
                  the familiar exponential-time, exact algorithm.  We
                  define a non-trivial class of \MRS which we call
                  \stackcover.  We show that \stackcover naturally
                  generalizes two classic optimization problems: {\sc
                  min} \setcover and \scs.  Both these problems are
                  known to be hard to approximate, although at
                  present, \setcover has a slightly stronger lower
                  bound.  In particular, it is $\mathbf{NP}$-hard to
                  approximate \setcover to within a factor of $c \cdot
                  \log n$ for some $c>0$.  Thus, the \mrs problem is
                  at least as hard to approximate as \setcover.  This
                  improves the previous best lower bound which showed
                  that it was $\mathbf{NP}$-hard to approximate the \MRS on
                  binary alphabets to within any constant factor.  Our
                  result requires an alphabet of arbitrary size.  }
}
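@comment{{A minimal Python sketch of the k = 2 case referenced in
gerbush-heeringa:ciaa2010 above (Eppstein-style greedy): while more than one
state survives, BFS in the pair automaton for a shortest word merging two of
the surviving states, then apply that word to the whole set.  The automaton
encoding (delta[state][letter] -> state) is an assumption of this sketch.

from collections import deque

def merge_word(delta, p, q):
    # Shortest word w with delta(p, w) == delta(q, w), found by BFS over
    # the pair automaton.
    seen = dict()
    seen[(p, q)] = []
    queue = deque([(p, q)])
    while queue:
        a, b = queue.popleft()
        if a == b:
            return seen[(a, b)]
        for letter in delta[a]:
            nxt = (delta[a][letter], delta[b][letter])
            if nxt not in seen:
                seen[nxt] = seen[(a, b)] + [letter]
                queue.append(nxt)
    return None                        # this pair can never be merged

def apply_word(delta, s, w):
    for letter in w:
        s = delta[s][letter]
    return s

def greedy_reset_sequence(delta):
    states = set(delta)
    word = []
    while len(states) > 1:
        p, q = sorted(states)[:2]      # any two surviving states
        w = merge_word(delta, p, q)
        if w is None:
            return None                # the automaton is not synchronizing
        word += w
        states = set(apply_word(delta, s, w) for s in states)
    return word
}}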
@inproceedings{borradaile-et-al:arxiv2009,
  type = {peer-reviewed-conference},
  author = {Glencora Borradaile and \url{http://www.cs.williams.edu/~heeringa}{Brent Heeringa} and Gordon Wilfong},
  title = {\url{./knapsack.pdf}{The 1-Neighbour Knapsack Problem}},
  notes = {\url{http://arxiv.org/abs/0910.0777}{arXiv version}},
  year = {2011},
  booktitle = {Proceedings of the 22nd International Workshop on Combinatorial Algorithms (IWOCA)},
  abstract = {We study a constrained version of the knapsack problem in
                  which dependencies between items are given by the
                  adjacencies of a graph.  In the {\em 1-neighbour
                  knapsack problem}, an item can be selected only if
                  at least one of its neighbours is also selected. We
                  give approximation algorithms and hardness results
                  for both uniform and arbitrary node weight and
                  profit functions, and for both directed and
                  undirected dependency graphs.}
}
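@comment{{A minimal Python sketch of the constraint defined in the abstract of
borradaile-et-al:arxiv2009 above: in the 1-neighbour knapsack problem a
chosen set is feasible only if every chosen item has at least one chosen
neighbour.  The adjacency-list encoding is an assumption of this sketch, and
the paper additionally distinguishes directed from undirected dependency
graphs (for a digraph, neighbours[v] would hold v's out-neighbours).

def is_valid_selection(selected, neighbours):
    # Literal reading of the abstract: every chosen item needs at least one
    # chosen neighbour, so no item can be chosen in isolation.
    chosen = set(selected)
    return all(any(u in chosen for u in neighbours[v]) for v in chosen)

def selection_value(selected, profit, weight, capacity):
    # Total profit of a selection that also respects the knapsack capacity.
    if sum(weight[v] for v in selected) > capacity:
        return None
    return sum(profit[v] for v in selected)
}}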
@inproceedings{heeringa-et-al:wads2011,
  type = {peer-reviewed-conference},
  author = {Brent Heeringa and
               Marius Catalin Iordan and
               Louis Theran},
  title = {\url{./lltree.pdf}{Searching in Dynamic Tree-Like Partial Orders}},
  booktitle = {Proceedings of the 12th Algorithms and Data Structures
                  Symposium (WADS; formerly the Workshop on Algorithms
                  and Data Structures)},
  year = {2011},
  notes = {\url{http://arxiv.org/abs/1010.1316}{arXiv version}},
  bibsource = {DBLP, http://dblp.uni-trier.de},
  abstract = {We give the first data structure for the problem of
                  maintaining a dynamic set of $n$ elements drawn from
                  a partially ordered universe described by a tree.
                  We define the {\sc Line-Leaf Tree}, a linear-sized
                  data structure that supports the operations: {\em
                  insert; delete; test membership; and predecessor}.
                  The performance of our data structure is within an
                  $O(\log w)$-factor of optimal. Here $w \leq n$ is
                  the width of the partial order---a natural obstacle
                  in searching a partial order.  }
}
@inproceedings{adler-heeringa:dt,
  type = {peer-reviewed-conference},
  author = {Micah Adler and \url{http://www.cs.williams.edu/~heeringa}{Brent Heeringa}},
  title = {\url{./approx2008.pdf}{Approximating Optimal Binary Decision Trees}},
  booktitle = {Proceedings of the 11th International Workshop on Approximation Algorithms for Combinatorial Optimization Problems (APPROX)},
  year = 2008,
  pages = {1--9},
  abstract = {We give a $(\ln n +1)$-approximation for the decision
  tree (DT) problem.  An instance of DT is a set of $m$ binary tests
  $T=(T_{1}, \ldots, T_{m})$ and a set of $n$ items $X=(X_{1}, \ldots,
  X_{n})$.  The goal is to output a binary tree where each internal
  node is a test, each leaf is an item and the total external path
  length of the tree is minimized.  Total external path length is the
  sum of the depths of all the leaves in the tree.  DT has a long
  history in computer science with applications ranging from medical
  diagnosis to experiment design.  It also generalizes the problem of
  finding optimal average-case search strategies in partially ordered
  sets which includes several alphabetic tree problems.  Our work
  decreases the previous upper bound on the approximation ratio by a
  constant factor.  We provide a new analysis of the greedy algorithm
  that uses a simple accounting scheme to spread the cost of a tree
  among pairs of items split at a particular node.  We conclude by
  showing that our upper bound also holds for the DT problem with
  weighted tests.}
}
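@comment{{A minimal Python sketch of the greedy algorithm analyzed in
adler-heeringa:dt above: at each node pick the binary test that splits the
surviving items most evenly (equivalently, the test separating the most
pairs, the pairs to which the paper's accounting scheme charges the cost),
and recurse on both sides.  Modeling each test as the set of items that pass
it is an encoding assumed for this sketch.

def greedy_dt(items, tests):
    if len(items) <= 1:
        return ("leaf", items[0] if items else None)
    live = set(items)
    best = max(tests, key=lambda t: min(len(live & t), len(live - t)))
    yes = [x for x in items if x in best]
    no = [x for x in items if x not in best]
    if not yes or not no:
        raise ValueError("no remaining test distinguishes these items")
    return ("node", best, greedy_dt(yes, tests), greedy_dt(no, tests))

def external_path_length(tree, depth=0):
    # Sum of leaf depths: the objective the abstract asks to minimize.
    if tree[0] == "leaf":
        return depth
    return (external_path_length(tree[2], depth + 1) +
            external_path_length(tree[3], depth + 1))
}}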
@inproceedings{adler-heeringa:tamc2008,
  type = {peer-reviewed-conference},
  author = {Micah Adler and \url{http://www.cs.williams.edu/~heeringa}{Brent Heeringa}},
  title = {\url{./heeringa-tamc.pdf}{Search Space Reductions for Nearest-Neighbor Queries}},
  booktitle = {Proceedings of the 5th Annual Conference on the Theory and Application of Models of Computation (TAMC)},
  year = {2008},
  pages = {554--567},
  abstract = {The vast number of applications featuring multimedia and
  geometric data has made the R-tree a ubiquitous data structure in
  databases.  A popular and fundamental operation on R-trees is
  nearest neighbor search.  While nearest neighbor on R-trees has
  received considerable experimental attention, it has received
  somewhat less theoretical consideration.  We study pruning
  heuristics for nearest neighbor queries on R-trees.  Our primary
  result is the construction of non-trivial families of R-trees where
  $k$-nearest neighbor queries based on pessimistic (i.e. min-max)
  distance estimates provide exponential speedup over queries based
  solely on optimistic (i.e. min) distance estimates.  The exponential
  speedup holds even when $k=1$.  This result provides strong
  theoretical evidence that min-max distance heuristics are an
  essential component of depth-first nearest-neighbor queries.  In
  light of this, we also consider the time-space tradeoffs of
  depth-first versus best-first nearest neighbor queries and construct
  a family of R-trees where best-first search performs exponentially
  better than depth-first search even when depth-first employs min-max
  distance heuristics.}
}
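@comment{{A minimal Python sketch of the pruning contrast discussed in
adler-heeringa:tamc2008 above, on a simplified R-tree: a subtree is visited
only when its optimistic bound (min distance to its bounding box) does not
already exceed the best pessimistic bound available.  The pessimistic
estimate used here, the distance to a box's farthest corner, is a
deliberately crude stand-in for the tighter min-max (MINMAXDIST) estimate of
real R-trees, and the node encoding is an assumption of this sketch.

import math

def min_dist(q, box):
    # Optimistic: distance from point q to the nearest point of the box,
    # where box = (lo, hi) gives the corner coordinates per dimension.
    lo, hi = box
    return math.dist(q, [min(max(qi, l), h) for qi, l, h in zip(q, lo, hi)])

def max_dist(q, box):
    # Pessimistic: distance from q to the farthest corner of the box; any
    # object inside the box is at least this close.
    lo, hi = box
    return math.dist(q, [l if abs(qi - l) > abs(qi - h) else h
                         for qi, l, h in zip(q, lo, hi)])

def nn_depth_first(q, node, best=(math.inf, None)):
    # node = ("leaf", box, points) or ("inner", box, children); every box is
    # assumed non-empty, which is what makes max_dist a valid upper bound.
    kind, box, contents = node
    if kind == "leaf":
        for p in contents:
            best = min(best, (math.dist(q, p), tuple(p)))
        return best
    children = sorted(contents, key=lambda c: min_dist(q, c[1]))
    bound = min(max_dist(q, c[1]) for c in children)  # pessimistic cutoff
    for child in children:
        if min_dist(q, child[1]) <= min(best[0], bound):
            best = nn_depth_first(q, child, best)
    return best
}}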
@inproceedings{heeringa-adler:icalp2004,
  type = {peer-reviewed-conference},
  author = {\url{http://www.cs.williams.edu/~heeringa}{Brent Heeringa} and Micah Adler},
  title = {\url{./heeringa-icalp2004.pdf}{Optimal Website Design with the Constrained Subtree Selection Problem}},
  booktitle = {Proceedings of the 31st Annual International Colloquium
               on Automata, Languages and Programming (ICALP)},
  year = {2004},
  notes = {A complete version of this paper is available as
           \url{./04-09.pdf}{University of Massachusetts Technical Report 04-09}},
  abstract = {We introduce the Constrained Subtree Selection (CSS) problem as a
model for the optimal design of websites.  Given a hierarchy of topics
represented as a DAG $G$ and a probability distribution over the
topics, we select a subtree of the transitive closure of $G$ which
minimizes the expected path cost.  We define path cost as the sum of
the page costs along a path from the root to a leaf.  Page cost,
$\gamma$, is a function of the number of links on a page.  We give a
sufficient condition for $\gamma$ which makes CSS NP-complete.  This
result holds even for the uniform probability distribution.  We give a
polynomial-time algorithm for instances of CSS where $G$ does not
constrain the choice of subtrees and $\gamma$ favors pages with at
most $k$ links.  We show that CSS remains NP-hard for constant-degree
DAGs and a class of degree costs, and give an $O(\log(k)\gamma(d+1))$
approximation for any $\gamma$ favoring pages with at most $k$ links
and for any $G$ with maximum degree $d$.  We give complete
characterizations of the optimal trees for the linear degree cost in
unconstrained graphs and uniform probability distributions, and the
logarithmic degree cost in arbitrary DAGs and uniform probability
distributions.}
}
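@comment{{A minimal Python sketch of the objective defined in
heeringa-adler:icalp2004 above: the expected path cost of a candidate tree
sums the page costs $\gamma$ (a function of a page's link count) along each
root-to-leaf path, weighted by the leaf's probability.  The tree encoding is
an assumption of this sketch.

def expected_path_cost(tree, gamma, prob, cost_so_far=0.0):
    # tree = ("leaf", topic) or ("page", children); prob maps leaf topics to
    # their probabilities and gamma maps a link count to a page cost.
    if tree[0] == "leaf":
        return prob[tree[1]] * cost_so_far
    children = tree[1]
    here = cost_so_far + gamma(len(children))   # pay for this page's links
    return sum(expected_path_cost(c, gamma, prob, here) for c in children)

# E.g. the linear degree cost charges each page its number of links:
# expected_path_cost(tree, lambda d: d, prob)
}}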
@inproceedings{cohen-heeringa-adams:icdm2002,
  type = {peer-reviewed-conference},
  author = {Paul R. Cohen and \url{http://www.cs.williams.edu/~heeringa}{Brent Heeringa} and Niall Adams},
  title = {\url{./icdm2002.pdf}{An Unsupervised Algorithm for
           Segmenting Categorical 
           Time Series into Episodes}},
  booktitle = {Proceedings of the 2002 IEEE International 
               Conference on Data Mining (ICDM 2002)},
  pages = {99--106},
  year = {2002},
  notes = {An earlier version is available in the
           Working Notes of the 2002 ESF Exploratory
           Workshop on Pattern Detection and Discovery in Data Mining},
  abstract = {This paper describes an unsupervised algorithm for segmenting
              categorical time series into episodes.  The
              Voting-Experts algorithm first collects statistics
              about the frequency and boundary entropy of ngrams, then
              passes a window over the series and has two ``expert
              methods'' decide where in the window boundaries should
              be drawn.  The algorithm successfully segments text
              into words in four languages.  The algorithm also
              segments time series of robot sensor data into
              subsequences that represent episodes in the life of the
              robot.  We claim that {\sc Voting-Experts} finds
              meaningful episodes in categorical time series because
              it exploits two statistical characteristics of
              meaningful episodes.}
}
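@comment{{A simplified Python sketch of the windowed voting scheme described
in cohen-heeringa-adams:icdm2002 above: after collecting n-gram frequency and
boundary-entropy statistics, a window slides over the series; inside each
window one expert votes for the split after the most (length-standardized)
frequent prefix and the other for the split with the highest boundary
entropy, and boundaries are placed where the vote count peaks.  Window size,
standardization, and tie-breaking here are simplifying assumptions, not the
paper's exact experts.

from collections import Counter, defaultdict
from math import log2

def ngram_stats(series, max_n):
    freq, follow = Counter(), defaultdict(Counter)
    for n in range(1, max_n + 1):
        for i in range(len(series) - n + 1):
            gram = tuple(series[i:i + n])
            freq[gram] += 1
            if i + n < len(series):
                follow[gram][series[i + n]] += 1
    return freq, follow

def boundary_entropy(follow, gram):
    total = sum(follow[gram].values())
    if total == 0:
        return 0.0
    return -sum(c / total * log2(c / total) for c in follow[gram].values())

def standardize(scores):
    # z-score within each n-gram length so different lengths are comparable.
    by_len = defaultdict(list)
    for g, s in scores.items():
        by_len[len(g)].append(s)
    stats = dict()
    for n, vals in by_len.items():
        mean = sum(vals) / len(vals)
        std = (sum((v - mean) ** 2 for v in vals) / len(vals)) ** 0.5
        stats[n] = (mean, std if std > 0 else 1.0)
    return dict((g, (s - stats[len(g)][0]) / stats[len(g)][1])
                for g, s in scores.items())

def voting_experts(series, window=4):
    freq, follow = ngram_stats(series, window)
    ent = dict((g, boundary_entropy(follow, g)) for g in freq)
    zfreq, zent = standardize(freq), standardize(ent)
    votes = Counter()
    for i in range(len(series) - window + 1):
        chunk = series[i:i + window]
        f = max(range(1, window), key=lambda j: zfreq[tuple(chunk[:j])])
        e = max(range(1, window), key=lambda j: zent[tuple(chunk[:j])])
        votes[i + f] += 1              # frequency expert's vote
        votes[i + e] += 1              # boundary-entropy expert's vote
    return sorted(p for p in list(votes)
                  if votes[p] > votes[p - 1] and votes[p] > votes[p + 1])
}}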
@inproceedings{oates-heeringa:icgi2002,
  type = {peer-reviewed-conference},
  author = {Tim Oates and \url{http://www.cs.williams.edu/~heeringa}{Brent Heeringa}},
  title = {\url{./oates-heeringa-icgi2002.pdf}{Estimating Grammar Parameters using Bounded Memory}},
  booktitle = {Proceedings of the Sixth International
               Colloquium on Grammatical Inference (ICGI)},
  pages = {185--198},
  editor = {P. Adriaans and H. Fernau and M. van Zaanen},
  date = {January 2002},
  year = {2002},
  publisher = {Springer-Verlag Heidelberg},
  abstract = {Estimating the parameters of stochastic context-free grammars 
(SCFGs) from data is an important, well-studied problem.  Almost without
exception, existing approaches make repeated passes over the training
data.  The memory requirements of such algorithms are ill-suited for
embedded agents exposed to large amounts of training data over long
periods of time.  We present a novel algorithm, called HOLA, for
estimating the parameters of SCFGs that computes summary statistics
for each string as it is observed and then discards the string.  The
memory used by HOLA is bounded by the size of the grammar, not by the
amount of training data.  Empirical results show that HOLA performs
as well as the Inside-Outside algorithm on a variety of standard
problems, despite the fact that it has access to much less
information.}
}
@inproceedings{king-et-al:wsc2002,
  type = {peer-reviewed-conference},
  author = {Gary W. King and \url{http://www.cs.williams.edu/~heeringa}{Brent Heeringa} and Joe Catalano
            and David L. Westbrook and Paul Cohen},
  title = {\url{./wsc2002.pdf}{Models of Defeat}},
  booktitle = {Proceedings of the 2002 Winter Simulation Conference},
  pages = {928--934},
  year = {2002},
  notes = {An earlier version appears in the Proceedings of
           the Second International Conference on Knowledge
           Systems for Coalition Operations, pages 85--90},
  abstract = {Coalition operations are increasingly effects-based, 
which means they apply force only as necessary to achieve political
and psychological effects. Capture the Flag is a war gaming environment
that includes intelligent, autonomous adversaries.  Our planner
previously focused on two goals: occupying objectives and
attrition. But attrition is a means to the defeat of one's enemies,
not an end in itself. For Capture the Flag to plan for defeat, it
needs a model of defeat. We model the ``capacity for conflict'' as a
leaky bucket: when a unit's bucket is full, it has no more capacity
for conflict and it capitulates. Flow into and out of the bucket is
modulated by several factors including attrition and heroism. The
model is inherently dynamical, so it exhibits the time-dependent behaviors one
observes in real conflicts; for example, identical
attacks will have different effects on capitulation as a function of their
timing.}
}
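@comment{{A minimal Python sketch of the leaky-bucket model of capitulation
described in king-et-al:wsc2002 above: stressors such as attrition pour into
the bucket, recovery and heroism let it leak out, and the unit capitulates
when the bucket fills.  Because the bucket drains between attacks, identical
attacks have different effects depending on their timing, which is exactly
the time-dependence the abstract highlights.  The specific rates and inflow
terms here are illustrative assumptions, not the paper's calibrated model.

class CapacityForConflict:
    def __init__(self, capacity=100.0, leak_rate=1.0):
        self.level = 0.0               # current fill of the bucket
        self.capacity = capacity       # capitulation threshold
        self.leak_rate = leak_rate     # baseline recovery per time unit

    def step(self, attrition=0.0, heroism=0.0, dt=1.0):
        # Flow in (attrition) and out (recovery plus heroism) over dt.
        inflow = attrition * dt
        outflow = (self.leak_rate + heroism) * dt
        self.level = max(0.0, self.level + inflow - outflow)
        return self.level >= self.capacity   # True once the unit capitulates
}}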
@inproceedings{atkin-et-al:aa2001,
  type = {peer-reviewed-conference},
  author = {Marc Atkin and Gary W. King and David Westbrook and \url{http://www.cs.williams.edu/~heeringa}{Brent Heeringa} and Andrew Hannon and Paul Cohen},
  title = {\url{./agents2001.pdf}{Hierarchical Agent Control: 
           A Framework for Defining Agent Behavior}},
  booktitle = {Proceedings of the Fifth International
               Conference on Autonomous Agents},
  pages = {425--432},
  year = {2001},
  abstract = {The Hierarchical Agent Control Architecture (HAC) is a general toolkit for specifying an agent's behavior.  HAC supports action abstraction, resource management, sensor integration, and is well suited to controlling large numbers of agents in dynamic environments.  It relies on three hierarchies:  action, sensor, and context.  The action hierarchy controls the agent's behavior.  It is organized around tasks to be accomplished, not the agents themselves.  This facilitates the integration of multi-agent actions and planning into the architecture.  The sensor hierarchy provides a principled means for structuring the complexity of reading and transforming sensor information.  Each level of the hierarchy integrates the data coming in from the environment into conceptual chunks appropriate for use by actions at this level.  Actions and sensors are written using the same formalism.  The context hierarchy is a hierarchy of goals.  In addition to their primary goals, most actions are simultaneously trying to achieve a set of implicit assumptions.  These assumptions are made explicit through the context hierarchy and provide the context under which the agent operates.  We have developed a planner, GRASP, implemented within HAC, which is capable of resolving multiple goals in real time.

HAC was intended to have wide applicability.  It has been used to control agents in commercial computer games and physical robots.  Our primary application domain is a simulator of land-based military engagements called ``Capture the Flag.''  HAC's simulation substrate models physics at an abstract level.  HAC supports any domain in which behaviors can be reduced to a small set of primitive effectors such as MOVE and APPLY-FORCE.  At this time defining agent behavior requires Lisp programming skills; we are moving towards more graphical programming languages.}
}
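@comment{{A minimal Python skeleton of the three hierarchies named in
atkin-et-al:aa2001 above -- action, sensor, and context -- as they might be
organized in code.  Class names, fields, and methods are illustrative
assumptions about HAC's structure, not its actual (Lisp) API.

class Sensor:
    # Each level integrates readings from the level below into conceptual
    # chunks sized for the actions at its own level.
    def __init__(self, children=()):
        self.children = list(children)

    def read(self, world):
        return self.integrate([c.read(world) for c in self.children], world)

    def integrate(self, readings, world):
        return readings                # subclasses refine the raw readings

class Context:
    # A goal plus the implicit assumptions an action must keep satisfied.
    def __init__(self, goal, assumptions=()):
        self.goal = goal
        self.assumptions = list(assumptions)

    def violated(self, world):
        return any(not holds(world) for holds in self.assumptions)

class Action:
    # Organized around tasks rather than agents; a parent task schedules
    # child actions, which is what lets multi-agent actions plug in.
    def __init__(self, context, sensors=(), children=()):
        self.context = context
        self.sensors = list(sensors)
        self.children = list(children)

    def step(self, world):
        if self.context.violated(world):
            return "abort"             # an implicit assumption failed
        for child in self.children:
            child.step(world)
        return "ok"
}}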
@inproceedings{heeringa-cohen:wsc2000,
  type = {peer-reviewed-conference},
  author = {Brent Heeringa and Paul R. Cohen},
  title = {\url{./wsc2000.pdf}{An Underlying Model for Defeat Mechanisms}},
  booktitle = {Proceedings of the 2000 Winter Simulation Conference (WSC 2000)},
  pages = {933--939},
  year = {2000},
  notes = {An earlier version appears in Working Notes of the \
           2000 AAAI Fall Symposium on Simulating Human Agents.\  
           Pages 39--45.},
  abstract = {Defeat mechanisms are strategies for achieving victory over an
opponent.  Although defeat mechanisms often rely on influencing the
opponent psychologically and emotionally, most simulations of warfare
do not model these ``soft'' factors; they model only victory by
attrition.  To create more accurate, adaptable, and believable
systems, we must be able to model a variety of defeat mechanisms.  We
propose a model where parameters and attributes that affect emotional
and physical fatigue are combined to produce an overall measure of
fatigue called effective fatigue.  Effective fatigue, along with an
agent's state, is combined by a defeat model to produce probabilities
of surrender.  We create warfare scenarios involving catastrophe and
surprise, and then examine the model's behavior under these scenarios.
We conclude with a discussion of how the model is related to our own
Capture the Flag war gaming system.}
}
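@comment{{A minimal Python sketch of the model structure described in
heeringa-cohen:wsc2000 above: physical and emotional fatigue terms combine
into an effective fatigue, which, together with the agent's state, yields a
probability of surrender.  The weighted-sum combination and the logistic
mapping are illustrative assumptions; the paper's actual functional forms
may differ.

from math import exp

def effective_fatigue(physical, emotional, w_phys=0.5, w_emot=0.5):
    # Combine the fatigue-affecting factors into a single measure.
    return w_phys * physical + w_emot * emotional

def surrender_probability(fatigue, state_modifier=0.0, steepness=6.0):
    # Map effective fatigue (roughly 0..1) plus agent state to a surrender
    # probability; more fatigue, more likely to capitulate.
    return 1.0 / (1.0 + exp(-steepness * (fatigue + state_modifier - 0.5)))
}}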