fixed compilation errors

dp2041 [2004-04-17 08:17:29]
Filename
paper/ai2tv.bib
paper/ai2tv.tex
diff --git a/paper/ai2tv.bib b/paper/ai2tv.bib
index d559177..ecb1e85 100644
--- a/paper/ai2tv.bib
+++ b/paper/ai2tv.bib
@@ -1,586 +1,586 @@
-%% ------------------------------------------------------------------- %%
-%% Collaborative Education
-%% ------------------------------------------------------------------- %%
-@ARTICLE{BELLER,
-	AUTHOR = "Michal Beller and Ehud Or",
-	TITLE = "The crossroads between lifelong learning and information technology: A challenge facing leading universities",
-	JOURNAL = {Journal of Computer Mediated Communication},
-	VOLUME = {4(2)},
-	YEAR = {1999}
-}
-
-@ARTICLE{BURGESS,
-	AUTHOR = "Dr. Lesta A. Burgess and Dr. Shawn D. Strong",
-	TITLE = "Trends in Online Education: Case Study at Southwest Missouri State University",
-	JOURNAL = {Journal of Industrial Teacher Education},
-	VOLUME = {19(3)},
-	YEAR = {2003}
-}
-
-@article{ BAQAI,
-    AUTHOR = "Shabab Baqai and M. Farrukh Khan and Miae Woo and Seiichi Shinkai and Ashfaq A. Khokhar and Arif Ghafoor",
-    TITLE = "Quality-Based Evaluation of Multimedia Synchronization Protocols for Distributed Multimedia Information Systems",
-    JOURNAL = "IEEE Journal of Selected Areas in Communications",
-    VOLUME = "14",
-    NUMBER = "7",
-    PAGES = "1388-1403",
-    YEAR = "1996"
-}
-
-@INPROCEEDINGS{CHOI,
-	AUTHOR = "Jeong-Dan Choi and Ki-Jong Byun and Byung-Tae Jang and Chi-Jeong Hwang",
-	TITLE = "A synchronization method for real time surround display using clustered systems",
-	BOOKTITLE = "Proceedings of the ACM MultiMedia 2002",
-	PAGES = "259-262",
-	YEAR = {2002}
-}
-
-@Book{CONWAY2000,
-    AUTHOR = {Yao Wang and Jörn Ostermann and Ya-Qin Zhang},
-    TITLE = {Video Processing and Communications},
-    PUBLISHER = {Prentice Hall},
-    MONTH = {September},
-    YEAR = {2002},
-    ADDRESS = {Connecticut, USA},
-    ISBN = {ISBN 0-13-017547-1}
-}
-
-@article{CORTE,
-    author = "Aurelio La Corte and Alfio Lombardo and Sergio Palazzo and Giovanni Schembra",
-    title = "Control of Perceived Quality of Service in Multimedia Retrieval Services: Prediction-Based Mechanism vs. Compensation Buffers",
-    journal = "Multimedia Systems",
-    volume = "6",
-    number = "2",
-    pages = "102-112",
-    year = "1998",
-    url = "citeseer.ist.psu.edu/lacorte98control.html"
-}
-
-@ARTICLE{DOE,
-	AUTHOR = "US Department of Education, NCFES",
-	TITLE = "Distance education of postsecondary education institutions: 1997-1998",
-	JOURNAL = {NCES},
-	YEAR = {1999}
-}
-
-@INPROCEEDINGS{GUERRI,
-	AUTHOR = "Juan Carlos Guerri and Carlos Palau and Ana Pajares and Manuel Esteve",
-	TITLE = "A real-time e-learning system via satellite based on JMF and Windows Media",
-	BOOKTITLE = "Proceedings of the ACM MultiMedia 2002",
-	PAGES = "219-222",
-	YEAR = {2002}
-}
-
-%% shows stats that students like collaboration
-@Article{WELLS,
-   author="John G. Wells",
-   title="Effects Of An On-Line Computer-Mediated Communication Course",
-   journal=JITE,
-   volume=37(3),
-   year=2002
-}
-
-%   title="Effects Of An On-Line Computer-Mediated Communication
-%    Course, Prior Computer Experience and Internet Knowledge, and
-%    Learning Styles On Students' Internet Attitudes Computer-Mediated
-%    Technologies and New Educational Challenges",
-
-%% ------------------------------------------------------------------- %%
-%% quality of service
-%% ------------------------------------------------------------------- %%
-
-@InProceedings{CUI,
-   author="Yi Cui and Klara Nahrstedt",
-   title="Supporting QoS for ubiquitous multimedia service delivery",
-   journal="Proceedings of the ACM MultiMedia 2002",
-   pages=461-462,
-   year=2001
-}
-
-@InProceedings{LEI,
-   author="Zhijun Lei and Nicolas D. Georganas",
-   title="Rate adaptation transcoding for precoded video streams",
-   journal="Proceedings of the ACM MultiMedia 2002",
-   pages=127-136,
-   year=2002
-}
-
-@InProceedings{LIU2003,
-   author="Jiangchuan Liu and Bo Li and Ya-Qin Zhang",
-   title="Adaptive Video Multicast over the Internet",
-   journal="Proceedings of the 2003 IEEE MultiMedia",
-   pages=22-33,
-   year=2003
-}
-
-@InProceedings{KRASIC,
-   author="Charles Krasic and Jonathan Walpole",
-   title="Priority-progress streaming for quality-adaptive multimedia",
-   journal="Proceedings of the ACM MultiMedia 2001",
-   pages=463-464,
-   year=2001
-}
-
-%% couldn't get this paper
-@InProceedings{MILITZER,
-   author="Michael Militzer and Maciej Suchomski and Klaus Meyer-Wegener",
-   title="Improved p-domain rate control and perceived quality optimizations for MPEG-4 real-time video applications",
-   journal="Proceedings of the ACM MultiMedia 2003",
-   pages=402-411,
-   year=2003
-}
-
-@InProceedings{TAN,
-   author="Kun Tan and Richard Ribier and Shih-Ping Liou",
-   title="Content-sensitive video streaming over low bitrate and lossy wireless network",
-   journal="Proceedings of the ACM MultiMedia 2001",
-   pages=512-515,
-   year=2001
-}
-
-@InProceedings{THAKUR,
-   author="Aruna Thakur and Lenka Carr-Motycokva",
-   title="A dynamic controller for optimal layering of video",
-   journal="Proceedings of the ACM MultiMedia 2002",
-   pages=641-643,
-   year=2002
-}
-
-@InProceedings{WANG,
-   author="Zhiheng Wang and Sujata Banerjee and Sugih Jamin",
-   title="Studying streaming video quality: from an application point of view",
-   journal="Proceedings of the ACM MultiMedia 2003",
-   pages=327-330,
-   year=2003
-}
-
-%% ------------------------------------------------------------------- %%
-%% Semantic compression and video hierarchy related
-%% ------------------------------------------------------------------- %%
-
-@InProceedings{NEUMANN,
-   author="Christoph Neumann and Vincent Roca",
-   title="Multicast streaming of hierarchical MPEG-4 presentations",
-   journal="Proceedings of the ACM MultiMedia 2002",
-   pages=211-214,
-   year=2002
-}
-
-@InProceedings{SHIPMAN,
-   author="Frank Shipman and Andreas Girgensohn and Lynn Wilcox",
-   title="Generation of interactive multi-level video summaries",
-   journal="Proceedings of the ACM MultiMedia 2001",
-   pages=392-401,
-   year=2003
-}
-
-@InProceedings{TIECHENG,
-   author="Tiecheng Liu and John R. Kender",
-   title="Time-Constrained Dynamic Semantic Compression for Video Indexing and Interactive Searching",
-   journal="Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
-   volume=2,
-   pages=531-538,
-   year=2001
-}
-
-@InProceedings{MCCANNE,
-	author="S. McCanne and V. Jacobson and M. Vetterli",
-	title="Receiver-Driven Layered Multicast",
-	journal="Proceedings of the ACM SGCOMM Conference",
-	place="Stanford, Ca. USA",
-	month="August",
-	date="26-30",
-	year=1999
-}
-
-@Article{LI,
-	author="W. Li",
-	title="Overview of the Fine Granularity Scalability in MPEG-4 Video Standard",
-	journal="IEEE Transactions on Circuits and Systems for Video Technology",
-	volume=11,
-	number=3,
-	pages=301-317,
-	Month=March,
-	year=2001
-}
-%% ------------------------------------------------------------------- %%
-%% Shared control
-%% ------------------------------------------------------------------- %%
-
-% collaborative browsing
-@InProceedings{CAPPS,
-   author="Michael Capps and Brian Laddi and David Stotts and Lars Nyland",
-   title="Educational applications of multi-client synchronization through improved Web graph semantics",
-   journal="5th International Workshops on Enabling Technologies: Infrastructure for Collaborative Enterprises",
-   year=1996
-}
-
-@InProceedings{LIAO,
-   author="Chunyuan Liao and Qiong Liu and Don Kimber and Patrick Chiu and Jonathan Foote and Lynn Wilcox",
-   title="Shared interactive video for teleconferencing",
-   journal="Proceedings of the ACM MultiMedia 2003",
-   pages=546-554,
-   year=2003
-}
-
-% collaborative browsing
-@InProceedings{LIEBERMAN,
-   author="Henry Lieberman and Neil Van Dyke and Adriana Vivacqua",
-   title="Let's Browse: A Collaborative Web Browsing Agent",
-   journal="Proceedings of the 1999 International Conference on Intelligent User Interfaces",
-   year=1999
-}
-
-% collaborative browsing
-@InProceedings{SIDLER,
-   author="Gabriel Sidler and Adrew Scott and Heiner Wolf",
-   title="Collaborative Browsing in the World Wide Web",
-   journal="8th Joint European Networking Conference Proceedings",
-   year=1997
-}
-
-@InProceedings{STENZLER,
-   author="Michael K. Stenzler and Richard R. Eckert",
-   title="Interactive Video",
-   journal="ACM Special Interest Group on Computer-Human Interaction",
-   volume=28:2,
-   year=April 1996
-}
-
-@Article{WALTER,
-   author="Thomas Walter and Lukas Ruf and Bernhard Plattner",
-   title="Easy Teach and Learn: A Web-Based Adaptive Middleware for Creating Virtual Classrooms",
-   journal="European High Performance Computing and Networking",
-   year=2000
-}
-
-@InProceedings{VOGEL,
-   author="Jürgen Vogel and Martin Mauve",
-   title="Consistency control for distributed interactive media",
-   journal="Proceedings of the ACM MultiMedia 2001",
-   pages=221-230,
-   year=2001
-}
-
-%% ------------------------------------------------------------------- %%
-%% Synchronization schemes
-%% ------------------------------------------------------------------- %%
-
-@InProceedings{LIENHART,
-   author="Rainer Lienhart and  Igor Kozintsev and Stefan Wehr",
-   title="Universal synchronization scheme for distributed audio-video capture on heterogeneous computing platforms",
-   journal="Proceedings of the ACM MultiMedia 2003",
-   pages=263-266,
-   year=2003
-}
-
-@InProceedings{MIMAZE,
-	author = "Laurent Gautier and Christophe Diot",
-	title = "Design and Evaluation of MiMaze, a Multi-Player Game on the Internet",
-    	booktitle = "International Conference on Multimedia Computing and Systems",
-    	pages = "233-236",
-    	year = "1998"
-}
-
-%% ------------------------------------------------------------------- %%
-%% Misc
-%% ------------------------------------------------------------------- %%
-@InProceedings{CHIME,
-   AUTHOR="Stephen E. Dossick and Gail E. Kaiser",
-   TITLE="CHIME: A Metadata-Based Distributed Software Development Environment",
-   BOOKTITLE="Joint Seventh European Software Engineering Conference and Seventh ACM SIGSOFT International Symposium on the Foundations of Software Engineering",
-   PAGES={464-475},
-   YEAR={1999}
-}
-
-% @Article{COX,
-%    AUTHOR="Andy Cox and Eric Luiijf and Ron van Kampen and Rob Ripley",
-%    TITLE="Time Synchronization Experiments",
-%    JOURNAL="UNKNOWN",
-%    YEAR=2001
-% }
-
-@INPROCEEDINGS{KIM,
-   AUTHOR="Moon Hae Kim and Eun Hwan Jo",
-   TITLE="Global Time-Based Synchronization of Real-Time Multimedia Streaming",
-   JOURNAL="The Ninth IEEE International Workshop on Object-Oriented Real-Time Dependable Systems",
-   YEAR={2003}
-}
-
-@Article{SIENA,
-  author = 	 {Antonio Carzaniga and David S. Rosenblum and Alexander L Wolf},
-  title = 	 {Design and Evaluation of a Wide-Area Event Notification Service},
-  journal = 	 {ACM Transactions on Computer Systems},
-  year = 	 2001,
-  volume = 	 19,
-  number = 	 3,
-  pages = 	 {332--383},
-  month =	 Aug,
-  url =          {http://www.cs.colorado.edu/~carzanig/papers/},
-  abstract =     {The components of a loosely-coupled system are typically
-                  designed to operate by generating and responding to
-                  asynchronous events.  An \emph{event notification
-                  service} is an application-independent
-                  infrastructure that supports the construction of
-                  event-based systems, whereby generators of events
-                  publish event notifications to the infrastructure
-                  and consumers of events subscribe with the
-                  infrastructure to receive relevant notifications.
-                  The two primary services that should be provided to
-                  components by the infrastructure are notification
-                  selection (i.e., determining which notifications
-                  match which subscriptions) and notification delivery
-                  (i.e, routing matching notifications from publishers
-                  to subscribers).  Numerous event notification
-                  services have been developed for local-area
-                  networks, generally based on a centralized server to
-                  select and deliver event notifications.  Therefore,
-                  they suffer from an inherent inability to scale to
-                  wide-area networks, such as the Internet, where the
-                  number and physical distribution of the service's
-                  clients can quickly overwhelm a centralized
-                  solution.  The critical challenge in the setting of
-                  a wide-area network is to maximize the
-                  expressiveness in the selection mechanism without
-                  sacrificing scalability in the delivery mechanism.
-
-                  This paper presents \emph{Siena}, an event
-                  notification service that we have designed and
-                  implemented to exhibit both expressiveness and
-                  scalability.  We describe the service's interface to
-                  applications, the algorithms used by networks of
-                  servers to select and deliver event notifications,
-                  and the strategies used to optimize performance.  We
-                  also present results of simulation studies that
-                  examine the scalability and performance of the
-                  service.}
-}
-
-@INPROCEEDINGS{CHEUNG,
-   AUTHOR="Shun Yan Cheung and Mostafa H. Ammar and Xue Li",
-   TITLE="On the Use of Destination Set Grouping to Improve Fairness in Multicast Video Distribution",
-   BOOKTITLE="Proceedings IEEE INFOCOM '96",
-   PAGES = {553-560},
-   YEAR=1996
-}
-
-@INPROCEEDINGS{LI,
-   AUTHOR="Xue Li and Mostafa H. Ammar and Sanjoy Paul",
-   TITLE="Video Multicast over the Internet",
-   BOOKTITLE="IEEE Network Magazine",
-   VOLUME = {13},
-   NUMBER = {2},
-   PAGES = {46-60},
-   MONTH=April,
-   YEAR=1996
-}
-
-@Article{ACM,
-	author="Gail Kaiser and Janak Parekh and Philip Gross and Giuseppe Valetto",
-	title="Retrofitting Autonomic Capabilities onto Legacy Systems",
-	journal="Journal of Cluster Computing",
-	note="to appear"
-}
-
-@InProceedings{OSTERWEIL,
-	author="A. Wise and A.G. Cass and B. Staudt Lerner and E.K. McCall and L.J. Osterweil and S.M. Sutton Jr.",
-	title="Using Little-JIL to Coordinate Agents in Software Engineering",
-	journal="Automated Software Engineering Conference",
-	month="September",
-	year=2000
-}
-
-@TechReport{VECTORS,
-   author="Suhit Gupta and Gail Kaiser",
-   title="A Virtual Environment for Collaborative Distance Learning With Video Synchronization",
-   journal="CUCS Technical Report CUCS-008-04",
-   year=2004
-   note="\url{http://www.cs.columbia.edu/~library/TR-repository/reports/reports-2004/cucs-008-04.pdf}."
-}
-
-% NEED TO CHECK THESE OUT
-% gud: Golas, K. (2000). Guidelines for Designing Online Learning. [On-Line].
-% gud: Hill, J.R., & Raven, A. (2000). Online Learning Communities: If You Build Them, Will They Stay?. [On-Line].
-% gud: Miller, S.K. (2000). Collaboration at Warp Speed. [On-Line].
-% gud: NPTalk. (2000). Collaborative Virtual Office Tools. [On-Line].
-
-%%----------------------------------
-%% PEPPO Additions for relwork
-%% ----------------------------------
-@ARTICLE{ASP,
-	AUTHOR =	"K. Rothermel, and T. Helbig",
-	TITLE = 	"An Adaptive Protocol for Synchronizing Media Streams",
-	JOURNAL =	"Multimedia Systems",
-	VOLUME = 	{5},
-	PAGES =  	"324-336",
-	YEAR = 		"1997"
-	}
-
-@INPROCEEDINGS{Lancaster,
-	AUTHOR = 	"A. Campell and G. Coulson and F. Garcia and D. Hutchison",
-	TITLE =		"A continuous media transport and orchestration service",
-	booktitle = "{Proceedings of SIGCOMM92: Communications Architectures and Protocols}",
-    pages = "99-110",
-    year = "1992"
-    }
-
-@inproceedings{Clark92,
-    author = "D.D. Clark and S. Shenker and L. Zhang",
-    title = "Supporting Real-Time Applications in an Integrated Services Packet Network: Architecture and Mechanism",
-    booktitle = "{Proceedings of SIGCOMM92: Communications Architectures and Protocols}",
-    pages = "14-26",
-    year = "1992"
-}
-
-@ARTICLE{FSP,
-	AUTHOR = 	"J. Escobar and C. Partridge and D. Deutsch",
-	TITLE = 	"Flow synchronization protocol",
-	JOURNAL =	"IEEE Transactions on Networking",
-	year =		"1994"
-}
-
-@INPROCEEDINGS{GONZALEZ,
-	AUTHOR = 	"A. Gonzalez and H. Adbel-Wahab",
-	TITLE = 	"Lightweight Stream Synchronization Framework for Multimedia Collaborative Applications",
-	BOOKTITLE = "Proceedings of the Fifth IEEE Symposium on Computers and Communications (ISCC 2000)",
-	MONTH =		"July",
-	YEAR		"2000"
-}
-
-@INPROCEEDINGS{LIUSYNC,
-	AUTHOR =	"H. Liu and M. El Zarki",
-	TITLE =		"A Synchronization Control Scheme for Real-time Streaming Multimedia Applications",
-	BOOKTITLE = "Proceedings of Packet Video 2003"
-	CITY =		"Nantes",
-	COUNTRY = 	"France",
-	MONTH =		"April",
-	YEAR = 		"2003"
-}
-
-@INPROCEEDINGS{Concord,
-	AUTHOR = 	"N. Shivakumar and C. Sreenan and B. Narendran and P. Agarwal",
-	TITLE = 	"The Concord algorithm for synchronization of networked multimedia streams",
-	BOOKTITLE = "Proceedings of the IEEE International Conference on Multimedia Computing and Systems",
-	PAGES = 	"31-40",
-	CITY = 		"Washington",
-	COUNTRY = 	"USA",
-	YEAR = 		"1995"
-}
-
-@INPROCEEDINGS{Ferrari,
-    AUTHOR = "D. Ferrari",
-    TITLE = "Design and application of a delay jitter control scheme for packet-switching internetworks",
-    BOOKTITLE = "Proceedings of the second International Conference on Network and Operating System Support for Digital Audio and Video",
-    ADDRESS = "Heidelberg, Germany",
-    YEAR = "1991"
-    }
-
-@INPROCEEDINGS{AMS,
-    AUTHOR = "Gail Kaiser, Janak Parekh, Philip Gross and Giuseppe Valetto",
-    TITLE = "Kinesthetics eXtreme: An External Infrastructure for Monitoring Distributed Legacy Systems",
-    BOOKTITLE = "Fifth Annual International Active Middleware Workshop",
-    MONTH = "June",
-    YEAR = "2003"
-}
-
-@INPROCEEDINGS{ICSE,
-    AUTHOR = "Giuseppe Valetto and Gail Kaiser",
-    TITLE = "Using Process Technology to Control and Coordinate Software Adaptation",
-    BOOKTITLE = "International Conference on Software Engineering (ICSE 2003)",
-    MONTH = "May",
-    YEAR = "2003"
-}
-
-@INPROCEEDINGS{REFARCH,
-    AUTHOR = "Gail Kaiser and Phil Gross and Gaurav Kc and Janak Parekh and Giuseppe Valetto",
-    TITLE = "An Approach to Autonomizing Legacy Systems, in Workshop on Self-Healing, Adaptive and Self-Managed Systems",
-    BOOKTITLE = "Workshop on Self-Healing, Adaptive and Self-Managed Systems",
-    MONTH = "June",
-    YEAR = "2002"
-}
-
-@INPROCEEDINGS{CEN,
-    AUTHOR = "Jonathan Walpole and Rainer Koster and Shanwei Cen and Crispin Cowan and David Maier and Dylan McNamee and Calton Pu and David Steere and Liujin Yu",
-    TITLE = "A Player for Adaptive MPEG Video Streaming Over The Internet",
-    BOOKTITLE = {Proceedings 26th Applied Imagery Pattern Recognition Workshop AIPR-97, SPIE},
-    MONTH = {October 15-17},
-    YEAR = {1997}
-}
-
-@INPROCEEDINGS{LJIL,
-    AUTHOR = "Aaron G. Cass and Barbara Staudt Lerner and Eric K. McCall and Leon J. Osterweil and Stanley M. Sutton and Jr. and Alexander Wise",
-    TITLE = "Little-JIL/Juliette: A Process Definition Language and Interpreter",
-    BOOKTITLE = "Proceedings of the 22nd International Conference on Software Engineering (ICSE 2000)",
-    PAGES = {pp. 754-757},
-    MONTH = "June",
-    YEAR = "2000"
-}
-
-
-@INPROCEEDINGS{RAVAGES,
-    AUTHOR = "Gail Kaiser and Giuseppe Valetto",
-    TITLE = "Ravages of Time: Synchronized Multimedia for Internet-Wide Process-Centered Software Engineering Environments",
-    BOOKTITLE = "Third ICSE Workshop on Software Engineering over the Internet",
-    MONTH = "June",
-    YEAR = "2000",
-    NOTE = "position paper"
-}
-
-
-%%----------------------------------
-%% misc citations, like websites
-%% ----------------------------------
-@misc{NTP,
-   AUTHOR="David L. Mills",
-   TITLE="Network Time Protocol",
-   HOWPUBLISHED="RFC 958",
-   YEAR={1985}
-}
-
-@misc{IBM,
-   title = {International Business Machines (IBM) Research},
-   HOWPUBLISHED="\url{http://www.research.ibm.com/autonomic/}"
-}
-
-@misc{PHOENIX,
-   title = {University of Phoenix},
-   HOWPUBLISHED="\url{http://www.uopxonline.com/}"
-}
-
-@misc{CAPELLA,
-   title = {Capella University},
-   HOWPUBLISHED="\url{http://www.capella.edu/}"
-}
-
-@misc{CVN,
-   title = {Columbia Video Network},
-   HOWPUBLISHED="\url{http://http://www.cvn.columbia.edu/}"
-}
-
-
-@Misc{SCPD,
-   title="Stanford Center for Professional Development ",
-   howpublished="\url{http://scpd.stanford.edu/}"
-}
-
-
-@misc{SHAPERD,
-   author=" Leandro Santi",
-   title="A user-mode traffic shaper for tcp-ip networks.",
-   HOWPUBLISHED="\url{http://freshmeat.net/projects/shaperd/}"
-}
-
-@misc{COUGAAR,
-   title="Cognitive Agent Architecture (Cougaar) Open Source Project",
-   HOWPUBLISHED="\url{http://www.cougaar.org/}"
-}
-
-@misc{WEBEX,
-   title="WebEx: Web Conferencing, Video Conferencing and Online Meeting Services",
-   HOWPUBLISHED="\url{http://www.webex.com/}"
-}
-
-@misc{VNC,
-   title="VNC (Virtual Network Computing)",
-   HOWPUBLISHED="\url{http://www.realvnc.com/}"
+%% ------------------------------------------------------------------- %%
+%% Collaborative Education
+%% ------------------------------------------------------------------- %%
+@ARTICLE{BELLER,
+	AUTHOR = "Michal Beller and Ehud Or",
+	TITLE = "The crossroads between lifelong learning and information technology: A challenge facing leading universities",
+	JOURNAL = {Journal of Computer Mediated Communication},
+	VOLUME = {4(2)},
+	YEAR = {1999}
+}
+
+@ARTICLE{BURGESS,
+	AUTHOR = "Dr. Lesta A. Burgess and Dr. Shawn D. Strong",
+	TITLE = "Trends in Online Education: Case Study at Southwest Missouri State University",
+	JOURNAL = {Journal of Industrial Teacher Education},
+	VOLUME = {19(3)},
+	YEAR = {2003}
+}
+
+@article{ BAQAI,
+    AUTHOR = "Shabab Baqai and M. Farrukh Khan and Miae Woo and Seiichi Shinkai and Ashfaq A. Khokhar and Arif Ghafoor",
+    TITLE = "Quality-Based Evaluation of Multimedia Synchronization Protocols for Distributed Multimedia Information Systems",
+    JOURNAL = "IEEE Journal of Selected Areas in Communications",
+    VOLUME = "14",
+    NUMBER = "7",
+    PAGES = "1388-1403",
+    YEAR = "1996"
+}
+
+@INPROCEEDINGS{CHOI,
+	AUTHOR = "Jeong-Dan Choi and Ki-Jong Byun and Byung-Tae Jang and Chi-Jeong Hwang",
+	TITLE = "A synchronization method for real time surround display using clustered systems",
+	BOOKTITLE = "Proceedings of the ACM MultiMedia 2002",
+	PAGES = "259-262",
+	YEAR = {2002}
+}
+
+@Book{CONWAY2000,
+    AUTHOR = {Yao Wang and Jörn Ostermann and Ya-Qin Zhang},
+    TITLE = {Video Processing and Communications},
+    PUBLISHER = {Prentice Hall},
+    MONTH = {September},
+    YEAR = {2002},
+    ADDRESS = {Upper Saddle River, New Jersey, USA},
+    ISBN = {0-13-017547-1}
+}
+
+@article{CORTE,
+    author = "Aurelio La Corte and Alfio Lombardo and Sergio Palazzo and Giovanni Schembra",
+    title = "Control of Perceived Quality of Service in Multimedia Retrieval Services: Prediction-Based Mechanism vs. Compensation Buffers",
+    journal = "Multimedia Systems",
+    volume = "6",
+    number = "2",
+    pages = "102-112",
+    year = "1998",
+    url = "citeseer.ist.psu.edu/lacorte98control.html"
+}
+
+@ARTICLE{DOE,
+	AUTHOR = "US Department of Education, NCFES",
+	TITLE = "Distance education of postsecondary education institutions: 1997-1998",
+	JOURNAL = {NCES},
+	YEAR = {1999}
+}
+
+@INPROCEEDINGS{GUERRI,
+	AUTHOR = "Juan Carlos Guerri and Carlos Palau and Ana Pajares and Manuel Esteve",
+	TITLE = "A real-time e-learning system via satellite based on JMF and Windows Media",
+	BOOKTITLE = "Proceedings of the ACM MultiMedia 2002",
+	PAGES = "219-222",
+	YEAR = {2002}
+}
+
+%% shows stats that students like collaboration
+@Article{WELLS,
+   author="John G. Wells",
+   title="Effects Of An On-Line Computer-Mediated Communication Course",
+   journal="Journal of Industrial Teacher Education",
+   volume="37(3)",
+   year=2002
+}
+
+%   title="Effects Of An On-Line Computer-Mediated Communication
+%    Course, Prior Computer Experience and Internet Knowledge, and
+%    Learning Styles On Students' Internet Attitudes Computer-Mediated
+%    Technologies and New Educational Challenges",
+
+%% ------------------------------------------------------------------- %%
+%% quality of service
+%% ------------------------------------------------------------------- %%
+
+@InProceedings{CUI,
+   author="Yi Cui and Klara Nahrstedt",
+   title="Supporting QoS for ubiquitous multimedia service delivery",
+   journal="Proceedings of the ACM MultiMedia 2002",
+   pages=461-462,
+   year=2001
+}
+
+@InProceedings{LEI,
+   author="Zhijun Lei and Nicolas D. Georganas",
+   title="Rate adaptation transcoding for precoded video streams",
+   journal="Proceedings of the ACM MultiMedia 2002",
+   pages=127-136,
+   year=2002
+}
+
+@InProceedings{LIU2003,
+   author="Jiangchuan Liu and Bo Li and Ya-Qin Zhang",
+   title="Adaptive Video Multicast over the Internet",
+   journal="Proceedings of the 2003 IEEE MultiMedia",
+   pages=22-33,
+   year=2003
+}
+
+@InProceedings{KRASIC,
+   author="Charles Krasic and Jonathan Walpole",
+   title="Priority-progress streaming for quality-adaptive multimedia",
+   journal="Proceedings of the ACM MultiMedia 2001",
+   pages=463-464,
+   year=2001
+}
+
+%% couldn't get this paper
+@InProceedings{MILITZER,
+   author="Michael Militzer and Maciej Suchomski and Klaus Meyer-Wegener",
+   title="Improved p-domain rate control and perceived quality optimizations for MPEG-4 real-time video applications",
+   journal="Proceedings of the ACM MultiMedia 2003",
+   pages=402-411,
+   year=2003
+}
+
+@InProceedings{TAN,
+   author="Kun Tan and Richard Ribier and Shih-Ping Liou",
+   title="Content-sensitive video streaming over low bitrate and lossy wireless network",
+   journal="Proceedings of the ACM MultiMedia 2001",
+   pages=512-515,
+   year=2001
+}
+
+@InProceedings{THAKUR,
+   author="Aruna Thakur and Lenka Carr-Motycokva",
+   title="A dynamic controller for optimal layering of video",
+   journal="Proceedings of the ACM MultiMedia 2002",
+   pages=641-643,
+   year=2002
+}
+
+@InProceedings{WANG,
+   author="Zhiheng Wang and Sujata Banerjee and Sugih Jamin",
+   title="Studying streaming video quality: from an application point of view",
+   journal="Proceedings of the ACM MultiMedia 2003",
+   pages=327-330,
+   year=2003
+}
+
+%% ------------------------------------------------------------------- %%
+%% Semantic compression and video hierarchy related
+%% ------------------------------------------------------------------- %%
+
+@InProceedings{NEUMANN,
+   author="Christoph Neumann and Vincent Roca",
+   title="Multicast streaming of hierarchical MPEG-4 presentations",
+   journal="Proceedings of the ACM MultiMedia 2002",
+   pages=211-214,
+   year=2002
+}
+
+@InProceedings{SHIPMAN,
+   author="Frank Shipman and Andreas Girgensohn and Lynn Wilcox",
+   title="Generation of interactive multi-level video summaries",
+   journal="Proceedings of the ACM MultiMedia 2001",
+   pages=392-401,
+   year=2003
+}
+
+@InProceedings{TIECHENG,
+   author="Tiecheng Liu and John R. Kender",
+   title="Time-Constrained Dynamic Semantic Compression for Video Indexing and Interactive Searching",
+   journal="Proceedings of the 2001 IEEE Computer Society Conference on Computer Vision and Pattern Recognition",
+   volume=2,
+   pages=531-538,
+   year=2001
+}
+
+@InProceedings{MCCANNE,
+	author="S. McCanne and V. Jacobson and M. Vetterli",
+	title="Receiver-Driven Layered Multicast",
+	journal="Proceedings of the ACM SGCOMM Conference",
+	place="Stanford, Ca. USA",
+	month="August",
+	date="26-30",
+	year=1999
+}
+
+@Article{LI,
+	author="W. Li",
+	title="Overview of the Fine Granularity Scalability in MPEG-4 Video Standard",
+	journal="IEEE Transactions on Circuits and Systems for Video Technology",
+	volume=11,
+	number=3,
+	pages="301-317",
+	month="March",
+	year=2001
+}
+%% ------------------------------------------------------------------- %%
+%% Shared control
+%% ------------------------------------------------------------------- %%
+
+% collaborative browsing
+@InProceedings{CAPPS,
+   author="Michael Capps and Brian Laddi and David Stotts and Lars Nyland",
+   title="Educational applications of multi-client synchronization through improved Web graph semantics",
+   journal="5th International Workshops on Enabling Technologies: Infrastructure for Collaborative Enterprises",
+   year=1996
+}
+
+@InProceedings{LIAO,
+   author="Chunyuan Liao and Qiong Liu and Don Kimber and Patrick Chiu and Jonathan Foote and Lynn Wilcox",
+   title="Shared interactive video for teleconferencing",
+   journal="Proceedings of the ACM MultiMedia 2003",
+   pages=546-554,
+   year=2003
+}
+
+% collaborative browsing
+@InProceedings{LIEBERMAN,
+   author="Henry Lieberman and Neil Van Dyke and Adriana Vivacqua",
+   title="Let's Browse: A Collaborative Web Browsing Agent",
+   journal="Proceedings of the 1999 International Conference on Intelligent User Interfaces",
+   year=1999
+}
+
+% collaborative browsing
+@InProceedings{SIDLER,
+   author="Gabriel Sidler and Adrew Scott and Heiner Wolf",
+   title="Collaborative Browsing in the World Wide Web",
+   journal="8th Joint European Networking Conference Proceedings",
+   year=1997
+}
+
+@InProceedings{STENZLER,
+   author="Michael K. Stenzler and Richard R. Eckert",
+   title="Interactive Video",
+   journal="ACM Special Interest Group on Computer-Human Interaction",
+   volume=28:2,
+   year=April 1996
+}
+
+@Article{WALTER,
+   author="Thomas Walter and Lukas Ruf and Bernhard Plattner",
+   title="Easy Teach and Learn: A Web-Based Adaptive Middleware for Creating Virtual Classrooms",
+   journal="European High Performance Computing and Networking",
+   year=2000
+}
+
+@InProceedings{VOGEL,
+   author="Jürgen Vogel and Martin Mauve",
+   title="Consistency control for distributed interactive media",
+   journal="Proceedings of the ACM MultiMedia 2001",
+   pages=221-230,
+   year=2001
+}
+
+%% ------------------------------------------------------------------- %%
+%% Synchronization schemes
+%% ------------------------------------------------------------------- %%
+
+@InProceedings{LIENHART,
+   author="Rainer Lienhart and  Igor Kozintsev and Stefan Wehr",
+   title="Universal synchronization scheme for distributed audio-video capture on heterogeneous computing platforms",
+   journal="Proceedings of the ACM MultiMedia 2003",
+   pages=263-266,
+   year=2003
+}
+
+@InProceedings{MIMAZE,
+	author = "Laurent Gautier and Christophe Diot",
+	title = "Design and Evaluation of MiMaze, a Multi-Player Game on the Internet",
+    	booktitle = "International Conference on Multimedia Computing and Systems",
+    	pages = "233-236",
+    	year = "1998"
+}
+
+%% ------------------------------------------------------------------- %%
+%% Misc
+%% ------------------------------------------------------------------- %%
+@InProceedings{CHIME,
+   AUTHOR="Stephen E. Dossick and Gail E. Kaiser",
+   TITLE="CHIME: A Metadata-Based Distributed Software Development Environment",
+   BOOKTITLE="Joint Seventh European Software Engineering Conference and Seventh ACM SIGSOFT International Symposium on the Foundations of Software Engineering",
+   PAGES={464-475},
+   YEAR={1999}
+}
+
+% @Article{COX,
+%    AUTHOR="Andy Cox and Eric Luiijf and Ron van Kampen and Rob Ripley",
+%    TITLE="Time Synchronization Experiments",
+%    JOURNAL="UNKNOWN",
+%    YEAR=2001
+% }
+
+@INPROCEEDINGS{KIM,
+   AUTHOR="Moon Hae Kim and Eun Hwan Jo",
+   TITLE="Global Time-Based Synchronization of Real-Time Multimedia Streaming",
+   JOURNAL="The Ninth IEEE International Workshop on Object-Oriented Real-Time Dependable Systems",
+   YEAR={2003}
+}
+
+@Article{SIENA,
+  author = 	 {Antonio Carzaniga and David S. Rosenblum and Alexander L Wolf},
+  title = 	 {Design and Evaluation of a Wide-Area Event Notification Service},
+  journal = 	 {ACM Transactions on Computer Systems},
+  year = 	 2001,
+  volume = 	 19,
+  number = 	 3,
+  pages = 	 {332--383},
+  month =	 Aug,
+  url =          {http://www.cs.colorado.edu/~carzanig/papers/},
+  abstract =     {The components of a loosely-coupled system are typically
+                  designed to operate by generating and responding to
+                  asynchronous events.  An \emph{event notification
+                  service} is an application-independent
+                  infrastructure that supports the construction of
+                  event-based systems, whereby generators of events
+                  publish event notifications to the infrastructure
+                  and consumers of events subscribe with the
+                  infrastructure to receive relevant notifications.
+                  The two primary services that should be provided to
+                  components by the infrastructure are notification
+                  selection (i.e., determining which notifications
+                  match which subscriptions) and notification delivery
+                  (i.e, routing matching notifications from publishers
+                  to subscribers).  Numerous event notification
+                  services have been developed for local-area
+                  networks, generally based on a centralized server to
+                  select and deliver event notifications.  Therefore,
+                  they suffer from an inherent inability to scale to
+                  wide-area networks, such as the Internet, where the
+                  number and physical distribution of the service's
+                  clients can quickly overwhelm a centralized
+                  solution.  The critical challenge in the setting of
+                  a wide-area network is to maximize the
+                  expressiveness in the selection mechanism without
+                  sacrificing scalability in the delivery mechanism.
+
+                  This paper presents \emph{Siena}, an event
+                  notification service that we have designed and
+                  implemented to exhibit both expressiveness and
+                  scalability.  We describe the service's interface to
+                  applications, the algorithms used by networks of
+                  servers to select and deliver event notifications,
+                  and the strategies used to optimize performance.  We
+                  also present results of simulation studies that
+                  examine the scalability and performance of the
+                  service.}
+}
+
+@INPROCEEDINGS{CHEUNG,
+   AUTHOR="Shun Yan Cheung and Mostafa H. Ammar and Xue Li",
+   TITLE="On the Use of Destination Set Grouping to Improve Fairness in Multicast Video Distribution",
+   BOOKTITLE="Proceedings IEEE INFOCOM '96",
+   PAGES = {553-560},
+   YEAR=1996
+}
+
+@ARTICLE{LI99,
+   AUTHOR="Xue Li and Mostafa H. Ammar and Sanjoy Paul",
+   TITLE="Video Multicast over the Internet",
+   JOURNAL="IEEE Network Magazine",
+   VOLUME = {13},
+   NUMBER = {2},
+   PAGES = {46-60},
+   MONTH="April",
+   YEAR=1999
+}
+
+@Article{ACM,
+	author="Gail Kaiser and Janak Parekh and Philip Gross and Giuseppe Valetto",
+	title="Retrofitting Autonomic Capabilities onto Legacy Systems",
+	journal="Journal of Cluster Computing",
+	note="to appear"
+}
+
+@InProceedings{OSTERWEIL,
+	author="A. Wise and A.G. Cass and B. Staudt Lerner and E.K. McCall and L.J. Osterweil and S.M. Sutton Jr.",
+	title="Using Little-JIL to Coordinate Agents in Software Engineering",
+	journal="Automated Software Engineering Conference",
+	month="September",
+	year=2000
+}
+
+@TechReport{VECTORS,
+   author="Suhit Gupta and Gail Kaiser",
+   title="A Virtual Environment for Collaborative Distance Learning With Video Synchronization",
+   journal="CUCS Technical Report CUCS-008-04",
+   year=2004,
+   note="\url{http://www.cs.columbia.edu/~library/TR-repository/reports/reports-2004/cucs-008-04.pdf}"
+}
+
+% NEED TO CHECK THESE OUT
+% gud: Golas, K. (2000). Guidelines for Designing Online Learning. [On-Line].
+% gud: Hill, J.R., & Raven, A. (2000). Online Learning Communities: If You Build Them, Will They Stay?. [On-Line].
+% gud: Miller, S.K. (2000). Collaboration at Warp Speed. [On-Line].
+% gud: NPTalk. (2000). Collaborative Virtual Office Tools. [On-Line].
+
+%%----------------------------------
+%% PEPPO Additions for relwork
+%% ----------------------------------
+@ARTICLE{ASP,
+	AUTHOR =	"K. Rothermel, and T. Helbig",
+	TITLE = 	"An Adaptive Protocol for Synchronizing Media Streams",
+	JOURNAL =	"Multimedia Systems",
+	VOLUME = 	{5},
+	PAGES =  	"324-336",
+	YEAR = 		"1997"
+	}
+
+@INPROCEEDINGS{Lancaster,
+	AUTHOR = 	"A. Campell and G. Coulson and F. Garcia and D. Hutchison",
+	TITLE =		"A continuous media transport and orchestration service",
+	booktitle = "{Proceedings of SIGCOMM92: Communications Architectures and Protocols}",
+    pages = "99-110",
+    year = "1992"
+    }
+
+@inproceedings{Clark92,
+    author = "D.D. Clark and S. Shenker and L. Zhang",
+    title = "Supporting Real-Time Applications in an Integrated Services Packet Network: Architecture and Mechanism",
+    booktitle = "{Proceedings of SIGCOMM92: Communications Architectures and Protocols}",
+    pages = "14-26",
+    year = "1992"
+}
+
+@ARTICLE{FSP,
+	AUTHOR = 	"J. Escobar and C. Partridge and D. Deutsch",
+	TITLE = 	"Flow synchronization protocol",
+	JOURNAL =	"IEEE Transactions on Networking",
+	year =		"1994"
+}
+
+@INPROCEEDINGS{GONZALEZ,
+	AUTHOR = 	"A. Gonzalez and H. Adbel-Wahab",
+	TITLE = 	"Lightweight Stream Synchronization Framework for Multimedia Collaborative Applications",
+	BOOKTITLE = "Proceedings of the Fifth IEEE Symposium on Computers and Communications (ISCC 2000)",
+	MONTH =		"July",
+	YEAR		"2000"
+}
+
+@INPROCEEDINGS{LIUSYNC,
+	AUTHOR =	"H. Liu and M. El Zarki",
+	TITLE =		"A Synchronization Control Scheme for Real-time Streaming Multimedia Applications",
+	BOOKTITLE = "Proceedings of Packet Video 2003"
+	CITY =		"Nantes",
+	COUNTRY = 	"France",
+	MONTH =		"April",
+	YEAR = 		"2003"
+}
+
+@INPROCEEDINGS{Concord,
+	AUTHOR = 	"N. Shivakumar and C. Sreenan and B. Narendran and P. Agarwal",
+	TITLE = 	"The Concord algorithm for synchronization of networked multimedia streams",
+	BOOKTITLE = "Proceedings of the IEEE International Conference on Multimedia Computing and Systems",
+	PAGES = 	"31-40",
+	ADDRESS = 	"Washington, USA",
+	YEAR = 		"1995"
+}
+
+@INPROCEEDINGS{Ferrari,
+    AUTHOR = "D. Ferrari",
+    TITLE = "Design and application of a delay jitter control scheme for packet-switching internetworks",
+    BOOKTITLE = "Proceedings of the second International Conference on Network and Operating System Support for Digital Audio and Video",
+    ADDRESS = "Heidelberg, Germany",
+    YEAR = "1991"
+    }
+
+@INPROCEEDINGS{AMS,
+    AUTHOR = "Gail Kaiser, Janak Parekh, Philip Gross and Giuseppe Valetto",
+    TITLE = "Kinesthetics eXtreme: An External Infrastructure for Monitoring Distributed Legacy Systems",
+    BOOKTITLE = "Fifth Annual International Active Middleware Workshop",
+    MONTH = "June",
+    YEAR = "2003"
+}
+
+@INPROCEEDINGS{ICSE,
+    AUTHOR = "Giuseppe Valetto and Gail Kaiser",
+    TITLE = "Using Process Technology to Control and Coordinate Software Adaptation",
+    BOOKTITLE = "International Conference on Software Engineering (ICSE 2003)",
+    MONTH = "May",
+    YEAR = "2003"
+}
+
+@INPROCEEDINGS{REFARCH,
+    AUTHOR = "Gail Kaiser and Phil Gross and Gaurav Kc and Janak Parekh and Giuseppe Valetto",
+    TITLE = "An Approach to Autonomizing Legacy Systems, in Workshop on Self-Healing, Adaptive and Self-Managed Systems",
+    BOOKTITLE = "Workshop on Self-Healing, Adaptive and Self-Managed Systems",
+    MONTH = "June",
+    YEAR = "2002"
+}
+
+@INPROCEEDINGS{CEN,
+    AUTHOR = "Jonathan Walpole and Rainer Koster and Shanwei Cen and Crispin Cowan and David Maier and Dylan McNamee and Calton Pu and David Steere and Liujin Yu",
+    TITLE = "A Player for Adaptive MPEG Video Streaming Over The Internet",
+    BOOKTITLE = {Proceedings 26th Applied Imagery Pattern Recognition Workshop AIPR-97, SPIE},
+    MONTH = {October 15-17},
+    YEAR = {1997}
+}
+
+@INPROCEEDINGS{LJIL,
+    AUTHOR = "Aaron G. Cass and Barbara Staudt Lerner and Eric K. McCall and Leon J. Osterweil and Stanley M. Sutton and Jr. and Alexander Wise",
+    TITLE = "Little-JIL/Juliette: A Process Definition Language and Interpreter",
+    BOOKTITLE = "Proceedings of the 22nd International Conference on Software Engineering (ICSE 2000)",
+    PAGES = {754-757},
+    MONTH = "June",
+    YEAR = "2000"
+}
+
+
+@INPROCEEDINGS{RAVAGES,
+    AUTHOR = "Gail Kaiser and Giuseppe Valetto",
+    TITLE = "Ravages of Time: Synchronized Multimedia for Internet-Wide Process-Centered Software Engineering Environments",
+    BOOKTITLE = "Third ICSE Workshop on Software Engineering over the Internet",
+    MONTH = "June",
+    YEAR = "2000",
+    NOTE = "position paper"
+}
+
+
+%%----------------------------------
+%% misc citations, like websites
+%% ----------------------------------
+@misc{NTP,
+   AUTHOR="David L. Mills",
+   TITLE="Network Time Protocol",
+   HOWPUBLISHED="RFC 958",
+   YEAR={1985}
+}
+
+@misc{IBM,
+   title = {International Business Machines (IBM) Research},
+   HOWPUBLISHED="\url{http://www.research.ibm.com/autonomic/}"
+}
+
+@misc{PHOENIX,
+   title = {University of Phoenix},
+   HOWPUBLISHED="\url{http://www.uopxonline.com/}"
+}
+
+@misc{CAPELLA,
+   title = {Capella University},
+   HOWPUBLISHED="\url{http://www.capella.edu/}"
+}
+
+@misc{CVN,
+   title = {Columbia Video Network},
+   HOWPUBLISHED="\url{http://http://www.cvn.columbia.edu/}"
+}
+
+
+@Misc{SCPD,
+   title="Stanford Center for Professional Development ",
+   howpublished="\url{http://scpd.stanford.edu/}"
+}
+
+
+@misc{SHAPERD,
+   author=" Leandro Santi",
+   title="A user-mode traffic shaper for tcp-ip networks.",
+   HOWPUBLISHED="\url{http://freshmeat.net/projects/shaperd/}"
+}
+
+@misc{COUGAAR,
+   title="Cognitive Agent Architecture (Cougaar) Open Source Project",
+   HOWPUBLISHED="\url{http://www.cougaar.org/}"
+}
+
+@misc{WEBEX,
+   title="WebEx: Web Conferencing, Video Conferencing and Online Meeting Services",
+   HOWPUBLISHED="\url{http://www.webex.com/}"
+}
+
+@misc{VNC,
+   title="VNC (Virtual Network Computing)",
+   HOWPUBLISHED="\url{http://www.realvnc.com/}"
 }
\ No newline at end of file
diff --git a/paper/ai2tv.tex b/paper/ai2tv.tex
index e769de4..9425117 100644
--- a/paper/ai2tv.tex
+++ b/paper/ai2tv.tex
@@ -1,995 +1,999 @@
-% $RCSfile$
-% $Revision$
-% $Date$
-% $Source$
-%
-%
-% ---------------------------------------------------------------------------
-% TODO:
-%
-% FINAL READ
-% - check for consistent tense
-% - query replace: ai2tv -> $\mathrm{AI}^2$TV
-% - spell check
-%
-%
-% ---------------------------------------------------------------------------
-% This is "sig-alternate.tex" V1.3 OCTOBER 2002
-% This file should be compiled with V1.6 of "sig-alternate.cls" OCTOBER 2002
-%
-% This example file demonstrates the use of the 'sig-alternate.cls'
-% V1.6 LaTeX2e document class file. It is for those submitting
-% articles to ACM Conference Proceedings WHO DO NOT WISH TO
-% STRICTLY ADHERE TO THE SIGS (PUBS-BOARD-ENDORSED) STYLE.
-% The 'sig-alternate.cls' file will produce a similar-looking,
-% albeit, 'tighter' paper resulting in, invariably, fewer pages.
-%
-% ---------------------------------------------------------------------------
-% This .tex file (and associated .cls V1.6) produces:
-%       1) The Permission Statement
-%       2) The Conference (location) Info information
-%       3) The Copyright Line with ACM data
-%       4) NO page numbers
-%
-% as against the acm_proc_article-sp.cls file which
-% DOES NOT produce 1) thru' 3) above.
-%
-% Using 'sig-alternate.cls' you have control, however, from within
-% the source .tex file, over both the CopyrightYear
-% (defaulted to 2002) and the ACM Copyright Data
-% (defaulted to X-XXXXX-XX-X/XX/XX).
-% e.g.
-% \CopyrightYear{2003} will cause 2002 to appear in the copyright line.
-% \crdata{0-12345-67-8/90/12} will cause 0-12345-67-8/90/12 to appear in the
-%  copyright line.
-%
-% ---------------------------------------------------------------------------
-% This .tex source is an example which *does* use
-% the .bib file (from which the .bbl file % is produced).
-% REMEMBER HOWEVER: After having produced the .bbl file,
-% and prior to final submission, you *NEED* to 'insert'
-% your .bbl file into your source .tex file so as to provide
-% ONE 'self-contained' source file.
-%
-% ================= IF YOU HAVE QUESTIONS =======================
-% Questions regarding the SIGS styles, SIGS policies and
-% procedures, Conferences etc. should be sent to
-% Adrienne Griscti (griscti@acm.org)
-%
-% Technical questions _only_ to
-% Gerald Murray (murray@acm.org)
-% ===============================================================
-%
-% For tracking purposes - this is V1.3 - OCTOBER 2002
-\documentclass{sig-alternate}
-\usepackage{url}
-
-\begin{document}
-%
-% --- Author Metadata here ---
-\conferenceinfo{ACM-MM 2004}{New York, NY, USA}
-%\CopyrightYear{2001}
-% Allows default copyright year (2000) to be over-ridden - IF NEED BE.
-
-%\crdata{0-12345-67-8/90/01}
-% Allows default copyright data (0-89791-88-6/97/05) to be over-ridden - IF NEED BE.
-% --- End of Author Metadata ---
-
-% \title{Optimizing Quality for Video Sharing in Synchronous Collaboration}
-\title{Optimizing Quality for Collaborative Video Viewing}
-%
-% You need the command \numberofauthors to handle the "boxing"
-% and alignment of the authors under the title, and to add
-% a section for authors number 4 through n.
-%
-% Up to the first three authors are aligned under the title;
-% use the \alignauthor commands below to handle those names
-% and affiliations. Add names, affiliations, addresses for
-% additional authors as the argument to \additionalauthors;
-% these will be set for you without further effort on your
-% part as the last section in the body of your article BEFORE
-% References or any Appendices.
-
-\numberofauthors{4}
-%
-% You can go ahead and credit authors number 4+ here;
-% their names will appear in a section called
-% "Additional Authors" just before the Appendices
-% (if there are any) or Bibliography (if there
-% aren't)
-
-% Put no more than the first THREE authors in the \author command
-\author{
-%
-% The command \alignauthor (no curly braces needed) should
-% precede each author name, affiliation/snail-mail address and
-% e-mail address. Additionally, tag each line of
-% affiliation/address with \affaddr, and tag the
-%% e-mail address with \email.
-\alignauthor Dan Phung\\
-       \affaddr{Computer Science Department}\\
-       \affaddr{Columbia University}\\
-       \affaddr{New York City, New York}\\
-       \email{phung@cs.columbia.edu}
-\alignauthor Giuseppe Valetto\\
-       \affaddr{Computer Science Department}\\
-       \affaddr{Columbia University}\\
-       \affaddr{New York City, New York}\\
-       \affaddr{and Telecom Italia Lab}\\
-       \affaddr{Turin, Italy}\\
-       \email{valetto@cs.columbia.edu}
-\alignauthor Gail Kaiser \\
-       \affaddr{Computer Science Department}\\
-       \affaddr{Columbia University}\\
-       \affaddr{New York City, New York}\\
-       \email{kaiser@cs.columbia.edu}
-}
-\additionalauthors{Additional authors: Suhit Gupta {\texttt{suhit@cs.columbia.edu}}}
-\date{\parbox[b][0ex]{0em}{\hspace*{-12.5em}\raisebox{37ex}{\fbox{For
-submission to \emph{ACM-MM 2004}, due 12:00 AM EDT: April 05, 2004.}}}}
-% \date{05 April 2004}
-\maketitle
-
-\begin{abstract}
-The increasing popularity of distance learning and online courses has
-highlighted the lack of collaborative tools for student groups.  In
-addition, the introduction of lecture videos into the online
-curriculum has drawn attention to the disparity in the network
-resources used by the students.  We present $\mathrm{AI}^2$TV
-(Adaptive Internet Interactive Team Video), an architecture and
-adaptation model that allows geographically dispersed participants,
-possibly some or all disadvantaged in network resources, to
-collaboratively view a video in synchrony.  $\mathrm{AI}^2$TV
-upholds the invariant that each participant will view semantically
-equivalent content at all times. Video player actions, like play,
-pause and stop, can be initiated by any of the participants and the
-results of those actions are seen by all the members.  These features
-allow group members to review a lecture video in tandem to facilitate
-the learning process.  We employ an autonomic (feedback loop)
-controller that monitors clients' video status and adjusts the quality
-of the video according to the resources of each client.  We show in
-experimental trials that our system can successfully synchronize video
-for distributed clients while at the same time optimizing the video
-quality, given actual (fluctuating) bandwidth, by adaptively adjusting
-the quality level for each participant.
-\end{abstract}
-
-% A category with the (minimum) three required fields
-\category{C.2.4}{Distributed Systems}{Client/server, Distributed applications}
-\category{D.2.8}{Software Engineering}{Metrics -- performance measures}
-\category{H.5.1}{Information Interfaces and Presentation}{Multimedia Information Systems}
-\category{H.5.3}{Group and Organization Interfaces}{Computer-supported cooperative work, Synchronous interaction}
-\category{K.3.1}{Computer Uses In Education}{Collaborative learning, Distance learning}
-
-\terms{Algorithms, Measurement, Performance, Experimentation, Human Factors}
-
-\keywords{Synchronized Collaborative Video, Autonomic Controller}
-
-% -------------------------------------------------- %
-% FIGURES AT THE FRONT
-% -------------------------------------------------- %
-%% \begin{figure}
-%%   \centering
-%%   \epsfig{file=vidframes.eps, width=8cm}
-%%   \caption{Semantic video compression hierarchy.}
-%%   \label{vidframes}
-%% \end{figure}
-
-%% \begin{figure}
-%%   \centering
-%%   \epsfig{file=vidframes.eps, width=8cm}
-%%   \caption{Semantic video compression hierarchy.}
-%%   \label{vidframes}
-%% \end{figure}
-
-%% \begin{figure}
-%%   \centering
-%%   \epsfig{file=ai2tvarch.eps, width=8cm}
-%%   \caption{ai2tv Architecture}
-%%   \label{ai2tv_arch}
-%% \end{figure}
-
-
-%% \begin{figure}
-%%  \centering
-%%  \epsfig{file=refarch.eps, width=8cm}
-%%   \label{refarch}
-%%  \caption{Conceptual Reference Architecture}
-%% \end{figure}
-
-
-%% \begin{figure}
-%%   \centering
-%%   \hspace*{-5mm}
-%%   \epsfig{file=ljil.eps, width=8cm}
-%%   \caption{Workflow diagram }
-%%   \label{ljil}
-%% \end{figure}
-
-
-% -------------------------------------------------- %
-
-
-% tech report number CUCS-009-04
-\section{Introduction}
-
-Distance learning programs such as the Columbia Video Network
-\cite{CVN} have evolved from shipping lecture videotapes to their
-off-campus students to streaming the videos over the Internet, as
-does the Stanford Center for Professional Development \cite{SCPD}.
-The lectures are
-sometimes delivered ``live'', while in progress on campus, but
-frequently are post-processed and packaged for students to watch (and
-re-watch) at their convenience.  This introduces the possibility of
-forming ``study groups'' among off-campus students who view the
-lecture videos together, and pause the video for discussion when
-desired, thus approximating the pedagogically valuable discussions of
-on-campus students.  Although the instructor is probably not available
-for these discussions, this may be an advantage, since on-campus
-students are rarely afforded the opportunity to pause, rewind and
-fast-forward their instructors' lectures.
-
-However, what we call {\em collaborative video viewing} by multiple
-geographically dispersed users is not yet supported by conventional
-Internet-video technology.  It is particularly challenging to support
-WISIWYS (what I see is what you see) when some of the users are
-relatively disadvantaged with respect to bandwidth (e.g., dial-up
-modems) and local computer resources (e.g., archaic graphics cards,
-small disks).  We have adopted technology developed by Liu and Kender
-\cite{TIECHENG} for ``semantically compressing'' MPEG2 videos into
-sequences of still JPG images.  This technology
-automatically selects the most semantically meaningful frames to show
-for each time epoch, and can generate different sequences of JPG
-images for a range of different compression (bandwidth) levels.  This
-approach works very well for typical lecture videos, where it is
-important, for instance, to see what the instructor has written on the
-blackboard after he/she stands aside, but probably not so important to
-see the instructor actually doing the writing, when his/her hand and
-body may partially occlude the blackboard.
-
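-% The tool's output for one video can be pictured as a set of parallel
-% frame indexes, one per compression level, in which each entry holds
-% an image and its start/end times.  A minimal sketch of the lookup a
-% client performs (Python; the two-level index and file names below
-% are illustrative assumptions, not the tool's actual output format):
-%
-%   # Hypothetical frame index: per level, (start_sec, end_sec, image).
-%   LEVELS = {
-%       0: [(0.0, 12.0, "L0/f000.jpg"), (12.0, 30.0, "L0/f001.jpg")],
-%       1: [(0.0, 30.0, "L1/f000.jpg")],  # more compressed, fewer frames
-%   }
-%
-%   def frame_at(level, t):
-%       """Return the image covering video time t at the given level."""
-%       for start, end, image in LEVELS[level]:
-%           if start <= t < end:
-%               return image
-%       return None  # t is past the end of the sequence
-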
-The remaining technical challenge is {\em synchronizing} the
-downloading and display of the image sequences among each of the
-distributed user clients, including support for shared video player
-actions such as pause, rewind, etc.  Further, if student groups do
-indeed sometimes pause the videos, or rewind to a point already
-available in local buffers (caches), it is desirable to take advantage
-of the then-idle network connection to prefetch future images at a
-higher quality level.
-
-We have developed an approach to achieving this, using three
-mechanisms working in tandem.  First, the software clocks of the video
-clients are synchronized using NTP \cite{NTP}.  This time is used for reference
-within the image sequences, where each image is associated with its
-start and end times relative to the beginning of the sequence.
-Second, the video clients communicate with each other over a
-distributed publish-subscribe event bus, which propagates video
-actions taken by one user in the group to all the other users in the
-group.  Thus any user can select a video action, not just a
-``leader''.
-
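-% The action propagation can be illustrated with a minimal sketch
-% (Python; the Bus class below is a trivial in-process stand-in for
-% the distributed publish-subscribe event system, and all names are
-% hypothetical, not those of the actual event bus):
-%
-%   import time
-%   from collections import defaultdict
-%
-%   class Bus:
-%       def __init__(self):
-%           self.subs = defaultdict(list)
-%       def subscribe(self, topic, callback):
-%           self.subs[topic].append(callback)
-%       def publish(self, topic, event):
-%           for callback in self.subs[topic]:
-%               callback(event)
-%
-%   class VideoClient:
-%       def __init__(self, bus, user, player):
-%           self.bus, self.user, self.player = bus, user, player
-%           bus.subscribe("group/video", self.on_action)
-%       def press_pause(self, video_time):
-%           # Any member may publish an action; there is no fixed leader.
-%           self.bus.publish("group/video",
-%               {"action": "pause", "video_time": video_time,
-%                "sender": self.user, "ntp_time": time.time()})
-%       def on_action(self, event):
-%           # Every client, the sender included, applies the same action
-%           # at the same video time, keeping the group in lockstep.
-%           self.player(event["action"], event["video_time"])
-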
-Finally, the main innovation of this research concerns optimizing
-video quality in this context: A decentralized feedback control loop
-dynamically adjusts each video client's choice of both the next image
-to display and the next image to retrieve from the available semantic
-compression levels.  The controller relies on sensors embedded in each
-client to periodically check what image is currently displaying,
-whether this image is ``correct'' for the current NTP time compared to
-what other clients are viewing, which images have already been
-buffered (cached) at that client, and what is the actual bandwidth
-recently perceived at that client.  Actuators are also inserted into
-the video clients, to modify local configuration parameters on
-controller command. The controller utilizes detailed information about
-the image sequences available at the video server, including image
-start and stop times (both the individual images and their start and
-stop times tend to be different at different compression levels), but
-unlike local client data, video server data is unlikely to change
-while the video is showing.  A single controller is used for all
-clients in the same user group, so it can detect ``skew'' across
-multiple clients, and may reside on the video server or on another
-host on the Internet.
-
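-% One controller cycle might look as follows (a Python sketch; the
-% sensor fields, thresholds and level numbering are illustrative
-% assumptions, with level 0 the highest quality, and the real
-% controller also weighs buffered frames and per-level frame times):
-%
-%   MAX_LEVEL = 4                      # most compressed level available
-%   NEED_BW = [300, 200, 120, 60, 30]  # hypothetical kbit/s per level
-%
-%   def control_cycle(clients):
-%       """One pass of the feedback loop over every client in a group."""
-%       for c in clients:
-%           s = c.sense()  # {"frame", "correct_frame", "level", "bandwidth"}
-%           if s["frame"] != s["correct_frame"]:
-%               # Lagging the group: jump to the correct frame now and
-%               # step down to a more compressed level for later fetches.
-%               c.actuate(frame=s["correct_frame"],
-%                         level=min(s["level"] + 1, MAX_LEVEL))
-%           elif s["level"] > 0 and s["bandwidth"] >= NEED_BW[s["level"] - 1]:
-%               # Spare bandwidth: prefetch coming frames one level higher.
-%               c.actuate(level=s["level"] - 1)
-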
-In the next section, we further motivate the collaborative video
-viewing problem, provide background on the semantically compressed
-video repository, and explain the technical difficulties of optimizing
-quality while synchronizing such semantically compressed videos. The
-following section presents our architecture and dynamic adaptation
-model, and its implementation in $\mathrm{AI}^2$TV (Adaptive
-Internet Interactive Team Video).  In the Evaluation section, we
-describe the criteria used to evaluate the effectiveness of our
-approach, and show empirical results obtained when applied to real
-lecture videos distributed for a recent Columbia Video Network
-course. We compare to related work, and then summarize our
-contributions.
-
-\section{Motivation and Background} \label{background}
-% - discuss other projects on multiple client synchronization
-The increasing popularity of online education has prompted many
-universities to begin providing online certification tracks that use
-pre-taped on-campus lectures.  Traditionally, online courses are
-provided through a Web portal interface, such as those at the
-University of Phoenix or Capella University, which are designed for
-individual students with a self-motivated learning style.
-In contrast, in many on-campus courses, students are encouraged to
-study in groups in order to better learn the material.  For off-campus
-students viewing the pre-taped lecture, this practice is difficult
-because of their separation by geographic distance.  Support of
-synchronous collaboration remains a major concern in courses where
-group work is encouraged \cite{WELLS}, yet there are few educational
-tools that allow synchronous collaboration across a group of online
-students \cite{BURGESS}.
-
-There are some available tools that allow online collaboration,
-though they may not be geared specifically for educational purposes.
-Some of these tools use asynchronous forms of communication, such as
-bulletin boards and discussion groups.  Other tools, such as instant
-messaging, application and desktop sharing \cite{WEBEX, VNC}, and
-co-browsing \cite{CAPPS, LIEBERMAN, SIDLER} facilitate the
-communicative aspects of synchronous collaboration but are not
-designed specifically for educational purposes and thus can be cumbersome
-when used as such.
-
-% - ai2tv project goals
-$\mathrm{AI}^2$TV contributes to the area of synchronous collaboration
-support, and more specifically, to collaborative video sharing.  The
-work presented in this paper is a part of a larger effort to provide a
-collaborative virtual environment for distributed team study.  That
-project's goal is to provide support for the collaborative use of
-multimedia content relevant to the work carried out by a team of users
-involved in the same project, such as audio/video recordings of
-lectures, meetings, workshops and other informational and educational
-events.  In this paper, we focus on the aspect of video sharing for
-synchronous collaboration.
-
-Viewing video on the Internet usually requires relatively high
-bandwidth, and lossy network connections can lead to lost
-video content.  For group review of lecture videos, the lost content
-may severely disrupt the progression of the group.  Furthermore, the
-differences in network and computing resources available to dispersed
-users in the same collaboration group can prevent some students from
-participating.  Collaborative video sharing poses a twofold problem:
-on the one hand, it is mandatory to keep all users synchronized with
-respect to the content they are supposed to see at any moment during
-play time; on the other hand, it is important to provide each
-individual user with a frame rate that is optimized with respect to
-the user's available resources, which may vary during the course of
-the video.
-
-One solution to the problem of balancing the group synchronization
-requirement with the optimization of individual viewing experiences is
-to use videos with cumulative layering \cite{MCCANNE}, also known as
-scalable coding \cite{LI}.  In this approach, the client video player
-selects a quality level appropriate for that client's resources from a
-hierarchy of several different encodings or frame rates for that
-video. Thus a client could receive an appropriate quality of video
-content while staying in sync with the other members of the group.
-
-% - describe overview of semantic compression tool used
-In $\mathrm{AI}^2$TV, we use semantic summarization to produce a video
-with cumulative layering.  The semantic summarization package
-developed at Columbia University by Liu and Kender \cite{TIECHENG}
-reduces a video to a set of semantically significant key frames.  That
-tool operates on MPEG format videos and outputs sequences of JPG
-frames, some of which are displayed in figure \ref{sem_video}.  Its
-semantic compression algorithm profiles video frames within a sliding
-time window and selects key frames that have the most semantic
-information.  Increasing the size of the window makes each key frame
-represent a larger time slice, so a larger window setting produces
-fewer key frames than a smaller one.
-
-\begin{figure}
-  \centering
-  \epsfig{file=vidframes.eps, width=8cm}
-  \caption{Semantic Video Scenario}
-  \label{sem_video}
-\end{figure}
-
-A conceptual diagram of a layered video produced from semantic
-compression is shown in figure \ref{sem_video}.  Note that the
-semantic compression algorithm produces a random distribution of key
-frames, hence the video produced by the package plays back at a
-variable frame rate.  The variability in the frame rate implies that
-there are pockets of relatively high-frequency semantic change, i.e.,
-sections of the video that demand a higher frame rate.  The
-variable frame rate video adds complexity to the bandwidth demands of
-the client, yet its semantic focus ensures that relevant content is
-unlikely to get lost, which is a significant property in the context
-of an educational application.
-
-Also, in figure \ref{sem_video}, the bottom-left inset shows the
-juxtaposition of individual frames from two different quality levels.
-Each frame has a representative time interval \texttt{[start:end]}.
-For the higher level, Frame 1a represents the interval from 1:00 to
-1:03, and Frame 1b represents the interval from 1:04 to 1:10.  For the
-lower level, Frame 2 represents the entire interval from 1:00 to 1:10.
-In this diagram, Frame 2 is semantically equivalent to Frame 1a and
-1b, though in reality, the start and end times would not match up as
-ideally as in our example.
-
-Through the use of the $\mathrm{AI}^2$TV video, we can provide
-semantically equivalent content to several clients with diverse
-resources by adjusting the compression level assigned to each client
-while the user is watching the video.  Thus for our purposes,
-synchronization of video boils down to showing semantically equivalent
-frames for a given time.
-
-To adjust the clients in response to the changing environment, we use
-an autonomic controller to maintain the synchronization of the group
-of video clients while fine tuning the video quality for each client.
-In \cite{RAVAGES}, we proposed the idea of using an autonomic
-controller to support group video synchronization and other multimedia
-applications.
-
-The autonomic controller remains conceptually separate from the
-controlled $\mathrm{AI}^2$TV video system and employs a software based
-workflow engine, named Workflakes \cite{ICSE}.  Note that the workflow
-used here coordinates the behavior of software entities, as opposed to
-human-oriented workflow systems.  The use of software based workflow
-for the specification and enactment of the plan that coordinates
-actuators is taken from Wise et al. \cite{OSTERWEIL} among others.
-The Workflakes engine has been developed for and used in a variety of
-domains \cite{AMS,ICSE}, in which it orchestrates the work of software
-entities to achieve the fully automated dynamic adaptation of
-distributed applications.  The design of the autonomic controller is a
-part of an externalized autonomic computing platform proposed by
-Kaiser \cite{REFARCH}.  In the context of $\mathrm{AI}^2$TV,
-Workflakes coordinates the adjustment of the compression level
-assigned to each client along the hierarchy of the $\mathrm{AI}^2$TV
-video.
-
-% (FIGURE: semantic compression )
-% (FIGURE: key frames hierarchy )
-
-\section{Architecture and Adaptation\\ Model}
-\subsection{System Architecture}
-% Design of a the system in general
-Our system involves several major components: a video server, video
-clients, an externalized autonomic controller and a common
-communications infrastructure, as seen in figure \ref{ai2tv_arch}.
-
-\begin{figure}
-  \centering
-  \epsfig{file=ai2tvarch.eps, width=8cm}
-  \caption{$\mathrm{AI}^2$TV Architecture}
-  \label{ai2tv_arch}
-\end{figure}
-
-%(FIGURE: ai2tv synchronization arch)
-% video server
-The video server provides the educational video content to the clients
-for viewing.  The provided content has the form of an
-$\mathrm{AI}^2$TV video, i.e., a hierarchy of video versions produced
-by running the semantic compression tool multiple times with settings for different
-compression levels, which produces several sets of JPG frames that are
-indexed by a frame index file.  The task of the video server is simply
-to provide remote download access to the frames and the index file
-over HTTP.
-
-% video client
-The task of video clients is to acquire video frames, display them at
-the correct time, and provide a set of basic video functions.  Taking
-a functional design perspective, the client is composed of three major
-modules: a video display, a video buffer and manager for fetching and
-storing downloaded frames, and a time controller.
-
-The video display renders the JPG frames into a window for display and
-provides a user interface for play, pause, goto, and stop.  When any
-participant initiates one of these actions, all the other group
-members receive the same command, thus all the video player actions
-are synchronized.  The video display knows which frame to display by
-using the current video time and display quality level to index into
-the frame index for the representative frame.  Before trying to render
-the frame, it asks the video buffer manager if the needed frame is
-available.  The video display also includes a control entity that
-enables external entities, like the autonomic controller, to adjust
-the current display quality level.
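-
-To make the lookup concrete, the following is a minimal sketch in
-Java (names and structures are illustrative assumptions, not the
-actual implementation) of mapping the current video time and
-quality level to a frame:
-
-\begin{verbatim}
-// Sketch: each quality level holds a
-// time-ordered list of frame entries.
-class FrameEntry {
-  long start, end;  // seconds into video
-  String jpgName;   // file fetched over HTTP
-}
-// Return the frame whose [start,end] window
-// covers the current video time, if any.
-FrameEntry frameFor(java.util.List<FrameEntry>
-                    level, long videoTime) {
-  for (FrameEntry f : level)
-    if (f.start <= videoTime
-        && videoTime <= f.end)
-      return f;
-  return null;  // no frame covers this time
-}
-\end{verbatim}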
-
-The video buffer and manager constitute the downloading daemon that
-continuously downloads frames at a certain level.  It keeps a hash of
-the available frames and a count of the current reserve frames (frames
-buffered) for each quality level.  The buffer manager also includes a
-control hook that enables external entities to adjust the current
-downloading quality level.
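-
-As an illustration, the buffer manager's bookkeeping might be
-sketched as follows (hypothetical Java; names and types are
-assumptions rather than the actual code):
-
-\begin{verbatim}
-// Sketch of the buffer manager's state.
-class BufferState {
-  // frames already downloaded, by file name
-  java.util.Set<String> cached =
-      new java.util.HashSet<String>();
-  // reserve (buffered) count, one slot per
-  // quality level (five in our hierarchy)
-  int[] reserve = new int[5];
-  int downloadLevel;  // current fetch level
-  // control hook used by the controller's
-  // actuator to change the fetch level
-  void setDownloadLevel(int level) {
-    downloadLevel = level;
-  }
-}
-\end{verbatim}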
-
-The time controller's task is to ensure that a common video clock is
-maintained across clients.  It relies on NTP \cite{NTP} to synchronize
-the system's software clock, thereby ensuring a common time base from
-which each client can derive the video clock.  The task of each client
-is then to play the frames at the correct time; since all the clients
-refer to the same time base, they all show semantically equivalent
-frames.
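-
-A minimal sketch of the resulting video clock (assuming the
-operating system clock is already NTP-disciplined) is:
-
-\begin{verbatim}
-// Video time shared by all clients: offset
-// of the NTP-synced wall clock from the
-// group's agreed play start time.
-long videoTimeSec(long playStartMillis) {
-  return (System.currentTimeMillis()
-          - playStartMillis) / 1000;
-}
-\end{verbatim}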
-
-% autonomic controller
-The task of the autonomic controller is to ensure that the clients
-within a video session stay synchronized and that each client plays at
-its highest attainable quality level.  The controller is a distributed
-system, whose design derives from a conceptual reference architecture
-for externalized autonomic computing platforms proposed by Kaiser
-\cite{REFARCH}, which is shown in figure \ref{refarch}. The
-architecture provides an end-to-end closed control loop, in which
-sensors attached to a generic (possibly legacy) target system
-continuously collect and send streams of data to gauges.  The gauges
-analyze the incoming data streams and respond to conditions that need
-adaptation by relaying that information to controllers.  The
-controllers coordinate the expression and orchestration of the
-workflow needed to carry out the adaptation.  To close the loop,
-actuators at the target system effect the needed adjustments under the
-supervision of the controller.
-
-%
-%(figure of ref arch here).
-%
-
-\begin{figure}
- \centering
- \epsfig{file=refarch.eps, width=8cm}
- \caption{Conceptual Reference Architecture}
- \label{refarch}
-\end{figure}
-
-
-The sensors provide the autonomic controller with data about the
-clients such as video display quality level, the buffer quality level,
-the buffer reserve frames, the currently displayed frame and the
-current bandwidth.  Gauges are embedded together with the coordination
-engine for expediency of design and to minimize the communication
-latency to it.  They receive the sensor reports from individual
-clients, collect them in buckets, similar to the approach in
-\cite{MIMAZE}, and pass the bucket data structure to the coordination
-engine.  The coordination engine directs the flow of the information
-through a predefined workflow plan described in the next section.
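-
-For illustration, a sensor report and the bucket that the gauges
-hand to the coordination engine might look like the following
-(hypothetical Java; the field names are assumptions):
-
-\begin{verbatim}
-// Sketch of one client's periodic report.
-class SensorReport {
-  String clientId;
-  int displayLevel, bufferLevel;
-  int reserveFrames;    // buffered frames
-  String currentFrame;  // frame on screen
-  double bandwidth;     // recent bytes/sec
-}
-// One bucket per reporting period, keyed by
-// client, passed on to the coordinator.
-java.util.Map<String, SensorReport> bucket =
-  new java.util.HashMap<String, SensorReport>();
-\end{verbatim}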
-
-During the evaluation of the data, a set of helper functions that are
-tailored specifically for the application are used to produce the
-triggers for the coordinator.  If a trigger is raised, the
-coordination engine enacts an adaptation scheme, which is executed on
-the end hosts through hooks that the end systems provide to the
-actuators.
-
-% communications
-The infrastructure used for the communications among the video
-clients, as well as between the $\mathrm{AI}^2$TV system and the
-autonomic controller is provided by an event bus based on the
-publish/subscribe paradigm.  The reason for choosing this
-communication model is that it inherently decouples the physical
-location of the communicating entities.  Events transmitted onto that
-event bus are of three kinds: video player actions, sensor reports and
-adaptation directives (see figure \ref{ai2tv_arch}).  Video player
-actions pertain to the functionality of the $\mathrm{AI}^2$TV system,
-since they represent commands issued on a video client, such as pause,
-play or stop, which need to be propagated to all clients in the group
-to enforce the same behavior.  All video player actions are time
-stamped so that clients can respond to those commands in reference to
-the common time base.
-
-\subsection{Adaptation Model}
-
-The adaptation scheme falls into two levels: a higher level data flow,
-and a lower level adjustment heuristic.  The former directs the flow
-of data through a logical sequence to provide a formal decision
-process while the latter provides the criteria as to when to make
-certain adjustments.
-
-The higher level logic is shown in figure \ref{ljil}, according to the
-Little-JIL graphic workflow specification language \cite{LJIL}.  The
-diagram shows the task decomposition hierarchy according to which the
-adaptation workflow unfolds.  Note that the evaluation of clients'
-state with respect to the group (\texttt{EvaluateClient}) and the
-issuing of adaptation directives (\texttt{AdaptClient}) is carried out
-as a set of parallel steps.  Also note that the multiplicity of
-those parallel steps is dynamically determined via the number of
-entries in the \texttt{client} variable, which maps to a collection of
-$\mathrm{AI}^2$TV clients.
-
-%
-%add Figure with AI2TV workflow diagram here
-%
-
-\begin{figure}
-  \centering
-  \hspace*{-5mm}
-  \epsfig{file=ljil.eps, width=8cm}
-  \caption{$\mathrm{AI}^2$TV Workflow diagram }
-  \label{ljil}
-\end{figure}
-
-The adaptation scheme at the lower level falls into two categories:
-directives that adjust the client in response to relatively low
-bandwidth situations, and those that take advantage of relatively high
-bandwidth situations.
-
-In the situation where a client has relatively low bandwidth, the
-client may not be able to download the next frame at the same quality
-level in time.  In that case, both the display and the buffer quality
-levels are reduced by one level.  If the client is already at the
-lowest level, the controller calculates the next frame whose download
-can be completed in time, so that the client remains synchronized
-with the rest of the team, and asks the client to jump ahead to that
-frame.
-
-To take advantage of relatively high bandwidth situations, the buffer
-manager will start to accumulate a reserve buffer.  Once the buffer
-reaches a threshold value (for example, 10 buffered frames), the
-autonomic controller will direct the buffer manager to start fetching
-frames at a higher quality level.  Once a sufficient reserve is
-accumulated also at that higher level, the client is then ordered to
-display frames at that quality level.  If the bandwidth drops before
-the buffer manager controller can accumulate enough frames in the
-higher-level reserve, then the buffer manager is dropped back down one
-level.
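-
-The following sketch summarizes both directives (hypothetical Java;
-the threshold value and method names are simplified assumptions,
-and the fall-back when bandwidth drops mid-upgrade is omitted):
-
-\begin{verbatim}
-static final int RESERVE = 10;  // threshold
-// ClientState (elided) wraps one client's
-// sensor readings and actuator hooks.
-void adapt(ClientState c) {
-  if (!c.canFetchNextFrameInTime()) {
-    // low bandwidth: degrade, or skip ahead
-    if (c.level() > LOWEST)
-      c.dropOneLevel();
-    else
-      c.jumpTo(c.nextReachableFrame());
-  } else if (c.level() < HIGHEST
-      && c.reserve(c.level()) >= RESERVE) {
-    // high bandwidth: build a reserve above,
-    // then display at the higher level
-    c.fetchAt(c.level() + 1);
-    if (c.reserve(c.level() + 1) >= RESERVE)
-      c.displayAt(c.level() + 1);
-  }
-}
-\end{verbatim}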
-
-%\section{Implementation} \label{implementation}
-
-The video client is implemented in Java.  It uses the
-javax.swing package to render the JPG images.  The autonomic
-controller, Workflakes, is also Java-based, and is built on top of the
-open-source Cougaar multi-agent system \cite{COUGAAR}, which
-Workflakes adapts to operate as a decentralized workflow engine.  We
-used the Little-JIL graphic workflow specification language to produce
-the workflow used \cite{LJIL}.  We chose a content-based
-publish-subscribe event system, Siena, as our communication bus
-\cite{SIENA}.
-
-\comment{how many lines of code?}
-
-\section{Evaluation} \label{eval}
-
-We assess our system by evaluating its ability to synchronize the
-clients and its ability to adjust the clients' video quality.  The
-evaluation results presented in this section were computed from a set
-of client configurations, specifically 1, 2, 3, and 5 clients running
-a video for 5 minutes and probing system state every 5 seconds. The
-compression hierarchy we employed has 5 different levels.
-
-For our evaluation, we define a baseline client against which the
-performance of our approach can be compared.  A baseline client is a
-client whose quality level is set at the beginning of the video and
-not changed thereafter.  To define the baseline client, we use a value
-that we identify as the average bandwidth per level. This value is
-computed by summing the total size in bytes of all frames produced at
-a certain compression level and dividing by the total video time.
-This value provides the bandwidth needed on average for the buffer
-controller to download the next frame on time.  We provide the
-baseline client with the needed bandwidth for its chosen level by
-using a bandwidth throttling tool \cite{SHAPERD} to adjust the
-bandwidth to that client from the video server.  Note that using the
-average as the baseline does not account for changes in the video
-frame rate and fluctuations in network bandwidth, which are situations
-in which adaptive control can make a difference.
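-
-Stated as a formula: for compression level $l$ with frame set $F_l$
-and total video time $T$ (in seconds), the average bandwidth per
-level is
-\[
-  B_l = \frac{\sum_{f \in F_l} \mathit{size}(f)}{T} ,
-\]
-where $\mathit{size}(f)$ is the size of frame $f$ in bytes.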
-
-When carrying out the evaluation, each controller-assisted client is
-assigned an initial level in the compression hierarchy and the same
-bandwidth as the baseline client for that hierarchy level.  At the end
-of each experiment, we record any differences resulting from the
-adaptation of the clients' behavior on the part of the autonomic
-controller and the behavior of the baseline client, with respect to
-synchrony and quality of service (frame rate).
-
-%% To evaluate our system, we produced an $\mathrm{AI}^2$TV video that had 5 quality
-%% levels.  For a 17 minute video and five different window lengths, the
-%% total number of frames are 165, 71, 39, 21, and 13.  Our choice of the
-%% relatively low frame rate quality levels was influenced by the goal of
-%% the system being used by clients with low bandwidth resources.
-
-% the pathetic average frame rates (per minute!!!):
-%% 3.399831413 - high
-%% 1.46295776
-%% 0.806289939
-%% 0.434237734
-%% 0.268763313 - low
-
-\textit{Evaluating Synchrony}
-
-A major goal of the system is to provide synchronous viewing to all
-clients.  To measure the effectiveness of the synchrony, we probe the
-clients at periodic time intervals and log the frame currently being
-displayed.  This procedure effectively takes a snapshot of the system,
-which we can evaluate for correctness.  This evaluation proceeds by
-checking whether the frame being displayed at a certain time
-corresponds to one of the valid frames at that time, on any arbitrary
-level.  We allow any arbitrary level because the semantic compression
-algorithm ensures that all frames at a certain time will contain the
-same semantic information if the semantic windows overlap.  We score
-the system by summing the number of clients not showing an acceptable
-frame and normalizing over the total number of clients.  A score of 0
-indicates a synchronized system.
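-
-Formally, for $N$ clients the synchrony score of a snapshot is
-\[
-  S = \frac{1}{N} \sum_{i=1}^{N} m_i ,
-\]
-where $m_i$ is 1 if client $i$ is not showing an acceptable frame
-and 0 otherwise, so $S = 0$ indicates a fully synchronized system.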
-
-Our experiments for the evaluation of synchronization initially
-involved groups of clients that were set to begin playing the test
-video at different levels in the compression hierarchy, and were
-assigned the corresponding baseline bandwidth. In those experiments,
-the results show a total score of 0 for all trials. Also,
-notwithstanding the variations in the frame rate and/or occasional
-fluctuations in the actual bandwidth of the clients, no frames were
-missed.  This result demonstrates that the chosen baseline
-combinations of compression levels and throttled bandwidths do not
-push the clients beyond their bandwidth resource capacity.
-
-We also ran another set of experiments, in which the clients in the
-group were assigned more casually selected starting levels and
-bandwidths.  This casual selection is representative of a real-world
-situation, like listening to Internet radio, where users must choose a
-desired frame rate to receive.  The user may have been informed that
-she is allocated a level of bandwidth from her Internet service
-provider, but she may actually be receiving a significantly lower
-rate.  We ran experiments first without the aid of the autonomic
-controller and then with it. In the former case, clients with
-insufficient bandwidth were stuck at the compression level originally
-selected, and thus missed an average of 63\% of the needed frames.  In
-the latter case, the same clients only missed 35\% of the needed
-frames.  These results provide evidence of the benefits of the
-adaptive scheme implemented by the autonomic controller.
-
-%% selected, and thus only displayed an average of 37\% of the needed
-%% frames.  In the latter case, the same clients received 65\% of the
-%% needed frames.  These results provide evidence of the benefits of the
-%% adaptive scheme implemented by the autonomic controller.
-
-
-%% \begin{figure}
-%%   \centering
-%%   \hspace*{-5mm}
-%%   \epsfig{file=scores.eps, width=9cm}
-%%   \caption{Comparison of weighted scores}
-%%   \label{scores}
-%% \end{figure}
-
-\textit{Evaluating Quality of Service}
-
-A primary goal of the $\mathrm{AI}^2$TV system is to increase the
-video quality for the clients.  With respect to the evaluation of
-video quality of service, Liu et al. describe several local metrics
-such as frame rate, loss rate, delay jitter, image resolution, and
-human spatial-temporal contrast-sensitivity \cite{LIU2003}.  We do not
-address global metrics such as fairness, as described in
-\cite{LIU2003}.  For our situation, we focus on frame rate as a
-measure of video quality.
-
-To attain a quantitative measure of the quality of service provided by
-a client assisted by the autonomic controller, we use a scoring system
-relative to the baseline client's quality level.  We give a weighted
-score for each level above or below the baseline quality level.  The
-weighted score is calculated as the ratio of the frame rate of the two
-levels.  So, for example, if a client is able to play at one level
-higher than the baseline, and the baseline plays at an average
-\texttt{n} fps while the level higher plays at \texttt{2*n} fps, the
-given score for playing at the higher level is 2.  The weighted score
-is calculated between the computed average frame rates of the chosen
-quality levels.  Theoretically, the baseline client should receive a
-score of 1.  Note that we formulated this scoring system because other
-scoring systems \cite{BAQAI,CORTE,CONWAY2000} measure unrelated
-factors such as the synchronization between different streams (audio
-and video), image resolution, or human perceived quality, and are not
-restricted by the group synchronization requirement.  This restriction
-mandates that a scoring system be sensitive to the relative
-differences between quality hierarchies.
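-
-In other words, if $r_c$ is the average frame rate of the quality
-level chosen by the client and $r_b$ that of the baseline level,
-the weighted score is simply the ratio
-\[
-  w = \frac{r_c}{r_b} ,
-\]
-so a baseline client should, in theory, score $w = 1$.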
-
-% qos results
-The evaluation of the quality of service experiments shows that the
-baseline clients scored a group score of 1 (as expected) while the
-clients assisted by the autonomic controller scored a group score of
-1.25.  The one-tailed t-score of this difference is 3.01, which is
-significant for an $\alpha$ value of .005 (N=17).  This result
-demonstrates that, using the autonomic controller, we are able to
-achieve a significant positive difference in the quality of service.
-Note that the t-score does not measure the degree of the positive
-difference achieved by the autonomic controller.  To demonstrate the
-degree of benefit of using the autonomic controller, we measure the
-proportion of additional frames that each client maintained by the
-controller is able to enjoy.  We found that overall, those clients
-received 20.4\% ($\pm$ 9.7, N=17) more frames than the clients
-operating at a baseline rate.
-
-% risk assessment
-The act of running the client close to or at a level higher than the
-average bandwidth needed puts the client at risk for missing more
-frames because the autonomic controller is trying to push the client
-to a better but more resource-demanding level.  To measure whether the
-controller-assisted client is exposed to a higher risk of missing
-frames, we also count the number of missed frames during a video
-session.  The missed-frame score is a simple count of those frames.
-Note that the missed-frame score is kept
-separate from the measure of the relative quality to discriminate
-between levels of concern, though they both indicate a characteristic
-of quality of service.
-
-In this assessment of the risk of optimizing the frame rate, we found
-that there was only one instance in which a controller-assisted client
-missed two consecutive frames.  Upon closer inspection, the time
-region during this event showed that the video demanded a higher frame
-rate while the network bandwidth assigned to that client was
-relatively low.  The client was able to consistently maintain a high
-video quality level after this time.
-
-% calculation used for the 20% number I got up there.
-% baselineFrames = number of frames base client gets
-% wfFrames = number of frames the wf client gets
-% (wfFrames - baselineFrames) / baselineFrames = proportion of frames higher
-%                                                then the baseline client
-
-Though in some cases using this system without the autonomic
-controller may be sufficient, in most cases the varying network
-bandwidth and the variable frame rate of the video do not permit
-the client to make an informed decision about the most appropriate
-quality level for the next frames.  In addition, an application
-that does not adjust
-its quality level to current bandwidth resources will not be able to
-offer a level of quality appropriate to the client's resources.  To
-address these issues, the autonomic controller provides an additional
-adaptive element to the clients.  We show in these experiments that
-the autonomic controller makes a significant positive difference in
-aiding the client in achieving a higher quality level.
-
-\section{Related Work} \label{related}
-Stream synchronization is a widely studied topic in multimedia
-research.  Synchronization schemes can be classified by whether they
-are local or distributed (i.e., one or multiple sinks), whether they
-act reactively or proactively, and whether they require the notion of
-a global clock.  Our work does not deal with
-the problem of inter-media synchronization of multiple modalities
-(i.e., video and audio) within a multimedia stream where the concern
-is to ensure the correct playback of related data originating from
-different streams.  Our problem is related to intra-stream
-synchronization, which is concerned with ensuring the temporal
-ordering of data packets transmitted across a network from a single
-streaming source to one or more delivery sinks.
-
-Most intra-stream synchronization schemes are based on data buffering
-at the sink(s) and on the introduction of a delay before the play-out
-of buffered data packets (i.e., frames).  Those synchronization
-schemes can be rigid or adaptive \cite{Clark92}.  In rigid schemes,
-such as \cite{Ferrari}, the play-out delay is chosen a priori in such
-a way that it accounts for the maximum network transfer delay that can
-likely occur across the sinks.  Rigid schemes work under a worst-case
-scenario assumption and accept the introduction of delays that may be
-longer than necessary, in order to maximize the synchronization
-guarantees they can offer even in demanding situations.
-
-Contrary to a rigid approach, adaptive schemes \cite{ASP,Lancaster,FSP}
-recompute the delay parameter continuously while streaming: they
-try to "guess" the minimum delay that can be introduced, which can
-still ensure synchronization under actual operation conditions.  In
-order to enhance quality of service in terms of minimized play-out
-delay, those schemes must accept some temporary synchronization
-inconsistencies and/or some data loss, in case the computed delay
-proves at times insufficient (due, for example, to variations in the
-conditions of the network) and needs to be corrected on the fly.
-
-Our approach to synchronization can be classified as a distributed
-adaptive scheme that employs a global clock and operates in a
-proactive way.  The main difference with respect to other approaches,
-such as the Adaptive Synchronization Protocol \cite{ASP}, the work of
-Gonzalez and Abdel-Wahab \cite{GONZALEZ}, or that of Liu and El
-Zarki \cite{LIU} (which can all be used equally for inter- and
-intra-stream applications) is that it is not based on the idea of
-play-out delay.  Instead, it takes advantage of layered semantic
-compression coupled with buffering to "buy more time" for clients that
-might not be able to remain in sync, by putting them on a less
-demanding level of the compression hierarchy.
-
-To ensure stream synchronization across a group of clients, it is
-usually necessary to implement some form of trade-off impacting the
-quality of service of some of the clients.  Many schemes trade off
-synchronization for longer delays, while some other approaches, like
-the Concord local synchronization algorithm \cite{Concord}, allow a
-choice among other quality parameters besides delay, like packet loss
-rate.  Our approach sacrifices frame rates to achieve synchronization
-when resources are low.
-
-Liu et al. provide a comprehensive summary of the mechanisms used in
-video multicast for quality and fairness adaptation as well as network
-and coding requirements \cite{LIU}.  To frame our work in that
-context, our current design models a single-rate server adaptation
-scheme to each of the clients because the video quality we provide is
-tailored specifically to that client's network resources.  The focus
-in our work is directed towards the client side end user perceived
-quality and synchrony, so we did not utilize the most efficient server
-model.  We believe that it would be straightforward to substitute in
-a simulcast server adaptation model \cite{CHEUNG,LI}.  Our design also
-fits into the category of layered adaptation.  This adaptation model
-defines a base quality level that users must achieve.  Once users have
-acquired that level, the algorithm attempts to incrementally acquire
-more frames to present a higher quality video.  In the work presented
-here, the definition of quality translates to a higher frame rate.
-Liu's discussion of bandwidth fairness, coding techniques and network
-transport perspectives lies outside the scope of this paper.
-
-With respect to the software architecture, our approach most resembles
-the Lancaster Orchestration Service \cite{Lancaster} since it is based
-on a central controller that coordinates the behavior of remote
-controlled units placed within the clients (i.e., the
-$\mathrm{AI}^2$TV video buffer and manager) via appropriate
-directives.  Their
-approach employs the adaptive delay-based scheme described above,
-hence the playback of video focuses on adapting to the lowest
-bandwidth client.  That approach would degrade the playback experience
-of the other participants to accommodate the lowest bandwidth client.
-Our approach differs by allowing each client to receive video quality
-commensurate with its bandwidth resources.
-
-Cen et al. provide a distributed real-time MPEG video/audio player
-that uses a software feedback loop between a single server and a
-single client to adjust frame rates \cite{CEN}.  Their architecture
-provides the feedback logic within each video player and does not
-support synchronization across a group of players, while the work
-presented here provides the adaptation model within a central
-controller and explicitly supports the synchronization of semantically
-equivalent video frames across a group of clients.
-
-An earlier implementation of $\mathrm{AI}^2$TV is described in
-\cite{VECTORS}.  In that version, a collaborative virtual environment
-(CVE) supported a variety of team interactions \cite{CHIME}, with the
-optional video display embedded in the wall of a CVE ``room''.  The
-same semantic compression capability was used. Video synchronization
-data was piggybacked on top of the UDP peer-to-peer communication used
-primarily for CVE updates, such as tracking avatar movements in the
-style of multi-player 3D gaming.  The video synchronization did not
-work very well, due to the heavy-weight CVE burden on local
-resources. Video quality optimization was not addressed.  The new
-implementation of $\mathrm{AI}^2$TV presented here can run alongside
-the CVE in a separate window.
-
-\section{Conclusion}
-
-In this paper we present an architecture and adaptation model that
-allows geographically dispersed participants to collaboratively view a
-video in synchrony.  The system also employs an autonomic controller
-architecture that adapts the video quality according to client network
-bandwidth resources.  The other novel approach that we present is
-the use of semantic compression to facilitate the synchronization of
-video content for clients with heterogeneous resources.  We rely on
-the semantic compression algorithm to guarantee that the semantic
-content of the video frames is equivalent for all clients.  We then
-distribute appropriate versions of the video to clients according to
-their current bandwidth resources.  Through the use of these tools,
-we hope to close the gap between students with varying network
-resources and allow collaboration to proceed in a fruitful manner.
-
-%ACKNOWLEDGMENTS are optional
-\section{Acknowledgments}
-We would like to thank John Kender, Tiecheng Liu, and other members of
-the High-Level Vision Lab for their assistance in using their
-lecture-video semantic compression software.  We would also like to
-thank the other members of the Programming Systems Lab for their
-support, particularly Matias Pelenur who ported the Little-JIL
-workflow notation to run on Workflakes/Cougaar.  Little-JIL was
-developed by Lee Osterweil's LASER lab at the University of
-Massachusetts, Amherst. Cougaar was developed by a DARPA-funded
-consortium; our main Cougaar contact was Nathan Combs of BBN.
-Information about the Columbia Video Network is available at
-\url{http://www.cvn.columbia.edu/}. PSL is funded in part by National
-Science Foundation grants CCR-0203876, EIA-0202063 and EIA-0071954,
-and by Microsoft Research.
-
-% The following two commands are all you need in the
-% initial runs of your .tex file to
-% produce the bibliography for the citations in your paper.
-\bibliographystyle{abbrv} \bibliography{ai2tv}
-% You must have a proper ".bib" file
-%  and remember to run:
-% latex bibtex latex latex
-% to resolve all references
-
-% ??? we'll need to do this right before submission
-% \subsection{References}
-%
-%% Generated by bibtex from your ~.bib file.  Run latex,
-%% then bibtex, then latex twice (to resolve references)
-%% to create the ~.bbl file.  Insert that ~.bbl file into
-%% the .tex source file and comment out
-%% the command \texttt{{\char'134}thebibliography}.
-
-% This next section command marks the start of
-% Appendix B, and does not continue the present hierarchy
-%% \section{More Help for the Hardy}
-%% The sig-alternate.cls file itself is chock-full of succinct
-%% and helpful comments.  If you consider yourself a moderately
-%% experienced to expert user of \LaTeX, you may find reading
-%% it useful but please remember not to change it.
-
-\balancecolumns % GM July 2000
-% That's all folks!
-\end{document}
-% ---------------------------------------------------------------
-% / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
-% / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
-% / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
+% $RCSfile$
+% $Revision$
+% $Date$
+% $Source$
+%
+%
+% ---------------------------------------------------------------------------
+% TODO:
+%
+% FINAL READ
+% - check for consistent tense
+% - query replace: ai2tv -> $\mathrm{AI}^2$TV
+% - spell check
+%
+%
+% ---------------------------------------------------------------------------
+% This is "sig-alternate.tex" V1.3 OCTOBER 2002
+% This file should be compiled with V1.6 of "sig-alternate.cls" OCTOBER 2002
+%
+% This example file demonstrates the use of the 'sig-alternate.cls'
+% V1.6 LaTeX2e document class file. It is for those submitting
+% articles to ACM Conference Proceedings WHO DO NOT WISH TO
+% STRICTLY ADHERE TO THE SIGS (PUBS-BOARD-ENDORSED) STYLE.
+% The 'sig-alternate.cls' file will produce a similar-looking,
+% albeit, 'tighter' paper resulting in, invariably, fewer pages.
+%
+% ---------------------------------------------------------------------------
+% This .tex file (and associated .cls V1.6) produces:
+%       1) The Permission Statement
+%       2) The Conference (location) Info information
+%       3) The Copyright Line with ACM data
+%       4) NO page numbers
+%
+% as against the acm_proc_article-sp.cls file which
+% DOES NOT produce 1) thru' 3) above.
+%
+% Using 'sig-alternate.cls' you have control, however, from within
+% the source .tex file, over both the CopyrightYear
+% (defaulted to 2002) and the ACM Copyright Data
+% (defaulted to X-XXXXX-XX-X/XX/XX).
+% e.g.
+% \CopyrightYear{2003} will cause 2002 to appear in the copyright line.
+% \crdata{0-12345-67-8/90/12} will cause 0-12345-67-8/90/12 to appear in the
+%  copyright line.
+%
+% ---------------------------------------------------------------------------
+% This .tex source is an example which *does* use
+% the .bib file (from which the .bbl file % is produced).
+% REMEMBER HOWEVER: After having produced the .bbl file,
+% and prior to final submission, you *NEED* to 'insert'
+% your .bbl file into your source .tex file so as to provide
+% ONE 'self-contained' source file.
+%
+% ================= IF YOU HAVE QUESTIONS =======================
+% Questions regarding the SIGS styles, SIGS policies and
+% procedures, Conferences etc. should be sent to
+% Adrienne Griscti (griscti@acm.org)
+%
+% Technical questions _only_ to
+% Gerald Murray (murray@acm.org)
+% ===============================================================
+%
+% For tracking purposes - this is V1.3 - OCTOBER 2002
+\documentclass{sig-alternate}
+\usepackage{url}
+
+\begin{document}
+
+%
+% --- Author Metadata here ---
+\conferenceinfo{ACM-MM 2004}{New York, NY USA}
+%\CopyrightYear{2001}
+% Allows default copyright year (2000) to be over-ridden - IF NEED BE.
+
+%\crdata{0-12345-67-8/90/01}
+% Allows default copyright data (0-89791-88-6/97/05) to be over-ridden - IF NEED BE.
+% --- End of Author Metadata ---
+
+% \title{Optimizing Quality for Video Sharing in Synchronous Collaboration}
+\title{Optimizing Quality for Collaborative Video Viewing}
+%
+% You need the command \numberofauthors to handle the "boxing"
+% and alignment of the authors under the title, and to add
+% a section for authors number 4 through n.
+%
+% Up to the first three authors are aligned under the title;
+% use the \alignauthor commands below to handle those names
+% and affiliations. Add names, affiliations, addresses for
+% additional authors as the argument to \additionalauthors;
+% these will be set for you without further effort on your
+% part as the last section in the body of your article BEFORE
+% References or any Appendices.
+
+\numberofauthors{4}
+%
+% You can go ahead and credit authors number 4+ here;
+% their names will appear in a section called
+% "Additional Authors" just before the Appendices
+% (if there are any) or Bibliography (if there
+% aren't)
+
+% Put no more than the first THREE authors in the \author command
+\author{
+%
+% The command \alignauthor (no curly braces needed) should
+% precede each author name, affiliation/snail-mail address and
+% e-mail address. Additionally, tag each line of
+% affiliation/address with \affaddr, and tag the
+%% e-mail address with \email.
+\alignauthor Dan Phung\\
+       \affaddr{Computer Science Department}\\
+       \affaddr{Columbia University}\\
+       \affaddr{New York City, New York}\\
+       \email{phung@cs.columbia.edu}
+\alignauthor Giuseppe Valetto\\
+       \affaddr{Computer Science Department}\\
+       \affaddr{Columbia University}\\
+       \affaddr{New York City, New York}\\
+       \affaddr{and Telecom Italia Lab}\\
+       \affaddr{Turin, Italy}
+       \email{valetto@cs.columbia.edu}
+\alignauthor Gail Kaiser \\
+       \affaddr{Computer Science Department}\\
+       \affaddr{Columbia University}\\
+       \affaddr{New York City, New York}\\
+       \email{kaiser@cs.columbia.edu}
+}
+\additionalauthors{Additional authors: Suhit Gupta {\texttt{suhit@cs.columbia.edu}}}
+\date{\parbox[b][0ex]{0em}{\hspace*{-12.5em}\raisebox{37ex}{\fbox{For
+submission to \emph{ACM-MM 2004}, due 12:00 AM EDT: April 05, 2004.}}}}
+% \date{05 April 2004}
+\maketitle
+
+\begin{abstract}
+The increasing popularity of distance learning and online courses has
+highlighted the lack of collaborative tools for student groups.  In
+addition, the introduction of lecture videos into the online
+curriculum has drawn attention to the disparity in the network
+resources used by the students.  We present an architecture and
+adaptation model called $\mathrm{AI}^2$TV (Adaptive Interactive
+Internet Team Video), a system that allows geographically dispersed
+participants, possibly some or all disadvantaged in network resources,
+to collaboratively view a video in synchrony.  $\mathrm{AI}^2$TV
+upholds the invariant that each participant will view semantically
+equivalent content at all times. Video player actions, like play,
+pause and stop, can be initiated by any of the participants and the
+results of those actions are seen by all the members.  These features
+allow group members to review a lecture video in tandem to facilitate
+the learning process.  We employ an autonomic (feedback loop)
+controller that monitors clients' video status and adjusts the quality
+of the video according to the resources of each client.  We show in
+experimental trials that our system can successfully synchronize video
+for distributed clients while at the same time optimizing the video
+quality, given actual (fluctuating) bandwidth, by adaptively adjusting
+the quality level for each participant.
+\end{abstract}
+
+% A category with the (minimum) three required fields
+\category{C.2.4}{Distributed Systems}{Client/server, Distributed applications}
+\category{D.2.8}{Software Engineering}{Metrics -- performance measures}
+\category{H.5.1}{Information Interfaces and Presentation}{Multimedia Information Systems}
+\category{H.5.3}{\\Group and Organization Interfaces}{Computer-\\supported cooperative work, Synchronous interaction}
+\category{K.3.1}{Computer Uses In Education}{Collaborative learning, Distance learning}
+
+\terms{Algorithms, Measurement, Performance, Experimentation, Human
+Factors}
+
+\keywords{Synchronized Collaborative Video, Autonomic Controller}
+
+% -------------------------------------------------- %
+% FIGURES AT THE FRONT
+% -------------------------------------------------- %
+%% \begin{figure}
+%%   \centering
+%%   \epsfig{file=vidframes.eps, width=8cm}
+%%   \caption{Semantic video compression hierarchy.}
+%%   \label{vidframes}
+%% \end{figure}
+
+%% \begin{figure}
+%%   \centering
+%%   \epsfig{file=vidframes.eps, width=8cm}
+%%   \caption{Semantic video compression hierarchy.}
+%%   \label{vidframes}
+%% \end{figure}
+
+%% \begin{figure}
+%%   \centering
+%%   \epsfig{file=ai2tvarch.eps, width=8cm}
+%%   \caption{ai2tv Architecture}
+%%   \label{ai2tv_arch}
+%% \end{figure}
+
+
+%% \begin{figure}
+%%  \centering
+%%  \epsfig{file=refarch.eps, width=8cm}
+%%   \label{refarch}
+%%  \caption{Conceptual Reference Architecture}
+%% \end{figure}
+
+
+%% \begin{figure}
+%%   \centering
+%%   \hspace*{-5mm}
+%%   \epsfig{file=ljil.eps, width=8cm}
+%%   \caption{Workflow diagram }
+%%   \label{ljil}
+%% \end{figure}
+
+
+% -------------------------------------------------- %
+
+
+% tech report number CUCS-009-04
+\section{Introduction}
+
+Distance learning programs such as the Columbia Video Network have
+evolved from fedexing lecture video tapes to their off-campus students
+to instead streaming the videos over the Internet, which is also done
+by the Stanford Center for Professional Development.  The lectures are
+sometimes delivered ``live'', while in progress on campus, but
+frequently are post-processed and packaged for students to watch (and
+re-watch) at their convenience.  This introduces the possibility of
+forming ``study groups'' among off-campus students who view the
+lecture videos together, and pause the video for discussion when
+desired, thus approximating the pedagogically valuable discussions of
+on-campus students.  Although the instructor is probably not available
+for these discussions, this may be an advantage, since on-campus
+students are rarely afforded the opportunity to pause, rewind and
+fast-forward their instructors' lectures.
+
+However, what we call {\em collaborative video viewing} by multiple
+geographically dispersed users is not yet supported by conventional
+Internet-video technology.  It is particularly challenging to support
+WISIWYS (what I see is what you see) when some of the users are
+relatively disadvantaged with respect to bandwidth (e.g., dial-up
+modems) and local computer resources (e.g., archaic graphics cards,
+small disks).  We have adopted technology developed by others (Liu
+and Kender \cite{TIECHENG}) for ``semantically compressing'' MPEG2
+videos into sequences of still JPG images.  This technology
+automatically selects the most semantically meaningful frames to show
+for each time epoch, and can generate different sequences of JPG
+images for a range of different compression (bandwidth) levels.  This
+approach works very well for typical lecture videos, where it is
+important, for instance, to see what the instructor has written on the
+blackboard after he/she stands aside, but probably not so important to
+see the instructor actually doing the writing, when his/her hand and
+body may partially occlude the blackboard.
+
+The remaining technical challenge is {\em synchronizing} the
+downloading and display of the image sequences among each of the
+distributed user clients, including support for shared video player
+actions such as pause, rewind, etc.  Further, if student groups do
+indeed sometimes pause the videos, or rewind to a point already
+available in local buffers (caches), it is desirable to take advantage
+of the then-idle network connection to prefetch future images at a
+higher quality level.
+
+We have developed an approach to achieving this, using three
+mechanisms working in tandem.  First, the software clocks of the video
+clients are synchronized using NTP.  This time is used for reference
+within the image sequences, where each image is associated with its
+start and end times relative to the beginning of the sequence.
+Second, the video clients communicate with each other over a
+distributed publish-subscribe event bus, which propagates video
+actions taken by one user in the group to all the other users in the
+group.  Thus any user can select a video action, not just a
+``leader''.
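+
+As a concrete illustration, a player action event might carry the
+following fields (hypothetical Java; the actual event schema is an
+implementation detail):
+
+\begin{verbatim}
+// Sketch: a time-stamped player action
+// published on the event bus; every client
+// subscribes and applies it relative to the
+// common NTP time base.
+class VideoAction {
+  String action;      // "play", "pause", ...
+  long videoTimeSec;  // position argument
+  long issuedAtMs;    // NTP-synced timestamp
+}
+\end{verbatim}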
+
+Finally, the main innovation of this research concerns optimizing
+video quality in this context: A decentralized feedback control loop
+dynamically adjusts each video client's choice of both next image to
+display and also next image to retrieve from the semantic compression
+levels available.  The controller relies on sensors embedded in each
+client to periodically check what image is currently displaying,
+whether this image is ``correct'' for the current NTP time compared to
+what other clients are viewing, which images have already been
+buffered (cached) at that client, and what is the actual bandwidth
+recently perceived at that client.  Actuators are also inserted into
+the video clients, to modify local configuration parameters on
+controller command. The controller utilizes detailed information about
+the image sequences available at the video server, including image
+start and stop times (both the individual images and their start and
+stop times tend to be different at different compression levels), but
+unlike local client data, video server data is unlikely to change
+while the video is showing.  A single controller is used for all
+clients in the same user group, so it can detect ``skew'' across
+multiple clients, and may reside on the video server or on another
+host on the Internet.
+
+In the next section, we further motivate the collaborative video
+viewing problem, provide background on the semantically compressed
+video repository, and explain the technical difficulties of optimizing
+quality while synchronizing such semantically compressed videos. The
+following section presents our architecture and dynamic adaptation
+model, and its implementation in $\mathrm{AI}^2$TV (Adaptive
+Interactive Internet Team Video).  In the Evaluation section, we
+describe the criteria used to evaluate the effectiveness of our
+approach, and show empirical results obtained when applied to real
+lecture videos distributed for a recent Columbia Video Network
+course. We compare to related work, and then summarize our
+contributions.
+
+\section{Motivation and Background} \label{background}
+% - discuss other projects on multiple client synchronization
+The increasing popularity of online education has prompted many
+universities to begin providing online certification tracks that use
+pre-taped on-campus lectures.  Traditionally, online courses are
+provided through a Web portal interface, such as those at the
+University of Phoenix or Capella University, which are designed for
+individual students with a self-motivated learning style.
+In contrast, in many on-campus courses, students are encouraged to
+study in groups in order to better learn the material.  For off-campus
+students viewing the pre-taped lecture, this practice is difficult
+because of their separation by geographic distance.  Support of
+synchronous collaboration remains a major concern in courses where
+group work is encouraged \cite{WELLS}, yet there are few educational
+tools that allow synchronous collaboration across a group of online
+students \cite{BURGESS}.
+
+There are some available tools that allow online collaboration,
+though they may not be geared specifically for educational purposes.
+Some of these tools use asynchronous forms of communication, such as
+bulletin boards and discussion groups.  Other tools, such as instant
+messaging, application and desktop sharing \cite{WEBEX, VNC}, and
+co-browsing \cite{CAPPS, LIEBERMAN, SIDLER} facilitate the
+communicative aspects of synchronous collaboration but are not
+designed specifically for educational purposes and thus can be cumbersome
+when used as such.
+
+% - ai2tv project goals
+$\mathrm{AI}^2$TV contributes to the area of synchronous collaboration
+support, and more specifically, to collaborative video sharing.  The
+work presented in this paper is a part of a larger effort to provide a
+collaborative virtual environment for distributed team study.  That
+project's goal is to provide support for the collaborative use of
+multimedia content relevant to the work carried out by a team of users
+involved in the same project, such as audio/video recordings of
+lectures, meetings, workshops and other informational and educational
+events.  In this paper, we focus on the aspect of video sharing for
+synchronous collaboration.
+
+Viewing video on the Internet usually requires relatively high
+bandwidth, and lossy network connections can lead to lost
+video content.  For group review of lecture videos, the lost content
+may severely disrupt the progression of the group.  Furthermore, the
+differences in network and computing resources available to dispersed
+users in the same collaboration group can prevent some students from
+participating.  Collaborative video sharing poses a twofold problem:
+on the one hand, it is mandatory to keep all users synchronized with
+respect to the content they are supposed to see at any moment during
+play time; on the other hand, it is important to provide each
+individual user with a frame rate that is optimized with respect to
+the user's available resources, which may vary during the course of
+the video.
+
+One solution to the problem of balancing the group synchronization
+requirement with the optimization of individual viewing experiences is
+to use videos with cumulative layering \cite{MCCANNE}, also known as
+scalable coding \cite{LI}.  In this approach, the client video player
+selects a quality level appropriate for that client's resources from a
+hierarchy of several different encodings or frame rates for that
+video. Thus a client could receive an appropriate quality of video
+content while staying in sync with the other members of the group.
+
+% - describe overview of semantic compression tool used
+In $\mathrm{AI}^2$TV, we use semantic summarization to produce a video
+with cumulative layering.  The semantic summarization package
+developed at Columbia University by Liu and Kender \cite{TIECHENG}
+reduces a video to a set of semantically significant key frames.  That
+tool operates on MPEG format videos and outputs sequences of JPG
+frames, some of which are displayed in figure \ref{sem_video}.  Its
+semantic compression algorithm profiles video frames within a sliding
+time window and selects key frames that have the most semantic
+information.  Increasing the size of the window makes each key frame
+represent a larger time slice, so a larger window setting produces
+fewer key frames than a smaller one.
+
+\begin{figure}
+  \centering
+  \epsfig{file=vidframes.eps, width=8cm}
+  \caption{Semantic Video Scenario}
+  \label{sem_video}
+\end{figure}
+
+A conceptual diagram of a layered video produced from semantic
+compression is shown in figure \ref{sem_video}.  Note that the
+semantic compression algorithm produces a random distribution of key
+frames, hence the video produced by the package plays back at a
+variable frame rate.  The variability in the frame rate implies that
+there are pockets of relatively high-frequency semantic change, i.e.,
+sections of the video that demand a higher frame rate.  The
+variable frame rate video adds complexity to the bandwidth demands of
+the client, yet its semantic focus ensures that relevant content is
+unlikely to get lost, which is a significant property in the context
+of an educational application.
+
+Also, in figure \ref{sem_video}, the bottom-left inset shows the
+juxtaposition of individual frames from two different quality levels.
+Each frame has a representative time interval \texttt{[start:end]}.
+For the higher level, Frame 1a represents the interval from 1:00 to
+1:03, and Frame 1b represents the interval from 1:04 to 1:10.  For the
+lower level, Frame 2 represents the entire interval from 1:00 to 1:10.
+In this diagram, Frame 2 is semantically equivalent to Frame 1a and
+1b, though in reality, the start and end times would not match up as
+ideally as in our example.
+
+Through the use of the $\mathrm{AI}^2$TV video, we can provide
+semantically equivalent content to several clients with diverse
+resources by adjusting the compression level assigned to each client
+while the user is watching the video.  Thus, for our purposes, video
+synchronization boils down to showing semantically equivalent frames
+at any given time.
+
+To adjust the clients in response to the changing environment, we use
+an autonomic controller to maintain the synchronization of the group
+of video clients while fine tuning the video quality for each client.
+The term \textit{autonomic} is borrowed from IBM to mean a
+self-managing system that uses a (software) feedback control
+loop.  Their terminology applies to the self-management of
+data centers whereas our application applies to the novel domain of
+multi-user video synchronization.  In \cite{RAVAGES}, we proposed the
+idea of using an autonomic controller to support group video
+synchronization and other multimedia applications.
+
+The autonomic controller remains conceptually separate from the
+controlled $\mathrm{AI}^2$TV video system and employs a software-based
+workflow engine named Workflakes \cite{ICSE}.  Note that the workflow
+used here coordinates the behavior of software entities, as opposed to
+human-oriented workflow systems.  The use of software-based workflow
+for the specification and enactment of the plan that coordinates
+actuators follows Wise et al. \cite{OSTERWEIL}, among others.
+The Workflakes engine has been developed for and used in a variety of
+domains \cite{AMS,ICSE}, in which it orchestrates the work of software
+entities to achieve the fully automated dynamic adaptation of
+distributed applications.  The design of the autonomic controller is a
+part of an externalized autonomic computing platform proposed by
+Kaiser \cite{REFARCH}.  In the context of $\mathrm{AI}^2$TV,
+Workflakes coordinates the adjustment of the compression level
+assigned to each client along the hierarchy of the $\mathrm{AI}^2$TV
+video.
+
+% (FIGURE: semantic compression )
+% (FIGURE: key frames hierarchy )
+
+\section{Architecture and Adaptation\\ Model}
+\subsection{System Architecture}
+% Design of a the system in general
+Our system involves several major components: a video server, video
+clients, an externalized autonomic controller, and a common
+communications infrastructure, as seen in figure \ref{ai2tv_arch}.
+
+\begin{figure}
+  \centering
+  \epsfig{file=ai2tvarch.eps, width=8cm}
+  \caption{$\mathrm{AI}^2$TV Architecture}
+  \label{ai2tv_arch}
+\end{figure}
+
+%(FIGURE: ai2tv synchronization arch)
+% video server
+The video server provides the educational video content to the clients
+for viewing.  The provided content has the form of an
+$\mathrm{AI}^2$TV video, i.e., a hierarchy of video versions produced
+by running the semantic compression tool multiple times with settings
+for different compression levels, which yields several sets of JPG
+frames indexed by a frame index file.  The task of the video server is
+simply
+to provide remote download access to the frames and the index file
+over HTTP.
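+
+For illustration, an entry in the frame index might record, for each
+quality level, a frame's file name and the video-time interval it
+represents; the exact layout shown here is hypothetical and may
+differ from the tool's actual output:
+
+\begin{verbatim}
+# level   file             start  end  (seconds)
+  1       lvl1/f0060.jpg   60     63
+  1       lvl1/f0064.jpg   64     70
+  2       lvl2/f0060.jpg   60     70
+\end{verbatim}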
+
+% video client
+The task of video clients is to acquire video frames, display them at
+the correct time, and provide a set of basic video functions.  Taking
+a functional design perspective, the client is composed of three major
+modules: a video display, a video buffer and manager for fetching and
+storing downloaded frames, and a time controller.
+
+The video display renders the JPG frames into a window for display and
+provides a user interface for play, pause, goto, and stop.  When any
+participant initiates one of these actions, all the other group
+members receive the same command, so all the video player actions
+are synchronized.  The video display knows which frame to display by
+using the current video time and display quality level to index into
+the frame index for the representative frame.  Before trying to render
+the frame, it asks the video buffer manager if the needed frame is
+available.  The video display also includes a control entity that
+enables external entities, like the autonomic controller, to adjust
+the current display quality level.
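+
+As a minimal sketch of this lookup, assuming a hypothetical in-memory
+representation of the frame index (the actual client structures may
+differ):
+
+\begin{verbatim}
+// Hypothetical sketch: resolve video time and quality level
+// to the representative frame.
+class FrameEntry {
+    String file;     // JPG file name
+    int start, end;  // represented interval, in seconds
+    FrameEntry(String file, int start, int end) {
+        this.file = file; this.start = start; this.end = end;
+    }
+}
+
+class FrameIndex {
+    // frames.get(level) holds that level's entries,
+    // sorted by start time
+    java.util.List<java.util.List<FrameEntry>> frames;
+
+    // Return the frame covering videoTime at the given level,
+    // or null if no frame represents that instant.
+    FrameEntry lookup(int level, int videoTime) {
+        for (FrameEntry f : frames.get(level))
+            if (f.start <= videoTime && videoTime <= f.end)
+                return f;
+        return null;
+    }
+}
+\end{verbatim}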
+
+The video buffer and its manager constitute the downloading daemon
+that continuously downloads frames at a certain level.  The manager
+keeps a hash of the available frames and a count of the current
+reserve frames (frames
+buffered) for each quality level.  The buffer manager also includes a
+control hook that enables external entities to adjust the current
+downloading quality level.
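+
+The corresponding bookkeeping can be pictured as follows; the names
+and structures are again illustrative only:
+
+\begin{verbatim}
+// Illustrative bookkeeping only; the real manager also runs
+// the download loop and exposes the control hook.
+class BufferState {
+    // frames already downloaded, keyed by file name
+    java.util.Set<String> cached =
+        new java.util.HashSet<String>();
+    // reserve (buffered-ahead) frame count per quality level
+    int[] reserve = new int[5];
+
+    boolean isAvailable(String file) {
+        return cached.contains(file);
+    }
+}
+\end{verbatim}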
+
+The time controller's task is to ensure that a common video clock is
+maintained across clients.  It relies on NTP \cite{NTP} to synchronize
+the system's software clock, thereby ensuring a common time base that
+each client can reference for its video clock.  The task of each
+client is then to play the frames at the correct time; since all the
+clients refer to the same time base, all the clients show
+semantically equivalent frames.
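+
+In essence, for a session whose play was started (or last
+repositioned) at NTP time $t_{start}$, each client computes its video
+clock as $t_{video} = t_{NTP} - t_{start}$, so all clients agree on
+$t_{video}$ to within the accuracy of the NTP synchronization.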
+
+% autonomic controller
+The task of the autonomic controller is to ensure that the clients
+within a video session stay synchronized and that each client plays at
+its highest attainable quality level.  The controller is a distributed
+system, whose design derives from a conceptual reference architecture
+for externalized autonomic computing platforms proposed by Kaiser
+\cite{REFARCH}, which is shown in figure \ref{refarch}. The
+architecture provides an end-to-end closed control loop, in which
+sensors attached to a generic (possibly legacy) target system
+continuously collect and send streams of data to gauges.
+analyze the incoming data streams and respond to conditions that need
+adaptation by relaying that information to controllers.  The
+controllers coordinate the expression and orchestration of the
+workflow needed to carry out the adaptation.  To close the loop,
+actuators at the target system effect the needed adjustments under the
+supervision of the controller.
+
+%
+%(figure of ref arch here).
+%
+
+\begin{figure}
+ \centering
+ \epsfig{file=refarch.eps, width=8cm}
+ \caption{Conceptual Reference Architecture}
+ \label{refarch}
+\end{figure}
+
+
+The sensors provide the autonomic controller with data about the
+clients such as video display quality level, the buffer quality level,
+the buffer reserve frames, the currently displayed frame and the
+current bandwidth.  Gauges are embedded together with the coordination
+engine for expediency of design and to minimize the communication
+latency between them.  They receive the sensor reports from individual
+clients, collect them in buckets, similar to the approach in
+\cite{MIMAZE}, and pass the bucket data structure to the coordination
+engine.  The coordination engine directs the flow of the information
+through a predefined workflow plan described in the next section.
+
+During the evaluation of the data, a set of helper functions tailored
+specifically to the application is used to produce the triggers for
+the coordinator.  If a trigger is raised, the coordination engine
+enacts an adaptation scheme, which is executed on the end hosts
+through hooks provided to the actuators by the end systems.
+
+% communications
+The infrastructure used for the communications among the video
+clients, as well as between the $\mathrm{AI}^2$TV system and the
+autonomic controller, is provided by an event bus based on the
+publish/subscribe paradigm.  We chose this communication model because
+it inherently decouples the physical location of the communicating
+entities.  Events transmitted onto that event bus are of three kinds:
+video player actions, sensor reports, and adaptation directives (see
+figure \ref{ai2tv_arch}).  Video player
+actions pertain to the functionality of the $\mathrm{AI}^2$TV system,
+since they represent commands issued on a video client, such as pause,
+play or stop, which need to be propagated to all clients in the group
+to enforce the same behavior.  All video player actions are time
+stamped so that clients can respond to those commands in reference to
+the common time base.
+
+\subsection{Adaptation Model}
+
+The adaptation scheme operates at two levels: a higher-level data
+flow and a lower-level adjustment heuristic.  The former directs the
+flow of data through a logical sequence to provide a formal decision
+process, while the latter provides the criteria for when to make
+certain adjustments.
+
+The higher-level logic is shown in figure \ref{ljil}, expressed in the
+Little-JIL graphic workflow specification language \cite{LJIL}.  The
+diagram shows the task decomposition hierarchy according to which the
+adaptation workflow unfolds.  Note that the evaluation of clients'
+state with respect to the group (\texttt{EvaluateClient}) and the
+issuing of adaptation directives (\texttt{AdaptClient}) are carried
+out as a set of parallel steps.  Also note that the multiplicity of
+those parallel steps is dynamically determined by the number of
+entries in the \texttt{client} variable, which maps to a collection of
+$\mathrm{AI}^2$TV clients.
+
+%
+%add Figure with AI2TV workflow diagram here
+%
+
+\begin{figure}
+  \centering
+  \hspace*{-5mm}
+  \epsfig{file=ljil.eps, width=8cm}
+  \caption{$\mathrm{AI}^2$TV Workflow diagram }
+  \label{ljil}
+\end{figure}
+
+The adaptation scheme at the lower level falls into two categories:
+directives that adjust the client in response to relatively low
+bandwidth situations, and those that take advantage of relatively high
+bandwidth situations.
+
+In the situation where a client has relatively low bandwidth, the
+client may not be able to download the next frame at the same quality
+level in time.  In this situation, both the display and the buffer
+quality levels are reduced by one level.  If the client is already at
+the lowest level, the controller will calculate the next frame whose
+download it can successfully complete in time, in order to remain
+synchronized with the rest of the group, and will ask the client to
+jump ahead to that frame.
+
+To take advantage of relatively high bandwidth situations, the buffer
+manager will start to accumulate a reserve buffer.  Once the buffer
+reaches a threshold value (for example, 10 buffered frames), the
+autonomic controller will direct the buffer manager to start fetching
+frames at a higher quality level.  Once a sufficient reserve has also
+accumulated at that higher level, the client is ordered to display
+frames at that quality level.  If the bandwidth drops before the
+buffer manager can accumulate enough frames in the higher-level
+reserve, then the buffer manager is dropped back down one level.
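+
+The following sketch summarizes the two heuristics for a single
+client; the threshold constant and all helper methods are assumptions
+made for this illustration, not the controller's actual code:
+
+\begin{verbatim}
+// One control cycle for one client.  Level numbers grow
+// with quality; RESERVE_THRESHOLD is, e.g., 10 frames.
+void adapt(ClientState c) {
+    if (!c.canFetchNextFrameInTime()) {
+        if (c.bufferLevel() > c.displayLevel()) {
+            // bandwidth dropped while prefetching a higher
+            // level: abandon the upgrade attempt
+            c.setBufferLevel(c.bufferLevel() - 1);
+        } else if (c.displayLevel() > LOWEST_LEVEL) {
+            // low bandwidth: drop both levels by one
+            c.setDisplayLevel(c.displayLevel() - 1);
+            c.setBufferLevel(c.bufferLevel() - 1);
+        } else {
+            // already at the lowest level: jump ahead to the
+            // next frame that can arrive by its start time,
+            // so the client stays in sync with the group
+            c.jumpTo(c.nextReachableFrame());
+        }
+    } else if (c.reserve(c.bufferLevel()) >= RESERVE_THRESHOLD) {
+        if (c.bufferLevel() == c.displayLevel())
+            // healthy reserve: start prefetching one level up
+            c.setBufferLevel(c.bufferLevel() + 1);
+        else
+            // the higher level also has a reserve: promote
+            // the display to it
+            c.setDisplayLevel(c.bufferLevel());
+    }
+}
+\end{verbatim}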
+
+%\section{Implementation} \label{implementation}
+
+The video client is implemented in Java and uses the
+\texttt{javax.swing} package to render the JPG images.  The autonomic
+controller, Workflakes, is also Java-based, and is built on top of the
+open-source Cougaar multi-agent system \cite{COUGAAR}, which
+Workflakes adapts to operate as a decentralized workflow engine.  We
+used the Little-JIL graphic workflow specification language to specify
+the adaptation workflow \cite{LJIL}.  We chose a content-based
+publish-subscribe event system, Siena, as our communication bus
+\cite{SIENA}.
+
+% \comment{how many lines of code?}
+
+\section{Evaluation} \label{eval}
+
+We assess our system by evaluating its ability to synchronize the
+clients and its ability to adjust the clients' video quality.  The
+evaluation results presented in this section were computed from a set
+of client configurations, specifically 1, 2, 3, and 5 clients running
+a video for 5 minutes and probing system state every 5 seconds. The
+compression hierarchy we employed has 5 different levels.
+
+For our evaluation, we define a baseline client against which the
+performance of our approach can be compared.  A baseline client is a
+client whose quality level is set at the beginning of the video and
+not changed thereafter.  To define the baseline client, we use a value
+that we identify as the average bandwidth per level. This value is
+computed by summing the total size in bytes of all frames produced at
+a certain compression level and dividing by the total video time.
+This value provides the bandwidth needed on average for the buffer
+controller to download the next frame on time.  We provide the
+baseline client with the needed bandwidth for its chosen level by
+using a bandwidth throttling tool \cite{SHAPERD} to adjust the
+bandwidth to that client from the video server.  Note that using the
+average as the baseline does not account for changes in the video
+frame rate and fluctuations in network bandwidth, which are situations
+in which adaptive control can make a difference.
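+
+Formally, if level $\ell$ consists of frames of size $s_{1,\ell},
+\ldots, s_{n,\ell}$ bytes and the total video time is $T$ seconds,
+the average bandwidth per level is $B_{\ell} = \frac{1}{T} \sum_{i}
+s_{i,\ell}$ bytes per second.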
+
+When carrying out the evaluation, each controller-assisted client is
+assigned an initial level in the compression hierarchy and the same
+bandwidth as the baseline client for that hierarchy level.  At the end
+of each experiment, we record any differences between the behavior of
+the clients adapted by the autonomic controller and the behavior of
+the baseline client, with respect to synchrony and quality of service
+(frame rate).
+
+%% To evaluate our system, we produced an $\mathrm{AI}^2$TV video that had 5 quality
+%% levels.  For a 17 minute video and five different window lengths, the
+%% total number of frames are 165, 71, 39, 21, and 13.  Our choice of the
+%% relatively low frame rate quality levels was influenced by the goal of
+%% the system being used by clients with low bandwidth resources.
+
+% the pathetic average frame rates (per minute!!!):
+%% 3.399831413 - high
+%% 1.46295776
+%% 0.806289939
+%% 0.434237734
+%% 0.268763313 - low
+
+\textit{Evaluating Synchrony}
+
+A major goal of the system is to provide synchronous viewing to all
+clients.  To measure the effectiveness of the synchrony, we probe the
+clients at periodic time intervals and log the frame currently being
+displayed.  This procedure effectively takes a snapshot of the system,
+which we can evaluate for correctness.  This evaluation proceeds by
+checking whether the frame being displayed at a certain time
+corresponds to one of the valid frames at that time, on any arbitrary
+level.  We allow any arbitrary level because the semantic compression
+algorithm ensures that all frames at a certain time will contain the
+same semantic information if the semantic windows overlap.  We score
+the system by summing the number of clients not showing an acceptable
+frame and normalizing over the total number of clients.  A score of 0
+indicates a synchronized system.
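+
+Formally, if $m$ of the $N$ clients in a snapshot are not displaying
+a valid frame for the probe time, that snapshot receives a score of
+$m/N$.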
+
+Our experiments for the evaluation of synchronization initially
+involved groups of clients that were set to begin playing the test
+video at different levels in the compression hierarchy, and were
+assigned the corresponding baseline bandwidth. In those experiments,
+the results show a total score of 0 for all trials. Also,
+notwithstanding the variations in the frame rate and/or occasional
+fluctuations in the actual bandwidth of the clients, no frames were
+missed.  This result demonstrates that the chosen baseline
+combinations of compression levels and throttled bandwidths do not
+push the clients beyond their bandwidth resource capacity.
+
+We also ran another set of experiments, in which the clients in the
+group were assigned more casually selected starting bandwidth levels.
+This casual selection is representative of a real-world
+situation, like listening to Internet radio, where users must choose a
+desired frame rate to receive.  The user may have been informed that
+she is allocated a level of bandwidth from her Internet service
+provider, but she may actually be receiving a significantly lower
+rate.  We ran experiments first without the aid of the autonomic
+controller and then with it. In the former case, clients with
+insufficient bandwidth were stuck at the compression level originally
+selected, and thus missed an average of 63\% of the needed frames.  In
+the latter case, the same clients only missed 35\% of the needed
+frames.  These results provide evidence of the benefits of the
+adaptive scheme implemented by the autonomic controller.
+
+%% selected, and thus only displayed an average of 37\% of the needed
+%% frames.  In the latter case, the same clients received 65\% of the
+%% needed frames.  These results provide evidence of the benefits of the
+%% adaptive scheme implemented by the autonomic controller.
+
+
+%% \begin{figure}
+%%   \centering
+%%   \hspace*{-5mm}
+%%   \epsfig{file=scores.eps, width=9cm}
+%%   \caption{Comparison of weighted scores}
+%%   \label{scores}
+%% \end{figure}
+
+\textit{Evaluating Quality of Service}
+
+A primary goal of the $\mathrm{AI}^2$TV system is to increase the
+video quality for the clients.  With respect to the evaluation of
+video quality of service, Liu et al. describe several local metrics
+such as frame rate, loss rate, delay jitter, image resolution, and
+human spatial-temporal contrast-sensitivity \cite{LIU2003}.  We do not
+address global metrics such as fairness, as described in
+\cite{LIU2003}.  For our situation, we focus on frame rate as a
+measure of video quality.
+
+To attain a quantitative measure of the quality of service provided by
+a client assisted by the autonomic controller, we use a scoring system
+relative to the baseline client's quality level.  We give a weighted
+score for each level above or below the baseline quality level.  The
+weighted score is calculated as the ratio of the frame rate of the two
+levels.  So, for example, if a client is able to play at one level
+higher than the baseline, and the baseline plays at an average of
+\texttt{n} fps while the higher level plays at \texttt{2*n} fps, the
+score given for playing at the higher level is 2.  The weighted score
+is calculated from the computed average frame rates of the chosen
+quality levels.  Theoretically, the baseline client should receive a
+score of 1.
+scoring systems \cite{BAQAI,CORTE,CONWAY2000} measure unrelated
+factors such as the synchronization between different streams (audio
+and video), image resolution, or human perceived quality, and are not
+restricted by the group synchronization requirement.  This restriction
+mandates that a scoring system be sensitive to the relative
+differences between quality hierarchies.
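+
+In other words, a client playing at a level whose average frame rate
+is $f$ receives a weighted score of $f/f_{base}$, where $f_{base}$ is
+the average frame rate of the baseline level.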
+
+% qos results
+The evaluation of the quality of service experiments shows that the
+baseline clients achieved a group score of 1 (as expected) while the
+clients assisted by the autonomic controller achieved a group score of
+1.25.  The one-tailed t-score of this difference is 3.01, which is
+significant for an $\alpha$ value of .005 (N=17).  This result
+demonstrates that, using the autonomic controller, we are able to
+achieve a significant positive difference in the quality of service.
+Note that the t-score does not measure the degree of the positive
+difference achieved by the autonomic controller.  To demonstrate the
+degree of benefit of using the autonomic controller, we measure the
+proportion of additional frames that each client maintained by the
+controller is able to enjoy.  We found that, overall, those clients
+received 20.4\% ($\pm$ 9.7, N=17) more frames than the clients
+operating at a baseline rate.
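+
+(This proportion is computed as $(F_{c} - F_{b})/F_{b}$, where
+$F_{c}$ and $F_{b}$ are the numbers of frames received by the
+controller-assisted client and the baseline client, respectively.)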
+
+% risk assessment
+Running the client close to or above the average bandwidth needed puts
+the client at risk of missing more frames, because the autonomic
+controller is trying to push the client to a better but more
+resource-demanding level.  To measure whether the controller-assisted
+client is exposed to a higher risk of missing frames, we also count
+the number of missed frames during a video session.  The missed-frame
+score is a simple count of the missed frames.  Note that this score is
+kept separate from the measure of relative quality to discriminate
+between levels of concern, though both indicate a characteristic of
+quality of service.
+
+In this assessment of the risk of optimizing the frame rate, we found
+that there was only one instance in which a controller-assisted client
+missed two consecutive frames.  Upon closer inspection, the time
+region during this event showed that the video demanded a higher frame
+rate while the network bandwidth assigned to that client was
+relatively low.  The client was able to consistently maintain a high
+video quality level after this time.
+
+% calculation used for the 20% number I got up there.
+% baselineFrames = number of frames base client gets
+% wfFrames = number of frames the wf client gets
+% (wfFrames - baselineFrames) / baselineFrames = proportion of frames higher
+%                                                then the baseline client
+
+Though in some cases using this system without the autonomic
+controller may be sufficient, in most cases varying network bandwidth
+and the variable frame rate of the video do not permit the client to
+make an informed decision about the most appropriate quality level
+for the next frames.
+its quality level to current bandwidth resources will not be able to
+offer a level of quality appropriate to the client's resources.  To
+address these issues, the autonomic controller provides an additional
+adaptive element to the clients.  We show in these experiments that
+the autonomic controller makes a significant positive difference in
+aiding the client in achieving a higher quality level.
+
+\section{Related Work} \label{related}
+Stream synchronization is a widely studied topic in multimedia
+research.  Synchronization schemes can be classified by whether they
+are local or distributed (i.e., one or multiple sinks), whether they
+take action reactively or proactively, and whether they require the
+notion of a global clock.  Our work does not deal with the problem of
+inter-media synchronization of multiple modalities (i.e., video and
+audio) within a multimedia stream, where the concern is to ensure the
+correct playback of related data originating from different streams.
+Our problem is related to intra-stream synchronization, which is
+concerned with ensuring the temporal ordering of data packets
+transmitted across a network from a single streaming source to one or
+more delivery sinks.
+
+Most intra-stream synchronization schemes are based on data buffering
+at the sink(s) and on the introduction of a delay before the play-out
+of buffered data packets (i.e., frames).  Those synchronization
+schemes can be rigid or adaptive \cite{Clark92}.  In rigid schemes,
+such as \cite{Ferrari}, the play-out delay is chosen a priori in such
+a way that it accounts for the maximum network transfer delay that can
+likely occur across the sinks.  Rigid schemes work under a worst-case
+scenario assumption and accept the introduction of delays that may be
+longer than necessary, in order to maximize the synchronization
+guarantees they can offer even in demanding situations.
+
+Contrary to a rigid approach, adaptive schemes \cite{ASP,Lancaster,FSP}
+recompute the delay parameter continuously while streaming: they try
+to ``guess'' the minimum delay that can be introduced while still
+ensuring synchronization under actual operating conditions.  In order
+to enhance quality of service in terms of minimized play-out delay,
+those schemes must accept some temporary synchronization
+inconsistencies and/or some data loss, in case the computed delay
+proves at times insufficient (due, for example, to variations in the
+conditions of the network) and needs to be corrected on the fly.
+
+Our approach to synchronization can be classified as a distributed
+adaptive scheme that employs a global clock and operates in a
+proactive way.  The main difference with respect to other approaches,
+such as the Adaptive Synchronization Protocol \cite{ASP}, the work of
+Gonzalez and Abdel-Wahab \cite{GONZALEZ}, or that of Liu and El
+Zarki \cite{LIU} (which can all be used equally for inter- and
+intra-stream applications) is that it is not based on the idea of
+play-out delay.  Instead, it takes advantage of layered semantic
+compression coupled with buffering to ``buy more time'' for clients that
+might not be able to remain in sync, by putting them on a less
+demanding level of the compression hierarchy.
+
+To ensure stream synchronization across a group of clients, it is
+usually necessary to implement some form of trade-off impacting the
+quality of service of some of the clients.  Many schemes trade longer
+delays for synchronization, while other approaches, like the Concord
+local synchronization algorithm \cite{Concord}, allow a choice among
+other quality parameters besides delay, such as packet loss rate.
+Our approach sacrifices frame rates to achieve synchronization
+when resources are low.
+
+Liu et al. provide a comprehensive summary of the mechanisms used in
+video multicast for quality and fairness adaptation as well as network
+and coding requirements \cite{LIU}.  To frame our work in that
+context, our current design models a single-rate server adaptation
+scheme to each of the clients because the video quality we provide is
+tailored specifically to that client's network resources.  The focus
+of our work is directed towards client-side, end-user perceived
+quality and synchrony, so we did not utilize the most efficient server
+model.  We believe that it would be trivial to substitute in a
+simulcast server adaptation model \cite{CHEUNG,LI}.  Our design also
+fits into the category of layered adaptation.  This adaptation model
+defines a base quality level that users must achieve.  Once users have
+acquired that level, the algorithm attempts to incrementally acquire
+more frames to present a higher quality video.  In the work presented
+here, the definition of quality translates to a higher frame rate.
+Liu's discussion of bandwidth fairness, coding techniques, and network
+transport perspectives lies outside the scope of this paper.
+
+With respect to the software architecture, our approach most resembles
+the Lancaster Orchestration Service \cite{Lancaster} since it is based
+on a central controller that coordinates the behavior of remotely
+controlled units (i.e., the $\mathrm{AI}^2$TV video buffer and
+manager) placed within the clients via appropriate directives.  Their
+approach employs the adaptive delay-based scheme described above,
+hence the playback of video focuses on adapting to the lowest
+bandwidth client.  That approach would degrade the playback experience
+of the other participants to accommodate the lowest bandwidth client.
+Our approach differs by allowing each client to receive video quality
+commensurate with its bandwidth resources.
+
+Cen et al. provide a distributed real-time MPEG video/audio player
+that uses a software feedback loop between a single server and a
+single client to adjust frame rates \cite{CEN}.  Their architecture
+provides the feedback logic within each video player and does not
+support synchronization across a group of players, while the work
+presented here provides the adaptation model within a central
+controller and explicitly supports the synchronization of semantically
+equivalent video frames across a group of clients.
+
+An earlier implementation of $\mathrm{AI}^2$TV is described in
+\cite{VECTORS}.  In that version, a collaborative virtual environment
+(CVE) supported a variety of team interactions \cite{CHIME}, with the
+optional video display embedded in the wall of a CVE ``room''.  The
+same semantic compression capability was used. Video synchronization
+data was piggybacked on top of the UDP peer-to-peer communication used
+primarily for CVE updates, such as tracking avatar movements in the
+style of multi-player 3D gaming.  The video synchronization did not
+work very well, due to the heavy-weight CVE burden on local
+resources. Video quality optimization was not addressed.  The new
+implementation of $\mathrm{AI}^2$TV presented here can run alongside
+the CVE in a separate window.
+
+\section{Conclusion}
+
+In this paper we present an architecture and adaptation model that
+allows geographically dispersed participants to collaboratively view a
+video in synchrony.  The system employs an autonomic controller
+architecture that adapts the video quality according to client network
+bandwidth resources.  The other novel approach that we present is
+the use of semantic compression to facilitate the synchronization of
+video content for clients with heterogeneous resources.  We rely on
+the semantic compression algorithm to guarantee that the semantic
+composition of the video frames is equivalent for all clients.  We
+then distribute appropriate versions of the video to clients according
+to their current bandwidth resources.  Through the use of these tools,
+we hope to close the gap between students with varying network
+resources and allow collaboration to proceed in a fruitful manner.
+
+%ACKNOWLEDGMENTS are optional
+\section{Acknowledgments}
+We would like to thank John Kender, Tiecheng Liu, and other members of
+the High-Level Vision Lab for their assistance in using their
+lecture-video semantic compression software.  We would also like to
+thank the other members of the Programming Systems Lab for their
+support, particularly Matias Pelenur who ported the Little-JIL
+workflow notation to run on Workflakes/Cougaar.  Little-JIL was
+developed by Lee Osterweil's LASER lab at the University of
+Massachusetts, Amherst. Cougaar was developed by a DARPA-funded
+consortium; our main Cougaar contact was Nathan Combs of BBN.
+Information about the Columbia Video Network is available at
+http://www.cvn.columbia.edu/. PSL is funded in part by National
+Science Foundation grants CCR-0203876, EIA-0202063 and EIA-0071954,
+and by Microsoft Research.
+
+% The following two commands are all you need in the
+% initial runs of your .tex file to
+% produce the bibliography for the citations in your paper.
+\bibliographystyle{abbrv} \bibliography{ai2tv}
+% You must have a proper ".bib" file
+%  and remember to run:
+% latex bibtex latex latex
+% to resolve all references
+
+% ??? we'll need to do this right before submission
+% \subsection{References}
+%
+%% Generated by bibtex from your ~.bib file.  Run latex,
+%% then bibtex, then latex twice (to resolve references)
+%% to create the ~.bbl file.  Insert that ~.bbl file into
+%% the .tex source file and comment out
+%% the command \texttt{{\char'134}thebibliography}.
+
+% This next section command marks the start of
+% Appendix B, and does not continue the present hierarchy
+%% \section{More Help for the Hardy}
+%% The sig-alternate.cls file itself is chock-full of succinct
+%% and helpful comments.  If you consider yourself a moderately
+%% experienced to expert user of \LaTeX, you may find reading
+%% it useful but please remember not to change it.
+
+\balancecolumns % GM July 2000
+% That's all folks!
+\end{document}
+% ---------------------------------------------------------------
+% / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
+% / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
+% / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / / /
 % ---------------------------------------------------------------
\ No newline at end of file