@inproceedings{DOI302,
  title     = {Auto-Grading {Jupyter} Notebooks},
  booktitle = {SIGCSE 2020},
  year      = {2020},
  month     = mar,
  author    = {Manzoor, Hamza and Naik, Amit and Shaffer, Clifford A. and North, Chris and Edwards, Stephen H.}
}

@inproceedings{DOI296,
  title     = {Machine Learning from User Interaction for Visualization and Analytics: A Workshop-Generated Research Agenda},
  booktitle = {Proceedings of the IEEE VIS Workshop MLUI 2019: Machine Learning from User Interactions for Visualization and Analytics. VIS{\textquoteright}19.},
  year      = {2019},
  month     = oct,
  author    = {Wenskovitch, John and Dowling, Michelle and Grose, Laura and North, Chris and Chang, Remco and Endert, Alex and Rogers, David}
}

@article{DOI269,
  title   = {Be the Data: Embodied Visual Analytics},
  journal = {IEEE Transactions on Learning Technologies},
  volume  = {11},
  year    = {2018},
  pages   = {81--95},
  doi     = {10.1109/TLT.2017.2757481},
  author  = {Chen, Xin and Self, Jessica Zeitz and House, Leanna and Wenskovitch, John and Sun, Maoyuan and Wycoff, Nathan and Evia, Jane Robertson and Leman, Scotland and North, Chris}
}

@inproceedings{DOI265,
  title     = {Bringing Interactive Visual Analytics to the Classroom for Developing {EDA} Skills},
  booktitle = {Proceedings of the 33rd Annual Consortium of Computing Sciences in Colleges (CCSC) Eastern Regional Conference},
  year      = {2017},
  month     = oct,
  pages     = {10},
  author    = {Self, Jessica Zeitz and Self, Nathan and House, Leanna and Evia, Jane Robertson and Leman, Scotland and North, Chris}
}

@inproceedings{DOI245,
  title     = {Be the Data: An Embodied Experience for Data Analytics},
  booktitle = {2016 Annual Meeting of the American Educational Research Association (AERA)},
  year      = {2016},
  month     = apr,
  pages     = {20},
  author    = {Chen, Xin and House, Leanna and Self, Jessica Zeitz and Leman, Scotland and Evia, Jane Robertson and Fry, James Thomas and North, Chris}
}

@techreport{DOI244,
  title       = {Bringing Interactive Visual Analytics to the Classroom for Developing {EDA} Skills},
  year        = {2015},
  institution = {Virginia Tech},
  type        = {Technical Report},
  address     = {Blacksburg},
  abstract    = {This paper addresses the use of visual analytics in education for teaching what we call cognitive dimensionality (CD) and other EDA skills. We present the concept of CD to characterize students{\textquoteright} capacity for making dimensionally complex insights from data. Using this concept, we build a vocabulary and methodology to support a student{\textquoteright}s progression in terms of growth from low cognitive dimensionality (LCD) to high cognitive dimensionality (HCD). Crucially, students do not need high-level math skills to develop HCD. Rather, we use our own tool called Andromeda that enables human-computer interaction with a common, easy to interpret visualization method called Weighted Multidimensional Scaling (WMDS) to promote the idea of making high-dimensional insights. In this paper, we present Andromeda and report findings from a series of classroom assignments to 18 graduate students. These assignments progress from spreadsheet manipulations to statistical software such as R and finally to the use of Andromeda. In parallel with the assignments, we saw students{\textquoteright} CD begin low and improve.},
  keywords    = {dimension reduction, education, multidimensional scaling, multivariate analysis, Visual Analytics},
  author      = {Self, Jessica Zeitz and Self, Nathan and House, Leanna and Evia, Jane Robertson and Leman, Scotland and North, Chris}
}

@article{DOI268,
  title   = {Semantic Interaction: Coupling Cognition and Computation through Usable Interactive Analytics},
  journal = {IEEE Computer Graphics and Applications},
  year    = {2015},
  month   = jul,
  pages   = {6--11},
  author  = {Endert, Alex and Chang, Remco and North, Chris and Zhou, Michelle}
}

@article{DOI10.1007/s10844-014-0304-9,
  title     = {The human is the loop: new directions for visual analytics},
  journal   = {Journal of Intelligent Information Systems},
  volume    = {43},
  year      = {2014},
  pages     = {411--435},
  publisher = {Springer US},
  abstract  = {Visual analytics is the science of marrying interactive visualizations and analytic algorithms to support exploratory knowledge discovery in large datasets. We argue for a shift from a {\textquoteleft}human in the loop{\textquoteright} philosophy for visual analytics to a {\textquoteleft}human is the loop{\textquoteright} viewpoint, where the focus is on recognizing analysts{\textquoteright} work processes, and seamlessly fitting analytics into that existing interactive process. We survey a range of projects that provide visual analytic support contextually in the sensemaking loop, and outline a research agenda along with future challenges.},
  keywords  = {clustering, Semantic interaction, Spatialization, Storytelling, Visual Analytics},
  issn      = {0925-9902},
  doi       = {10.1007/s10844-014-0304-9},
  author    = {Endert, Alex and Hossain, M. Shahriar and Ramakrishnan, Naren and North, Chris and Fiaux, Patrick and Andrews, Christopher}
}

@article{6855271,
  title    = {Semantic Interaction for Visual Analytics: Toward Coupling Cognition and Computation},
  journal  = {Computer Graphics and Applications, IEEE},
  volume   = {34},
  number   = {4},
  year     = {2014},
  month    = jul,
  pages    = {8--15},
  keywords = {Analytical models, Cognition, computation, Computational modeling, computer graphics, Data models, Data visualization, graphics, human computer interaction, human-computer interaction, IN-SPIRE, Semantic interaction, Semantics, Visual Analytics, visualization},
  issn     = {0272-1716},
  doi      = {10.1109/MCG.2014.73},
  author   = {Endert, Alex}
}

@inproceedings{DOI215,
  title     = {Toward Usable Interactive Analytics: Coupling Cognition and Computation},
  booktitle = {KDD 2014 Workshop on Interactive Data Exploration and Analytics (IDEA)},
  year      = {2014},
  url       = {http://poloclub.gatech.edu/idea2014/papers/p52-endert.pdf},
  author    = {Endert, Alex and North, Chris and Chang, Remco and Zhou, Michelle}
}

@article{DOI10.1109/MCG.2013.53,
  title   = {Beyond Control Panels: Direct Manipulation for Visual Analytics},
  journal = {IEEE Computer Graphics and Applications},
  volume  = {33},
  year    = {2013},
  month   = jul,
  pages   = {6--13},
  issn    = {0272-1716},
  doi     = {10.1109/MCG.2013.53},
  author  = {Endert, Alex and Bradel, Lauren and North, Chris}
}

@article{DOI10.1109/MC.2013.269,
  title   = {Bixplorer: Visual Analytics with Biclusters},
  journal = {Computer},
  volume  = {46},
  year    = {2013},
  month   = aug,
  pages   = {90--94},
  issn    = {0018-9162},
  doi     = {10.1109/MC.2013.269},
  author  = {Fiaux, Patrick and Sun, Maoyuan and Bradel, Lauren and North, Chris and Ramakrishnan, Naren and Endert, Alex}
}

@inproceedings{DOI10.1109/ISI.2013.6578780,
  title        = {How analysts cognitively {\textquotedblleft}connect the dots{\textquotedblright}},
  booktitle    = {2013 IEEE International Conference on Intelligence and Security Informatics (ISI)},
  year         = {2013},
  month        = jun,
  pages        = {24--26},
  publisher    = {IEEE},
  organization = {IEEE},
  address      = {Seattle, WA, USA},
  isbn         = {978-1-4673-6214-6},
  doi          = {10.1109/ISI.2013.6578780},
  author       = {Bradel, Lauren and Self, Jessica Zeitz and Endert, Alex and Hossain, M. Shahriar and North, Chris and Ramakrishnan, Naren}
}

@article{DOI10.1016/j.ijhcs.2013.07.004,
  title   = {Large High Resolution Displays for Co-Located Collaborative Sensemaking: Display Usage and Territoriality},
  journal = {International Journal of Human-Computer Studies},
  volume  = {71},
  year    = {2013},
  month   = nov,
  pages   = {1078--1088},
  issn    = {1071-5819},
  doi     = {10.1016/j.ijhcs.2013.07.004},
  author  = {Bradel, Lauren and Endert, Alex and Koch, Kristen and Andrews, Christopher and North, Chris}
}

@article{DOI10.1371/journal.pone.0050474,
  title    = {Visual to Parametric Interaction ({V2PI})},
  journal  = {PLoS ONE},
  volume   = {8},
  year     = {2013},
  month    = mar,
  pages    = {e50474},
  abstract = {Typical data visualizations result from linear pipelines that start by characterizing data using a model or algorithm to reduce the dimension and summarize structure, and end by displaying the data in a reduced dimensional form. Sensemaking may take place at the end of the pipeline when users have an opportunity to observe, digest, and internalize any information displayed. However, some visualizations mask meaningful data structures when model or algorithm constraints (e.g., parameter specifications) contradict information in the data. Yet, due to the linearity of the pipeline, users do not have a natural means to adjust the displays. In this paper, we present a framework for creating dynamic data displays that rely on both mechanistic data summaries and expert judgement. The key is that we develop both the theory and methods of a new human-data interaction to which we refer as {\textquotedblleft} Visual to Parametric Interaction{\textquotedblright} (V2PI). With V2PI, the pipeline becomes bi-directional in that users are embedded in the pipeline; users learn from visualizations and the visualizations adjust to expert judgement. We demonstrate the utility of V2PI and a bi-directional pipeline with two examples.},
  doi      = {10.1371/journal.pone.0050474},
  author   = {Leman, Scotland and House, Leanna and Maiti, Dipayan and Endert, Alex and North, Chris}
}

@inproceedings{Endert:2012:DLH:2254556.2254570,
  title        = {Designing large high-resolution display workspaces},
  booktitle    = {Proceedings of the International Working Conference on Advanced Visual Interfaces},
  series       = {AVI {\textquoteright}12},
  year         = {2012},
  pages        = {58--65},
  publisher    = {ACM},
  organization = {ACM},
  address      = {New York, NY, USA},
  abstract     = {Large, high-resolution displays have enormous potential to aid in scenarios beyond their current usage. Their current usages are primarily limited to presentations, visualization demonstrations, or conducting experiments. In this paper, we present a new usage for such systems: an everyday workspace. We discuss how seemingly small large-display design decisions can have significant impacts on users{\textquoteright} perceptions of these workspaces, and thus the usage of the space. We describe the effects that various physical configurations have on the overall usability and perception of the display. We present conclusions on how to broaden the usage scenarios of large, high-resolution displays to enable frequent and effective usage as everyday workspaces while still allowing transformation to collaborative or presentation spaces.},
  keywords     = {large high-resolution displays},
  isbn         = {978-1-4503-1287-5},
  doi          = {10.1145/2254556.2254570},
  url          = {http://doi.acm.org/10.1145/2254556.2254570},
  author       = {Endert, Alex and Bradel, Lauren and Zeitz, Jessica and Andrews, Christopher and North, Chris}
}

@inproceedings{DOI140,
  title     = {Dynamic Analysis of Large Datasets with Animated and Correlated Views},
  booktitle = {IEEE VAST 2012 (Extended Abstract) (Honorable Mention for Good Use of Coordinated Displays)},
  year      = {2012},
  abstract  = {In this paper, we introduce a GPU-accelerated visual analytics tool, AVIST. By adopting the in-situ visualization architecture on the GPUs, AVIST supports real-time data analysis and visualization of massive scale datasets, such as VAST 2012 Challenge dataset. The design objective of the tool is to identify temporal patterns from large and complex data. To achieve this goal, we introduce three unique features: automatic animation, disjunctive data filters, and time-synced visualization of multiple datasets.},
  author    = {Cao, Yong and Moore, Reese and Mi, Peng and Endert, Alex and North, Chris and Marchany, Randy}
}

@inproceedings{Ragan:2012:SLI:2254556.2254576,
  title        = {How spatial layout, interactivity, and persistent visibility affect learning with large displays},
  booktitle    = {Proceedings of the International Working Conference on Advanced Visual Interfaces},
  series       = {AVI {\textquoteright}12},
  year         = {2012},
  pages        = {91--98},
  publisher    = {ACM},
  organization = {ACM},
  address      = {New York, NY, USA},
  abstract     = {Visualizations often use spatial representations to aid understanding, but it is unclear what properties of a spatial information presentation are most important to effectively support cognitive processing. This research explores how spatial layout and view control impact learning and investigates the role of persistent visibility when working with large displays. We performed a controlled experiment with a learning activity involving memory and comprehension of a visually represented story. We compared performance between a slideshow-type presentation on a single monitor and a spatially distributed presentation among multiple monitors. We also varied the method of view control (automatic vs. interactive). Additionally, to separate effects due to location or persistent visibility with a spatially distributed layout, we controlled whether all story images could always be seen or if only one image could be viewed at a time. With the distributed layouts, participants maintained better memory of the associated locations where information was presented. However, learning scores were significantly better for the slideshow presentation than for the distributed layout when only one image could be viewed at a time.},
  keywords     = {interactivity, large displays, learning, memory, use of space},
  isbn         = {978-1-4503-1287-5},
  doi          = {10.1145/2254556.2254576},
  url          = {http://doi.acm.org/10.1145/2254556.2254576},
  author       = {Ragan, Eric D. and Endert, Alex and Bowman, Doug A. and Quek, Francis}
}

@article{DOI10.1109/TVCG.2012.260,
  title    = {Semantic Interaction for Sensemaking: Inferring Analytical Reasoning for Model Steering},
  journal  = {IEEE Transactions on Visualization and Computer Graphics},
  volume   = {18},
  year     = {2012},
  month    = dec,
  pages    = {2879--2888},
  abstract = {Visual analytic tools aim to support the cognitively demanding task of sensemaking. Their success often depends on the ability to leverage capabilities of mathematical models, visualization, and human intuition through flexible, usable, and expressive interactions. Spatially clustering data is one effective metaphor for users to explore similarity and relationships between information, adjusting the weighting of dimensions or characteristics of the dataset to observe the change in the spatial layout. Semantic interaction is an approach to user interaction in such spatializations that couples these parametric modifications of the clustering model with users{\textquoteright} analytic operations on the data (e.g., direct document movement in the spatialization, highlighting text, search, etc.). In this paper, we present results of a user study exploring the ability of semantic interaction in a visual analytic prototype, ForceSPIRE, to support sensemaking. We found that semantic interaction captures the analytical reasoning of the user through keyword weighting, and aids the user in co-creating a spatialization based on the user{\textquoteright}s reasoning and intuition.},
  issn     = {1077-2626},
  doi      = {10.1109/TVCG.2012.260},
  author   = {Endert, Alex and Fiaux, Patrick and North, Chris}
}

@inproceedings{Endert:2012:SIV:2207676.2207741,
  title        = {Semantic interaction for visual text analytics},
  booktitle    = {Proceedings of the 2012 ACM annual conference on Human Factors in Computing Systems},
  series       = {CHI {\textquoteright}12},
  year         = {2012},
  pages        = {473--482},
  publisher    = {ACM},
  organization = {ACM},
  address      = {New York, NY, USA},
  abstract     = {Visual analytics emphasizes sensemaking of large, complex datasets through interactively exploring visualizations generated by statistical models. For example, dimensionality reduction methods use various similarity metrics to visualize textual document collections in a spatial metaphor, where similarities between documents are approximately represented through their relative spatial distances to each other in a 2D layout. This metaphor is designed to mimic analysts{\textquoteright} mental models of the document collection and support their analytic processes, such as clustering similar documents together. However, in current methods, users must interact with such visualizations using controls external to the visual metaphor, such as sliders, menus, or text fields, to directly control underlying model parameters that they do not understand and that do not relate to their analytic process occurring within the visual metaphor. In this paper, we present the opportunity for a new design space for visual analytic interaction, called semantic interaction, which seeks to enable analysts to spatially interact with such models directly within the visual metaphor using interactions that derive from their analytic process, such as searching, highlighting, annotating, and repositioning documents. Further, we demonstrate how semantic interactions can be implemented using machine learning techniques in a visual analytic tool, called ForceSPIRE, for interactive analysis of textual data within a spatial visualization. Analysts can express their expert domain knowledge about the documents by simply moving them, which guides the underlying model to improve the overall layout, taking the user{\textquoteright}s feedback into account.},
  keywords     = {interaction, Visual Analytics, visualization},
  isbn         = {978-1-4503-1015-4},
  doi          = {10.1145/2207676.2207741},
  url          = {http://doi.acm.org/10.1145/2207676.2207741},
  author       = {Endert, Alex and Fiaux, Patrick and North, Chris}
}

@inproceedings{Endert:2012:SCA:2254556.2254660,
  title        = {The semantics of clustering: analysis of user-generated spatializations of text documents},
  booktitle    = {Proceedings of the International Working Conference on Advanced Visual Interfaces},
  series       = {AVI {\textquoteright}12},
  year         = {2012},
  pages        = {555--562},
  publisher    = {ACM},
  organization = {ACM},
  address      = {New York, NY, USA},
  abstract     = {Analyzing complex textual datasets consists of identifying connections and relationships within the data based on users{\textquoteright} intuition and domain expertise. In a spatial workspace, users can do so implicitly by spatially arranging documents into clusters to convey similarity or relationships. Algorithms exist that spatialize and cluster such information mathematically based on similarity metrics. However, analysts often find inconsistencies in these generated clusters based on their expertise. Therefore, to support sensemaking, layouts must be co-created by the user and the model. In this paper, we present the results of a study observing individual users performing a sensemaking task in a spatial workspace. We examine the users{\textquoteright} interactions during their analytic process, and also the clusters the users manually created. We found that specific interactions can act as valuable indicators of important structure within a dataset. Further, we analyze and characterize the structure of the user-generated clusters to identify useful metrics to guide future algorithms. Through a deeper understanding of how users spatially cluster information, we can inform the design of interactive algorithms to generate more meaningful spatializations for text analysis tasks, to better respond to user interactions during the analytics process, and ultimately to allow analysts to more rapidly gain insight.},
  keywords     = {clustering, text analytics, Visual Analytics, visualization},
  isbn         = {978-1-4503-1287-5},
  doi          = {10.1145/2254556.2254660},
  url          = {http://doi.acm.org/10.1145/2254556.2254660},
  author       = {Endert, Alex and Fox, Seth and Maiti, Dipayan and Leman, Scotland and North, Chris}
}

@inproceedings{North:2011:APP:1979742.1979570,
  title        = {Analytic provenance: process+interaction+insight},
  booktitle    = {Proceedings of the 2011 annual conference extended abstracts on Human factors in computing systems},
  series       = {CHI EA {\textquoteright}11},
  year         = {2011},
  pages        = {33--36},
  publisher    = {ACM},
  organization = {ACM},
  address      = {New York, NY, USA},
  keywords     = {analytic provenance, user interaction, Visual Analytics, visualization},
  isbn         = {978-1-4503-0268-5},
  doi          = {10.1145/1979742.1979570},
  url          = {http://doi.acm.org/10.1145/1979742.1979570},
  author       = {North, Chris and Chang, Remco and Endert, Alex and Dou, Wenwen and May, Richard and Pike, Bill and Fink, G.}
}

@inproceedings{Endert:2011:CLN:1979742.1979628,
  title        = {{ChairMouse}: leveraging natural chair rotation for cursor navigation on large, high-resolution displays},
  booktitle    = {Proceedings of the 2011 annual conference extended abstracts on Human factors in computing systems},
  series       = {CHI EA {\textquoteright}11},
  year         = {2011},
  pages        = {571--580},
  publisher    = {ACM},
  organization = {ACM},
  address      = {New York, NY, USA},
  abstract     = {Large, high-resolution displays lead to more spatially based approaches. In such environments, the cursor (and hence the physical mouse) is the primary means of interaction. However, usability issues occur when standard mouse interaction is applied to workstations with large size and high pixel density. Previous studies show users navigate physically when interacting with information on large displays by rotating their chair. ChairMouse captures this natural chair movement and translates it into large-scale cursor movement while still maintaining standard mouse usage for local cursor movement. ChairMouse supports both active and passive use, reducing tedious mouse interactions by leveraging physical chair action.},
  keywords     = {Embodied Interaction, interaction design, large display},
  isbn         = {978-1-4503-0268-5},
  doi          = {10.1145/1979742.1979628},
  url          = {http://doi.acm.org/10.1145/1979742.1979628},
  author       = {Endert, Alex and Fiaux, Patrick and Chung, Haeyong and Stewart, Michael and Andrews, Christopher and North, Chris}
}

@inproceedings{119,
  title     = {Co-located Collaborative Sensemaking on a Large High-Resolution Display with Multiple Input Devices},
  booktitle = {INTERACT 2011},
  volume    = {6947},
  year      = {2011},
  pages     = {589--604},
  address   = {Lisbon, Portugal},
  keywords  = {co-located, CSCW, Large High Resolution Display, large high-resolution display, sensemaking, Visual Analytics},
  isbn      = {978-3-642-23771-3},
  issn      = {1611-3349},
  doi       = {10.1007/978-3-642-23771-3_44},
  author    = {Vogt, Katherine and Bradel, Lauren and Andrews, Christopher and North, Chris and Endert, Alex and Hutchings, Duke}
}

@inproceedings{Ragan:2011:ESL:1979742.1979921,
  title        = {The effects of spatial layout and view control on cognitive processing},
  booktitle    = {Proceedings of the 2011 annual conference extended abstracts on Human factors in computing systems},
  series       = {CHI EA {\textquoteright}11},
  year         = {2011},
  pages        = {2005--2010},
  publisher    = {ACM},
  organization = {ACM},
  address      = {New York, NY, USA},
  keywords     = {information processing, interactivity, learning, spatial memory, visualization},
  isbn         = {978-1-4503-0268-5},
  doi          = {10.1145/1979742.1979921},
  url          = {http://doi.acm.org/10.1145/1979742.1979921},
  author       = {Ragan, Eric D. and Endert, Alex and Bowman, Doug A. and Quek, Francis}
}

@article{Andrews01102011,
  title    = {Information visualization on large, high-resolution displays: Issues, challenges, and opportunities},
  journal  = {Information Visualization},
  volume   = {10},
  number   = {4},
  year     = {2011},
  pages    = {341--355},
  abstract = {Larger, higher-resolution displays are becoming accessible to a greater number of users as display technologies decrease in cost and software for the displays improves. The additional pixels are especially useful for information visualization where scalability has typically been limited by the number of pixels available on a display. But how will visualizations for larger displays need to fundamentally differ from visualizations on desktop displays? Are the basic visualization design principles different? With this potentially new design paradigm comes questions such as whether the relative effectiveness of various graphical encodings are different on large displays, which visualizations and datasets benefit the most, and how interaction with visualizations on large, high-resolution displays will need to change. As we explore these possibilities, we shift away from the technical limitations of scalability imposed by traditional displays (e.g. number of pixels) to studying the human abilities that emerge when these limitations are removed. There is much potential for information visualizations to benefit from large, high-resolution displays, but this potential will only be realized through understanding the interaction between visualization design, perception, interaction techniques, and the display technology. In this paper we present critical design issues and outline some of the challenges and future opportunities for designing visualizations for large, high-resolution displays. We hope that these issues, challenges, and opportunities will provide guidance for future research in this area.},
  doi      = {10.1177/1473871611415997},
  url      = {http://ivi.sagepub.com/content/10/4/341.abstract},
  author   = {Andrews, Christopher and Endert, Alex and Yost, Beth and North, Chris}
}

@inproceedings{6102449,
  title     = {Observation-level interaction with statistical models for visual analytics},
  booktitle = {Visual Analytics Science and Technology (VAST), 2011 IEEE Conference on},
  year      = {2011},
  month     = oct,
  pages     = {121--130},
  abstract  = {In visual analytics, sensemaking is facilitated through interactive visual exploration of data. Throughout this dynamic process, users combine their domain knowledge with the dataset to create insight. Therefore, visual analytic tools exist that aid sensemaking by providing various interaction techniques that focus on allowing users to change the visual representation through adjusting parameters of the underlying statistical model. However, we postulate that the process of sensemaking is not focused on a series of parameter adjustments, but instead, a series of perceived connections and patterns within the data. Thus, how can models for visual analytic tools be designed, so that users can express their reasoning on observations (the data), instead of directly on the model or tunable parameters? Observation level (and thus {\textquotedblleft}observation{\textquotedblright}) in this paper refers to the data points within a visualization. In this paper, we explore two possible observation-level interactions, namely exploratory and expressive, within the context of three statistical methods, Probabilistic Principal Component Analysis (PPCA), Multidimensional Scaling (MDS), and Generative Topographic Mapping (GTM). We discuss the importance of these two types of observation level interactions, in terms of how they occur within the sensemaking process. Further, we present use cases for GTM, MDS, and PPCA, illustrating how observation level interaction can be incorporated into visual analytic tools.},
  keywords  = {data analysis, data interactive visual exploration, data visualisation, exploratory interaction, expressive interaction, generative topographic mapping, multidimensional scaling, observation-level interaction, parameter adjustments, principal component analysis, probabilistic principal component analysis, probability, sensemaking process, statistical models, Visual Analytics},
  doi       = {10.1109/VAST.2011.6102449},
  author    = {Endert, Alex and Han, Chao and Maiti, Dipayan and House, Leanna and Leman, Scotland and North, Chris}
}

@techreport{120,
  title       = {Space for Two to Think: Large, High-Resolution Displays for Co-located Collaborative Sensemaking},
  number      = {TR-11-11},
  year        = {2011},
  institution = {Computer Science, Virginia Tech},
  type        = {Technical Report},
  keywords    = {collaborative sensemaking, high-resolution displays, large, Large High Resolution Display, single display groupware, Visual Analytics},
  author      = {Bradel, Lauren and Andrews, Christopher and Endert, Alex and Vogt, Katherine and Hutchings, Duke and North, Chris}
}

@inproceedings{Singh:2011:SCA:2016904.2016907,
  title        = {Supporting the cyber analytic process using visual history on large displays},
  booktitle    = {Proceedings of the 8th International Symposium on Visualization for Cyber Security},
  series       = {VizSec {\textquoteright}11},
  year         = {2011},
  pages        = {3:1--3:8},
  publisher    = {ACM},
  organization = {ACM},
  address      = {New York, NY, USA},
  keywords     = {interaction styles, large high-resolution displays, prototyping, screen design, user-centered design},
  isbn         = {978-1-4503-0679-9},
  doi          = {10.1145/2016904.2016907},
  url          = {http://doi.acm.org/10.1145/2016904.2016907},
  author       = {Singh, Ankit and Bradel, Lauren and Endert, Alex and Kincaid, Robert and Andrews, Christopher and North, Chris}
}

@inproceedings{123,
  title     = {Unifying the Sensemaking Loop with Semantic Interaction},
  booktitle = {IEEE Workshop on Interactive Visual Text Analytics for Decision Making at VisWeek 2011},
  year      = {2011},
  month     = oct,
  address   = {Providence, RI},
  keywords  = {Visual Analytics},
  author    = {Endert, Alex and Fiaux, Patrick and North, Chris}
}

@inproceedings{Endert:2011:VES:1992917.1992935,
  title        = {Visual encodings that support physical navigation on large displays},
  booktitle    = {Proceedings of Graphics Interface 2011},
  series       = {GI {\textquoteright}11},
  year         = {2011},
  pages        = {103--110},
  publisher    = {Canadian Human-Computer Communications Society},
  organization = {Canadian Human-Computer Communications Society},
  address      = {School of Computer Science, University of Waterloo, Waterloo, Ontario, Canada},
  keywords     = {aggregation, high-resolution display, information visualization, large, perceptual scalability},
  isbn         = {978-1-4503-0693-5},
  url          = {http://dl.acm.org/citation.cfm?id=1992917.1992935},
  author       = {Endert, Alex and Andrews, Christopher and Lee, Yueh Hua and North, Chris}
}

@inproceedings{1753336,
  title        = {Space to think: large high-resolution displays for sensemaking},
  booktitle    = {CHI {\textquoteright}10: Proceedings of the 28th international conference on Human factors in computing systems},
  year         = {2010},
  pages        = {55--64},
  publisher    = {ACM},
  organization = {ACM},
  address      = {New York, NY, USA},
  keywords     = {LHRD},
  isbn         = {978-1-60558-929-9},
  doi          = {10.1145/1753326.1753336},
  author       = {Andrews, Christopher and Endert, Alex and North, Chris}
}

@inproceedings{5478473,
  title     = {Towards efficient collaboration in cyber security},
  booktitle = {Collaborative Technologies and Systems (CTS), 2010 International Symposium on},
  year      = {2010},
  pages     = {489--498},
  keywords  = {collaboration, cyber security analysts, groupware, security bulletins, security of data},
  doi       = {10.1109/CTS.2010.5478473},
  author    = {Hui, P. and Bruce, J. and Fink, G. and Gregory, M. and Best, D. and McGrath, L. and Endert, Alex}
}

@inproceedings{102,
  title     = {Professional Analysts using a Large, High-Resolution Display},
  booktitle = {IEEE VAST 2009 (Extended Abstract) (Awarded Special Contributions to the VAST Challenge Contest)},
  year      = {2009},
  keywords  = {Large High Resolution Display, Visual Analytics},
  author    = {Endert, Alex and Andrews, Christopher and North, Chris}
}

@inproceedings{5333245,
  title     = {{VAST} contest dataset use in education},
  booktitle = {Visual Analytics Science and Technology, 2009. IEEE VAST 2009.},
  year      = {2009},
  pages     = {115--122},
  keywords  = {data visualisation, education, educational technology, evaluation metrics, IEEE visual analytics science and technology, information analysis, information analysts, VAST, Visual Analytics},
  doi       = {10.1109/VAST.2009.5333245},
  author    = {Whiting, M. A. and North, Chris and Endert, Alex and Scholtz, J. and Haack, J. and Varley, C. and Thomas, J.}
}

@inproceedings{5375542,
  title     = {Visualizing cyber security: Usable workspaces},
  booktitle = {Visualization for Cyber Security, 2009. VizSec 2009. 6th International Workshop on},
  year      = {2009},
  pages     = {45--56},
  keywords  = {cyber analytics work environment, cyber security visualization, data visualisation, digital infrastructures, information foraging, Large High Resolution Display, security of data, usability evaluation, usable workspaces, Visual Analytics},
  doi       = {10.1109/VIZSEC.2009.5375542},
  author    = {Fink, G. and North, Chris and Endert, Alex and Rose, S.}
}