Publications
Search
Xiong, Butian; Liu, Rong; Xu, Kenneth; Chen, Meida; Feng, Andrew
Splat Feature Solver Miscellaneous
2025, (arXiv:2508.12216 [cs]).
@misc{xiong_splat_2025a,
  title         = {Splat Feature Solver},
  author        = {Xiong, Butian and Liu, Rong and Xu, Kenneth and Chen, Meida and Feng, Andrew},
  url           = {http://arxiv.org/abs/2508.12216},
  doi           = {10.48550/arXiv.2508.12216},
  eprint        = {2508.12216},
  eprinttype    = {arXiv},
  eprintclass   = {cs},
  year          = {2025},
  date          = {2025-08-01},
  urldate       = {2025-09-18},
  publisher     = {arXiv},
  abstract      = {Feature lifting has emerged as a crucial component in 3D scene understanding, enabling the attachment of rich image feature descriptors (e.g., DINO, CLIP) onto splat-based 3D representations. The core challenge lies in optimally assigning rich general attributes to 3D primitives while addressing the inconsistency issues from multi-view images. We present a unified, kernel- and feature-agnostic formulation of the feature lifting problem as a sparse linear inverse problem, which can be solved efficiently in closed form. Our approach admits a provable upper bound on the global optimal error under convex losses for delivering high quality lifted features. To address inconsistencies and noise in multi-view observations, we introduce two complementary regularization strategies to stabilize the solution and enhance semantic fidelity. Tikhonov Guidance enforces numerical stability through soft diagonal dominance, while Post-Lifting Aggregation filters noisy inputs via feature clustering. Extensive experiments demonstrate that our approach achieves state-of-the-art performance on open-vocabulary 3D segmentation benchmarks, outperforming training-based, grouping-based, and heuristic-forward baselines while producing the lifted features in minutes. Code is available at https://github.com/saliteta/splat-distiller.git and a project page at https://splat-distiller.pages.dev/},
  note          = {arXiv:2508.12216 [cs]},
  pubstate      = {published},
  tppubtype     = {misc},
  internal-note = {Duplicate record of xiong_splat_2025; key suffixed with "a" so entry keys stay unique},
}
Han, Bin; Kwon, Deuksin; Lin, Spencer; Shrestha, Kaleen; Gratch, Jonathan
Can LLMs Generate Behaviors for Embodied Virtual Agents Based on Personality Traits? Miscellaneous
2025, (arXiv:2508.21087 [cs]).
@misc{han_can_2025a,
  title         = {Can {LLMs} Generate Behaviors for Embodied Virtual Agents Based on Personality Traits?},
  author        = {Han, Bin and Kwon, Deuksin and Lin, Spencer and Shrestha, Kaleen and Gratch, Jonathan},
  url           = {http://arxiv.org/abs/2508.21087},
  doi           = {10.48550/arXiv.2508.21087},
  eprint        = {2508.21087},
  eprinttype    = {arXiv},
  eprintclass   = {cs},
  year          = {2025},
  date          = {2025-08-01},
  urldate       = {2025-09-18},
  publisher     = {arXiv},
  abstract      = {This study proposes a framework that employs personality prompting with Large Language Models to generate verbal and nonverbal behaviors for virtual agents based on personality traits. Focusing on extraversion, we evaluated the system in two scenarios: negotiation and ice breaking, using both introverted and extroverted agents. In Experiment 1, we conducted agent to agent simulations and performed linguistic analysis and personality classification to assess whether the LLM generated language reflected the intended traits and whether the corresponding nonverbal behaviors varied by personality. In Experiment 2, we carried out a user study to evaluate whether these personality aligned behaviors were consistent with their intended traits and perceptible to human observers. Our results show that LLMs can generate verbal and nonverbal behaviors that align with personality traits, and that users are able to recognize these traits through the agents' behaviors. This work underscores the potential of LLMs in shaping personality aligned virtual agents.},
  note          = {arXiv:2508.21087 [cs]},
  pubstate      = {published},
  tppubtype     = {misc},
  internal-note = {Duplicate record of han_can_2025; key suffixed with "a" so entry keys stay unique},
}
King, Tyler; Gurney, Nikolos; Miller, John H.; Ustun, Volkan
Detecting AI Assistance in Abstract Complex Tasks Miscellaneous
2025, (arXiv:2507.10761 [cs]).
@misc{king_detecting_2025a,
  title         = {Detecting {AI} Assistance in Abstract Complex Tasks},
  author        = {King, Tyler and Gurney, Nikolos and Miller, John H. and Ustun, Volkan},
  url           = {http://arxiv.org/abs/2507.10761},
  doi           = {10.48550/arXiv.2507.10761},
  eprint        = {2507.10761},
  eprinttype    = {arXiv},
  eprintclass   = {cs},
  year          = {2025},
  date          = {2025-07-01},
  urldate       = {2025-08-19},
  publisher     = {arXiv},
  abstract      = {Detecting assistance from artificial intelligence is increasingly important as they become ubiquitous across complex tasks such as text generation, medical diagnosis, and autonomous driving. Aid detection is challenging for humans, especially when looking at abstract task data. Artificial neural networks excel at classification thanks to their ability to quickly learn from and process large amounts of data -- assuming appropriate preprocessing. We posit detecting help from AI as a classification task for such models. Much of the research in this space examines the classification of complex but concrete data classes, such as images. Many AI assistance detection scenarios, however, result in data that is not machine learning-friendly. We demonstrate that common models can effectively classify such data when it is appropriately preprocessed. To do so, we construct four distinct neural network-friendly image formulations along with an additional time-series formulation that explicitly encodes the exploration/exploitation of users, which allows for generalizability to other abstract tasks. We benchmark the quality of each image formulation across three classical deep learning architectures, along with a parallel CNN-RNN architecture that leverages the additional time series to maximize testing performance, showcasing the importance of encoding temporal and spatial quantities for detecting AI aid in abstract tasks.},
  note          = {arXiv:2507.10761 [cs]},
  pubstate      = {published},
  tppubtype     = {misc},
  internal-note = {Duplicate record of king_detecting_2025; key suffixed with "a" so entry keys stay unique},
}
Filter
2025
Xiong, Butian; Liu, Rong; Xu, Kenneth; Chen, Meida; Feng, Andrew
Splat Feature Solver Miscellaneous
2025, (arXiv:2508.12216 [cs]).
Abstract | Links | BibTeX | Tags: DTIC?, VGL
@misc{xiong_splat_2025,
  title       = {Splat Feature Solver},
  author      = {Xiong, Butian and Liu, Rong and Xu, Kenneth and Chen, Meida and Feng, Andrew},
  url         = {http://arxiv.org/abs/2508.12216},
  doi         = {10.48550/arXiv.2508.12216},
  eprint      = {2508.12216},
  eprinttype  = {arXiv},
  eprintclass = {cs},
  year        = {2025},
  date        = {2025-08-01},
  urldate     = {2025-09-18},
  publisher   = {arXiv},
  abstract    = {Feature lifting has emerged as a crucial component in 3D scene understanding, enabling the attachment of rich image feature descriptors (e.g., DINO, CLIP) onto splat-based 3D representations. The core challenge lies in optimally assigning rich general attributes to 3D primitives while addressing the inconsistency issues from multi-view images. We present a unified, kernel- and feature-agnostic formulation of the feature lifting problem as a sparse linear inverse problem, which can be solved efficiently in closed form. Our approach admits a provable upper bound on the global optimal error under convex losses for delivering high quality lifted features. To address inconsistencies and noise in multi-view observations, we introduce two complementary regularization strategies to stabilize the solution and enhance semantic fidelity. Tikhonov Guidance enforces numerical stability through soft diagonal dominance, while Post-Lifting Aggregation filters noisy inputs via feature clustering. Extensive experiments demonstrate that our approach achieves state-of-the-art performance on open-vocabulary 3D segmentation benchmarks, outperforming training-based, grouping-based, and heuristic-forward baselines while producing the lifted features in minutes. Code is available at https://github.com/saliteta/splat-distiller.git and a project page at https://splat-distiller.pages.dev/},
  note        = {arXiv:2508.12216 [cs]},
  keywords    = {DTIC?, VGL},
  pubstate    = {published},
  tppubtype   = {misc},
}
Han, Bin; Kwon, Deuksin; Lin, Spencer; Shrestha, Kaleen; Gratch, Jonathan
Can LLMs Generate Behaviors for Embodied Virtual Agents Based on Personality Traits? Miscellaneous
2025, (arXiv:2508.21087 [cs]).
Abstract | Links | BibTeX | Tags: DTIC?, LLM
@misc{han_can_2025,
  title       = {Can {LLMs} Generate Behaviors for Embodied Virtual Agents Based on Personality Traits?},
  author      = {Han, Bin and Kwon, Deuksin and Lin, Spencer and Shrestha, Kaleen and Gratch, Jonathan},
  url         = {http://arxiv.org/abs/2508.21087},
  doi         = {10.48550/arXiv.2508.21087},
  eprint      = {2508.21087},
  eprinttype  = {arXiv},
  eprintclass = {cs},
  year        = {2025},
  date        = {2025-08-01},
  urldate     = {2025-09-18},
  publisher   = {arXiv},
  abstract    = {This study proposes a framework that employs personality prompting with Large Language Models to generate verbal and nonverbal behaviors for virtual agents based on personality traits. Focusing on extraversion, we evaluated the system in two scenarios: negotiation and ice breaking, using both introverted and extroverted agents. In Experiment 1, we conducted agent to agent simulations and performed linguistic analysis and personality classification to assess whether the LLM generated language reflected the intended traits and whether the corresponding nonverbal behaviors varied by personality. In Experiment 2, we carried out a user study to evaluate whether these personality aligned behaviors were consistent with their intended traits and perceptible to human observers. Our results show that LLMs can generate verbal and nonverbal behaviors that align with personality traits, and that users are able to recognize these traits through the agents' behaviors. This work underscores the potential of LLMs in shaping personality aligned virtual agents.},
  note        = {arXiv:2508.21087 [cs]},
  keywords    = {DTIC?, LLM},
  pubstate    = {published},
  tppubtype   = {misc},
}
King, Tyler; Gurney, Nikolos; Miller, John H.; Ustun, Volkan
Detecting AI Assistance in Abstract Complex Tasks Miscellaneous
2025, (arXiv:2507.10761 [cs]).
Abstract | Links | BibTeX | Tags: AI, DTIC?
@misc{king_detecting_2025,
  title       = {Detecting {AI} Assistance in Abstract Complex Tasks},
  author      = {King, Tyler and Gurney, Nikolos and Miller, John H. and Ustun, Volkan},
  url         = {http://arxiv.org/abs/2507.10761},
  doi         = {10.48550/arXiv.2507.10761},
  eprint      = {2507.10761},
  eprinttype  = {arXiv},
  eprintclass = {cs},
  year        = {2025},
  date        = {2025-07-01},
  urldate     = {2025-08-19},
  publisher   = {arXiv},
  abstract    = {Detecting assistance from artificial intelligence is increasingly important as they become ubiquitous across complex tasks such as text generation, medical diagnosis, and autonomous driving. Aid detection is challenging for humans, especially when looking at abstract task data. Artificial neural networks excel at classification thanks to their ability to quickly learn from and process large amounts of data -- assuming appropriate preprocessing. We posit detecting help from AI as a classification task for such models. Much of the research in this space examines the classification of complex but concrete data classes, such as images. Many AI assistance detection scenarios, however, result in data that is not machine learning-friendly. We demonstrate that common models can effectively classify such data when it is appropriately preprocessed. To do so, we construct four distinct neural network-friendly image formulations along with an additional time-series formulation that explicitly encodes the exploration/exploitation of users, which allows for generalizability to other abstract tasks. We benchmark the quality of each image formulation across three classical deep learning architectures, along with a parallel CNN-RNN architecture that leverages the additional time series to maximize testing performance, showcasing the importance of encoding temporal and spatial quantities for detecting AI aid in abstract tasks.},
  note        = {arXiv:2507.10761 [cs]},
  keywords    = {AI, DTIC?},
  pubstate    = {published},
  tppubtype   = {misc},
}