Publications

Bridging Semantics and Pragmatics in Information-Theoretic Emergent Communication, Eleonora Gualdoni, Mycal Tucker, Roger Levy, Noga Zaslavsky. SciL 2024.

@inproceedings{gualdoniIBSemanticsPragmatics,
  author    = {Gualdoni, Eleonora and Tucker, Mycal and Levy, Roger and Zaslavsky, Noga},
  title     = {Bridging Semantics and Pragmatics in Information-Theoretic Emergent Communication},
  booktitle = {2024 Meeting of the Society for Computation in Linguistics},
  year      = {2024},
}
@inproceedings{sannemantuckerIBXAI,
  author    = {Tucker, Mycal and Sanneman, Lindsay and Shah, Julie},
  title     = {An Information Bottleneck Characterization of the Understanding-Workload Tradeoff in Human-Centered Explainable {AI}},
  booktitle = {ACM Conference on Fairness, Accountability, and Transparency},
  year      = {2024},
  note      = {Mycal Tucker and Lindsay Sanneman contributed equally},
}

Human-Guided Complexity-Controlled Abstractions, M. Tucker*, A. Peng*, Eoin Kenny, Noga Zaslavsky, Pulkit Agrawal, J. Shah. NeurIPS 2023.

@inproceedings{tucker2023humanguided,
  author    = {Tucker, Mycal and Peng, Andi and Kenny, Eoin and Zaslavsky, Noga and Agrawal, Pulkit and Shah, Julie},
  title     = {Human-Guided Complexity-Controlled Abstractions},
  booktitle = {Advances in Neural Information Processing Systems},
  year      = {2023},
  url       = {https://openreview.net/forum?id=tSEeRl7ACo},
}

Increasing Brain-LLM Alignment via Information-Theoretic Compression, M. Tucker* and G. Tuckute*. NeurIPS workshop on Unifying Representations in Neural Models 2023.

@inproceedings{tucktuck2023increasing,
  author    = {Tucker, Mycal and Tuckute, Greta},
  title     = {Increasing Brain-{LLM} Alignment via Information-Theoretic Compression},
  booktitle = {UniReps: the First Workshop on Unifying Representations in Neural Models},
  year      = {2023},
  note      = {Equal contribution},
}

Towards Interpretable Deep Reinforcement Learning with Human-friendly Prototypes, E. Kenny, M. Tucker, J. Shah. International Conference on Learning Representations 2023. (Notable paper)

@inproceedings{kenny2023towards,
  author    = {Kenny, Eoin M. and Tucker, Mycal and Shah, Julie},
  title     = {Towards Interpretable Deep Reinforcement Learning with Human-Friendly Prototypes},
  booktitle = {The Eleventh International Conference on Learning Representations},
  year      = {2023},
  note      = {Notable paper},
  url       = {https://openreview.net/forum?id=hWwY_Jq0xsN},
}

Interpretable Learned Emergent Communication for Human-Agent Teams, S. Karten, M. Tucker, H. Li, S. Kailas, M. Lewis, K. Sycara. IEEE Transactions on Cognitive and Developmental Systems 2023.

@article{10015765,
  author  = {Karten, Seth and Tucker, Mycal and Li, Huao and Kailas, Siva and Lewis, Michael and Sycara, Katia},
  title   = {Interpretable Learned Emergent Communication for Human-Agent Teams},
  journal = {IEEE Transactions on Cognitive and Developmental Systems},
  year    = {2023},
  pages   = {1--1},
  doi     = {10.1109/TCDS.2023.3236599},
}

Generalization and Translatability in Emergent Communication, M. Tucker, R. Levy, J. Shah,  N. Zaslavsky. NeurIPS workshop on Information-Theoretic Principles in Cognitive Systems 2022. (Spotlight)

@inproceedings{tucker2022generalization,
  author    = {Tucker, Mycal and Levy, Roger P. and Shah, Julie and Zaslavsky, Noga},
  title     = {Generalization and Translatability in Emergent Communication via Informational Constraints},
  booktitle = {NeurIPS 2022 Workshop on Information-Theoretic Principles in Cognitive Systems},
  year      = {2022},
  url       = {https://openreview.net/forum?id=yf8suFtNZ5v},
}

Trading off Utility, Informativeness, and Complexity in Emergent Communication, M. Tucker, R. Levy, J. Shah,  N. Zaslavsky. NeurIPS 2022.

@inproceedings{tucker2022trading,
  author    = {Tucker, Mycal and Levy, Roger P. and Shah, Julie and Zaslavsky, Noga},
  title     = {Trading off Utility, Informativeness, and Complexity in Emergent Communication},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {Oh, Alice H. and Agarwal, Alekh and Belgrave, Danielle and Cho, Kyunghyun},
  year      = {2022},
  url       = {https://openreview.net/forum?id=O5arhQvBdH},
}



Towards Human-Agent Communication via the Information Bottleneck Principle, M. Tucker, J. Shah, R. Levy, N. Zaslavsky. RSS Workshop on Social Intelligence in Humans and Robots 2022.

@inproceedings{tucker2022towards,
  author    = {Tucker, Mycal and Levy, Roger P. and Shah, Julie and Zaslavsky, Noga},
  title     = {Towards Human-Agent Communication via the Information Bottleneck Principle},
  booktitle = {RSS Workshop: Social Intelligence in Humans and Robots},
  year      = {2022},
  url       = {https://social-intelligence-human-ai.github.io/docs/camready_4.pdf},
}



Prototype Based Classification from Hierarchy to Fairness, M. Tucker, J. Shah. International Conference on Machine Learning (ICML) 2022.

@inproceedings{pmlr-v162-tucker22a,
  author    = {Tucker, Mycal and Shah, Julie A.},
  title     = {Prototype Based Classification from Hierarchy to Fairness},
  booktitle = {Proceedings of the 39th International Conference on Machine Learning},
  pages     = {21884--21900},
  year      = {2022},
  editor    = {Chaudhuri, Kamalika and Jegelka, Stefanie and Song, Le and Szepesvari, Csaba and Niu, Gang and Sabato, Sivan},
  volume    = {162},
  series    = {Proceedings of Machine Learning Research},
  month     = {17--23 Jul},
  publisher = {PMLR},
  pdf       = {https://proceedings.mlr.press/v162/tucker22a/tucker22a.pdf},
  url       = {https://proceedings.mlr.press/v162/tucker22a.html},
  abstract  = {Artificial neural nets can represent and classify many types of high-dimensional data but are often tailored to particular applications -- e.g., for ``fair'' or ``hierarchical'' classification. Once an architecture has been selected, it is often difficult for humans to adjust models for a new task; for example, a hierarchical classifier cannot be easily transformed into a fair classifier that shields a protected field. Our contribution in this work is a new neural network architecture, the concept subspace network (CSN), which generalizes existing specialized classifiers to produce a unified model capable of learning a spectrum of multi-concept relationships. We demonstrate that CSNs reproduce state-of-the-art results in fair classification when enforcing concept independence, may be transformed into hierarchical classifiers, or may even reconcile fairness and hierarchy within a single classifier. The CSN is inspired by and matches the performance of existing prototype-based classifiers that promote interpretability.},
}


When Does Syntax Mediate Neural Language Model Performance? Evidence from Dropout Probes, M. Tucker, T. Eisape, P. Qian, R. Levy, J. Shah. North American Chapter of the Association for Computational Linguistics (NAACL) 2022.

@inproceedings{tucker-etal-2022-syntax,
  author    = {Tucker, Mycal and Eisape, Tiwalayo and Qian, Peng and Levy, Roger and Shah, Julie},
  title     = {When Does Syntax Mediate Neural Language Model Performance? Evidence from Dropout Probes},
  booktitle = {Proceedings of the 2022 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies},
  month     = jul,
  year      = {2022},
  address   = {Seattle, United States},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2022.naacl-main.394},
  doi       = {10.18653/v1/2022.naacl-main.394},
  pages     = {5393--5408},
  abstract  = {Recent causal probing literature reveals when language models and syntactic probes use similar representations. Such techniques may yield {``}false negative{''} causality results: models may use representations of syntax, but probes may have learned to use redundant encodings of the same syntactic information. We demonstrate that models do encode syntactic information redundantly and introduce a new probe design that guides probes to consider all syntactic information present in embeddings. Using these probes, we find evidence for the use of syntax in models where prior methods did not, allowing us to boost model performance by injecting syntactic information into representations.},
}


Latent Space Alignment Using Adversarially Guided Self-Play, M. Tucker, Y. Zhou, J. Shah. International Journal of Human-Computer Interaction 2022.

@article{doi:10.1080/10447318.2022.2083463,
  author    = {Tucker, Mycal and Zhou, Yilun and Shah, Julie},
  title     = {Latent Space Alignment Using Adversarially Guided Self-Play},
  journal   = {International Journal of Human--Computer Interaction},
  pages     = {1--19},
  year      = {2022},
  publisher = {Taylor \& Francis},
  doi       = {10.1080/10447318.2022.2083463},
}



Emergent Discrete Communication in Semantic Spaces, M. Tucker, H. Li, S. Agrawal, D. Hughes, M. Lewis, K. Sycara, J. Shah. NeurIPS 2021.

@inproceedings{tucker2021emergent,
  author    = {Tucker, Mycal and Li, Huao and Agrawal, Siddharth and Hughes, Dana and Sycara, Katia P. and Lewis, Michael and Shah, Julie},
  title     = {Emergent Discrete Communication in Semantic Spaces},
  booktitle = {Advances in Neural Information Processing Systems},
  editor    = {Beygelzimer, A. and Dauphin, Y. and Liang, P. and Wortman Vaughan, J.},
  year      = {2021},
  url       = {https://openreview.net/forum?id=EuvW3-Lmoxm},
}

What if This Modified That? Syntactic Interventions via Counterfactual Embeddings, M. Tucker, P. Qian, and R. Levy. IJCNLP Findings 2021.

@inproceedings{tucker-etal-2021-modified,
  author    = {Tucker, Mycal and Qian, Peng and Levy, Roger},
  title     = {What if This Modified That? Syntactic Interventions with Counterfactual Embeddings},
  booktitle = {Findings of the Association for Computational Linguistics: ACL-IJCNLP 2021},
  month     = aug,
  year      = {2021},
  address   = {Online},
  publisher = {Association for Computational Linguistics},
  url       = {https://aclanthology.org/2021.findings-acl.76},
  doi       = {10.18653/v1/2021.findings-acl.76},
  pages     = {862--875},
}

@article{DBLP:journals/corr/abs-2001-05994,
  author     = {Tucker, Mycal and Zhou, Yilun and Shah, Julie},
  title      = {Adversarially Guided Self-Play for Adopting Social Conventions},
  journal    = {CoRR},
  volume     = {abs/2001.05994},
  year       = {2020},
  url        = {https://arxiv.org/abs/2001.05994},
  eprinttype = {arXiv},
  eprint     = {2001.05994},
  timestamp  = {Fri, 17 Jan 2020 14:07:30 +0100},
  biburl     = {https://dblp.org/rec/journals/corr/abs-2001-05994.bib},
  bibsource  = {dblp computer science bibliography, https://dblp.org},
}

Learning Unknown Groundings for Natural Language Interaction with Mobile Robots, M. Tucker, D. Aksaray, R. Paul, GJ Stein, and N. Roy. International Symposium on Robotics Research 2017.


@inproceedings{Tucker2017LearningUG,
  author    = {Tucker, Mycal and Aksaray, Derya and Paul, Rohan and Stein, Gregory J. and Roy, Nicholas},
  title     = {Learning Unknown Groundings for Natural Language Interaction with Mobile Robots},
  booktitle = {International Symposium on Robotics Research ({ISRR})},
  year      = {2017},
}