@inproceedings{10.1145/3715275.3732095,
  author = {Lameiro, Francesca and Dunagan, Lavinia and Card, Dallas and Gilbert, Eric and Haimson, Oliver},
  title = {TIDEs: A Transgender and Nonbinary Community-Labeled Dataset and Model for Transphobia Identification in Digital Environments},
  year = {2025},
  isbn = {9798400714825},
  publisher = {Association for Computing Machinery},
  address = {New York, NY, USA},
  url = {https://doi.org/10.1145/3715275.3732095},
  doi = {10.1145/3715275.3732095},
  abstract = {Transphobic rhetoric is a prevalent problem on social media that existing platform policies fail to meaningfully address. As such, trans people often create or adopt technologies independent from (but deployed within) platforms that help them mitigate the effects of facing transphobia online. In this paper, we introduce TIDEs (Transphobia Identification in Digital Environments), a dataset and model for detecting transphobic speech to contribute to the growing space of trans technologies for content moderation. We outline care-centered data practices, a methodology for constructing and labeling datasets for hate speech classification, which we developed while working closely with trans and nonbinary data annotators. Our fine-tuned DeBERTa model succeeds at detecting several ideologically distinct types of transphobia, achieving an F1 score of 0.81. As a publicly available dataset and model, TIDEs can serve as the base for future trans technologies and research that confronts and addresses the problem of online transphobia. Our results suggest that downstream applications of TIDEs may be deployable for reducing online harm for trans people.},
  booktitle = {Proceedings of the 2025 ACM Conference on Fairness, Accountability, and Transparency},
  pages = {1411--1423},
  numpages = {13},
  keywords = {transgender, content moderation, natural language processing, hate speech detection, trans technologies},
  location = {},
  series = {FAccT '25}
}