D. Chen, Z. Zhang, Y. Liu, X. Yang — IV Workshops 2026, Workshop
TL;DR: INSIGHT leverages Vision-Language Models to detect context-aware hazards in autonomous driving, enabling more reliable safety reasoning under complex real-world conditions.
Z. Zhang, Z. Peng, H. Yu, M. Chen, Y. Liu — IEEE Network 2025, Journal
TL;DR: A comprehensive survey and framework for building, optimizing, and deploying digital network twins in next-generation wireless systems, covering creation pipelines, FL-based optimization, and open research challenges.
@article{zhang2025digital,
  title   = {Digital Network Twins for Next-Generation Wireless: Creation, Optimization, and Challenges},
  author  = {Zhang, Zifan and Peng, Zijian and Yu, Hongyang and Chen, Mingzhe and Liu, Yuchen},
  journal = {IEEE Network},
  year    = {2025},
}
Z. Zhang, M. Fang, M. Chen, Y. Liu — MobiWac 2025, Workshop
TL;DR: Introduces principled operations --- transfer, merge, and split --- for task-oriented network digital twins, enabling flexible composition and reuse of twin models across heterogeneous network environments.
@inproceedings{zhang2025transferring,
  title     = {On Transferring, Merging, and Splitting Task-Oriented Network Digital Twins},
  author    = {Zhang, Zifan and Fang, Minghong and Chen, Mingzhe and Liu, Yuchen},
  booktitle = {International Symposium on Mobility Management and Wireless Access ({MobiWac})},
  year      = {2025},
}
Z. Zhang, M. Fang, D. Chen, X. Yang, Y. Liu — IEEE Wireless Commun. 2025, Journal
TL;DR: Surveys the synergy between AI techniques and digital twins for jointly solving network optimization, traffic forecasting, and adversarial security problems in next-generation wireless systems.
@article{zhang2025synergizing,
  title   = {Synergizing {AI} and Digital Twins for Next-Generation Network Optimization, Forecasting, and Security},
  author  = {Zhang, Zifan and Fang, Minghong and Chen, Dingxi and Yang, Xiao and Liu, Yuchen},
  journal = {IEEE Wireless Communications},
  volume  = {32},
  number  = {3},
  pages   = {98--105},
  year    = {2025},
}
W. Wang, Q. Ma, Z. Zhang, Y. Liu, Z. Liu, M. Fang — WWW 2025, Workshop
TL;DR: Reveals that federated unlearning is vulnerable to poisoning attacks that manipulate what gets forgotten, and proposes defense strategies to restore the integrity of the unlearning process.
@inproceedings{wang2025poisoning,
  title     = {Poisoning Attacks and Defenses to Federated Unlearning},
  author    = {Wang, Wenbo and Ma, Qiang and Zhang, Zifan and Liu, Yuchen and Liu, Zhifeng and Fang, Minghong},
  booktitle = {Companion Proceedings of the {ACM} on Web Conference 2025},
  pages     = {1365--1369},
  year      = {2025},
}
M. Fang, Z. Zhang, Hairi, P. Khanduri, J. Liu, S. Lu, Y. Liu, Z. Gong — CCS 2024, Conference
TL;DR: Provides the first theoretically-grounded Byzantine-robust algorithm for fully decentralized federated learning (no central server), closing a fundamental gap between decentralized and server-based FL robustness guarantees.
@inproceedings{fang2024toward,
  title     = {Toward {Byzantine}-Robust Decentralized Federated Learning},
  author    = {Fang, Minghong and Zhang, Zifan and {Hairi} and Khanduri, Prashant and Liu, Jia and Lu, Songtao and Liu, Yuchen and Gong, Neil},
  booktitle = {Proceedings of the 2024 {ACM} {SIGSAC} Conference on Computer and Communications Security},
  year      = {2024},
}
M. Fang, Z. Zhang, A. Velasquez, J. Liu — WiOpt 2024, Conference
TL;DR: Establishes fundamental hardness results showing that Byzantine-robust policy evaluation in fully decentralized MARL is significantly harder than in centralized or server-based settings, with theoretical lower bounds.
@inproceedings{fang2024hardness,
  title     = {On the Hardness of Decentralized Multi-Agent Policy Evaluation under {Byzantine} Attacks},
  author    = {Fang, Minghong and Zhang, Zifan and Velasquez, Alvaro and Liu, Jia},
  booktitle = {22nd International Symposium on Modeling and Optimization in Mobile, Ad Hoc, and Wireless Networks ({WiOpt})},
  year      = {2024},
}
Z. Zhang, M. Fang, M. Chen, G. Li, X. Lin, Y. Liu — IoT-J 2024, Journal
TL;DR: First systematic study of model poisoning attacks on distributed network digital twin systems; proposes a two-stage defense framework combining anomaly detection and robust aggregation that restores NDT accuracy under attack.
@article{zhang2024securing,
  title   = {Securing Distributed Network Digital Twin Systems Against Model Poisoning Attacks},
  author  = {Zhang, Zifan and Fang, Minghong and Chen, Mingzhe and Li, Gaofei and Lin, Xin and Liu, Yuchen},
  journal = {IEEE Internet of Things Journal},
  volume  = {11},
  number  = {21},
  pages   = {34312--34324},
  year    = {2024},
}
Z. Zhang, M. Chen, Z. Yang, Y. Liu — IFIP Networking 2024, Conference
TL;DR: Proposes a joint vertical-and-horizontal federated learning scheme that maps heterogeneous wireless network entities into a unified digital twin representation, enabling cross-domain synchronization and inference.
@inproceedings{zhang2024mapping,
  title     = {Mapping Wireless Networks into Digital Reality through Joint Vertical and Horizontal Learning},
  author    = {Zhang, Zifan and Chen, Mingzhe and Yang, Zhaoyang and Liu, Yuchen},
  booktitle = {2024 {IFIP} Networking Conference ({IFIP} Networking)},
  pages     = {359--367},
  year      = {2024},
}
Z. Zhang, M. Fang, J. Huang, Y. Liu — IFIP Networking 2024, Conference
TL;DR: Demonstrates that wireless traffic prediction models trained via federated learning are highly vulnerable to data poisoning, and proposes a detection-and-mitigation scheme tailored to the spatio-temporal structure of network traffic.
Z. Zhang, Y. Liu, Z. Peng, M. Chen, D. Xu, S. Cui — JSAC 2024, Journal
TL;DR: Uses a network digital twin as a risk-free simulation environment to train a reinforcement learning agent for edge caching, achieving reliable content placement without disrupting live network operations.
@article{zhang2024digital,
  title   = {Digital Twin-Assisted Data-Driven Optimization for Reliable Edge Caching in Wireless Networks},
  author  = {Zhang, Zifan and Liu, Yuchen and Peng, Zijian and Chen, Mingzhe and Xu, Daquan and Cui, Shuguang},
  journal = {IEEE Journal on Selected Areas in Communications},
  volume  = {42},
  number  = {11},
  pages   = {3306--3320},
  year    = {2024},
}
TL;DR: Introduces a local TD-update approach for decentralized MARL policy evaluation that simultaneously reduces both sample complexity and communication overhead, with finite-time convergence guarantees.
@inproceedings{hairi2024sample,
  title     = {Sample and Communication Efficient Fully Decentralized {MARL} Policy Evaluation via a New Approach: Local {TD} Update},
  author    = {{Hairi} and Zhang, Zifan and Liu, Jia},
  booktitle = {International Conference on Autonomous Agents and Multiagent Systems ({AAMAS})},
  year      = {2024},
}
TL;DR: Master's thesis investigating communication-efficient and Byzantine-robust algorithms for decentralized multi-agent reinforcement learning, unifying efficiency and security in peer-to-peer collaborative learning.
@mastersthesis{zhang2023communication,
  title  = {Communication Efficiency and Security for Multi-Agent Reinforcement Learning},
  author = {Zhang, Zifan},
  school = {The Ohio State University},
  year   = {2023},
}