Despite the ability to train capable LLMs, the methodology for maintaining
their relevancy and rectifying errors remains elusive. To this end, the past
few years have witnessed a surge in techniques for editing LLMs, the objective
of which is to efficiently alter the behavior of LLMs within a specific domain
without negatively impacting performance across other inputs. This paper
embarks on a deep exploration of the problems, methods, and opportunities
related to model editing for LLMs. In particular, we provide an exhaustive
overview of the task definition and challenges associated with model editing,
along with an in-depth empirical analysis of the most progressive methods
currently at our disposal. We also build a new benchmark dataset to facilitate
a more robust evaluation and pinpoint enduring issues intrinsic to existing
techniques. Our objective is to provide valuable insights into the
effectiveness and feasibility of each editing technique, thereby assisting the
community in making informed decisions on the selection of the most appropriate
method for a specific task or context. Code and datasets are available at
https://github.com/zjunlp/EasyEdit.
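
To make the task concrete: below is a minimal sketch of the kind of single factual edit the paper evaluates, written against the EasyEdit repository linked above. This is hedged, not authoritative — the class names and arguments follow the repository's README at the time of writing and may differ across versions, and the hyperparameter file path is hypothetical.

```python
# Sketch of one knowledge edit with EasyEdit (github.com/zjunlp/EasyEdit).
# Assumption: BaseEditor / ROMEHyperParams API as shown in the repo README;
# the hparams YAML path below is hypothetical and depends on your checkout.
from easyeditor import BaseEditor, ROMEHyperParams

# Load hyperparameters for one editing method (ROME) and one base model.
hparams = ROMEHyperParams.from_hparams('./hparams/ROME/gpt2-xl.yaml')
editor = BaseEditor.from_hparams(hparams)

# Apply one edit: change the model's answer for this prompt while, ideally,
# leaving behavior on unrelated inputs (locality) untouched.
metrics, edited_model, _ = editor.edit(
    prompts=['Who developed the theory of general relativity?'],
    ground_truth=['Albert Einstein'],
    target_new=['Isaac Newton'],  # counterfactual target, as in editing benchmarks
)
print(metrics)  # reliability / generalization / locality scores for the edit
```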
Description
[2305.13172] Editing Large Language Models: Problems, Methods, and Opportunities
@misc{yao2023editing,
author = {Yao, Yunzhi and Wang, Peng and Tian, Bozhong and Cheng, Siyuan and Li, Zhoubo and Deng, Shumin and Chen, Huajun and Zhang, Ningyu},
keywords = {llm network neural nlp},
note = {arXiv:2305.13172. Comment: EMNLP 2023. Updated with new experiments},
title = {Editing Large Language Models: Problems, Methods, and Opportunities},
url = {http://arxiv.org/abs/2305.13172},
year = 2023
}