This research delves into the impact of Chat Generative Pre-trained Transformer, one of the Open Artificial Intelligence Generative Pre-trained Transformer models. This model underwent extensive training on a vast corpus of internet text to gain insights into the mechanics of human language and its role in forming phrases, sentences, and paragraphs. The urgency of this inquiry arises from the emergence of Chat Generative Pre-trained Transformer, which has stirred significant debate and captured widespread attention in both research and educational circles. Since its debut in November 2022, Chat Generative Pre-trained Transformer has demonstrated substantial potential across numerous domains. However, concerns voiced on Twitter have centered on potential negative consequences, such as increased
forgery and misinformation. Consequently, understanding public sentiment toward Chat Generative Pre-trained Transformer technology through sentiment analysis has become crucial. The research’s primary objective is to conduct Sentiment Analysis Classification of Chat Generative Pre-trained Transformer regarding public opinions on Twitter in Indonesia. This goal involves quantifying and categorizing public sentiment from Twitter’s vast data pool into three clusters: positive, negative, or neutral. In the data clustering stage, the Self-Organizing Map technique is used. After the text data has been weighted and clustered, the next step involves using the classification technique with Long
Short-Term Memory to determine the public sentiment outcomes resulting from the presence of Chat Generative Pre-trained Transformer technology. Rigorous testing has demonstrated the robust performance of the model, with optimal parameters: relu activation function, som size of 5, num epoch som and num epoch lstm both at 128, yielding an impressive 95.07% accuracy rate.
%0 Journal Article
%1 sinaga2023analyzing
%A Sinaga, Frans Mikael
%A Pipin, Sio Jurnalis
%A Winardi, Sunaryo
%A Tarigan, Karina Mannita
%A Brahmana, Ananda Putra
%D 2023
%J MATRIK: Jurnal Manajemen, Teknik Informatika dan Rekayasa Komputer
%K Algorithms Analysis Long Map Memory Self-Organizing Sentiment Short-Term myown
%N 1
%P 131-142
%R 10.30812/matrik.v23i1.3332
%T Analyzing Sentiment with Self-Organizing Map and Long Short-Term Memory Algorithms
%U https://journal.universitasbumigora.ac.id/index.php/matrik/article/view/3332
%V 23
%X This research delves into the impact of Chat Generative Pre-trained Transformer, one of Open Artificial Intelligence Generative Pretrained Transformer models. This model underwent extensive training on a vast corpus of internet text to gain insights into the mechanics of human language and its role in forming phrases, sentences, and paragraphs. The urgency of this inquiry arises from Chat Generative Pre-trained Transformer emergence, which has stirred significant debate and captured widespread attention in both research and educational circles. Since its debut in November 2022, Chat Generative Pre-trained Transformer has demonstrated substantial potential across numerous domains. However, concerns voiced on Twitter have centered on potential negative consequences, such as increased
forgery and misinformation. Consequently, understanding public sentiment toward Chat Generative Pre-trained Transformer technology through sentiment analysis has become crucial. The research’s primary objective is to conduct Sentiment Analysis Classification of Chat Generative Pre-trained Transformer regarding public opinions on Twitter in Indonesia. This goal involves quantifying and categorizing public sentiment from Twitter’s vast data pool into three clusters: positive, negative, or neutral. In the data clustering stage, the Self-Organizing Map technique is used. After the text data has been weighted and clustered, the next step involves using the classification technique with Long
Short-Term Memory to determine the public sentiment outcomes resulting from the presence of Chat Generative Pre-trained Transformer technology. Rigorous testing has demonstrated the robust performance of the model, with optimal parameters: relu activation function, som size of 5, num epoch som and num epoch lstm both at 128, yielding an impressive 95.07% accuracy rate.
@article{sinaga2023analyzing,
  abstract  = {This research delves into the impact of Chat Generative Pre-trained Transformer, one of Open Artificial Intelligence Generative Pretrained Transformer models. This model underwent extensive training on a vast corpus of internet text to gain insights into the mechanics of human language and its role in forming phrases, sentences, and paragraphs. The urgency of this inquiry arises from Chat Generative Pre-trained Transformer emergence, which has stirred significant debate and captured widespread attention in both research and educational circles. Since its debut in November 2022, Chat Generative Pre-trained Transformer has demonstrated substantial potential across numerous domains. However, concerns voiced on Twitter have centered on potential negative consequences, such as increased
forgery and misinformation. Consequently, understanding public sentiment toward Chat Generative Pre-trained Transformer technology through sentiment analysis has become crucial. The research's primary objective is to conduct Sentiment Analysis Classification of Chat Generative Pre-trained Transformer regarding public opinions on Twitter in Indonesia. This goal involves quantifying and categorizing public sentiment from Twitter's vast data pool into three clusters: positive, negative, or neutral. In the data clustering stage, the Self-Organizing Map technique is used. After the text data has been weighted and clustered, the next step involves using the classification technique with Long
Short-Term Memory to determine the public sentiment outcomes resulting from the presence of Chat Generative Pre-trained Transformer technology. Rigorous testing has demonstrated the robust performance of the model, with optimal parameters: relu activation function, som size of 5, num epoch som and num epoch lstm both at 128, yielding an impressive 95.07% accuracy rate.},
  added-at  = {2024-03-23T16:48:52.000+0100},
  author    = {Sinaga, Frans Mikael and Pipin, Sio Jurnalis and Winardi, Sunaryo and Tarigan, Karina Mannita and Brahmana, Ananda Putra},
  biburl    = {https://www.bibsonomy.org/bibtex/25d1ee51bbbcab158bf3f9e43919cc592/siopipin},
  doi       = {10.30812/matrik.v23i1.3332},
  interhash = {23d2bce2e2176001aa77713d87023777},
  intrahash = {5d1ee51bbbcab158bf3f9e43919cc592},
  journal   = {MATRIK: Jurnal Manajemen, Teknik Informatika dan Rekayasa Komputer},
  keywords  = {Algorithms Analysis Long Map Memory Self-Organizing Sentiment Short-Term myown},
  number    = {1},
  pages     = {131--142},
  timestamp = {2024-03-23T16:48:52.000+0100},
  title     = {Analyzing Sentiment with {Self-Organizing} Map and {Long Short-Term Memory} Algorithms},
  url       = {https://journal.universitasbumigora.ac.id/index.php/matrik/article/view/3332},
  volume    = {23},
  year      = {2023}
}