@article{oai:doshisha.repo.nii.ac.jp:00029397,
  author  = {花本, 凪 and Hanamoto, Nagi and 幾島, 直哉 and Ikushima, Naoya and 小野, 景子 and Ono, Keiko and 槇原, 絵里奈 and Makihara, Erina},
  title   = {適応的差分進化を用いたNeural Networkにおけるモデル圧縮 [Model Compression in Neural Networks Using Adaptive Differential Evolution]},
  journal = {同志社大学ハリス理化学研究報告 (The Harris Science Review of Doshisha University)},
  volume  = {63},
  issue   = {4},
  pages   = {181--187},
  month   = {Jan},
  year    = {2023},
  note    = {Various deep neural network models have been proposed; however, with the emergence of big data, high computing power is required to train and use such models. When a rich computing environment is unavailable, a small yet adequate model is needed, but it is challenging to build a relatively small, high-performance model. We therefore propose a model compression method based on differential evolution. Specifically, the proposed method simultaneously optimizes not only network structures but also weights by differential evolution. Experimental results showed that the proposed method appropriately reduced weight parameters during optimization and, by adjusting the compression rate, achieved accuracy comparable to that of the original, uncompressed model.},
  yomi    = {ハナモト, ナギ and イクシマ, ナオヤ and オノ, ケイコ and マキハラ, エリナ}
}