Amazing!! I ran 1,000 epochs and got an accuracy of 0.87, but after adding the Verb tag to the check it starts at 0.93!! kkma recognizes nominalized words that end in a verb stem as verbs!!
The part that takes user input and analyzes the words is written as below.
from konlpy.tag import Okt
okt = Okt()
from gensim.models import Word2Vec
from keras.layers import Dense, Flatten, SimpleRNN, Dropout
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.layers.embeddings import Embedding
from keras.utils import to_categorical
import numpy as np
from sklearn.cluster import KMeans


def main():
    model = Word2Vec.load('./TagWord2VecModel')
    print(model)
    MAX_VOCAB = len(model.wv.vocab)
    WV_SIZE = model.wv.vectors.shape[1]
    WORD_MAX = 6
    CATEGORY_SIZE = 7
    print("Loaded model vocab size:", MAX_VOCAB)
    print("Loaded model vector size:", WV_SIZE)

    # Show the words in the model.
    showWord2VecClusters(Model=model)

    # Set up the Keras model (must match the trained network).
    model2 = Sequential()
    model2.add(Embedding(input_dim=MAX_VOCAB, output_dim=WV_SIZE, input_length=WORD_MAX,
                         weights=[model.wv.vectors], trainable=False))
    #model2.add(Flatten())
    model2.add(SimpleRNN(256, input_shape=(4, 4)))
    model2.add(Dropout(0.2))
    model2.add(Dense(128))
    model2.add(Dropout(0.2))
    model2.add(Dense(64, activation='relu'))
    model2.add(Dropout(0.2))
    model2.add(Dense(CATEGORY_SIZE, activation='softmax'))

    # Path of the saved weights.
    weight_path = "./saved_network_weight.h5"
    model2.load_weights(weight_path)
    print("Loaded the saved weights")

    model2.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model2.summary()

    checkSentenceWord(Model=model)
    userInput(Word2Vec=model, Networks=model2, Maxword=WORD_MAX)


def showWord2VecClusters(Model):
    # Show the words currently in the model, grouped with KMeans clustering.
    word_vectors = Model.wv.syn0                    # feature vectors of the vocabulary
    num_clusters = int(word_vectors.shape[0] / 50)  # roughly one cluster per 50 words
    print(num_clusters)

    kmeans_clustering = KMeans(n_clusters=num_clusters)
    idx = kmeans_clustering.fit_predict(word_vectors)
    idx = list(idx)
    names = Model.wv.index2word
    word_centroid_map = {names[i]: idx[i] for i in range(len(names))}

    for c in range(num_clusters):
        # Print the cluster number and the words assigned to it.
        print("\ncluster {}".format(c))
        words = []
        cluster_values = list(word_centroid_map.values())
        for i in range(len(cluster_values)):
            if cluster_values[i] == c:
                words.append(list(word_centroid_map.keys())[i])
        print(words)


def userInput(Word2Vec, Networks, Maxword):
    EndFlag = False
    while True:
        # Collect and check user input for one sentence.
        ErrorFlag = False
        ZeroFlag = False
        wordToPredictSentence = []
        wordToPredictSentenceStr = []
        index = 0
        # Read up to Maxword words and validate each one.
        # A while loop is used so the index only advances on valid input.
        while index < Maxword:
            print("Enter word %d/6" % (index + 1))
            print("Type END!! to quit")
            print("Type ZERO!! to fill the rest with zeros")
            if not ZeroFlag:
                userInput = input()
                if userInput == "END!!":
                    EndFlag = True
                    break
                if userInput == "ZERO!!":
                    ZeroFlag = True
            try:
                # Reset the error flag before the vocabulary lookup.
                ErrorFlag = False
                num = Word2Vec.wv.vocab.get(userInput).index
                print(num)
            except AttributeError:
                if not ZeroFlag:
                    print("That word is not in the vocabulary.\nPlease enter it again")
                    ErrorFlag = True
            # Only advance the index if the previous input was valid.
            if ErrorFlag == True:
                continue
            else:
                index = index + 1
            if not ZeroFlag:
                wordToPredictSentence.append(num)
                wordToPredictSentenceStr.append(userInput)
                print(wordToPredictSentence)
            else:
                wordToPredictSentence.append(0)
                wordToPredictSentenceStr.append(userInput)
                print(wordToPredictSentence)

        #input_predict = np.asarray([[num0, num1, num2, 0, 0, 0]])
        input_predict = np.asarray([wordToPredictSentence])
        if EndFlag == False:
            print("Input words:", wordToPredictSentenceStr)
            myPrediction = Networks.predict_classes(input_predict, batch_size=100, verbose=0)
            myPredictionAcc = Networks.predict(input_predict, batch_size=100, verbose=0)
            print("Predicted class:", myPrediction, "probabilities:", myPredictionAcc)
        else:
            # Leave the outer while loop.
            break


def checkSentenceWord(Model):
    print(Model)
    sample_sentence = "B220ST YD5 CLAMP 잠김"
    tokenlist = okt.pos(sample_sentence, stem=True, norm=True)
    for word in tokenlist:
        print(word)


if __name__ == "__main__":
    main()
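One thing the prediction script does not do yet is turn the predicted class index back into its original label text. A hedged sketch of that mapping, assuming the LabelEncoder fitted in the training script shown later is somehow available here (for example re-fitted on the same label file or persisted separately, neither of which the current code does):

# Sketch only: map the integer class from predict_classes back to the label text.
# `label_encoder` is the LabelEncoder from the training script; making it
# available to this script is an assumption of this sketch.
predicted_label = label_encoder.inverse_transform(myPrediction)
print("Predicted label:", predicted_label[0])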
The network uses an LSTM in a fairly complex configuration. In Keras the network structure can be saved as JSON and the weights as h5; right now only the weights are checkpointed, so later I should modify the code to save the network structure to JSON and load it back from JSON.
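A minimal sketch of that change using Keras's to_json / model_from_json pair (the JSON path ./saved_network_structure.json is an assumption, not part of the current scripts; model2 and weight_path are the variables from the code above):

from keras.models import model_from_json

# Saving: write the architecture to JSON and the weights to h5.
# The JSON file name is a hypothetical choice for this sketch.
with open("./saved_network_structure.json", "w") as f:
    f.write(model2.to_json())
model2.save_weights(weight_path)

# Loading: rebuild the architecture from JSON, then restore the weights.
with open("./saved_network_structure.json", "r") as f:
    model2 = model_from_json(f.read())
model2.load_weights(weight_path)
model2.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])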
Analyzing "B220ST YD5 CLAMP 잠김" from above gives the following result.
('B', 'Alpha')
('220', 'Number')
('ST', 'Alpha')
('YD', 'Alpha')
('5', 'Number')
('CLAMP', 'Alpha')
('잠기다', 'Verb')
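This is also where including the Verb tag in the POS filter matters: with stem=True, Okt normalizes 잠김 to 잠기다 and tags it as a Verb, so without the Verb tag that token would be dropped. A minimal sketch of the filtering and index lookup, the same logic the training script below uses (it assumes okt and the loaded Word2Vec model from these scripts):

# Keep only the POS tags used for training and map each token to its
# Word2Vec vocabulary index; out-of-vocabulary tokens fall back to 0.
indices = []
for token, pos in okt.pos("B220ST YD5 CLAMP 잠김", stem=True, norm=True):
    if pos in ["Noun", "Alpha", "Number", "Verb"]:
        try:
            indices.append(model.wv.vocab.get(token).index)
        except AttributeError:
            indices.append(0)
print(indices)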
from konlpy.tag import Okt
okt = Okt()
from gensim.models import Word2Vec
from keras.layers import Dense, Flatten, SimpleRNN, Dropout, LSTM
from keras.models import Sequential
from keras.preprocessing.sequence import pad_sequences
from keras.layers.embeddings import Embedding
from keras.utils import to_categorical
from keras.optimizers import Adam
import numpy as np
# LabelEncoder converts the text labels to integers.
from sklearn.preprocessing import LabelEncoder
# For saving the model weights.
from keras.callbacks import ModelCheckpoint

targetFile = open("./tag정리.txt", "r", encoding='UTF-8')

#model = Word2Vec.load('./myModelV1')
model = Word2Vec.load('./TagWord2VecModel')
MAX_VOCAB = len(model.wv.vocab)
WV_SIZE = model.wv.vectors.shape[1]
print("Loaded model vocab size:", MAX_VOCAB)
print("Loaded model vector size:", WV_SIZE)

i = 0
sentence_by_index = []
training_result = []
result = []
WORD_MAX = 16

while True:
    lines = targetFile.readline()
    if not lines:
        break
    firstColumn = lines.split(',')
    #if i == 1000: break
    i = i + 1

    # Use the same morphological analyzer that built the Word2Vec model.
    tokenlist = okt.pos(firstColumn[1], stem=True, norm=True)
    temp = []
    for word in tokenlist:
        # word[0] is the token, word[1] is its part of speech.
        if word[1] in ["Noun", "Alpha", "Number", "Verb"]:
            #temp.append(model.wv[word[0]])
            # Convert word[0] to its vocabulary index; words missing from the
            # vocabulary get index 0 so the inputs stay aligned with the outputs.
            try:
                temp.append(model.wv.vocab.get(word[0]).index)
            except AttributeError:
                temp.append(0)

    # The code this was adapted from appended temp only when it was non-empty;
    # append even an empty list here so the inputs line up with the outputs.
    #if temp:
    #    sentence_by_index.append(temp)
    sentence_by_index.append(temp)

    # Collect the label for this line.
    tempResult = firstColumn[2].strip('\n')
    training_result.append(tempResult)

targetFile.close()

# Convert the labels to one-hot (categorical) vectors.
label_encoder = LabelEncoder()
training_result_asarray = np.asarray(training_result)
integer_encoded = label_encoder.fit_transform(training_result_asarray)
categorical_training_result = to_categorical(integer_encoded, dtype='int')

# Pad the index sequences to a fixed length.
fixed_sentence_by_index = pad_sequences(sentence_by_index, maxlen=WORD_MAX, padding='post', dtype='int')

size_categorical_training_result = categorical_training_result.shape[1]
print("Output size:", size_categorical_training_result)

# Set up the Keras model.
model2 = Sequential()
model2.add(Embedding(input_dim=MAX_VOCAB, output_dim=WV_SIZE, input_length=WORD_MAX,
                     weights=[model.wv.vectors], trainable=False))
#model2.add(Flatten())
model2.add(LSTM(1024, input_shape=(4, 4)))
model2.add(Dropout(0.2))
model2.add(Dense(512))
model2.add(Dropout(0.2))
model2.add(Dense(256, activation='relu'))
model2.add(Dropout(0.2))
model2.add(Dense(size_categorical_training_result, activation='softmax'))
model2.compile(loss='categorical_crossentropy',
               optimizer=Adam(lr=0.001, epsilon=1e-08, decay=0.0),
               metrics=['accuracy'])

# Path where the best weights are saved.
weight_path = "./saved_network_weight.h5"
checkpoint = ModelCheckpoint(weight_path, monitor='acc', verbose=2, save_best_only=True, mode='auto')
callbacks_list = [checkpoint]

model2.fit(x=fixed_sentence_by_index, y=categorical_training_result,
           epochs=1000, verbose=2, validation_split=0.2,
           callbacks=callbacks_list, batch_size=200)
model2.summary()
Running this, the output looks roughly like the following.
Loaded model vocab size: 694
Loaded model vector size: 10
Output size: 7
Train on 7602 samples, validate on 1901 samples
Epoch 1/1000
 - 7s - loss: 1.0435 - acc: 0.5389 - val_loss: 1.7584 - val_acc: 0.7564

Epoch 00001: acc improved from -inf to 0.53894, saving model to ./saved_network_weight.h5
Epoch 2/1000
 - 3s - loss: 0.4565 - acc: 0.8172 - val_loss: 2.1145 - val_acc: 0.7480

Epoch 00002: acc improved from 0.53894 to 0.81715, saving model to ./saved_network_weight.h5
Epoch 3/1000
 - 3s - loss: 0.4405 - acc: 0.8508 - val_loss: 2.5999 - val_acc: 0.7575

Epoch 00003: acc improved from 0.81715 to 0.85083, saving model to ./saved_network_weight.h5
Epoch 4/1000
 - 3s - loss: 0.2354 - acc: 0.9100 - val_loss: 2.1261 - val_acc: 0.7475

Epoch 00004: acc improved from 0.85083 to 0.91002, saving model to ./saved_network_weight.h5
Epoch 5/1000
 - 3s - loss: 0.1733 - acc: 0.9323 - val_loss: 2.2204 - val_acc: 0.7522

Epoch 00005: acc improved from 0.91002 to 0.93225, saving model to ./saved_network_weight.h5
Epoch 6/1000
 - 3s - loss: 0.2758 - acc: 0.9025 - val_loss: 3.7245 - val_acc: 0.7543