# CSCL 2023 Automated Multi-Dimensional Analysis of Peer Feedback in Middle School Mathematics

!pip install tensorflow
!pip install tensorflow_hub
!pip install tensorflow-addons
Requirement already satisfied: tensorflow in /usr/local/lib/python3.10/dist-packages (2.17.1)
Requirement already satisfied: absl-py>=1.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (1.4.0)
Requirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (1.6.3)
Requirement already satisfied: flatbuffers>=24.3.25 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (24.3.25)
Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (0.6.0)
Requirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (0.2.0)
Requirement already satisfied: h5py>=3.10.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (3.12.1)
Requirement already satisfied: libclang>=13.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (18.1.1)
Requirement already satisfied: ml-dtypes<0.5.0,>=0.3.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (0.4.1)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (3.4.0)
Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from tensorflow) (24.2)
Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (4.25.5)
Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (2.32.3)
Requirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from tensorflow) (75.1.0)
Requirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (1.16.0)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (2.5.0)
Requirement already satisfied: typing-extensions>=3.6.6 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (4.12.2)
Requirement already satisfied: wrapt>=1.11.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (1.17.0)
Requirement already satisfied: grpcio<2.0,>=1.24.3 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (1.68.1)
Requirement already satisfied: tensorboard<2.18,>=2.17 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (2.17.1)
Requirement already satisfied: keras>=3.2.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (3.5.0)
Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (0.37.1)
Requirement already satisfied: numpy<2.0.0,>=1.23.5 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (1.26.4)
Requirement already satisfied: wheel<1.0,>=0.23.0 in /usr/local/lib/python3.10/dist-packages (from astunparse>=1.6.0->tensorflow) (0.45.1)
Requirement already satisfied: rich in /usr/local/lib/python3.10/dist-packages (from keras>=3.2.0->tensorflow) (13.9.4)
Requirement already satisfied: namex in /usr/local/lib/python3.10/dist-packages (from keras>=3.2.0->tensorflow) (0.0.8)
Requirement already satisfied: optree in /usr/local/lib/python3.10/dist-packages (from keras>=3.2.0->tensorflow) (0.13.1)
Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorflow) (3.4.0)
Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorflow) (3.10)
Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorflow) (2.2.3)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorflow) (2024.8.30)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.18,>=2.17->tensorflow) (3.7)
Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.18,>=2.17->tensorflow) (0.7.2)
Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.18,>=2.17->tensorflow) (3.1.3)
Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=1.0.1->tensorboard<2.18,>=2.17->tensorflow) (3.0.2)
Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich->keras>=3.2.0->tensorflow) (3.0.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich->keras>=3.2.0->tensorflow) (2.18.0)
Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0->rich->keras>=3.2.0->tensorflow) (0.1.2)
Requirement already satisfied: tensorflow_hub in /usr/local/lib/python3.10/dist-packages (0.16.1)
Requirement already satisfied: numpy>=1.12.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow_hub) (1.26.4)
Requirement already satisfied: protobuf>=3.19.6 in /usr/local/lib/python3.10/dist-packages (from tensorflow_hub) (4.25.5)
Requirement already satisfied: tf-keras>=2.14.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow_hub) (2.17.0)
Requirement already satisfied: tensorflow<2.18,>=2.17 in /usr/local/lib/python3.10/dist-packages (from tf-keras>=2.14.1->tensorflow_hub) (2.17.1)
Requirement already satisfied: absl-py>=1.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (1.4.0)
Requirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (1.6.3)
Requirement already satisfied: flatbuffers>=24.3.25 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (24.3.25)
Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (0.6.0)
Requirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (0.2.0)
Requirement already satisfied: h5py>=3.10.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (3.12.1)
Requirement already satisfied: libclang>=13.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (18.1.1)
Requirement already satisfied: ml-dtypes<0.5.0,>=0.3.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (0.4.1)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (3.4.0)
Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (24.2)
Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (2.32.3)
Requirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (75.1.0)
Requirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (1.16.0)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (2.5.0)
Requirement already satisfied: typing-extensions>=3.6.6 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (4.12.2)
Requirement already satisfied: wrapt>=1.11.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (1.17.0)
Requirement already satisfied: grpcio<2.0,>=1.24.3 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (1.68.1)
Requirement already satisfied: tensorboard<2.18,>=2.17 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (2.17.1)
Requirement already satisfied: keras>=3.2.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (3.5.0)
Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (0.37.1)
Requirement already satisfied: wheel<1.0,>=0.23.0 in /usr/local/lib/python3.10/dist-packages (from astunparse>=1.6.0->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (0.45.1)
Requirement already satisfied: rich in /usr/local/lib/python3.10/dist-packages (from keras>=3.2.0->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (13.9.4)
Requirement already satisfied: namex in /usr/local/lib/python3.10/dist-packages (from keras>=3.2.0->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (0.0.8)
Requirement already satisfied: optree in /usr/local/lib/python3.10/dist-packages (from keras>=3.2.0->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (0.13.1)
Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (3.4.0)
Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (3.10)
Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (2.2.3)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (2024.8.30)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.18,>=2.17->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (3.7)
Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.18,>=2.17->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (0.7.2)
Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.18,>=2.17->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (3.1.3)
Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=1.0.1->tensorboard<2.18,>=2.17->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (3.0.2)
Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich->keras>=3.2.0->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (3.0.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich->keras>=3.2.0->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (2.18.0)
Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0->rich->keras>=3.2.0->tensorflow<2.18,>=2.17->tf-keras>=2.14.1->tensorflow_hub) (0.1.2)
Collecting tensorflow-addons
  Downloading tensorflow_addons-0.23.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (1.8 kB)
Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from tensorflow-addons) (24.2)
Collecting typeguard<3.0.0,>=2.7 (from tensorflow-addons)
  Downloading typeguard-2.13.3-py3-none-any.whl.metadata (3.6 kB)
Downloading tensorflow_addons-0.23.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (611 kB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 611.8/611.8 kB 11.8 MB/s eta 0:00:00
Downloading typeguard-2.13.3-py3-none-any.whl (17 kB)
Installing collected packages: typeguard, tensorflow-addons
  Attempting uninstall: typeguard
    Found existing installation: typeguard 4.4.1
    Uninstalling typeguard-4.4.1:
      Successfully uninstalled typeguard-4.4.1
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
inflect 7.4.0 requires typeguard>=4.0.1, but you have typeguard 2.13.3 which is incompatible.
Successfully installed tensorflow-addons-0.23.0 typeguard-2.13.3
Unable to display output for mime type(s): application/vnd.colab-display-data+json
!pip uninstall tensorflow keras -y
!pip install tensorflow
Found existing installation: tensorflow 2.17.1
Uninstalling tensorflow-2.17.1:
  Successfully uninstalled tensorflow-2.17.1
Found existing installation: keras 3.5.0
Uninstalling keras-3.5.0:
  Successfully uninstalled keras-3.5.0
Collecting tensorflow
  Downloading tensorflow-2.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.1 kB)
Requirement already satisfied: absl-py>=1.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (1.4.0)
Requirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (1.6.3)
Requirement already satisfied: flatbuffers>=24.3.25 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (24.3.25)
Requirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (0.6.0)
Requirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (0.2.0)
Requirement already satisfied: libclang>=13.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (18.1.1)
Requirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (3.4.0)
Requirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from tensorflow) (24.2)
Requirement already satisfied: protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<6.0.0dev,>=3.20.3 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (4.25.5)
Requirement already satisfied: requests<3,>=2.21.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (2.32.3)
Requirement already satisfied: setuptools in /usr/local/lib/python3.10/dist-packages (from tensorflow) (75.1.0)
Requirement already satisfied: six>=1.12.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (1.16.0)
Requirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (2.5.0)
Requirement already satisfied: typing-extensions>=3.6.6 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (4.12.2)
Requirement already satisfied: wrapt>=1.11.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (1.17.0)
Requirement already satisfied: grpcio<2.0,>=1.24.3 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (1.68.1)
Collecting tensorboard<2.19,>=2.18 (from tensorflow)
  Downloading tensorboard-2.18.0-py3-none-any.whl.metadata (1.6 kB)
Collecting keras>=3.5.0 (from tensorflow)
  Downloading keras-3.7.0-py3-none-any.whl.metadata (5.8 kB)
Requirement already satisfied: numpy<2.1.0,>=1.26.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (1.26.4)
Requirement already satisfied: h5py>=3.11.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (3.12.1)
Requirement already satisfied: ml-dtypes<0.5.0,>=0.4.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (0.4.1)
Requirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow) (0.37.1)
Requirement already satisfied: wheel<1.0,>=0.23.0 in /usr/local/lib/python3.10/dist-packages (from astunparse>=1.6.0->tensorflow) (0.45.1)
Requirement already satisfied: rich in /usr/local/lib/python3.10/dist-packages (from keras>=3.5.0->tensorflow) (13.9.4)
Requirement already satisfied: namex in /usr/local/lib/python3.10/dist-packages (from keras>=3.5.0->tensorflow) (0.0.8)
Requirement already satisfied: optree in /usr/local/lib/python3.10/dist-packages (from keras>=3.5.0->tensorflow) (0.13.1)
Requirement already satisfied: charset-normalizer<4,>=2 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorflow) (3.4.0)
Requirement already satisfied: idna<4,>=2.5 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorflow) (3.10)
Requirement already satisfied: urllib3<3,>=1.21.1 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorflow) (2.2.3)
Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.10/dist-packages (from requests<3,>=2.21.0->tensorflow) (2024.8.30)
Requirement already satisfied: markdown>=2.6.8 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.19,>=2.18->tensorflow) (3.7)
Requirement already satisfied: tensorboard-data-server<0.8.0,>=0.7.0 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.19,>=2.18->tensorflow) (0.7.2)
Requirement already satisfied: werkzeug>=1.0.1 in /usr/local/lib/python3.10/dist-packages (from tensorboard<2.19,>=2.18->tensorflow) (3.1.3)
Requirement already satisfied: MarkupSafe>=2.1.1 in /usr/local/lib/python3.10/dist-packages (from werkzeug>=1.0.1->tensorboard<2.19,>=2.18->tensorflow) (3.0.2)
Requirement already satisfied: markdown-it-py>=2.2.0 in /usr/local/lib/python3.10/dist-packages (from rich->keras>=3.5.0->tensorflow) (3.0.0)
Requirement already satisfied: pygments<3.0.0,>=2.13.0 in /usr/local/lib/python3.10/dist-packages (from rich->keras>=3.5.0->tensorflow) (2.18.0)
Requirement already satisfied: mdurl~=0.1 in /usr/local/lib/python3.10/dist-packages (from markdown-it-py>=2.2.0->rich->keras>=3.5.0->tensorflow) (0.1.2)
Downloading tensorflow-2.18.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl (615.3 MB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 615.3/615.3 MB 3.3 MB/s eta 0:00:00
Downloading keras-3.7.0-py3-none-any.whl (1.2 MB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 1.2/1.2 MB 53.2 MB/s eta 0:00:00
Downloading tensorboard-2.18.0-py3-none-any.whl (5.5 MB)
   ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 5.5/5.5 MB 98.6 MB/s eta 0:00:00
Installing collected packages: tensorboard, keras, tensorflow
  Attempting uninstall: tensorboard
    Found existing installation: tensorboard 2.17.1
    Uninstalling tensorboard-2.17.1:
      Successfully uninstalled tensorboard-2.17.1
ERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.
tf-keras 2.17.0 requires tensorflow<2.18,>=2.17, but you have tensorflow 2.18.0 which is incompatible.
Successfully installed keras-3.7.0 tensorboard-2.18.0 tensorflow-2.18.0
Unable to display output for mime type(s): application/vnd.colab-display-data+json
import pandas as pd
import numpy as np
import tensorflow as tf
import tensorflow_hub as hub
import random
import nltk
import os
from xgboost import XGBClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier, RandomForestClassifier
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics import classification_report, roc_curve, roc_auc_score, confusion_matrix, accuracy_score, f1_score, cohen_kappa_score
from sklearn.model_selection import GroupKFold, train_test_split
from sklearn import tree, metrics

from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense

from nltk.stem import PorterStemmer
from nltk.corpus import stopwords


random.seed(20)
# Student-level cross validation: every row from the same student gets the same
# group id, so GroupKFold keeps each student's annotations inside a single fold.
group_dict = dict()

df = pd.read_csv('Annotations_final.csv')

# Assign each student the row index of their first occurrence (same labels the
# original produced), then build the per-row group vector in one vectorized map.
# This replaces np.append inside an iterrows loop, which was accidentally O(n^2).
for index, s_id in enumerate(df['created_by']):
    group_dict.setdefault(s_id, index)
groups = df['created_by'].map(group_dict).to_numpy(dtype=float)

# Set up the splitter with 5 splits
gkf = GroupKFold(n_splits=5)

## Bag of words

# Bag of words + neural nets: stem the annotation text, vectorize with bigram
# counts, then run a small dense classifier under student-level 5-fold CV.
# Stemming recovers root words from plurals etc.
porter = PorterStemmer()
stemmed_texts = []

X_text = df['annotation_text']

for answer in X_text:
    answer = ' '.join(porter.stem(word) for word in answer.split(' '))
    stemmed_texts.append(answer)

X, y = np.array(stemmed_texts), df['relating_to_self'] # CHANGE y HERE

#vect = CountVectorizer(ngram_range=(1,1), max_features=1000, stop_words="english") #only unigram
vect = CountVectorizer(ngram_range=(2,2), max_features=1000, stop_words="english") #only bigram

X = vect.fit_transform(X).toarray()
# Derive the feature count from the fitted vectorizer. The previous version
# hard-coded input_shape=(738,), which disagreed with the actual vocabulary
# size (the comment above even noted 728 bigram features).
n_features = X.shape[1]


# set up storage arrays for each round of validation
roc_auc_scores = np.array([])
accuracy_scores = np.array([])  # NOTE(review): never appended to below — kept in case later cells use it

# split, train, test and store performance metrics
for train_index, test_index in gkf.split(X, y, groups=groups):
    X_train, X_test = X[train_index], X[test_index]
    y_train, y_test = y[train_index], y[test_index]

    # Fresh model each fold. An explicit Input layer replaces the deprecated
    # input_shape kwarg (the Keras UserWarning visible in the run output).
    model = Sequential()
    model.add(tf.keras.layers.Input(shape=(n_features,)))
    model.add(Dense(12, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics = ['acc']
             )

    num_epochs = 30
    batch_size = 10

    model.fit(
        X_train,
        y_train,
        epochs=num_epochs,
        validation_split=0.1,
        shuffle=True,
        batch_size=batch_size)

    predictions = model.predict(X_test)

    # compute some metrics and store them for averaging later on
    roc_auc_scores = np.append(roc_auc_scores, roc_auc_score(y_test, predictions))

# print mean scores for the 5-fold CV
print("average roc_auc score: ", np.round(roc_auc_scores.mean(), 3))
print("stdv roc_auc score: ", np.round(roc_auc_scores.std(), 3))
print("max roc_auc score: ", np.round(roc_auc_scores.max(), 3))
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 2s 24ms/step - acc: 0.6781 - loss: 0.6864 - val_acc: 0.8947 - val_loss: 0.6716
Epoch 2/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.8789 - loss: 0.6644 - val_acc: 0.8421 - val_loss: 0.6573
Epoch 3/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.9119 - loss: 0.6438 - val_acc: 0.8421 - val_loss: 0.6402
Epoch 4/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9348 - loss: 0.6163 - val_acc: 0.8421 - val_loss: 0.6189
Epoch 5/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9554 - loss: 0.5744 - val_acc: 0.8421 - val_loss: 0.5941
Epoch 6/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9531 - loss: 0.5460 - val_acc: 0.8947 - val_loss: 0.5659
Epoch 7/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9599 - loss: 0.4890 - val_acc: 0.8947 - val_loss: 0.5333
Epoch 8/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 0.9810 - loss: 0.4360 - val_acc: 0.8947 - val_loss: 0.4998
Epoch 9/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9731 - loss: 0.3867 - val_acc: 0.8947 - val_loss: 0.4684
Epoch 10/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9755 - loss: 0.3339 - val_acc: 0.8947 - val_loss: 0.4367
Epoch 11/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9662 - loss: 0.2938 - val_acc: 0.8947 - val_loss: 0.4059
Epoch 12/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9853 - loss: 0.2334 - val_acc: 0.8947 - val_loss: 0.3765
Epoch 13/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9795 - loss: 0.2030 - val_acc: 0.8947 - val_loss: 0.3514
Epoch 14/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9697 - loss: 0.1870 - val_acc: 0.9474 - val_loss: 0.3290
Epoch 15/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 0.9802 - loss: 0.1511 - val_acc: 0.9474 - val_loss: 0.3112
Epoch 16/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9838 - loss: 0.1303 - val_acc: 0.9474 - val_loss: 0.2945
Epoch 17/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9951 - loss: 0.1099 - val_acc: 0.9474 - val_loss: 0.2817
Epoch 18/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9824 - loss: 0.0907 - val_acc: 0.9474 - val_loss: 0.2697
Epoch 19/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9911 - loss: 0.0880 - val_acc: 0.9474 - val_loss: 0.2594
Epoch 20/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9921 - loss: 0.0758 - val_acc: 0.9474 - val_loss: 0.2511
Epoch 21/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 0.9942 - loss: 0.0662 - val_acc: 0.9474 - val_loss: 0.2425
Epoch 22/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9986 - loss: 0.0550 - val_acc: 0.9474 - val_loss: 0.2354
Epoch 23/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0502 - val_acc: 0.9474 - val_loss: 0.2298
Epoch 24/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0526 - val_acc: 0.9474 - val_loss: 0.2249
Epoch 25/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0376 - val_acc: 0.9474 - val_loss: 0.2198
Epoch 26/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0432 - val_acc: 0.9474 - val_loss: 0.2161
Epoch 27/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 1.0000 - loss: 0.0403 - val_acc: 0.9474 - val_loss: 0.2127
Epoch 28/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 1.0000 - loss: 0.0344 - val_acc: 0.9474 - val_loss: 0.2090
Epoch 29/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0291 - val_acc: 0.9474 - val_loss: 0.2056
Epoch 30/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0341 - val_acc: 0.9474 - val_loss: 0.2028
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 51ms/step
Epoch 1/30
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
17/17 ━━━━━━━━━━━━━━━━━━━━ 1s 17ms/step - acc: 0.5245 - loss: 0.6864 - val_acc: 0.7895 - val_loss: 0.6639
Epoch 2/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.7940 - loss: 0.6463 - val_acc: 0.8947 - val_loss: 0.6244
Epoch 3/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.7845 - loss: 0.6071 - val_acc: 0.8947 - val_loss: 0.5846
Epoch 4/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8110 - loss: 0.5591 - val_acc: 0.8947 - val_loss: 0.5419
Epoch 5/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.7568 - loss: 0.5296 - val_acc: 0.8947 - val_loss: 0.4949
Epoch 6/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8398 - loss: 0.4532 - val_acc: 0.8947 - val_loss: 0.4479
Epoch 7/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8624 - loss: 0.4073 - val_acc: 0.9474 - val_loss: 0.3997
Epoch 8/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.8844 - loss: 0.3696 - val_acc: 0.9474 - val_loss: 0.3602
Epoch 9/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8985 - loss: 0.3061 - val_acc: 0.9474 - val_loss: 0.3235
Epoch 10/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9294 - loss: 0.2441 - val_acc: 0.9474 - val_loss: 0.2955
Epoch 11/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9398 - loss: 0.2525 - val_acc: 0.9474 - val_loss: 0.2741
Epoch 12/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9697 - loss: 0.1773 - val_acc: 0.9474 - val_loss: 0.2546
Epoch 13/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9642 - loss: 0.1744 - val_acc: 0.9474 - val_loss: 0.2416
Epoch 14/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.9927 - loss: 0.1317 - val_acc: 0.9474 - val_loss: 0.2291
Epoch 15/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9963 - loss: 0.0988 - val_acc: 0.9474 - val_loss: 0.2199
Epoch 16/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9910 - loss: 0.1061 - val_acc: 0.9474 - val_loss: 0.2142
Epoch 17/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9870 - loss: 0.0847 - val_acc: 0.9474 - val_loss: 0.2086
Epoch 18/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9875 - loss: 0.0773 - val_acc: 0.9474 - val_loss: 0.2032
Epoch 19/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9936 - loss: 0.0630 - val_acc: 0.9474 - val_loss: 0.1992
Epoch 20/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9900 - loss: 0.0480 - val_acc: 0.9474 - val_loss: 0.1967
Epoch 21/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9978 - loss: 0.0434 - val_acc: 0.9474 - val_loss: 0.1985
Epoch 22/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9990 - loss: 0.0373 - val_acc: 0.9474 - val_loss: 0.1925
Epoch 23/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9907 - loss: 0.0416 - val_acc: 0.9474 - val_loss: 0.1903
Epoch 24/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 0.9986 - loss: 0.0265 - val_acc: 0.9474 - val_loss: 0.1872
Epoch 25/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9889 - loss: 0.0482 - val_acc: 0.9474 - val_loss: 0.1878
Epoch 26/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - acc: 0.9968 - loss: 0.0271 - val_acc: 0.9474 - val_loss: 0.1855
Epoch 27/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 0.9942 - loss: 0.0249 - val_acc: 0.9474 - val_loss: 0.1830
Epoch 28/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 0.9978 - loss: 0.0236 - val_acc: 0.9474 - val_loss: 0.1812
Epoch 29/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 0.9990 - loss: 0.0167 - val_acc: 0.9474 - val_loss: 0.1790
Epoch 30/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 0.9982 - loss: 0.0160 - val_acc: 0.9474 - val_loss: 0.1782
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 98ms/step
Epoch 1/30
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
17/17 ━━━━━━━━━━━━━━━━━━━━ 2s 17ms/step - acc: 0.6353 - loss: 0.6837 - val_acc: 0.8421 - val_loss: 0.6557
Epoch 2/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.8298 - loss: 0.6397 - val_acc: 0.8947 - val_loss: 0.6169
Epoch 3/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8125 - loss: 0.6002 - val_acc: 0.8947 - val_loss: 0.5729
Epoch 4/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8341 - loss: 0.5418 - val_acc: 0.8947 - val_loss: 0.5222
Epoch 5/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8520 - loss: 0.5082 - val_acc: 0.8947 - val_loss: 0.4738
Epoch 6/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9143 - loss: 0.4262 - val_acc: 0.8947 - val_loss: 0.4205
Epoch 7/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8948 - loss: 0.3625 - val_acc: 0.9474 - val_loss: 0.3732
Epoch 8/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9091 - loss: 0.3269 - val_acc: 0.9474 - val_loss: 0.3339
Epoch 9/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9427 - loss: 0.2586 - val_acc: 0.9474 - val_loss: 0.2995
Epoch 10/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.9609 - loss: 0.1972 - val_acc: 0.9474 - val_loss: 0.2726
Epoch 11/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 0.9694 - loss: 0.1751 - val_acc: 0.9474 - val_loss: 0.2522
Epoch 12/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9859 - loss: 0.1300 - val_acc: 0.9474 - val_loss: 0.2340
Epoch 13/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9770 - loss: 0.1351 - val_acc: 0.9474 - val_loss: 0.2200
Epoch 14/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.9629 - loss: 0.1182 - val_acc: 0.9474 - val_loss: 0.2064
Epoch 15/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9886 - loss: 0.0936 - val_acc: 0.9474 - val_loss: 0.1952
Epoch 16/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9787 - loss: 0.0841 - val_acc: 0.9474 - val_loss: 0.1843
Epoch 17/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9871 - loss: 0.0595 - val_acc: 0.9474 - val_loss: 0.1759
Epoch 18/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9942 - loss: 0.0493 - val_acc: 0.9474 - val_loss: 0.1661
Epoch 19/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9805 - loss: 0.0670 - val_acc: 0.9474 - val_loss: 0.1580
Epoch 20/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9805 - loss: 0.0443 - val_acc: 1.0000 - val_loss: 0.1510
Epoch 21/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 1.0000 - loss: 0.0349 - val_acc: 1.0000 - val_loss: 0.1437
Epoch 22/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0302 - val_acc: 1.0000 - val_loss: 0.1379
Epoch 23/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 1.0000 - loss: 0.0315 - val_acc: 1.0000 - val_loss: 0.1315
Epoch 24/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 1.0000 - loss: 0.0259 - val_acc: 1.0000 - val_loss: 0.1276
Epoch 25/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0182 - val_acc: 1.0000 - val_loss: 0.1220
Epoch 26/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0169 - val_acc: 1.0000 - val_loss: 0.1133
Epoch 27/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 1.0000 - loss: 0.0207 - val_acc: 1.0000 - val_loss: 0.1098
Epoch 28/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0178 - val_acc: 1.0000 - val_loss: 0.1066
Epoch 29/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0177 - val_acc: 1.0000 - val_loss: 0.1028
Epoch 30/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 1.0000 - loss: 0.0180 - val_acc: 1.0000 - val_loss: 0.0997
WARNING:tensorflow:5 out of the last 5 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x7e145c6972e0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.
1/2 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step
WARNING:tensorflow:6 out of the last 6 calls to <function TensorFlowTrainer.make_predict_function.<locals>.one_step_on_data_distributed at 0x7e145c6972e0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has reduce_retracing=True option that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 66ms/step
Epoch 1/30
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
17/17 ━━━━━━━━━━━━━━━━━━━━ 1s 17ms/step - acc: 0.5848 - loss: 0.6893 - val_acc: 0.8421 - val_loss: 0.6645
Epoch 2/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 0.7615 - loss: 0.6533 - val_acc: 0.8421 - val_loss: 0.6339
Epoch 3/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 0.8885 - loss: 0.6101 - val_acc: 0.8421 - val_loss: 0.5981
Epoch 4/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8433 - loss: 0.5674 - val_acc: 0.8421 - val_loss: 0.5585
Epoch 5/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.8330 - loss: 0.5259 - val_acc: 0.8947 - val_loss: 0.5146
Epoch 6/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8817 - loss: 0.4511 - val_acc: 0.8947 - val_loss: 0.4659
Epoch 7/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8856 - loss: 0.4064 - val_acc: 0.8947 - val_loss: 0.4203
Epoch 8/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8934 - loss: 0.3468 - val_acc: 0.8947 - val_loss: 0.3785
Epoch 9/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9387 - loss: 0.2967 - val_acc: 0.8947 - val_loss: 0.3457
Epoch 10/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9515 - loss: 0.2525 - val_acc: 0.8947 - val_loss: 0.3168
Epoch 11/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9590 - loss: 0.1977 - val_acc: 0.8947 - val_loss: 0.2921
Epoch 12/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9647 - loss: 0.1595 - val_acc: 0.9474 - val_loss: 0.2741
Epoch 13/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9832 - loss: 0.1305 - val_acc: 0.9474 - val_loss: 0.2576
Epoch 14/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9776 - loss: 0.1167 - val_acc: 0.9474 - val_loss: 0.2447
Epoch 15/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9761 - loss: 0.1073 - val_acc: 0.9474 - val_loss: 0.2350
Epoch 16/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9750 - loss: 0.1028 - val_acc: 0.9474 - val_loss: 0.2270
Epoch 17/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.9891 - loss: 0.0707 - val_acc: 0.9474 - val_loss: 0.2191
Epoch 18/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - acc: 0.9932 - loss: 0.0629 - val_acc: 0.9474 - val_loss: 0.2137
Epoch 19/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 0.9993 - loss: 0.0474 - val_acc: 0.9474 - val_loss: 0.2083
Epoch 20/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 0.9942 - loss: 0.0492 - val_acc: 0.9474 - val_loss: 0.2051
Epoch 21/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - acc: 0.9805 - loss: 0.0597 - val_acc: 0.9474 - val_loss: 0.2017
Epoch 22/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - acc: 0.9990 - loss: 0.0326 - val_acc: 0.9474 - val_loss: 0.1981
Epoch 23/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - acc: 1.0000 - loss: 0.0386 - val_acc: 0.9474 - val_loss: 0.1952
Epoch 24/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - acc: 1.0000 - loss: 0.0343 - val_acc: 0.9474 - val_loss: 0.1929
Epoch 25/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - acc: 1.0000 - loss: 0.0225 - val_acc: 0.9474 - val_loss: 0.1911
Epoch 26/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - acc: 1.0000 - loss: 0.0275 - val_acc: 0.9474 - val_loss: 0.1897
Epoch 27/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - acc: 1.0000 - loss: 0.0241 - val_acc: 0.9474 - val_loss: 0.1886
Epoch 28/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0204 - val_acc: 0.9474 - val_loss: 0.1873
Epoch 29/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0192 - val_acc: 0.9474 - val_loss: 0.1864
Epoch 30/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 1.0000 - loss: 0.0194 - val_acc: 0.9474 - val_loss: 0.1859
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step
Epoch 1/30
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
17/17 ━━━━━━━━━━━━━━━━━━━━ 2s 18ms/step - acc: 0.3339 - loss: 0.7035 - val_acc: 0.6842 - val_loss: 0.6910
Epoch 2/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.7067 - loss: 0.6837 - val_acc: 0.8947 - val_loss: 0.6758
Epoch 3/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.8963 - loss: 0.6649 - val_acc: 0.8947 - val_loss: 0.6610
Epoch 4/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9186 - loss: 0.6456 - val_acc: 0.8947 - val_loss: 0.6426
Epoch 5/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8916 - loss: 0.6232 - val_acc: 0.8947 - val_loss: 0.6192
Epoch 6/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9491 - loss: 0.5828 - val_acc: 0.8947 - val_loss: 0.5886
Epoch 7/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9602 - loss: 0.5340 - val_acc: 0.8947 - val_loss: 0.5520
Epoch 8/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9625 - loss: 0.4836 - val_acc: 0.8947 - val_loss: 0.5106
Epoch 9/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9589 - loss: 0.4232 - val_acc: 0.8947 - val_loss: 0.4656
Epoch 10/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9651 - loss: 0.3522 - val_acc: 0.8947 - val_loss: 0.4212
Epoch 11/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9752 - loss: 0.2727 - val_acc: 0.8947 - val_loss: 0.3789
Epoch 12/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9644 - loss: 0.2329 - val_acc: 0.8947 - val_loss: 0.3446
Epoch 13/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9567 - loss: 0.2008 - val_acc: 0.9474 - val_loss: 0.3179
Epoch 14/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9958 - loss: 0.1348 - val_acc: 0.9474 - val_loss: 0.2940
Epoch 15/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9875 - loss: 0.1158 - val_acc: 0.9474 - val_loss: 0.2771
Epoch 16/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9824 - loss: 0.1018 - val_acc: 0.9474 - val_loss: 0.2622
Epoch 17/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9792 - loss: 0.0964 - val_acc: 0.9474 - val_loss: 0.2490
Epoch 18/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9963 - loss: 0.0594 - val_acc: 0.9474 - val_loss: 0.2382
Epoch 19/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9927 - loss: 0.0563 - val_acc: 0.9474 - val_loss: 0.2293
Epoch 20/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9878 - loss: 0.0496 - val_acc: 0.9474 - val_loss: 0.2215
Epoch 21/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9907 - loss: 0.0445 - val_acc: 0.9474 - val_loss: 0.2140
Epoch 22/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 1.0000 - loss: 0.0347 - val_acc: 0.9474 - val_loss: 0.2080
Epoch 23/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0298 - val_acc: 0.9474 - val_loss: 0.2024
Epoch 24/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0254 - val_acc: 0.9474 - val_loss: 0.1987
Epoch 25/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0249 - val_acc: 0.9474 - val_loss: 0.1954
Epoch 26/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 1.0000 - loss: 0.0314 - val_acc: 0.9474 - val_loss: 0.1922
Epoch 27/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 1.0000 - loss: 0.0203 - val_acc: 0.9474 - val_loss: 0.1897
Epoch 28/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0186 - val_acc: 0.9474 - val_loss: 0.1876
Epoch 29/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0194 - val_acc: 0.9474 - val_loss: 0.1858
Epoch 30/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 1.0000 - loss: 0.0220 - val_acc: 0.9474 - val_loss: 0.1842
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 59ms/step
average roc_auc score:  0.924
stdv roc_auc score:  0.021
max roc_auc score:  0.952

USE (Universal Sentence Encoder)

%%capture
!pip install openai==1.55.3 httpx==0.27.2 --force-reinstall --quiet
from getpass import getpass
import openai
import os

# Hard-restarts the Colab runtime; left disabled, only needed if the
# forced reinstall above requires a fresh interpreter.
#os.kill(os.getpid(), 9)

# Prompt interactively so the API key is never stored in the notebook.
print('Enter OpenAI API key:')
openai.api_key = getpass()

# Also expose the key via the environment so clients constructed with
# openai.OpenAI() pick it up automatically.
os.environ['OPENAI_API_KEY']=openai.api_key
Enter OpenAI API key:
··········
import time

def generate_zero_shot(topic, model="gpt-3.5-turbo"):
  """Simplify a peer-review statement down to its Commenting-on-the-Process core.

  Sends `topic` to an OpenAI chat model with a zero-shot system prompt that
  defines the CP attribute and asks the model to strip irrelevant content.

  Parameters
  ----------
  topic : str
      The raw peer-review statement to simplify.
  model : str, optional
      Chat-completion model name; defaults to "gpt-3.5-turbo" so existing
      callers are unaffected.

  Returns
  -------
  str
      The model's simplified statement, stripped of surrounding whitespace.
  """
  # System prompt: defines the CP attribute and gives one exemplar statement.
  prompt = """
  I will give you a peer review statement on a math problem solution, and this statement is identified to have this key attribute:
  "Commenting on the Process (CP): When evaluating peers’ works, learners comment on the process of the work". Your task is to simplify
  this statement by removing information that is not relevant to this attribute, leaving only the core description that represents the attribute of CP.
  A good example of statement having this attribute is: "I like how you had 20 then subtracted 12 and got 8".
  """
  system_message = {"role" : "system", "content" : prompt}

  client = openai.OpenAI()
  response = client.chat.completions.create(
      model=model,
      messages= [
          system_message,
          {"role" : "user", "content" : topic} # simulate a user prompt
      ],
      temperature=0.7,
      max_tokens=256,
      top_p=1,
      frequency_penalty=0,
      presence_penalty=0,
      # NOTE(review): stop=["\n"] truncates the completion at the first
      # newline, so multi-line rewrites are cut short — confirm intended.
      stop=["\n"]
  )
  # Short wait after each call to stay under the platform rate limit
  # (60 requests/min for new accounts; ~3000/min after 2 days of use).
  time.sleep(1)

  # The API returns a structured response object; extract just the text
  # of the first (and only) completion choice.
  return response.choices[0].message.content.strip()

def update_comments_with_gpt(df):
    """Rewrite CP-labelled annotation texts in place via GPT and return df.

    Rows whose `comment_process` flag equals 1 have their `annotation_text`
    replaced by the simplified version from `generate_zero_shot`; on any
    API failure the original text is kept. The DataFrame is mutated in
    place and also returned for convenience.
    """
    # Select only the rows flagged as Commenting-on-the-Process.
    cp_index = df.index[df['comment_process'] == 1]

    for row_id in cp_index:
        source_text = df.at[row_id, 'annotation_text']

        # Ask GPT for the simplified statement; fall back to the
        # original text if the call fails for any reason.
        try:
            rewritten = generate_zero_shot(source_text)
        except Exception as e:
            print(f"Error processing row {row_id}: {e}")
            rewritten = source_text

        # Write the (possibly unchanged) text back and log the change.
        df.at[row_id, 'annotation_text'] = rewritten
        print(f"Row {row_id} updated: {source_text} -> {rewritten}")

    return df

# Load the hand-annotated peer-feedback data.
df = pd.read_csv('Annotations_final.csv')

# Update the DataFrame (rewrites CP-labelled rows via GPT; may take a
# while since generate_zero_shot sleeps 1s per API call).
updated_df = update_comments_with_gpt(df)

# Save the updated DataFrame to a new CSV (no index column, so the file
# round-trips cleanly through pd.read_csv downstream).
updated_df.to_csv('updated_annotations.csv', index=False)

print("CSV updated and saved as 'updated_annotations.csv'.")
Row 15 updated: I like how you had 20 then subtected 12 and got 8 then you added 8 and got 16 then you answer was -4  -> I like how you subtracted 12 from 20 and got 8, then added 8 to get 16, but your final answer was -4.
Row 16 updated: the video is fine i like how you add the numbers  -> I like how you add the numbers.
Row 18 updated: i like how you add the number -> You did a great job adding the numbers.
Row 23 updated: I like the way you  put how much $20 he have and the lost 12 -> I like how you had 20 then subtracted 12 and got 8.
Row 27 updated: I like the way you used arrows in your problem. -> I like how you used arrows.
Row 31 updated: The way that you set up your Data is great. And how you divided to get to your mean. GOOD JOB KALLI :) -> I like how you set up your data and calculated the mean. Great job!
Row 33 updated: I like the way you sorted the data.
I also like the way you showed your work clearly so it wouldn't be confusing. -> You sorted the data nicely and presented your work clearly.
Row 36 updated: I think that Noah had the best estimate because it's about how many people get the average amount of sleep not about how many hours is good for you because that does not have anything to do with the problem -> You focused on the relevance of the estimate to the problem, which is important for understanding the context.
Row 39 updated: Hi. First, that is how you find the median, not the mean. So first you have to find the mean, add all of the numbers and then divide it by how many numbers there are. Then once you found the mean you estimate.. -> You correctly identified the process for finding the mean. Great job!
Row 42 updated: I liked how you solved your answer even though I think that's not how your supposed to solve it. But in my opinion your final answer was correct. -> You solved the problem differently, but your final answer was correct.
Row 43 updated: i think you did a good job because you wrote out all the numbers then you drew lines and circles to know where you were at -> I like how you wrote out all the numbers and used lines and circles to help keep track of your work.
Row 44 updated: I like that you put all of your thought in the recording. And that you used basic knowledge to find out your answer.  -> I appreciate the thorough explanation and the use of fundamental concepts in your solution.
Row 46 updated: I respectfully disagree with you because you were supposed to find out how much he lost.  -> You were supposed to find out how much he lost.
Row 48 updated: My strategy is like yours because I also did 3x2  -> "I like how you did 3 multiplied by 2 in a similar way as I did."
Row 50 updated: I like the way you subtracted the amount Juan spent from the amount he started with. -> You subtracted the amounts in a clear and logical manner.
Row 51 updated: My strategy is like yours because i added 8 and 8 together too.  -> I like how you added 8 and 8 together.
Row 52 updated: I like the way you added the 30 + 83 and you got 113 -> I like how you added 30 and 83 to get 113.
Row 53 updated: My strategy is like yours because we did the same thing when finding out -20.75x4 was -83 -> I like how we both found -20.75 multiplied by 4 to be -83.
Row 55 updated: I like the way you explained this and how you fixed 113 to -113 good job catching your mistake.  -> You explained well and caught the mistake in changing 113 to -113. Great job!
Row 57 updated: I respectfully disagree with you because one of the deposits was 25.25, not 22.25 making your answer wrong. -> You used 25.25 for one of the deposits, which affected the final result.
Row 58 updated: I hadn't thought of adding 30 with the 20.75 x4 -> You added 30 to 20.75 before multiplying by 4, which was a good approach.
Row 60 updated: I like the way you us drawing and your adding is right. -> I like how you used drawing and your addition is correct.
Row 66 updated: I like the way you multiplied 3 by 2 aswell  -> You multiplied 3 by 2 well.
Row 67 updated: I respectfully disagree with you on the last pieces of your math as you had added a positive with a negative. While you should have added -30 with the -83 and gotten -113 then subtractive that with the positive 76 and gotten -37. -> You added a positive with a negative instead of subtracting them, which resulted in an incorrect answer. You should have subtracted -30 from -83 to get -113, then subtracted that from 76 to get -37.
Row 76 updated: My strategy is like yours because we both multiplied 2 by 3  -> You multiplied 2 by 3 just like I did.
Row 78 updated: My strategy is like yours because I also multiplied 2 and 3 -> I appreciate your method of multiplying 2 and 3.
Row 79 updated: Why did you not mention (or include in the problem) the $8 Juan earned? -> You didn't include the $8 Juan earned in your solution.
Row 82 updated: I like the way your work is organized  also I like they  added 5+1 because I did not really think about doing that  -> I like how you added 5+1 because I did not think about doing that.
Row 98 updated: Why did you make no further mention of $76, the result of the first problem you solved? -> You didn't address the number 76 from the initial problem you worked on.
Row 110 updated: My strategy is like yours because I also did -20.75 x4   -> I appreciate your approach of multiplying -20.75 by 4.
Row 113 updated: My strategy is like yours because we did some of the same steps.  -> You and I took similar steps in our strategies.
Row 116 updated: My strategy is like yours because I also added the deposits  -> You added the deposits, which was a good strategy.
Row 119 updated: My strategy is like yours because I multiplied 20.75 by 4 to -> I like how you multiplied 20.75 by 4 in your strategy.
Row 120 updated: My strategy is like yours because I multiplied 20.75 by 4 as well -> I appreciate how you multiplied 20.75 by 4 in your solution.
Row 125 updated: When you did 20.75x4 you put the 4 in the wrong spot but when you did 2x(-15) that answer was correct. -> You misplaced the 4 in 20.75x4, but your calculation for 2x(-15) was correct.
Row 129 updated: My strategy is like yours because I also added the two deposits together. -> I appreciate how you added the two deposits together in your strategy.
Row 135 updated: My strategy is like yours because I also made -12 the same way. I also think its great you put the 20-12 on the side and explained that wasnt the actual problem. -> You explained well how you approached the problem by starting with 20 and then subtracting 12.
Row 140 updated: My strategy is like yours because i added what he bought  6+5+1= 12 so that what he lost -> You added the amounts he bought and correctly found the total.
Row 141 updated: My strategy is like yours because I did 15 x 3 also  -> You multiplied 15 by 3, which is the same as my strategy.
Row 143 updated: I like how you get 12 and subrtact by 8 to get how much  got now -> I like how you subtracted 8 from 12 to find the result.
Row 145 updated: I agree with you adding but for the positives but for the negatives your answer should have been negative. -> You should have made your answer negative for the negatives.
Row 149 updated: I respectfully disagree with you because you didn't put negative signs and the 4 is supposed to be negative or at least that's what I got. -> You need to include negative signs, as the 4 should be negative.
Row 151 updated: My strategy is like yours because  we both multiplied 2 times 15 -> I like how we both multiplied 2 times 15.
Row 154 updated: My strategy is like yours because  i added what he gained but I hadn't thought of 12-8  -> You added what he gained, but didn't think of 12-8.
Row 162 updated: My strategy is like yours because i did 3x2 -> I like how you multiplied 3 by 2 in your strategy.
Row 166 updated: My strategy is like yours because we both did the same thing, although I added 5+1 to get 6+6 and you did 6+5+1. -> I like how we both added numbers differently to reach our solutions.
Row 167 updated: I like how you  added and subtracted the number for exaple you did 20-6 and got 14 and then you did 8+8 and got 16. the other thing i like about your work is that you wrote a word problem to show how much he lost  -> I like how you added and subtracted numbers, for example, 20-6 to get 14 and then 8+8 to get 16.
Row 178 updated: My strategy is like yours because I put the information in almost the exact same way. I think I just switched the places of the two withdrawls. -> You and I used a similar strategy but with a slight difference in the order of the withdrawals.
Row 179 updated: I like the way you multiplied the withdrawals instead of adding to four times or two times. -> You multiplied the withdrawals instead of adding them multiple times, which was a good choice.
Row 180 updated: I like the way you you added all the deposits first then subtracted the withdrawls I didn't think of that -> You added all the deposits first then subtracted the withdrawals, which I didn't think of.
Row 181 updated: I like the way you started with adding your two deposits together. -> You did a great job by starting with adding your two deposits together.
Row 191 updated: My strategy is like yours because we both didn't use the $20 in the beginning. -> You and I both avoided using the initial $20 in our strategies.
Row 192 updated: I like the way you added the positive together then subtracted the negaives to make the equation more simple -> I like how you added the positives together then subtracted the negatives.
Row 195 updated: I agree with your answer but maybe next time make the numbers that are suppose to be negative negative in the equation to make it more clear. -> Next time, be sure to make the negative numbers negative in the equation for clarity.
Row 199 updated: Why did you start with 20. I kind of want to know just because many did the same but some did something different, like me. <3 -> You started with 20 then subtracted 12 to get 8.
Row 205 updated: When you did the 3*2 and you got -6 it could confuse people because the 3 and the 2 are both positives so how did you get a negative. -> You multiplied 3 by 2 and got -6, which might be confusing because both numbers were positive.
Row 206 updated: I like the way you showed your work step by step, but the question asks what was the total amount of money he lost or gained by the end of the day. -> You showed the work step by step, but the question asks for the total amount lost or gained.
Row 208 updated: I respectfully disagree with you your answer I think you might have messed up a step in the subtraction part. -> You may have made a mistake in the subtraction step.
Row 210 updated: I like the way you did it because botnh added 83 and 30. -> You added 83 and 30, which was a good approach.
Row 211 updated: Why did you add 2 and 5? -> Commenting on the Process (CP): Why did you add 2 and 5?
Row 212 updated: My strategy is like yours because  i also did 20-6 and i got 14  -> You subtracted 6 from 20 and got 14, which is similar to my approach.
Row 213 updated: Next time maybe you can add 15 x 2 and the totals you get add them butni like your video. -> You could try adding 15 x 2 next time, but I liked your video.
Row 216 updated: I like the way you added the   83.00 and 30.00. also whow you subtracted 113 and 75  -> I like how you added two numbers together and subtracted another pair.
Row 224 updated: Premium Growth feed will make the chicken's weight be about the same because the Mad for pen A is 0.512. The mad for pen B was 0.97 which is higher Martin -> You used the Mad for pen A and pen B to compare and concluded that pen B had a higher value.
CSV updated and saved as 'updated_annotations.csv'.
# Local import so this cell is self-contained; TensorFlow/Keras is already
# a dependency of this notebook (Sequential/Dense are in scope).
from tensorflow.keras.layers import Input

# loading universal sentence encoder (produces 512-dim sentence embeddings)
embed = hub.load("https://tfhub.dev/google/universal-sentence-encoder-large/5")
# import data
# NOTE(review): fillna(0) writes the integer 0 into missing text cells,
# which would break embed() (it expects strings) — confirm that
# annotation_text has no missing values, or fillna("") for text columns.
df = pd.read_csv("updated_annotations.csv").fillna(0)

# extract features as X ('created_by' is kept only so group-aware CV can
# split on it; it is dropped before embedding below)
X = df[['annotation_text','created_by']]

# extract the prediction variable as y
y = df.comment_process # CHANGE y HERE

# set up storage arrays for each round of validation
roc_auc_scores = np.array([])
pred = pd.DataFrame()  # unused in this cell; retained for other cells that expect it

# split, train, test and store performance metrics
for train_index, test_index in gkf.split(X, y, groups=groups):

    X_train = X.iloc[train_index].drop(['created_by'], axis=1)
    X_test = X.iloc[test_index].drop(['created_by'], axis=1)
    y_train = y.iloc[train_index]
    y_test = y.iloc[test_index]

    # train classifier on this round of training group
    training_embeddings = embed(X_train.annotation_text.to_list())

    # Declare the 512-dim input with an explicit Input layer instead of
    # passing input_shape to the first Dense layer — this silences the
    # Keras UserWarning ("Do not pass an `input_shape`/`input_dim`
    # argument to a layer") that fired on every fold in earlier runs.
    model = Sequential()
    model.add(Input(shape=(512,)))
    model.add(Dense(12, activation='relu'))
    model.add(Dense(8, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics = ['acc'])

    num_epochs = 30
    batch_size = 10


    model.fit(
        training_embeddings,
        y_train,
        epochs=num_epochs,
        validation_split=0.1,
        shuffle=True,
        batch_size=batch_size)

    # test classifier on this round of testing group
    testing_embeddings = embed(X_test.annotation_text.to_list())
    predictions = model.predict(testing_embeddings)

    # compute some metrics and store them for averaging later on
    roc_auc_scores = np.append(roc_auc_scores, roc_auc_score(y_test, predictions))


# print mean scores for the 5-fold CV
print("average roc_auc score: ", np.round(roc_auc_scores.mean(), 3))
print("stdv roc_auc score: ", np.round(roc_auc_scores.std(), 3))
print("max roc_auc score: ", np.round(roc_auc_scores.max(), 3))
Epoch 1/30
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
17/17 ━━━━━━━━━━━━━━━━━━━━ 2s 26ms/step - acc: 0.7672 - loss: 0.6827 - val_acc: 0.7895 - val_loss: 0.6554
Epoch 2/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - acc: 0.8264 - loss: 0.6457 - val_acc: 0.7895 - val_loss: 0.6083
Epoch 3/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - acc: 0.8441 - loss: 0.5973 - val_acc: 0.7895 - val_loss: 0.5571
Epoch 4/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 15ms/step - acc: 0.8704 - loss: 0.5389 - val_acc: 0.7895 - val_loss: 0.5033
Epoch 5/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - acc: 0.8907 - loss: 0.4742 - val_acc: 0.7895 - val_loss: 0.4468
Epoch 6/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.8795 - loss: 0.4426 - val_acc: 0.8947 - val_loss: 0.3980
Epoch 7/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9164 - loss: 0.3718 - val_acc: 0.8947 - val_loss: 0.3568
Epoch 8/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9228 - loss: 0.3723 - val_acc: 0.8947 - val_loss: 0.3186
Epoch 9/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9266 - loss: 0.3338 - val_acc: 0.8947 - val_loss: 0.2836
Epoch 10/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9440 - loss: 0.2917 - val_acc: 0.9474 - val_loss: 0.2548
Epoch 11/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9473 - loss: 0.2750 - val_acc: 0.9474 - val_loss: 0.2283
Epoch 12/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9476 - loss: 0.2352 - val_acc: 0.9474 - val_loss: 0.2037
Epoch 13/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9547 - loss: 0.2274 - val_acc: 1.0000 - val_loss: 0.1842
Epoch 14/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9509 - loss: 0.1978 - val_acc: 1.0000 - val_loss: 0.1688
Epoch 15/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9622 - loss: 0.1879 - val_acc: 1.0000 - val_loss: 0.1515
Epoch 16/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9459 - loss: 0.1802 - val_acc: 1.0000 - val_loss: 0.1406
Epoch 17/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9638 - loss: 0.1491 - val_acc: 1.0000 - val_loss: 0.1306
Epoch 18/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9801 - loss: 0.1267 - val_acc: 1.0000 - val_loss: 0.1198
Epoch 19/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9661 - loss: 0.1462 - val_acc: 1.0000 - val_loss: 0.1118
Epoch 20/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9804 - loss: 0.1272 - val_acc: 1.0000 - val_loss: 0.1067
Epoch 21/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9716 - loss: 0.1134 - val_acc: 1.0000 - val_loss: 0.0979
Epoch 22/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9615 - loss: 0.1100 - val_acc: 1.0000 - val_loss: 0.0923
Epoch 23/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9729 - loss: 0.1219 - val_acc: 1.0000 - val_loss: 0.0906
Epoch 24/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.9612 - loss: 0.1266 - val_acc: 1.0000 - val_loss: 0.0820
Epoch 25/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9917 - loss: 0.0696 - val_acc: 1.0000 - val_loss: 0.0791
Epoch 26/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9862 - loss: 0.0825 - val_acc: 1.0000 - val_loss: 0.0723
Epoch 27/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9946 - loss: 0.0646 - val_acc: 1.0000 - val_loss: 0.0708
Epoch 28/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9968 - loss: 0.0670 - val_acc: 1.0000 - val_loss: 0.0648
Epoch 29/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9980 - loss: 0.0616 - val_acc: 1.0000 - val_loss: 0.0645
Epoch 30/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9960 - loss: 0.0444 - val_acc: 1.0000 - val_loss: 0.0614
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 55ms/step
Epoch 1/30
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
17/17 ━━━━━━━━━━━━━━━━━━━━ 1s 16ms/step - acc: 0.6869 - loss: 0.6863 - val_acc: 0.7368 - val_loss: 0.6591
Epoch 2/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.7116 - loss: 0.6530 - val_acc: 0.7368 - val_loss: 0.6143
Epoch 3/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.7331 - loss: 0.5942 - val_acc: 0.7368 - val_loss: 0.5563
Epoch 4/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 0.7667 - loss: 0.5336 - val_acc: 0.7368 - val_loss: 0.4934
Epoch 5/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8021 - loss: 0.4818 - val_acc: 0.7895 - val_loss: 0.4314
Epoch 6/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8547 - loss: 0.4324 - val_acc: 0.8947 - val_loss: 0.3705
Epoch 7/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.8793 - loss: 0.3869 - val_acc: 0.9474 - val_loss: 0.3214
Epoch 8/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9159 - loss: 0.3110 - val_acc: 0.9474 - val_loss: 0.2744
Epoch 9/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9346 - loss: 0.2802 - val_acc: 0.9474 - val_loss: 0.2381
Epoch 10/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9522 - loss: 0.2329 - val_acc: 0.9474 - val_loss: 0.2138
Epoch 11/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - acc: 0.9527 - loss: 0.2494 - val_acc: 0.9474 - val_loss: 0.1892
Epoch 12/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 0.9851 - loss: 0.1728 - val_acc: 0.9474 - val_loss: 0.1719
Epoch 13/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - acc: 0.9709 - loss: 0.1567 - val_acc: 0.9474 - val_loss: 0.1522
Epoch 14/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.9584 - loss: 0.1667 - val_acc: 0.9474 - val_loss: 0.1442
Epoch 15/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 0.9668 - loss: 0.1485 - val_acc: 0.9474 - val_loss: 0.1347
Epoch 16/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - acc: 0.9639 - loss: 0.1644 - val_acc: 0.9474 - val_loss: 0.1269
Epoch 17/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 0.9789 - loss: 0.1043 - val_acc: 0.9474 - val_loss: 0.1182
Epoch 18/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - acc: 0.9886 - loss: 0.0841 - val_acc: 0.9474 - val_loss: 0.1182
Epoch 19/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 16ms/step - acc: 0.9745 - loss: 0.1022 - val_acc: 0.9474 - val_loss: 0.1065
Epoch 20/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9796 - loss: 0.0780 - val_acc: 0.9474 - val_loss: 0.1023
Epoch 21/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9881 - loss: 0.0650 - val_acc: 0.9474 - val_loss: 0.0968
Epoch 22/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9936 - loss: 0.0538 - val_acc: 0.9474 - val_loss: 0.0904
Epoch 23/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9931 - loss: 0.0653 - val_acc: 0.9474 - val_loss: 0.0886
Epoch 24/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9787 - loss: 0.0795 - val_acc: 0.9474 - val_loss: 0.0867
Epoch 25/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9874 - loss: 0.0512 - val_acc: 0.9474 - val_loss: 0.0793
Epoch 26/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.9956 - loss: 0.0471 - val_acc: 0.9474 - val_loss: 0.0761
Epoch 27/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9847 - loss: 0.0566 - val_acc: 0.9474 - val_loss: 0.0760
Epoch 28/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9942 - loss: 0.0474 - val_acc: 1.0000 - val_loss: 0.0683
Epoch 29/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 0.9973 - loss: 0.0302 - val_acc: 0.9474 - val_loss: 0.0714
Epoch 30/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9982 - loss: 0.0281 - val_acc: 1.0000 - val_loss: 0.0595
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 61ms/step
Epoch 1/30
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
17/17 ━━━━━━━━━━━━━━━━━━━━ 1s 18ms/step - acc: 0.6615 - loss: 0.6808 - val_acc: 0.5789 - val_loss: 0.6742
Epoch 2/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.7480 - loss: 0.6103 - val_acc: 0.5789 - val_loss: 0.6379
Epoch 3/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.6756 - loss: 0.5769 - val_acc: 0.5789 - val_loss: 0.5882
Epoch 4/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.7107 - loss: 0.5103 - val_acc: 0.5789 - val_loss: 0.5422
Epoch 5/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.7641 - loss: 0.4623 - val_acc: 0.6842 - val_loss: 0.4827
Epoch 6/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8541 - loss: 0.4083 - val_acc: 0.8421 - val_loss: 0.4286
Epoch 7/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8692 - loss: 0.3623 - val_acc: 0.8421 - val_loss: 0.3839
Epoch 8/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9210 - loss: 0.3328 - val_acc: 0.8421 - val_loss: 0.3326
Epoch 9/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9449 - loss: 0.2639 - val_acc: 0.8421 - val_loss: 0.3007
Epoch 10/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9566 - loss: 0.2300 - val_acc: 0.8947 - val_loss: 0.2703
Epoch 11/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9519 - loss: 0.1905 - val_acc: 0.9474 - val_loss: 0.2398
Epoch 12/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9605 - loss: 0.1963 - val_acc: 0.9474 - val_loss: 0.2296
Epoch 13/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9637 - loss: 0.1513 - val_acc: 0.9474 - val_loss: 0.2036
Epoch 14/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9673 - loss: 0.1320 - val_acc: 0.9474 - val_loss: 0.1931
Epoch 15/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9543 - loss: 0.1423 - val_acc: 0.9474 - val_loss: 0.1823
Epoch 16/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9644 - loss: 0.1130 - val_acc: 0.9474 - val_loss: 0.1765
Epoch 17/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9889 - loss: 0.0959 - val_acc: 0.9474 - val_loss: 0.1664
Epoch 18/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.9805 - loss: 0.0935 - val_acc: 0.9474 - val_loss: 0.1637
Epoch 19/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9889 - loss: 0.0736 - val_acc: 0.9474 - val_loss: 0.1589
Epoch 20/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9956 - loss: 0.0648 - val_acc: 0.9474 - val_loss: 0.1553
Epoch 21/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0588 - val_acc: 0.9474 - val_loss: 0.1442
Epoch 22/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0500 - val_acc: 0.9474 - val_loss: 0.1443
Epoch 23/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0423 - val_acc: 0.9474 - val_loss: 0.1509
Epoch 24/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0429 - val_acc: 0.9474 - val_loss: 0.1513
Epoch 25/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 1.0000 - loss: 0.0550 - val_acc: 0.9474 - val_loss: 0.1302
Epoch 26/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 13ms/step - acc: 1.0000 - loss: 0.0410 - val_acc: 0.9474 - val_loss: 0.1413
Epoch 27/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - acc: 1.0000 - loss: 0.0330 - val_acc: 0.9474 - val_loss: 0.1409
Epoch 28/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - acc: 1.0000 - loss: 0.0360 - val_acc: 0.9474 - val_loss: 0.1374
Epoch 29/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 11ms/step - acc: 1.0000 - loss: 0.0307 - val_acc: 0.9474 - val_loss: 0.1332
Epoch 30/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 12ms/step - acc: 1.0000 - loss: 0.0291 - val_acc: 0.9474 - val_loss: 0.1378
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 88ms/step
Epoch 1/30
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
17/17 ━━━━━━━━━━━━━━━━━━━━ 1s 17ms/step - acc: 0.6005 - loss: 0.6856 - val_acc: 0.6316 - val_loss: 0.6732
Epoch 2/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.7720 - loss: 0.6386 - val_acc: 0.6316 - val_loss: 0.6390
Epoch 3/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.7723 - loss: 0.5700 - val_acc: 0.6316 - val_loss: 0.5980
Epoch 4/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.8009 - loss: 0.4838 - val_acc: 0.6316 - val_loss: 0.5519
Epoch 5/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.7741 - loss: 0.4632 - val_acc: 0.6842 - val_loss: 0.4708
Epoch 6/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9051 - loss: 0.3536 - val_acc: 0.8421 - val_loss: 0.3926
Epoch 7/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9401 - loss: 0.2968 - val_acc: 0.8421 - val_loss: 0.3532
Epoch 8/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9144 - loss: 0.3012 - val_acc: 0.8947 - val_loss: 0.3029
Epoch 9/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9589 - loss: 0.2271 - val_acc: 0.8947 - val_loss: 0.2731
Epoch 10/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9443 - loss: 0.2209 - val_acc: 0.8947 - val_loss: 0.2449
Epoch 11/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.9473 - loss: 0.1881 - val_acc: 0.8947 - val_loss: 0.2258
Epoch 12/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9511 - loss: 0.1651 - val_acc: 0.9474 - val_loss: 0.2031
Epoch 13/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9667 - loss: 0.1386 - val_acc: 0.9474 - val_loss: 0.1917
Epoch 14/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 0.9851 - loss: 0.1135 - val_acc: 0.9474 - val_loss: 0.1803
Epoch 15/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9577 - loss: 0.1328 - val_acc: 0.9474 - val_loss: 0.1667
Epoch 16/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9747 - loss: 0.1061 - val_acc: 0.9474 - val_loss: 0.1550
Epoch 17/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9765 - loss: 0.0990 - val_acc: 0.9474 - val_loss: 0.1535
Epoch 18/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.9849 - loss: 0.0827 - val_acc: 0.9474 - val_loss: 0.1482
Epoch 19/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9715 - loss: 0.0931 - val_acc: 0.9474 - val_loss: 0.1369
Epoch 20/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.9882 - loss: 0.0695 - val_acc: 0.9474 - val_loss: 0.1316
Epoch 21/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9784 - loss: 0.0787 - val_acc: 0.9474 - val_loss: 0.1240
Epoch 22/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 0.9932 - loss: 0.0519 - val_acc: 0.9474 - val_loss: 0.1266
Epoch 23/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9899 - loss: 0.0617 - val_acc: 0.9474 - val_loss: 0.1133
Epoch 24/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9963 - loss: 0.0567 - val_acc: 0.9474 - val_loss: 0.1097
Epoch 25/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9932 - loss: 0.0554 - val_acc: 0.9474 - val_loss: 0.1130
Epoch 26/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9949 - loss: 0.0379 - val_acc: 0.9474 - val_loss: 0.1104
Epoch 27/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 0.9982 - loss: 0.0551 - val_acc: 0.9474 - val_loss: 0.1037
Epoch 28/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9986 - loss: 0.0337 - val_acc: 0.9474 - val_loss: 0.1042
Epoch 29/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9949 - loss: 0.0348 - val_acc: 0.9474 - val_loss: 0.0976
Epoch 30/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 0.9949 - loss: 0.0286 - val_acc: 0.9474 - val_loss: 0.0973
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 52ms/step
Epoch 1/30
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
17/17 ━━━━━━━━━━━━━━━━━━━━ 2s 31ms/step - acc: 0.4193 - loss: 0.6954 - val_acc: 0.6842 - val_loss: 0.6862
Epoch 2/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - acc: 0.7327 - loss: 0.6805 - val_acc: 0.6316 - val_loss: 0.6729
Epoch 3/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.7113 - loss: 0.6601 - val_acc: 0.6316 - val_loss: 0.6484
Epoch 4/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.7861 - loss: 0.6303 - val_acc: 0.7895 - val_loss: 0.6042
Epoch 5/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 0.8197 - loss: 0.5825 - val_acc: 0.7895 - val_loss: 0.5506
Epoch 6/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.8887 - loss: 0.5308 - val_acc: 0.8421 - val_loss: 0.4843
Epoch 7/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9160 - loss: 0.4631 - val_acc: 0.8421 - val_loss: 0.4132
Epoch 8/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9187 - loss: 0.3989 - val_acc: 0.9474 - val_loss: 0.3467
Epoch 9/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9760 - loss: 0.3154 - val_acc: 0.9474 - val_loss: 0.2895
Epoch 10/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9452 - loss: 0.3023 - val_acc: 0.9474 - val_loss: 0.2466
Epoch 11/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9824 - loss: 0.2145 - val_acc: 0.9474 - val_loss: 0.2177
Epoch 12/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9674 - loss: 0.1980 - val_acc: 0.9474 - val_loss: 0.1910
Epoch 13/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9775 - loss: 0.1523 - val_acc: 0.9474 - val_loss: 0.1746
Epoch 14/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 0.9756 - loss: 0.1263 - val_acc: 0.9474 - val_loss: 0.1637
Epoch 15/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9793 - loss: 0.1152 - val_acc: 0.9474 - val_loss: 0.1536
Epoch 16/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9807 - loss: 0.1118 - val_acc: 0.9474 - val_loss: 0.1432
Epoch 17/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9897 - loss: 0.0782 - val_acc: 0.9474 - val_loss: 0.1402
Epoch 18/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 0.9978 - loss: 0.0794 - val_acc: 0.9474 - val_loss: 0.1318
Epoch 19/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 0.9936 - loss: 0.0705 - val_acc: 0.9474 - val_loss: 0.1337
Epoch 20/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0512 - val_acc: 0.9474 - val_loss: 0.1245
Epoch 21/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0578 - val_acc: 0.9474 - val_loss: 0.1245
Epoch 22/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0508 - val_acc: 0.9474 - val_loss: 0.1188
Epoch 23/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0373 - val_acc: 0.9474 - val_loss: 0.1292
Epoch 24/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 1.0000 - loss: 0.0367 - val_acc: 0.9474 - val_loss: 0.1126
Epoch 25/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 1.0000 - loss: 0.0286 - val_acc: 0.9474 - val_loss: 0.1200
Epoch 26/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 1.0000 - loss: 0.0286 - val_acc: 0.9474 - val_loss: 0.1144
Epoch 27/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0229 - val_acc: 0.9474 - val_loss: 0.1204
Epoch 28/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 8ms/step - acc: 1.0000 - loss: 0.0212 - val_acc: 0.9474 - val_loss: 0.1084
Epoch 29/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 6ms/step - acc: 1.0000 - loss: 0.0266 - val_acc: 0.9474 - val_loss: 0.1147
Epoch 30/30
17/17 ━━━━━━━━━━━━━━━━━━━━ 0s 7ms/step - acc: 1.0000 - loss: 0.0201 - val_acc: 0.9474 - val_loss: 0.1117
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 59ms/step
average roc_auc score:  0.934
stdv roc_auc score:  0.061
max roc_auc score:  0.979
Back to top