Dataset columns:
path: string (length 13 to 17)
screenshot_names: sequence (length 1 to 873)
code: string (length 0 to 40.4k)
cell_type: string (1 distinct value: "code")
2041757/cell_13
[ "text_plain_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

seed = 5
np.random.seed(seed)

data_train = pd.read_csv('../input/train.csv')
X = data_train.drop('label', axis=1)
y = data_train['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=seed)

# Convert to numpy arrays (restored from this notebook's cell_3; without these
# lines the code below would fail), then scale pixels to [0, 1] and one-hot
# encode the labels.
X_train_matrix = X_train.as_matrix()
y_train_matrix = y_train.as_matrix()
X_test_matrix = X_test.as_matrix()
y_test_matrix = y_test.as_matrix()
X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255
y_train_matrix = np_utils.to_categorical(y_train_matrix, 10)
y_test_matrix = np_utils.to_categorical(y_test_matrix, 10)

# Fully connected baseline model.
model = Sequential()
model.add(Dense(784, input_shape=(len(X_train_matrix[0]),)))
model.add(Dense(392, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(196, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100
history = model.fit(X_train_matrix, y_train_matrix, batch_size=100, epochs=6, verbose=1)

cb_checkpoint = ModelCheckpoint(filepath='best-model.hdf5', verbose=1, save_best_only=True)
history = model.fit(X_train_matrix, y_train_matrix, batch_size=400, epochs=6,
                    validation_split=0.2, shuffle=True, callbacks=[cb_checkpoint], verbose=1)
model.load_weights('best-model.hdf5')
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100

# Reshape the flat 784-vectors into 28x28x1 images for the CNN.
X_train = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), X_train_matrix)))
X_test = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), X_test_matrix)))

model = Sequential()
model.add(Conv2D(filters=32, kernel_size=5, padding='same', activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=5, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# Accuracy of the still-untrained CNN, i.e. chance level.
score = model.evaluate(X_test, y_test_matrix, verbose=0)
accuracy = score[1] * 100
print('Random test accuracy is %1.1f%%' % accuracy)
code
2041757/cell_9
[ "text_plain_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils

X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255
y_train_matrix = np_utils.to_categorical(y_train_matrix, 10)
y_test_matrix = np_utils.to_categorical(y_test_matrix, 10)

model = Sequential()
model.add(Dense(784, input_shape=(len(X_train_matrix[0]),)))
model.add(Dense(392, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(196, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100
history = model.fit(X_train_matrix, y_train_matrix, batch_size=100, epochs=6, verbose=1)

cb_checkpoint = ModelCheckpoint(filepath='best-model.hdf5', verbose=1, save_best_only=True)
history = model.fit(X_train_matrix, y_train_matrix, batch_size=400, epochs=6,
                    validation_split=0.2, shuffle=True, callbacks=[cb_checkpoint], verbose=1)
code
2041757/cell_4
[ "text_plain_output_1.png" ]
print('before:')
print(X_train_matrix[0])
X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255
print('after:')
print(X_train_matrix[0])
code
2041757/cell_6
[ "text_plain_output_1.png" ]
from keras.models import Sequential
from keras.layers import Dense, Dropout

X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255

model = Sequential()
model.add(Dense(784, input_shape=(len(X_train_matrix[0]),)))
model.add(Dense(392, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(196, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()
code
2041757/cell_2
[ "text_plain_output_1.png" ]
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.model_selection import train_test_split

seed = 5
np.random.seed(seed)

data_train = pd.read_csv('../input/train.csv')
X = data_train.drop('label', axis=1)
y = data_train['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=seed)
print('There are %d train entries and %d test entries' % (len(X_train), len(y_test)))
code
2041757/cell_19
[ "text_plain_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

seed = 5
np.random.seed(seed)

data_train = pd.read_csv('../input/train.csv')
X = data_train.drop('label', axis=1)
y = data_train['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=seed)

# Convert to numpy arrays (restored from this notebook's cell_3), then scale
# pixels to [0, 1] and one-hot encode the labels.
X_train_matrix = X_train.as_matrix()
y_train_matrix = y_train.as_matrix()
X_test_matrix = X_test.as_matrix()
y_test_matrix = y_test.as_matrix()
X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255
y_train_matrix = np_utils.to_categorical(y_train_matrix, 10)
y_test_matrix = np_utils.to_categorical(y_test_matrix, 10)

model = Sequential()
model.add(Dense(784, input_shape=(len(X_train_matrix[0]),)))
model.add(Dense(392, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(196, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100
history = model.fit(X_train_matrix, y_train_matrix, batch_size=100, epochs=6, verbose=1)

cb_checkpoint = ModelCheckpoint(filepath='best-model.hdf5', verbose=1, save_best_only=True)
history = model.fit(X_train_matrix, y_train_matrix, batch_size=400, epochs=6,
                    validation_split=0.2, shuffle=True, callbacks=[cb_checkpoint], verbose=1)
model.load_weights('best-model.hdf5')
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100

# Reshape the flat 784-vectors into 28x28x1 images for the CNN.
X_train = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), X_train_matrix)))
X_test = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), X_test_matrix)))

model = Sequential()
model.add(Conv2D(filters=32, kernel_size=5, padding='same', activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=5, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test, y_test_matrix, verbose=0)
accuracy = score[1] * 100

cb_checkpoint = ModelCheckpoint(filepath='best-cnn-model.hdf5', verbose=1, save_best_only=True)
history = model.fit(X_train, y_train_matrix, batch_size=50, epochs=6,
                    validation_split=0.2, shuffle=True, callbacks=[cb_checkpoint], verbose=1)
model.load_weights('best-cnn-model.hdf5')
score = model.evaluate(X_test, y_test_matrix, verbose=0)
accuracy = score[1] * 100

# Predict labels for the competition test set.
data_test = pd.read_csv('../input/test.csv')
test = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), data_test.as_matrix())))
test.shape
pred = model.predict(test, batch_size=32, verbose=1)
pred[0]
predicted_labels = [np.argmax(r, axis=0) for r in pred]
predicted_labels
code
2041757/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))
code
2041757/cell_7
[ "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils

X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255
y_train_matrix = np_utils.to_categorical(y_train_matrix, 10)
y_test_matrix = np_utils.to_categorical(y_test_matrix, 10)

model = Sequential()
model.add(Dense(784, input_shape=(len(X_train_matrix[0]),)))
model.add(Dense(392, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(196, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100
print('Random test accuracy is %1.1f%%' % accuracy)
code
2041757/cell_18
[ "text_plain_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

seed = 5
np.random.seed(seed)

data_train = pd.read_csv('../input/train.csv')
X = data_train.drop('label', axis=1)
y = data_train['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=seed)

# Convert to numpy arrays (restored from this notebook's cell_3), then scale
# pixels to [0, 1] and one-hot encode the labels.
X_train_matrix = X_train.as_matrix()
y_train_matrix = y_train.as_matrix()
X_test_matrix = X_test.as_matrix()
y_test_matrix = y_test.as_matrix()
X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255
y_train_matrix = np_utils.to_categorical(y_train_matrix, 10)
y_test_matrix = np_utils.to_categorical(y_test_matrix, 10)

model = Sequential()
model.add(Dense(784, input_shape=(len(X_train_matrix[0]),)))
model.add(Dense(392, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(196, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100
history = model.fit(X_train_matrix, y_train_matrix, batch_size=100, epochs=6, verbose=1)

cb_checkpoint = ModelCheckpoint(filepath='best-model.hdf5', verbose=1, save_best_only=True)
history = model.fit(X_train_matrix, y_train_matrix, batch_size=400, epochs=6,
                    validation_split=0.2, shuffle=True, callbacks=[cb_checkpoint], verbose=1)
model.load_weights('best-model.hdf5')
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100

# Reshape the flat 784-vectors into 28x28x1 images for the CNN.
X_train = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), X_train_matrix)))
X_test = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), X_test_matrix)))

model = Sequential()
model.add(Conv2D(filters=32, kernel_size=5, padding='same', activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=5, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test, y_test_matrix, verbose=0)
accuracy = score[1] * 100

cb_checkpoint = ModelCheckpoint(filepath='best-cnn-model.hdf5', verbose=1, save_best_only=True)
history = model.fit(X_train, y_train_matrix, batch_size=50, epochs=6,
                    validation_split=0.2, shuffle=True, callbacks=[cb_checkpoint], verbose=1)
model.load_weights('best-cnn-model.hdf5')
score = model.evaluate(X_test, y_test_matrix, verbose=0)
accuracy = score[1] * 100

data_test = pd.read_csv('../input/test.csv')
test = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), data_test.as_matrix())))
test.shape
pred = model.predict(test, batch_size=32, verbose=1)
pred[0]
code
2041757/cell_8
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils

X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255
y_train_matrix = np_utils.to_categorical(y_train_matrix, 10)
y_test_matrix = np_utils.to_categorical(y_test_matrix, 10)

model = Sequential()
model.add(Dense(784, input_shape=(len(X_train_matrix[0]),)))
model.add(Dense(392, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(196, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100
history = model.fit(X_train_matrix, y_train_matrix, batch_size=100, epochs=6, verbose=1)
code
2041757/cell_15
[ "text_plain_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

seed = 5
np.random.seed(seed)

data_train = pd.read_csv('../input/train.csv')
X = data_train.drop('label', axis=1)
y = data_train['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=seed)

# Convert to numpy arrays (restored from this notebook's cell_3), then scale
# pixels to [0, 1] and one-hot encode the labels.
X_train_matrix = X_train.as_matrix()
y_train_matrix = y_train.as_matrix()
X_test_matrix = X_test.as_matrix()
y_test_matrix = y_test.as_matrix()
X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255
y_train_matrix = np_utils.to_categorical(y_train_matrix, 10)
y_test_matrix = np_utils.to_categorical(y_test_matrix, 10)

model = Sequential()
model.add(Dense(784, input_shape=(len(X_train_matrix[0]),)))
model.add(Dense(392, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(196, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100
history = model.fit(X_train_matrix, y_train_matrix, batch_size=100, epochs=6, verbose=1)

cb_checkpoint = ModelCheckpoint(filepath='best-model.hdf5', verbose=1, save_best_only=True)
history = model.fit(X_train_matrix, y_train_matrix, batch_size=400, epochs=6,
                    validation_split=0.2, shuffle=True, callbacks=[cb_checkpoint], verbose=1)
model.load_weights('best-model.hdf5')
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100

# Reshape the flat 784-vectors into 28x28x1 images for the CNN.
X_train = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), X_train_matrix)))
X_test = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), X_test_matrix)))

model = Sequential()
model.add(Conv2D(filters=32, kernel_size=5, padding='same', activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=5, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test, y_test_matrix, verbose=0)
accuracy = score[1] * 100

cb_checkpoint = ModelCheckpoint(filepath='best-cnn-model.hdf5', verbose=1, save_best_only=True)
history = model.fit(X_train, y_train_matrix, batch_size=50, epochs=6,
                    validation_split=0.2, shuffle=True, callbacks=[cb_checkpoint], verbose=1)
model.load_weights('best-cnn-model.hdf5')
score = model.evaluate(X_test, y_test_matrix, verbose=0)
accuracy = score[1] * 100
print("Best Model's test accuracy is %1.1f%%" % accuracy)
code
2041757/cell_3
[ "text_plain_output_1.png" ]
import numpy as np  # needed for np.reshape below
import matplotlib.pyplot as plt
import matplotlib.cm as cm

X_train_matrix = X_train.as_matrix()
y_train_matrix = y_train.as_matrix()
X_test_matrix = X_test.as_matrix()
y_test_matrix = y_test.as_matrix()

# Show the first ten digits with their labels.
fig = plt.figure(figsize=(20, 20))
for i in range(10):
    axis = fig.add_subplot(1, 10, i + 1, xticks=[], yticks=[])
    axis.imshow(np.reshape(X_train_matrix[i], (28, 28)), cmap='gray')
    axis.set_title(str(y_train_matrix[i]))
code
2041757/cell_17
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

seed = 5
np.random.seed(seed)

data_train = pd.read_csv('../input/train.csv')
X = data_train.drop('label', axis=1)
y = data_train['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=seed)

# Convert to numpy arrays (restored from this notebook's cell_3) and scale to [0, 1].
X_train_matrix = X_train.as_matrix()
X_test_matrix = X_test.as_matrix()
X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255

X_train = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), X_train_matrix)))
X_test = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), X_test_matrix)))

data_test = pd.read_csv('../input/test.csv')
test = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), data_test.as_matrix())))
test.shape
code
2041757/cell_14
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils
from sklearn.model_selection import train_test_split
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

seed = 5
np.random.seed(seed)

data_train = pd.read_csv('../input/train.csv')
X = data_train.drop('label', axis=1)
y = data_train['label']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1, random_state=seed)

# Convert to numpy arrays (restored from this notebook's cell_3), then scale
# pixels to [0, 1] and one-hot encode the labels.
X_train_matrix = X_train.as_matrix()
y_train_matrix = y_train.as_matrix()
X_test_matrix = X_test.as_matrix()
y_test_matrix = y_test.as_matrix()
X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255
y_train_matrix = np_utils.to_categorical(y_train_matrix, 10)
y_test_matrix = np_utils.to_categorical(y_test_matrix, 10)

model = Sequential()
model.add(Dense(784, input_shape=(len(X_train_matrix[0]),)))
model.add(Dense(392, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(196, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100
history = model.fit(X_train_matrix, y_train_matrix, batch_size=100, epochs=6, verbose=1)

cb_checkpoint = ModelCheckpoint(filepath='best-model.hdf5', verbose=1, save_best_only=True)
history = model.fit(X_train_matrix, y_train_matrix, batch_size=400, epochs=6,
                    validation_split=0.2, shuffle=True, callbacks=[cb_checkpoint], verbose=1)
model.load_weights('best-model.hdf5')
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100

# Reshape the flat 784-vectors into 28x28x1 images for the CNN.
X_train = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), X_train_matrix)))
X_test = np.array(list(map(lambda x: np.reshape(x, (28, 28, 1)), X_test_matrix)))

model = Sequential()
model.add(Conv2D(filters=32, kernel_size=5, padding='same', activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=5, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test, y_test_matrix, verbose=0)
accuracy = score[1] * 100

cb_checkpoint = ModelCheckpoint(filepath='best-cnn-model.hdf5', verbose=1, save_best_only=True)
history = model.fit(X_train, y_train_matrix, batch_size=50, epochs=6,
                    validation_split=0.2, shuffle=True, callbacks=[cb_checkpoint], verbose=1)
code
2041757/cell_10
[ "text_plain_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.layers import Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils

X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255
y_train_matrix = np_utils.to_categorical(y_train_matrix, 10)
y_test_matrix = np_utils.to_categorical(y_test_matrix, 10)

model = Sequential()
model.add(Dense(784, input_shape=(len(X_train_matrix[0]),)))
model.add(Dense(392, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(196, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100
history = model.fit(X_train_matrix, y_train_matrix, batch_size=100, epochs=6, verbose=1)

cb_checkpoint = ModelCheckpoint(filepath='best-model.hdf5', verbose=1, save_best_only=True)
history = model.fit(X_train_matrix, y_train_matrix, batch_size=400, epochs=6,
                    validation_split=0.2, shuffle=True, callbacks=[cb_checkpoint], verbose=1)
model.load_weights('best-model.hdf5')
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100
print("Best Model's test accuracy is %1.1f%%" % accuracy)
code
2041757/cell_12
[ "image_output_1.png" ]
from keras.callbacks import ModelCheckpoint
from keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from keras.models import Sequential
from keras.utils import np_utils

X_train_matrix = X_train_matrix.astype('float32') / 255
X_test_matrix = X_test_matrix.astype('float32') / 255
y_train_matrix = np_utils.to_categorical(y_train_matrix, 10)
y_test_matrix = np_utils.to_categorical(y_test_matrix, 10)

model = Sequential()
model.add(Dense(784, input_shape=(len(X_train_matrix[0]),)))
model.add(Dense(392, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(196, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(10, activation='softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100
history = model.fit(X_train_matrix, y_train_matrix, batch_size=100, epochs=6, verbose=1)

cb_checkpoint = ModelCheckpoint(filepath='best-model.hdf5', verbose=1, save_best_only=True)
history = model.fit(X_train_matrix, y_train_matrix, batch_size=400, epochs=6,
                    validation_split=0.2, shuffle=True, callbacks=[cb_checkpoint], verbose=1)
model.load_weights('best-model.hdf5')
score = model.evaluate(X_test_matrix, y_test_matrix, verbose=0)
accuracy = score[1] * 100

model = Sequential()
model.add(Conv2D(filters=32, kernel_size=5, padding='same', activation='relu', input_shape=(28, 28, 1)))
model.add(MaxPooling2D(pool_size=2))
model.add(Conv2D(filters=64, kernel_size=5, padding='same', activation='relu'))
model.add(MaxPooling2D(pool_size=2))
model.add(Flatten())
model.add(Dense(1024, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(10, activation='softmax'))
model.summary()
code
2041757/cell_5
[ "text_plain_output_1.png" ]
from keras.utils import np_utils

print('Before:')
print(y_train_matrix[:8])
y_train_matrix = np_utils.to_categorical(y_train_matrix, 10)
y_test_matrix = np_utils.to_categorical(y_test_matrix, 10)
print('After:')
print(y_train_matrix[:8])
code
122247504/cell_4
[ "text_plain_output_1.png" ]
import os
import numpy as np  # linear algebra
import pandas as pd
import matplotlib.pyplot as plt
from PIL import Image
import tensorflow as tf
import keras
import cv2

os.listdir('/kaggle/input/')

real = '/kaggle/input/real-and-fake-face-detection/real_and_fake_face/training_real/'
imagePaths = os.listdir(real)
for pic in imagePaths:
    path = real + pic
    pic = Image.open(path)
    real_pictures = np.array(pic)
print(real_pictures.shape)  # shape of the last image read
code
122247504/cell_6
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
from PIL import Image
import tensorflow as tf
import keras
import cv2

os.listdir('/kaggle/input/')

real = '/kaggle/input/real-and-fake-face-detection/real_and_fake_face/training_real/'
imagePaths = os.listdir(real)
for pic in imagePaths:
    path = real + pic
    pic = Image.open(path)
    real_pictures = np.array(pic)
# A 3-D (height x width x channels) image array cannot be converted into a
# DataFrame directly, which is why this cell ends with a stderr traceback.
realdf = pd.DataFrame(real_pictures)
code
90127204/cell_21
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O
import random
import seaborn as sns
import tensorflow as tf

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
X_train = X_train / 255
y_train.nunique()

fig, ax = plt.subplots(5, 5, figsize=(8, 8))
fig.suptitle('Digits images and labels', fontsize=16)
ax = ax.ravel()
for i in range(25):
    sample_n = random.randint(0, X_train.shape[0] - 1)  # randint is inclusive, so -1 avoids an IndexError
    ax[i].imshow(np.array(X_train.iloc[sample_n]).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(y_train[sample_n], fontsize=12)
plt.subplots_adjust(hspace=0.3)
plt.colorbar()

tf.random.set_seed(42)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(units=784, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=392, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=151, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=50, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=10, activation='softmax'),
])
model.compile(optimizer='Adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0,
                                      mode='auto', baseline=None, restore_best_weights=False)
history = model.fit(X_train, y_train, epochs=100, validation_split=0.3, batch_size=50, callbacks=[es])
code
90127204/cell_9
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
y_train.nunique()
code
90127204/cell_25
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O
import random
import seaborn as sns
import tensorflow as tf

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
X_train = X_train / 255
y_train.nunique()

fig, ax = plt.subplots(5, 5, figsize=(8, 8))
fig.suptitle('Digits images and labels', fontsize=16)
ax = ax.ravel()
for i in range(25):
    sample_n = random.randint(0, X_train.shape[0] - 1)  # randint is inclusive, so -1 avoids an IndexError
    ax[i].imshow(np.array(X_train.iloc[sample_n]).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(y_train[sample_n], fontsize=12)
plt.subplots_adjust(hspace=0.3)
plt.colorbar()

tf.random.set_seed(42)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(units=784, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=392, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=151, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=50, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=10, activation='softmax'),
])
model.compile(optimizer='Adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0,
                                      mode='auto', baseline=None, restore_best_weights=False)
history = model.fit(X_train, y_train, epochs=100, validation_split=0.3, batch_size=50, callbacks=[es])
model.evaluate(X_train, y_train)
code
90127204/cell_23
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O
import random
import seaborn as sns
import tensorflow as tf

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
X_train = X_train / 255
y_train.nunique()

fig, ax = plt.subplots(5, 5, figsize=(8, 8))
fig.suptitle('Digits images and labels', fontsize=16)
ax = ax.ravel()
for i in range(25):
    sample_n = random.randint(0, X_train.shape[0] - 1)  # randint is inclusive, so -1 avoids an IndexError
    ax[i].imshow(np.array(X_train.iloc[sample_n]).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(y_train[sample_n], fontsize=12)
plt.subplots_adjust(hspace=0.3)
plt.colorbar()

fig, ax = plt.subplots(2, 5, figsize=(8, 4))
fig.suptitle('Average shape per digit', fontsize=16)
ax = ax.ravel()
for i in range(10):
    ax[i].imshow(np.array(train[train['label'] == i].drop('label', axis=1).mean()).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(i, fontsize=12)

tf.random.set_seed(42)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(units=784, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=392, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=151, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=50, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=10, activation='softmax'),
])
model.compile(optimizer='Adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0,
                                      mode='auto', baseline=None, restore_best_weights=False)
history = model.fit(X_train, y_train, epochs=100, validation_split=0.3, batch_size=50, callbacks=[es])
history.history.keys()

plt.title('Loss')
plt.plot(range(len(history.history['loss'])), history.history['loss'], marker='o', c='gray')
plt.plot(range(len(history.history['loss'])), history.history['val_loss'], marker='o')
plt.show()
code
90127204/cell_30
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O
import random
import seaborn as sns
import tensorflow as tf

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
X_train = X_train / 255
y_train.nunique()

fig, ax = plt.subplots(5, 5, figsize=(8, 8))
fig.suptitle('Digits images and labels', fontsize=16)
ax = ax.ravel()
for i in range(25):
    sample_n = random.randint(0, X_train.shape[0] - 1)  # randint is inclusive, so -1 avoids an IndexError
    ax[i].imshow(np.array(X_train.iloc[sample_n]).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(y_train[sample_n], fontsize=12)
plt.subplots_adjust(hspace=0.3)
plt.colorbar()

fig, ax = plt.subplots(2, 5, figsize=(8, 4))
fig.suptitle('Average shape per digit', fontsize=16)
ax = ax.ravel()
for i in range(10):
    ax[i].imshow(np.array(train[train['label'] == i].drop('label', axis=1).mean()).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(i, fontsize=12)

tf.random.set_seed(42)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(units=784, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=392, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=151, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=50, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=10, activation='softmax'),
])
model.compile(optimizer='Adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0,
                                      mode='auto', baseline=None, restore_best_weights=False)
history = model.fit(X_train, y_train, epochs=100, validation_split=0.3, batch_size=50, callbacks=[es])
history.history.keys()
model.evaluate(X_train, y_train)

# Predict the test set and show a sample of the predictions.
test = test / 255
test_result = model.predict(test)
test_result = np.array(pd.DataFrame(test_result).idxmax(axis=1))

fig, ax = plt.subplots(5, 5, figsize=(8, 8))
fig.suptitle('Digits images and labels', fontsize=16)
ax = ax.ravel()
for i in range(25):
    sample_n = random.randint(0, test.shape[0] - 1)  # randint is inclusive, so -1 avoids an IndexError
    ax[i].imshow(np.array(test.iloc[sample_n]).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(test_result[sample_n], fontsize=12)
plt.subplots_adjust(hspace=0.3)
fig.show()
code
90127204/cell_6
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
X_train.head()
code
90127204/cell_11
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import pandas as pd  # data processing, CSV file I/O
import seaborn as sns

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
y_train.nunique()

sns.countplot(x=y_train)
plt.title('# Of samples')
plt.show()
code
90127204/cell_1
[ "text_plain_output_1.png" ]
import os
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
import random
import tensorflow as tf

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
90127204/cell_7
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
X_train.info()
code
90127204/cell_18
[ "image_output_1.png" ]
import tensorflow as tf

tf.random.set_seed(42)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(units=784, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=392, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=151, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=50, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=10, activation='softmax'),
])
code
90127204/cell_15
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O
import random
import seaborn as sns

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
X_train = X_train / 255
y_train.nunique()

fig, ax = plt.subplots(5, 5, figsize=(8, 8))
fig.suptitle('Digits images and labels', fontsize=16)
ax = ax.ravel()
for i in range(25):
    sample_n = random.randint(0, X_train.shape[0] - 1)  # randint is inclusive, so -1 avoids an IndexError
    ax[i].imshow(np.array(X_train.iloc[sample_n]).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(y_train[sample_n], fontsize=12)
plt.subplots_adjust(hspace=0.3)
plt.colorbar()

fig, ax = plt.subplots(2, 5, figsize=(8, 4))
fig.suptitle('Average shape per digit', fontsize=16)
ax = ax.ravel()
for i in range(10):
    ax[i].imshow(np.array(train[train['label'] == i].drop('label', axis=1).mean()).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(i, fontsize=12)
code
90127204/cell_24
[ "application_vnd.jupyter.stderr_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O
import random
import seaborn as sns
import tensorflow as tf

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
X_train = X_train / 255
y_train.nunique()

fig, ax = plt.subplots(5, 5, figsize=(8, 8))
fig.suptitle('Digits images and labels', fontsize=16)
ax = ax.ravel()
for i in range(25):
    sample_n = random.randint(0, X_train.shape[0] - 1)  # randint is inclusive, so -1 avoids an IndexError
    ax[i].imshow(np.array(X_train.iloc[sample_n]).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(y_train[sample_n], fontsize=12)
plt.subplots_adjust(hspace=0.3)
plt.colorbar()

fig, ax = plt.subplots(2, 5, figsize=(8, 4))
fig.suptitle('Average shape per digit', fontsize=16)
ax = ax.ravel()
for i in range(10):
    ax[i].imshow(np.array(train[train['label'] == i].drop('label', axis=1).mean()).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(i, fontsize=12)

tf.random.set_seed(42)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(units=784, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=392, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=151, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=50, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=10, activation='softmax'),
])
model.compile(optimizer='Adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0,
                                      mode='auto', baseline=None, restore_best_weights=False)
history = model.fit(X_train, y_train, epochs=100, validation_split=0.3, batch_size=50, callbacks=[es])
history.history.keys()

plt.title('accuracy')
plt.plot(range(len(history.history['loss'])), history.history['accuracy'], c='gray', marker='o')
plt.plot(range(len(history.history['loss'])), history.history['val_accuracy'], marker='o')
plt.show()
code
90127204/cell_14
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O
import random
import seaborn as sns

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
X_train = X_train / 255
y_train.nunique()

fig, ax = plt.subplots(5, 5, figsize=(8, 8))
fig.suptitle('Digits images and labels', fontsize=16)
ax = ax.ravel()
for i in range(25):
    sample_n = random.randint(0, X_train.shape[0] - 1)  # randint is inclusive, so -1 avoids an IndexError
    ax[i].imshow(np.array(X_train.iloc[sample_n]).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(y_train[sample_n], fontsize=12)
plt.subplots_adjust(hspace=0.3)

plt.imshow(np.array(X_train.mean()).reshape(28, 28), cmap='inferno')
plt.colorbar()
plt.title('average shape', {'fontsize': 16})
plt.show()
code
90127204/cell_22
[ "image_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O
import random
import seaborn as sns
import tensorflow as tf

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
X_train = X_train / 255
y_train.nunique()

fig, ax = plt.subplots(5, 5, figsize=(8, 8))
fig.suptitle('Digits images and labels', fontsize=16)
ax = ax.ravel()
for i in range(25):
    sample_n = random.randint(0, X_train.shape[0] - 1)  # randint is inclusive, so -1 avoids an IndexError
    ax[i].imshow(np.array(X_train.iloc[sample_n]).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(y_train[sample_n], fontsize=12)
plt.subplots_adjust(hspace=0.3)
plt.colorbar()

tf.random.set_seed(42)
model = tf.keras.Sequential([
    tf.keras.layers.Dense(units=784, activation='relu', input_shape=(784,)),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=392, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=151, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=50, activation='relu'),
    tf.keras.layers.Dropout(0.2),
    tf.keras.layers.Dense(units=10, activation='softmax'),
])
model.compile(optimizer='Adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=0,
                                      mode='auto', baseline=None, restore_best_weights=False)
history = model.fit(X_train, y_train, epochs=100, validation_split=0.3, batch_size=50, callbacks=[es])
history.history.keys()
code
90127204/cell_12
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import pandas as pd  # data processing, CSV file I/O
import random
import seaborn as sns

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
X_train = X_train / 255
y_train.nunique()

fig, ax = plt.subplots(5, 5, figsize=(8, 8))
fig.suptitle('Digits images and labels', fontsize=16)
ax = ax.ravel()
for i in range(25):
    sample_n = random.randint(0, X_train.shape[0] - 1)  # randint is inclusive, so -1 avoids an IndexError
    ax[i].imshow(np.array(X_train.iloc[sample_n]).reshape(28, 28), cmap='inferno')
    ax[i].get_xaxis().set_visible(False)
    ax[i].get_yaxis().set_visible(False)
    ax[i].set_title(y_train[sample_n], fontsize=12)
plt.subplots_adjust(hspace=0.3)
code
90127204/cell_5
[ "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O

train = pd.read_csv('/kaggle/input/digit-recognizer/train.csv')
test = pd.read_csv('/kaggle/input/digit-recognizer/test.csv')
y_train = train['label']
X_train = train.drop('label', axis=1)
y_train.head()
code
74067422/cell_21
[ "text_html_output_1.png" ]
import pandas as pd

TitanicPath = '../input/titanic-data/train.csv'
Titanic = pd.read_csv(TitanicPath)
Columns = pd.DataFrame({'Name': Titanic.columns, 'Type': [str(type(c)) for c in Titanic.columns]})
Columns[:]
Titanic[Titanic.PassengerId == 148]
FirstClass = Titanic[Titanic.Pclass == 1].Name
print(''.join([f'{str(p)}, ' for p in FirstClass]))
code
74067422/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd

TitanicPath = '../input/titanic-data/train.csv'
Titanic = pd.read_csv(TitanicPath)
Columns = pd.DataFrame({'Name': Titanic.columns, 'Type': [str(type(c)) for c in Titanic.columns]})
Columns[:]
code
74067422/cell_25
[ "text_plain_output_1.png" ]
import pandas as pd

TitanicPath = '../input/titanic-data/train.csv'
Titanic = pd.read_csv(TitanicPath)
Columns = pd.DataFrame({'Name': Titanic.columns, 'Type': [str(type(c)) for c in Titanic.columns]})
Columns[:]
Titanic[Titanic.PassengerId == 148]
FirstClass = Titanic[Titanic.Pclass == 1].Name
Titanic['NoUnderAge'] = Titanic.Age >= 18
Titanic[:]
print(Titanic.Fare.max())
Titanic[Titanic.Fare == Titanic.Fare.max()]
code
74067422/cell_23
[ "text_html_output_1.png" ]
import pandas as pd

TitanicPath = '../input/titanic-data/train.csv'
Titanic = pd.read_csv(TitanicPath)
Columns = pd.DataFrame({'Name': Titanic.columns, 'Type': [str(type(c)) for c in Titanic.columns]})
Columns[:]
Titanic[Titanic.PassengerId == 148]
FirstClass = Titanic[Titanic.Pclass == 1].Name
Titanic['NoUnderAge'] = Titanic.Age >= 18
Titanic[:]
code
74067422/cell_30
[ "text_html_output_1.png", "text_plain_output_1.png" ]
import pandas as pd

TitanicPath = '../input/titanic-data/train.csv'
Titanic = pd.read_csv(TitanicPath)
Columns = pd.DataFrame({'Name': Titanic.columns, 'Type': [str(type(c)) for c in Titanic.columns]})
Columns[:]
Titanic[Titanic.PassengerId == 148]
FirstClass = Titanic[Titanic.Pclass == 1].Name
Titanic['NoUnderAge'] = Titanic.Age >= 18
Titanic[:]
Titanic[Titanic.Fare == Titanic.Fare.max()]
Titanic[Titanic.Fare == Titanic.Fare.min()]

# Export the DataFrame to JSON.
f = open('titanic.json', 'w')
f.write(Titanic.to_json())
f.close()

TitanicSurvived = Titanic[Titanic.Survived == 1]
TitanicDied = Titanic[Titanic.Survived == 0]
TitanicSurvivedMalesCount = TitanicSurvived[TitanicSurvived.Sex == 'male'].Sex.count()
TitanicSurvivedFemalesCount = TitanicSurvived[TitanicSurvived.Sex == 'female'].Sex.count()
TitanicSurvivedNoUnderAgeCount = TitanicSurvived[TitanicSurvived.NoUnderAge == True].NoUnderAge.count()
TitanicSurvivedUnderAgeCount = TitanicSurvived[TitanicSurvived.NoUnderAge == False].NoUnderAge.count()
print(f'1 - The median passenger class was higher among survivors {TitanicSurvived.Pclass.median()} than among the rest {TitanicDied.Pclass.median()}')
print(f'2 - The median fare was higher among survivors {TitanicSurvived.Fare.median()} than among the rest {TitanicDied.Fare.median()}')
print(f'3 - More women survived {TitanicSurvivedFemalesCount} than men {TitanicSurvivedMalesCount}')
print(f'4 - More adults survived {TitanicSurvivedNoUnderAgeCount} than minors {TitanicSurvivedUnderAgeCount}')
TitanicSurvived[:]
code
74067422/cell_26
[ "text_html_output_1.png" ]
import pandas as pd

TitanicPath = '../input/titanic-data/train.csv'
Titanic = pd.read_csv(TitanicPath)
Columns = pd.DataFrame({'Name': Titanic.columns, 'Type': [str(type(c)) for c in Titanic.columns]})
Columns[:]
Titanic[Titanic.PassengerId == 148]
FirstClass = Titanic[Titanic.Pclass == 1].Name
Titanic['NoUnderAge'] = Titanic.Age >= 18
Titanic[:]
Titanic[Titanic.Fare == Titanic.Fare.max()]
print(Titanic.Fare.min())
Titanic[Titanic.Fare == Titanic.Fare.min()]
code
74067422/cell_11
[ "text_plain_output_1.png" ]
import pandas as pd

TitanicPath = '../input/titanic-data/train.csv'
Titanic = pd.read_csv(TitanicPath)
print(f'Dimensions: {Titanic.shape[0]}x{Titanic.shape[1]}')
print(f'Number of records: {Titanic.shape[0]}')
print(f'Number of columns: {Titanic.shape[1]}')
code
74067422/cell_19
[ "text_html_output_1.png" ]
import pandas as pd

TitanicPath = '../input/titanic-data/train.csv'
Titanic = pd.read_csv(TitanicPath)
Columns = pd.DataFrame({'Name': Titanic.columns, 'Type': [str(type(c)) for c in Titanic.columns]})
Columns[:]
Titanic[Titanic.PassengerId == 148]
code
74067422/cell_7
[ "text_plain_output_1.png", "image_output_1.png" ]
def fibonacci(terms: int = 1):
    out: list[int] = []
    for i in range(terms):
        if i == 0:
            out.append(0)
        elif i == 1:
            out.append(1)
        else:
            out.append(out[-2] + out[-1])
    return out

def countWords(string: str = '', sep: str = ', '):
    out: dict[str, int] = {}
    for t in string.split(sep):
        out[t] = string.count(t)
    return out

def frequentWord(wordCount: dict = {}):
    out: tuple[str, int] = ('None', 0)
    for key, val in wordCount.items():
        if val > out[1]:
            out = (key, val)
    return out

Words = 'Perro, Gato, Conejo, Perro Lobo, Gato Leon, Gato Blanco'
WordsCount = countWords(Words)
print(WordsCount)
FrequentWord = frequentWord(WordsCount)
print(f'{FrequentWord[0]} occurs {FrequentWord[1]}', 'time' if FrequentWord[1] == 1 else 'times')
code
74067422/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

TitanicPath = '../input/titanic-data/train.csv'
Titanic = pd.read_csv(TitanicPath)
Columns = pd.DataFrame({'Name': Titanic.columns, 'Type': [str(type(c)) for c in Titanic.columns]})
Columns[:]
Titanic[:10]
code
74067422/cell_17
[ "text_html_output_1.png" ]
import pandas as pd

TitanicPath = '../input/titanic-data/train.csv'
Titanic = pd.read_csv(TitanicPath)
Columns = pd.DataFrame({'Name': Titanic.columns, 'Type': [str(type(c)) for c in Titanic.columns]})
Columns[:]
Titanic[-10:]
code
74067422/cell_5
[ "text_html_output_1.png", "text_plain_output_1.png" ]
def fibonacci(terms: int = 1):
    out: list[int] = []
    for i in range(terms):
        if i == 0:
            out.append(0)
        elif i == 1:
            out.append(1)
        else:
            out.append(out[-2] + out[-1])
    return out

for i in range(15):
    print(fibonacci(i))
code
325674/cell_4
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

global_temperatures = pd.read_csv('../input/GlobalTemperatures.csv', infer_datetime_format=True,
                                  index_col='dt', parse_dates=['dt'])
global_temperatures[global_temperatures.index.year > 2000]['LandAverageTemperature'].plot(figsize=(13, 7))
code
325674/cell_6
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

global_temperatures = pd.read_csv('../input/GlobalTemperatures.csv', infer_datetime_format=True,
                                  index_col='dt', parse_dates=['dt'])
global_temperatures.groupby(global_temperatures.index.year)['LandAverageTemperature'].mean().plot(figsize=(13, 7))
code
325674/cell_2
[ "text_plain_output_1.png" ]
from subprocess import check_output
import numpy as np
import pandas as pd

print(check_output(['ls', '../input']).decode('utf8'))

from matplotlib import pyplot as plt
code
325674/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

global_temperatures = pd.read_csv('../input/GlobalTemperatures.csv', infer_datetime_format=True,
                                  index_col='dt', parse_dates=['dt'])
print(global_temperatures.info())
code
73069103/cell_21
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torchvision.transforms import transforms
import cv2
import matplotlib.pyplot as plt
import numpy as np  # linear algebra
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torchvision

data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv')
img = cv2.imread('/kaggle/input/soft-computing-even-id-dataset/training-a/a00000.png')
img.shape

IMAGE_SIZE = 40
transform = transforms.Compose([
    transforms.ToPILImage(),
    torchvision.transforms.ColorJitter(brightness=0.4, saturation=0.4, contrast=0.4, hue=0.4),
    transforms.RandomRotation(20, expand=True),
    transforms.Resize(IMAGE_SIZE),
    transforms.CenterCrop(IMAGE_SIZE),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

class digit_Dataset(Dataset):
    def __init__(self, csv_file, root_dir, transform=True):
        self.annotations = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, index):
        img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0])
        image = cv2.imread(img_path)
        y_label = torch.tensor(int(self.annotations.iloc[index, 3]))
        if self.transform:
            image = self.transform(image)
        return (image, y_label)

dataset = digit_Dataset(csv_file='/kaggle/input/soft-computing-even-id-dataset/training-a.csv',
                        root_dir='/kaggle/input/soft-computing-even-id-dataset/training-a',
                        transform=transform)
train_set, test_set = torch.utils.data.random_split(dataset, [15702, 4000])
train_loader = DataLoader(dataset=train_set, batch_size=16, shuffle=True)
test_loader = DataLoader(dataset=test_set, batch_size=16, shuffle=True)

data_iter = iter(train_loader)
images, labels = next(data_iter)  # `.next()` only works on older PyTorch iterators
images.shape

dataiter = iter(train_loader)
images, labels = next(dataiter)
plt.subplots(figsize=(20, 32))
for i in range(10):
    plt.subplot(5, 10, i + 1)  # the original `10 / 2` is a float, which subplot() rejects
    img = images[i].detach().numpy().transpose((1, 2, 0))
    mean = np.array([0.5, 0.5, 0.5])
    std = np.array([0.5, 0.5, 0.5])
    img = std * img + mean  # undo the Normalize transform
    img = np.clip(img, 0, 1)
    plt.title(labels[i])
    plt.imshow(img)
plt.show()
code
73069103/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv')
data_lebel.head()
code
73069103/cell_23
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, ConcatDataset
from torchvision.transforms import transforms
import cv2
import numpy as np
import os
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)
import torch
import torch.nn as nn
import torch.nn.functional as f
import torchvision

data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv')
img = cv2.imread('/kaggle/input/soft-computing-even-id-dataset/training-a/a00000.png')

IMAGE_SIZE = 40
transform = transforms.Compose([
    transforms.ToPILImage(),
    torchvision.transforms.ColorJitter(brightness=0.4, saturation=0.4, contrast=0.4, hue=0.4),
    transforms.RandomRotation(20, expand=True),
    transforms.Resize(IMAGE_SIZE),
    transforms.CenterCrop(IMAGE_SIZE),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])

class digit_Dataset(Dataset):
    def __init__(self, csv_file, root_dir, transform=True):
        self.annotations = pd.read_csv(csv_file)
        self.root_dir = root_dir
        self.transform = transform

    def __len__(self):
        return len(self.annotations)

    def __getitem__(self, index):
        img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0])
        image = cv2.imread(img_path)
        y_label = torch.tensor(int(self.annotations.iloc[index, 3]))
        if self.transform:
            image = self.transform(image)
        return (image, y_label)

class Net(nn.Module):
    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(3 * 40 * 40, 200)
        self.fc2 = nn.Linear(200, 200)
        self.fc3 = nn.Linear(200, 200)
        self.fc4 = nn.Linear(200, 200)
        self.fc5 = nn.Linear(200, 200)
        self.fc6 = nn.Linear(200, 10)

    def forward(self, x):
        x = f.relu(self.fc1(x))
        x = f.relu(self.fc2(x))
        x = f.relu(self.fc3(x))
        x = f.relu(self.fc4(x))
        x = f.relu(self.fc5(x))
        x = self.fc6(x)
        return x

net = Net()
net.cuda()
code
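For a sense of scale, the six-layer MLP above can be sized by summing `numel()` over its parameters. A sketch that re-declares equivalent layers rather than the class itself, so the count below follows purely from the shapes in the cell:

import torch.nn as nn

# Mirror the layer shapes of the Net class above and count trainable parameters.
layers = [nn.Linear(3 * 40 * 40, 200),
          nn.Linear(200, 200), nn.Linear(200, 200),
          nn.Linear(200, 200), nn.Linear(200, 200),
          nn.Linear(200, 10)]
total = sum(p.numel() for layer in layers for p in layer.parameters())
print(total)  # 1123010 = 960200 (fc1) + 4 * 40200 (fc2-fc5) + 2010 (fc6)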
73069103/cell_20
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, ConcatDataset from torchvision.transforms import transforms import cv2 import cv2 import os import os import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import torch import torchvision import numpy as np import pandas as pd import os data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv') img = cv2.imread('/kaggle/input/soft-computing-even-id-dataset/training-a/a00000.png') IMAGE_SIZE = 40 transform = transforms.Compose([transforms.ToPILImage(), torchvision.transforms.ColorJitter(brightness=0.4, saturation=0.4, contrast=0.4, hue=0.4), transforms.RandomRotation(20, expand=True), transforms.Resize(IMAGE_SIZE), transforms.CenterCrop(IMAGE_SIZE), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) class digit_Dataset(Dataset): def __init__(self, csv_file, root_dir, transform=True): self.annotations = pd.read_csv(csv_file) self.root_dir = root_dir self.transform = transform def __len__(self): return len(self.annotations) def __getitem__(self, index): img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0]) image = cv2.imread(img_path) y_label = torch.tensor(int(self.annotations.iloc[index, 3])) if self.transform: image = self.transform(image) return (image, y_label) dataset = digit_Dataset(csv_file='/kaggle/input/soft-computing-even-id-dataset/training-a.csv', root_dir='/kaggle/input/soft-computing-even-id-dataset/training-a', transform=transform) train_set, test_set = torch.utils.data.random_split(dataset, [15702, 4000]) train_loader = DataLoader(dataset=train_set, batch_size=16, shuffle=True) test_loader = DataLoader(dataset=test_set, batch_size=16, shuffle=True) data_iter = iter(train_loader) images, labels = next(data_iter) images.shape
code
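Pulling a single batch with `next(iter(loader))` works across PyTorch versions, unlike the long-removed `.next()` iterator method. A self-contained illustration with synthetic tensors standing in for the image dataset; the batch size of 16 mirrors the loaders above:

import torch
from torch.utils.data import TensorDataset, DataLoader

xs = torch.randn(64, 3, 40, 40)    # fake images
ys = torch.randint(0, 10, (64,))   # fake digit labels
loader = DataLoader(TensorDataset(xs, ys), batch_size=16, shuffle=True)
images, labels = next(iter(loader))
print(images.shape)  # torch.Size([16, 3, 40, 40])
print(labels.shape)  # torch.Size([16])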
73069103/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv') data_lebel['digit'].value_counts()
code
73069103/cell_29
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, ConcatDataset from torchvision.transforms import transforms import cv2 import cv2 import os import os import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import torch import torch.nn as nn import torch.nn.functional as f import torch.optim as optim import torchvision import numpy as np import pandas as pd import os data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv') img = cv2.imread('/kaggle/input/soft-computing-even-id-dataset/training-a/a00000.png') IMAGE_SIZE = 40 transform = transforms.Compose([transforms.ToPILImage(), torchvision.transforms.ColorJitter(brightness=0.4, saturation=0.4, contrast=0.4, hue=0.4), transforms.RandomRotation(20, expand=True), transforms.Resize(IMAGE_SIZE), transforms.CenterCrop(IMAGE_SIZE), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) class digit_Dataset(Dataset): def __init__(self, csv_file, root_dir, transform=True): self.annotations = pd.read_csv(csv_file) self.root_dir = root_dir self.transform = transform def __len__(self): return len(self.annotations) def __getitem__(self, index): img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0]) image = cv2.imread(img_path) y_label = torch.tensor(int(self.annotations.iloc[index, 3])) if self.transform: image = self.transform(image) return (image, y_label) dataset = digit_Dataset(csv_file='/kaggle/input/soft-computing-even-id-dataset/training-a.csv', root_dir='/kaggle/input/soft-computing-even-id-dataset/training-a', transform=transform) train_set, test_set = torch.utils.data.random_split(dataset, [15702, 4000]) train_loader = DataLoader(dataset=train_set, batch_size=16, shuffle=True) test_loader = DataLoader(dataset=test_set, batch_size=16, shuffle=True) class Net(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(3 * 40 * 40, 200) self.fc2 = nn.Linear(200, 200) self.fc3 = nn.Linear(200, 200) self.fc4 = nn.Linear(200, 200) self.fc5 = nn.Linear(200, 200) self.fc6 = nn.Linear(200, 10) def forward(self, x): x = f.relu(self.fc1(x)) x = f.relu(self.fc2(x)) x = f.relu(self.fc3(x)) x = f.relu(self.fc4(x)) x = f.relu(self.fc5(x)) x = self.fc6(x) return x net = Net() net.cuda() optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9) criterion = torch.nn.CrossEntropyLoss() device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') Path = './net_final.pth' traininglosses = [] trainingaccuracy = [] testinglosses = [] testaccuracy = [] totalsteps = [] epochs = 20 steps = 0 running_loss = 0 print_every = 5000 for epoch in range(epochs): accuracy = 0 for inputs, labels in train_loader: net.train() steps += 1 inputs, labels = (inputs.to(device), labels.to(device)) optimizer.zero_grad() logps = net.forward(inputs.view(-1, 3 * 40 * 40)) loss = criterion(logps, labels) loss.backward() optimizer.step() pred = torch.argmax(logps, dim=1) correct = pred.eq(labels) running_loss += loss.item() accuracy += torch.mean(correct.float()) if steps % print_every == 0: after_train_accuracy = accuracy / print_every test_loss = 0 accuracy = 0 net.eval() with torch.no_grad(): for inputs, labels in test_loader: inputs, labels = (inputs.to(device), labels.to(device)) logps = net.forward(inputs.view(-1, 3 * 40 * 40)) batch_loss = criterion(logps, labels) test_loss += batch_loss.item() pred = torch.argmax(logps, dim=1) correct = pred.eq(labels) accuracy += torch.mean(correct.float()) traininglosses.append(running_loss / print_every) trainingaccuracy.append(after_train_accuracy) testinglosses.append(test_loss / len(test_loader)) testaccuracy.append(accuracy / len(test_loader)) totalsteps.append(steps) running_loss = 0 accuracy = 0 net.train() torch.save(net.state_dict(), Path) correct = 0 total = 0 with torch.no_grad(): for data in test_loader: X, y = data X, y = (X.to(device), y.to(device)) output = net(X.view(-1, 3 * 40 * 40)) for idx, i in enumerate(output): if torch.argmax(i) == y[idx]: correct += 1 total += 1 print(round(correct / total, 3))
code
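The final evaluation in the cell above counts correct predictions one sample at a time in a Python loop; the same count can be taken in a single vectorized comparison. A sketch with dummy logits — the batch size and the 10-class shape are assumptions matching the setup above:

import torch

logits = torch.randn(16, 10)                # one batch of class scores
targets = torch.randint(0, 10, (16,))
preds = logits.argmax(dim=1)
correct = (preds == targets).sum().item()   # scalar count, no per-sample loop
print(correct / targets.numel())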
73069103/cell_26
[ "application_vnd.jupyter.stderr_output_1.png", "image_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, ConcatDataset from torchvision.transforms import transforms import cv2 import cv2 import os import os import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import torch import torch.nn as nn import torch.nn.functional as f import torch.optim as optim import torchvision import numpy as np import pandas as pd import os data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv') img = cv2.imread('/kaggle/input/soft-computing-even-id-dataset/training-a/a00000.png') IMAGE_SIZE = 40 transform = transforms.Compose([transforms.ToPILImage(), torchvision.transforms.ColorJitter(brightness=0.4, saturation=0.4, contrast=0.4, hue=0.4), transforms.RandomRotation(20, expand=True), transforms.Resize(IMAGE_SIZE), transforms.CenterCrop(IMAGE_SIZE), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) class digit_Dataset(Dataset): def __init__(self, csv_file, root_dir, transform=True): self.annotations = pd.read_csv(csv_file) self.root_dir = root_dir self.transform = transform def __len__(self): return len(self.annotations) def __getitem__(self, index): img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0]) image = cv2.imread(img_path) y_label = torch.tensor(int(self.annotations.iloc[index, 3])) if self.transform: image = self.transform(image) return (image, y_label) dataset = digit_Dataset(csv_file='/kaggle/input/soft-computing-even-id-dataset/training-a.csv', root_dir='/kaggle/input/soft-computing-even-id-dataset/training-a', transform=transform) train_set, test_set = torch.utils.data.random_split(dataset, [15702, 4000]) train_loader = DataLoader(dataset=train_set, batch_size=16, shuffle=True) test_loader = DataLoader(dataset=test_set, batch_size=16, shuffle=True) class Net(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(3 * 40 * 40, 200) self.fc2 = nn.Linear(200, 200) self.fc3 = nn.Linear(200, 200) self.fc4 = nn.Linear(200, 200) self.fc5 = nn.Linear(200, 200) self.fc6 = nn.Linear(200, 10) def forward(self, x): x = f.relu(self.fc1(x)) x = f.relu(self.fc2(x)) x = f.relu(self.fc3(x)) x = f.relu(self.fc4(x)) x = f.relu(self.fc5(x)) x = self.fc6(x) return x net = Net() net.cuda() optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9) criterion = torch.nn.CrossEntropyLoss() device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') device
code
73069103/cell_19
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, ConcatDataset from torchvision.transforms import transforms import cv2 import cv2 import os import os import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import torch import torchvision import numpy as np import pandas as pd import os data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv') img = cv2.imread('/kaggle/input/soft-computing-even-id-dataset/training-a/a00000.png') IMAGE_SIZE = 40 transform = transforms.Compose([transforms.ToPILImage(), torchvision.transforms.ColorJitter(brightness=0.4, saturation=0.4, contrast=0.4, hue=0.4), transforms.RandomRotation(20, expand=True), transforms.Resize(IMAGE_SIZE), transforms.CenterCrop(IMAGE_SIZE), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) class digit_Dataset(Dataset): def __init__(self, csv_file, root_dir, transform=True): self.annotations = pd.read_csv(csv_file) self.root_dir = root_dir self.transform = transform def __len__(self): return len(self.annotations) def __getitem__(self, index): img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0]) image = cv2.imread(img_path) y_label = torch.tensor(int(self.annotations.iloc[index, 3])) if self.transform: image = self.transform(image) return (image, y_label) dataset = digit_Dataset(csv_file='/kaggle/input/soft-computing-even-id-dataset/training-a.csv', root_dir='/kaggle/input/soft-computing-even-id-dataset/training-a', transform=transform) train_set, test_set = torch.utils.data.random_split(dataset, [15702, 4000]) train_loader = DataLoader(dataset=train_set, batch_size=16, shuffle=True) test_loader = DataLoader(dataset=test_set, batch_size=16, shuffle=True) type(train_loader)
code
73069103/cell_18
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, ConcatDataset from torchvision.transforms import transforms import cv2 import cv2 import os import os import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import torch import torchvision import numpy as np import pandas as pd import os data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv') img = cv2.imread('/kaggle/input/soft-computing-even-id-dataset/training-a/a00000.png') IMAGE_SIZE = 40 transform = transforms.Compose([transforms.ToPILImage(), torchvision.transforms.ColorJitter(brightness=0.4, saturation=0.4, contrast=0.4, hue=0.4), transforms.RandomRotation(20, expand=True), transforms.Resize(IMAGE_SIZE), transforms.CenterCrop(IMAGE_SIZE), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) class digit_Dataset(Dataset): def __init__(self, csv_file, root_dir, transform=True): self.annotations = pd.read_csv(csv_file) self.root_dir = root_dir self.transform = transform def __len__(self): return len(self.annotations) def __getitem__(self, index): img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0]) image = cv2.imread(img_path) y_label = torch.tensor(int(self.annotations.iloc[index, 3])) if self.transform: image = self.transform(image) return (image, y_label) dataset = digit_Dataset(csv_file='/kaggle/input/soft-computing-even-id-dataset/training-a.csv', root_dir='/kaggle/input/soft-computing-even-id-dataset/training-a', transform=transform) train_set, test_set = torch.utils.data.random_split(dataset, [15702, 4000]) train_loader = DataLoader(dataset=train_set, batch_size=16, shuffle=True) test_loader = DataLoader(dataset=test_set, batch_size=16, shuffle=True) len(train_loader)
code
73069103/cell_28
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, ConcatDataset from torchvision.transforms import transforms import cv2 import cv2 import matplotlib.pyplot as plt import numpy as np import numpy as np import numpy as np # linear algebra import os import os import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import torch import torch.nn as nn import torch.nn.functional as f import torch.optim as optim import torchvision import numpy as np import pandas as pd import os data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv') img = cv2.imread('/kaggle/input/soft-computing-even-id-dataset/training-a/a00000.png') img.shape IMAGE_SIZE = 40 transform = transforms.Compose([transforms.ToPILImage(), torchvision.transforms.ColorJitter(brightness=0.4, saturation=0.4, contrast=0.4, hue=0.4), transforms.RandomRotation(20, expand=True), transforms.Resize(IMAGE_SIZE), transforms.CenterCrop(IMAGE_SIZE), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) class digit_Dataset(Dataset): def __init__(self, csv_file, root_dir, transform=True): self.annotations = pd.read_csv(csv_file) self.root_dir = root_dir self.transform = transform def __len__(self): return len(self.annotations) def __getitem__(self, index): img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0]) image = cv2.imread(img_path) y_label = torch.tensor(int(self.annotations.iloc[index, 3])) if self.transform: image = self.transform(image) return (image, y_label) dataset = digit_Dataset(csv_file='/kaggle/input/soft-computing-even-id-dataset/training-a.csv', root_dir='/kaggle/input/soft-computing-even-id-dataset/training-a', transform=transform) train_set, test_set = torch.utils.data.random_split(dataset, [15702, 4000]) train_loader = DataLoader(dataset=train_set, batch_size=16, shuffle=True) test_loader = DataLoader(dataset=test_set, batch_size=16, shuffle=True) data_iter = iter(train_loader) images, labels = next(data_iter) images.shape dataiter = iter(train_loader) images, labels = next(dataiter) for i in range(10): img = images[i].detach().numpy().transpose((1, 2, 0)) mean = np.array([0.5, 0.5, 0.5]) std = np.array([0.5, 0.5, 0.5]) img = std * img + mean img = np.clip(img, 0, 1) class Net(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(3 * 40 * 40, 200) self.fc2 = nn.Linear(200, 200) self.fc3 = nn.Linear(200, 200) self.fc4 = nn.Linear(200, 200) self.fc5 = nn.Linear(200, 200) self.fc6 = nn.Linear(200, 10) def forward(self, x): x = f.relu(self.fc1(x)) x = f.relu(self.fc2(x)) x = f.relu(self.fc3(x)) x = f.relu(self.fc4(x)) x = f.relu(self.fc5(x)) x = self.fc6(x) return x net = Net() net.cuda() optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9) criterion = torch.nn.CrossEntropyLoss() device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') Path = './net_final.pth' traininglosses = [] trainingaccuracy = [] testinglosses = [] testaccuracy = [] totalsteps = [] epochs = 20 steps = 0 running_loss = 0 print_every = 5000 for epoch in range(epochs): accuracy = 0 for inputs, labels in train_loader: net.train() steps += 1 inputs, labels = (inputs.to(device), labels.to(device)) optimizer.zero_grad() logps = net.forward(inputs.view(-1, 3 * 40 * 40)) loss = criterion(logps, labels) loss.backward() optimizer.step() pred = torch.argmax(logps, dim=1) correct = pred.eq(labels) running_loss += loss.item() accuracy += torch.mean(correct.float()) if steps % print_every == 0: after_train_accuracy = accuracy / print_every test_loss = 0 accuracy = 0 net.eval() with torch.no_grad(): for inputs, labels in test_loader: inputs, labels = (inputs.to(device), labels.to(device)) logps = net.forward(inputs.view(-1, 3 * 40 * 40)) batch_loss = criterion(logps, labels) test_loss += batch_loss.item() pred = torch.argmax(logps, dim=1) correct = pred.eq(labels) accuracy += torch.mean(correct.float()) traininglosses.append(running_loss / print_every) trainingaccuracy.append(after_train_accuracy) testinglosses.append(test_loss / len(test_loader)) testaccuracy.append(accuracy / len(test_loader)) totalsteps.append(steps) running_loss = 0 accuracy = 0 net.train() torch.save(net.state_dict(), Path) plt.figure(figsize=(50, 10)) plt.plot(totalsteps, traininglosses, label='Train Loss') plt.plot(totalsteps, trainingaccuracy, label='Train Accuracy') plt.plot(totalsteps, testinglosses, label='Test Loss') plt.plot(totalsteps, testaccuracy, label='Test Accuracy') plt.legend() plt.grid() plt.show()
code
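One caveat on the curve plot above: `trainingaccuracy` and `testaccuracy` are filled with tensors (CUDA tensors when the model runs on GPU), and matplotlib needs plain Python floats. A sketch of the conversion, assuming history lists shaped like the ones in the cell:

import torch
import matplotlib.pyplot as plt

history = [torch.tensor(0.62), torch.tensor(0.71), torch.tensor(0.78)]  # stand-in accuracies
as_floats = [h.cpu().item() if torch.is_tensor(h) else float(h) for h in history]
plt.plot(range(1, len(as_floats) + 1), as_floats, label='accuracy')
plt.legend()
plt.show()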
73069103/cell_16
[ "text_plain_output_1.png" ]
from torchvision.transforms import transforms import torchvision IMAGE_SIZE = 40 transform = transforms.Compose([transforms.ToPILImage(), torchvision.transforms.ColorJitter(brightness=0.4, saturation=0.4, contrast=0.4, hue=0.4), transforms.RandomRotation(20, expand=True), transforms.Resize(IMAGE_SIZE), transforms.CenterCrop(IMAGE_SIZE), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) dataset = digit_Dataset(csv_file='/kaggle/input/soft-computing-even-id-dataset/training-a.csv', root_dir='/kaggle/input/soft-computing-even-id-dataset/training-a', transform=transform) len(dataset)
code
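A quick sanity check on the `Compose` pipeline above is to push a random uint8 array through it and confirm the output geometry; after `Resize(40)` and `CenterCrop(40)` every sample should come out as a 3 x 40 x 40 tensor. A sketch — the 64 x 64 input size is an arbitrary assumption:

import numpy as np
from torchvision.transforms import transforms

IMAGE_SIZE = 40
transform = transforms.Compose([
    transforms.ToPILImage(),
    transforms.ColorJitter(brightness=0.4, saturation=0.4, contrast=0.4, hue=0.4),
    transforms.RandomRotation(20, expand=True),
    transforms.Resize(IMAGE_SIZE),
    transforms.CenterCrop(IMAGE_SIZE),
    transforms.ToTensor(),
    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
])
dummy = np.random.randint(0, 256, (64, 64, 3), dtype=np.uint8)  # fake 3-channel image like cv2 returns
print(transform(dummy).shape)  # torch.Size([3, 40, 40])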
73069103/cell_22
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, ConcatDataset from torchvision.transforms import transforms import cv2 import cv2 import os import os import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import torch import torch.nn as nn import torch.nn.functional as f import torchvision import numpy as np import pandas as pd import os data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv') img = cv2.imread('/kaggle/input/soft-computing-even-id-dataset/training-a/a00000.png') IMAGE_SIZE = 40 transform = transforms.Compose([transforms.ToPILImage(), torchvision.transforms.ColorJitter(brightness=0.4, saturation=0.4, contrast=0.4, hue=0.4), transforms.RandomRotation(20, expand=True), transforms.Resize(IMAGE_SIZE), transforms.CenterCrop(IMAGE_SIZE), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) class digit_Dataset(Dataset): def __init__(self, csv_file, root_dir, transform=True): self.annotations = pd.read_csv(csv_file) self.root_dir = root_dir self.transform = transform def __len__(self): return len(self.annotations) def __getitem__(self, index): img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0]) image = cv2.imread(img_path) y_label = torch.tensor(int(self.annotations.iloc[index, 3])) if self.transform: image = self.transform(image) return (image, y_label) class Net(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(3 * 40 * 40, 200) self.fc2 = nn.Linear(200, 200) self.fc3 = nn.Linear(200, 200) self.fc4 = nn.Linear(200, 200) self.fc5 = nn.Linear(200, 200) self.fc6 = nn.Linear(200, 10) def forward(self, x): x = f.relu(self.fc1(x)) x = f.relu(self.fc2(x)) x = f.relu(self.fc3(x)) x = f.relu(self.fc4(x)) x = f.relu(self.fc5(x)) x = self.fc6(x) return x net = Net() print(net)
code
73069103/cell_10
[ "text_html_output_1.png" ]
import cv2 import cv2 img = cv2.imread('/kaggle/input/soft-computing-even-id-dataset/training-a/a00000.png') img.shape
code
73069103/cell_27
[ "text_plain_output_1.png" ]
from torch.utils.data import Dataset, DataLoader, ConcatDataset from torchvision.transforms import transforms import cv2 import cv2 import os import os import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import torch import torch.nn as nn import torch.nn.functional as f import torch.optim as optim import torchvision import numpy as np import pandas as pd import os data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv') img = cv2.imread('/kaggle/input/soft-computing-even-id-dataset/training-a/a00000.png') IMAGE_SIZE = 40 transform = transforms.Compose([transforms.ToPILImage(), torchvision.transforms.ColorJitter(brightness=0.4, saturation=0.4, contrast=0.4, hue=0.4), transforms.RandomRotation(20, expand=True), transforms.Resize(IMAGE_SIZE), transforms.CenterCrop(IMAGE_SIZE), transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) class digit_Dataset(Dataset): def __init__(self, csv_file, root_dir, transform=True): self.annotations = pd.read_csv(csv_file) self.root_dir = root_dir self.transform = transform def __len__(self): return len(self.annotations) def __getitem__(self, index): img_path = os.path.join(self.root_dir, self.annotations.iloc[index, 0]) image = cv2.imread(img_path) y_label = torch.tensor(int(self.annotations.iloc[index, 3])) if self.transform: image = self.transform(image) return (image, y_label) dataset = digit_Dataset(csv_file='/kaggle/input/soft-computing-even-id-dataset/training-a.csv', root_dir='/kaggle/input/soft-computing-even-id-dataset/training-a', transform=transform) train_set, test_set = torch.utils.data.random_split(dataset, [15702, 4000]) train_loader = DataLoader(dataset=train_set, batch_size=16, shuffle=True) test_loader = DataLoader(dataset=test_set, batch_size=16, shuffle=True) class Net(nn.Module): def __init__(self): super().__init__() self.fc1 = nn.Linear(3 * 40 * 40, 200) self.fc2 = nn.Linear(200, 200) self.fc3 = nn.Linear(200, 200) self.fc4 = nn.Linear(200, 200) self.fc5 = nn.Linear(200, 200) self.fc6 = nn.Linear(200, 10) def forward(self, x): x = f.relu(self.fc1(x)) x = f.relu(self.fc2(x)) x = f.relu(self.fc3(x)) x = f.relu(self.fc4(x)) x = f.relu(self.fc5(x)) x = self.fc6(x) return x net = Net() net.cuda() optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9) criterion = torch.nn.CrossEntropyLoss() device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu') Path = './net_final.pth' traininglosses = [] trainingaccuracy = [] testinglosses = [] testaccuracy = [] totalsteps = [] epochs = 20 steps = 0 running_loss = 0 print_every = 5000 for epoch in range(epochs): accuracy = 0 for inputs, labels in train_loader: net.train() steps += 1 inputs, labels = (inputs.to(device), labels.to(device)) optimizer.zero_grad() logps = net.forward(inputs.view(-1, 3 * 40 * 40)) loss = criterion(logps, labels) loss.backward() optimizer.step() pred = torch.argmax(logps, dim=1) correct = pred.eq(labels) running_loss += loss.item() accuracy += torch.mean(correct.float()) if steps % print_every == 0: after_train_accuracy = accuracy / print_every test_loss = 0 accuracy = 0 net.eval() with torch.no_grad(): for inputs, labels in test_loader: inputs, labels = (inputs.to(device), labels.to(device)) logps = net.forward(inputs.view(-1, 3 * 40 * 40)) batch_loss = criterion(logps, labels) test_loss += batch_loss.item() pred = torch.argmax(logps, dim=1) correct = pred.eq(labels) accuracy += torch.mean(correct.float()) traininglosses.append(running_loss / print_every) trainingaccuracy.append(after_train_accuracy) testinglosses.append(test_loss / len(test_loader)) testaccuracy.append(accuracy / len(test_loader)) totalsteps.append(steps) print(f'Device {device} Epoch {epoch + 1}/{epochs} Step {steps} Train loss: {running_loss / print_every:f} Train accuracy: {after_train_accuracy:f} Test loss: {test_loss / len(test_loader):f} Test accuracy: {accuracy / len(test_loader):f}') running_loss = 0 accuracy = 0 net.train() print('Finish Train') torch.save(net.state_dict(), Path)
code
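`torch.save(net.state_dict(), Path)` in the cell above persists weights only, so loading them back requires instantiating the architecture first. A minimal sketch with a stand-in module — the tiny `nn.Sequential` is illustrative, not the `Net` class itself, though the file name follows the cell:

import torch
import torch.nn as nn

net = nn.Sequential(nn.Linear(4, 2))        # stand-in for the Net class above
path = './net_final.pth'
torch.save(net.state_dict(), path)

restored = nn.Sequential(nn.Linear(4, 2))   # same architecture, fresh weights
restored.load_state_dict(torch.load(path, map_location='cpu'))  # map_location handles GPU checkpoints on CPU
restored.eval()                             # disable training-only behavior before inference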
73069103/cell_5
[ "image_output_1.png" ]
import pandas as pd import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) data_lebel = pd.read_csv('/kaggle/input/soft-computing-even-id-dataset/training-a.csv') data_lebel['digit'].unique()
code
18155655/cell_9
[ "image_output_1.png" ]
from IPython.display import Image Image('../input/hogweed-outscreenshot/screenshot_out_example.jpg')
code
18155655/cell_2
[ "image_output_1.png" ]
from IPython.display import Image from IPython.display import Image Image('../input/hogweednew/heracleum_lanantum_maxima_03.jpg')
code
18155655/cell_1
[ "text_plain_output_1.png" ]
import os import os print(os.listdir('../input/hogweed-outscreenshot'))
code
18155655/cell_5
[ "image_output_1.png" ]
from IPython.display import Image Image('../input/hogweed-screenshot/screenshot_example.jpg')
code
2016010/cell_9
[ "application_vnd.jupyter.stderr_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sub = pd.read_csv('../input/sample_submission.csv') test.shape test.dtypes
code
2016010/cell_4
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sub = pd.read_csv('../input/sample_submission.csv') test.shape
code
2016010/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sub = pd.read_csv('../input/sample_submission.csv') test.shape test.head()
code
2016010/cell_1
[ "text_plain_output_1.png" ]
from subprocess import check_output import numpy as np import pandas as pd from subprocess import check_output print(check_output(['ls', '../input']).decode('utf8'))
code
2016010/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sub = pd.read_csv('../input/sample_submission.csv') sub.head()
code
2016010/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sub = pd.read_csv('../input/sample_submission.csv') train.shape train.dtypes
code
2016010/cell_15
[ "text_plain_output_1.png" ]
from sklearn import model_selection from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.linear_model import LogisticRegression import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) import re import time train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sub = pd.read_csv('../input/sample_submission.csv') train.shape test.shape train.dtypes test.dtypes import re def clean_text(text): text = re.sub("[^A-za-z0-9^,?!.\\/'+-=]", ' ', text) text = re.sub("what's", 'what is ', text) text = re.sub("\\'s", ' ', text) text = re.sub("\\'ve", ' have ', text) text = re.sub("can't", 'cannot ', text) text = re.sub("n't", ' not ', text) text = re.sub("i'm", 'i am ', text) text = re.sub("\\'re", ' are ', text) text = re.sub("\\'d", ' would ', text) text = re.sub("\\'ll", ' will ', text) text = re.sub("\\'scuse", ' excuse ', text) text = re.sub(',', ' ', text) text = re.sub('\\.', ' ', text) text = re.sub('!', ' _exclamationmark_ ', text) text = re.sub('\\?', ' _questionmark_ ', text) return text def build_data_set(ngram=3, stem=False, max_features=2000, min_df=2, remove_stopwords=True): train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') test.fillna('missing', inplace=True) clean_train_comments = [] for i in range(train.shape[0]): clean_train_comments.append(clean_text(train['comment_text'][i])) for i in range(test.shape[0]): clean_train_comments.append(clean_text(test['comment_text'][i])) qs = pd.Series(clean_train_comments).astype(str) if not stem: vect = TfidfVectorizer(analyzer=u'word', stop_words='english', min_df=min_df, ngram_range=(1, ngram), max_features=max_features) ifidf_vect = vect.fit_transform(qs) X = ifidf_vect.toarray() X_train = X[:train.shape[0]] X_test = X[train.shape[0]:] else: vect_stem = StemmedTfidfVectorizer(analyzer=u'word', stop_words='english', min_df=min_df, ngram_range=(1, ngram), max_features=max_features) ifidf_vect_stem = vect_stem.fit_transform(qs) X = ifidf_vect_stem.toarray() X_train = X[:train.shape[0]] X_test = X[train.shape[0]:] Y_train = train[['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate']] assert Y_train.shape[0] == X_train.shape[0] del train, test return (X_train, X_test, Y_train) labels = ['toxic', 'severe_toxic', 'obscene', 'threat', 'insult', 'identity_hate'] params = {'toxic': {'ngrams': 1, 'stem': True, 'max_features': 1000, 'C': 10}, 'threat': {'ngrams': 1, 'stem': False, 'max_features': 1000, 'C': 10}, 'severe_toxic': {'ngrams': 1, 'stem': True, 'max_features': 1000, 'C': 1.2}, 'obscene': {'ngrams': 1, 'stem': True, 'max_features': 1000, 'C': 10}, 'insult': {'ngrams': 1, 'stem': True, 'max_features': 1000, 'C': 1.2}, 'identity_hate': {'ngrams': 1, 'stem': True, 'max_features': 1000, 'C': 10}} start_time = time.time() for label in labels: print('>>> processing ', label) X_train, X_test, Y_train = build_data_set(ngram=params[label]['ngrams'], stem=params[label]['stem'], max_features=params[label]['max_features'], min_df=2, remove_stopwords=True) Y_train_lab = Y_train[label] seed = 7 scoring = 'accuracy' models = [] models.append(('LR', LogisticRegression())) results = [] names = [] for name, model in models: kfold = model_selection.KFold(n_splits=10, random_state=seed) cv_results = model_selection.cross_val_score(model, X_train, Y_train_lab, cv=kfold, scoring=scoring) results.append(cv_results) names.append(name) msg = '%s: %f (%f)' % (name, cv_results.mean(), cv_results.std()) print(msg)
code
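Two details worth flagging in the pipeline above. First, the `stem=True` branch references a `StemmedTfidfVectorizer` that is not defined in this cell — it presumably lives elsewhere in the source notebook. Second, the character class in `clean_text`'s first pattern uses the span `A-za-z`: in ASCII, `A-z` also admits `[`, `\`, `]`, `^`, `_` and the backtick, so some punctuation survives that `A-Za-z` would strip. A small sketch of the difference on a made-up string:

import re

s = 'weird_chars [here] ^ok'
loose = re.sub(r'[^A-za-z0-9]', ' ', s)   # A-z spans ASCII 65-122, so _ [ ] ^ survive
strict = re.sub(r'[^A-Za-z0-9]', ' ', s)  # letters and digits only
print(loose)   # weird_chars [here] ^ok
print(strict)  # weird chars  here   ok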
2016010/cell_16
[ "text_plain_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sub = pd.read_csv('../input/sample_submission.csv') sub[label] = output sub.to_csv('output_LR.csv', index=False)
code
2016010/cell_3
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sub = pd.read_csv('../input/sample_submission.csv') train.shape
code
2016010/cell_5
[ "text_html_output_1.png" ]
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv) train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') sub = pd.read_csv('../input/sample_submission.csv') train.shape train.head()
code
1008715/cell_13
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import pandas as pd from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test], join='outer') full['family'] = full.Parch + full.SibSp import numpy as np full['singlton'] = np.where(full.family == 1, 1, 0) full['small'] = np.where(np.logical_and(full.family > 1, full.family < 5), 1, 0) full['large'] = np.where(full.family > 4, 1, 0) full['Fare'].fillna(full.Fare.mean(), inplace=True) full.info()
code
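The three `np.where` flags above band family size into singleton (== 1), small (2-4), and large (>= 5), leaving 0 unflagged; `pd.cut` can express the same banding in one step. A sketch on toy values — the band edges follow the cell above:

import numpy as np
import pandas as pd

family = pd.Series([0, 1, 3, 6])
bands = pd.cut(family, bins=[0.5, 1.5, 4.5, np.inf], labels=['singleton', 'small', 'large'])
print(bands.tolist())  # [nan, 'singleton', 'small', 'large'] -- 0 falls outside all bands, as above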
1008715/cell_4
[ "text_html_output_1.png" ]
import pandas as pd import pandas as pd from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test], join='outer') full.info()
code
1008715/cell_23
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import pandas as pd from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test], join='outer') full['family'] = full.Parch + full.SibSp import numpy as np full['singlton'] = np.where(full.family == 1, 1, 0) full['small'] = np.where(np.logical_and(full.family > 1, full.family < 5), 1, 0) full['large'] = np.where(full.family > 4, 1, 0) full['Fare'].fillna(full.Fare.mean(), inplace=True) full.Age.isnull().sum() rand = np.random.randint(full.Age.mean() - full.Age.std(), full.Age.mean() + full.Age.std(), full.Age.isnull().sum()) full.loc[full.Age.isnull(), 'Age'] = rand full.drop(['Cabin', 'Name', 'Ticket', 'Sex'], axis=1, inplace=True) full.head()
code
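The age fill above draws from `np.random.randint` without a fixed seed, so every run imputes different values; seeding the generator makes the result reproducible. A sketch on a toy column — the mean-plus-or-minus-std bounds mirror the recipe in the cell, and the seed value is arbitrary:

import numpy as np
import pandas as pd

ages = pd.Series([22.0, np.nan, 38.0, np.nan, 29.0])
rng = np.random.default_rng(seed=42)  # fixed seed -> repeatable imputation
low, high = ages.mean() - ages.std(), ages.mean() + ages.std()
fills = rng.integers(int(low), int(high), ages.isnull().sum())
ages.loc[ages.isnull()] = fills
print(ages.tolist())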
1008715/cell_11
[ "text_html_output_1.png" ]
import numpy as np import pandas as pd import pandas as pd from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test], join='outer') full['family'] = full.Parch + full.SibSp import numpy as np full['singlton'] = np.where(full.family == 1, 1, 0) full['small'] = np.where(np.logical_and(full.family > 1, full.family < 5), 1, 0) full['large'] = np.where(full.family > 4, 1, 0) full.info()
code
1008715/cell_7
[ "text_html_output_1.png" ]
import pandas as pd import seaborn as sns import pandas as pd from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test], join='outer') full['family'] = full.Parch + full.SibSp sns.countplot(x='family', hue='Survived', data=full)
code
1008715/cell_18
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import seaborn as sns import pandas as pd from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test], join='outer') full['family'] = full.Parch + full.SibSp import numpy as np full['singlton'] = np.where(full.family == 1, 1, 0) full['small'] = np.where(np.logical_and(full.family > 1, full.family < 5), 1, 0) full['large'] = np.where(full.family > 4, 1, 0) full['Fare'].fillna(full.Fare.mean(), inplace=True) full.Age.isnull().sum() rand = np.random.randint(full.Age.mean() - full.Age.std(), full.Age.mean() + full.Age.std(), full.Age.isnull().sum()) full.loc[full.Age.isnull(), 'Age'] = rand sns.factorplot(x='Age', hue='Survived', row='Sex', data=full, kind='count', ci=None, aspect=5)
code
1008715/cell_3
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd import pandas as pd from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test], join='outer') full.head()
code
1008715/cell_17
[ "text_plain_output_1.png" ]
import numpy as np import pandas as pd import pandas as pd from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test], join='outer') full['family'] = full.Parch + full.SibSp import numpy as np full['singlton'] = np.where(full.family == 1, 1, 0) full['small'] = np.where(np.logical_and(full.family > 1, full.family < 5), 1, 0) full['large'] = np.where(full.family > 4, 1, 0) full['Fare'].fillna(full.Fare.mean(), inplace=True) full.Age.isnull().sum() rand = np.random.randint(full.Age.mean() - full.Age.std(), full.Age.mean() + full.Age.std(), full.Age.isnull().sum()) full.loc[full.Age.isnull(), 'Age'] = rand full.info()
code
1008715/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import numpy as np import pandas as pd import pandas as pd from sklearn.linear_model import LogisticRegression from sklearn.svm import SVC, LinearSVC from sklearn.ensemble import RandomForestClassifier from sklearn.neighbors import KNeighborsClassifier from sklearn.naive_bayes import GaussianNB train = pd.read_csv('../input/train.csv') test = pd.read_csv('../input/test.csv') full = pd.concat([train, test], join='outer') full['family'] = full.Parch + full.SibSp import numpy as np full['singlton'] = np.where(full.family == 1, 1, 0) full['small'] = np.where(np.logical_and(full.family > 1, full.family < 5), 1, 0) full['large'] = np.where(full.family > 4, 1, 0) full['Fare'].fillna(full.Fare.mean(), inplace=True) full.Age.isnull().sum()
code
129000049/cell_42
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression import numpy as np import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') train_X.shape train_y.shape model = LinearRegression() model.fit(train_X, train_y) prediction = model.predict(test_X) mae = np.mean(np.absolute(prediction - test_y)) variance_score = model.score(test_X, test_y) prediction = np.round(prediction, 2) results = pd.DataFrame({'Actual': test_y, 'Prediction': prediction, 'Difference': test_y - prediction}) model = LogisticRegression() model.fit(train_X, train_y) prediction = model.predict(test_X) mae = np.mean(np.absolute(prediction - test_y)) print('Mean Absolute Error:', mae)
code
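A caution on the cell above: `LogisticRegression` is a classifier, so fitting it to a temperature column treats every distinct degree value as a separate class, and the reported MAE is a distance between predicted class labels. For a continuous target, a regressor is the conventional choice. A toy contrast on synthetic data (not the weather frame):

import numpy as np
from sklearn.linear_model import LinearRegression, LogisticRegression

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 3))
y = (X @ np.array([2.0, -1.0, 0.5]) + rng.normal(scale=0.1, size=200)).round()  # integer-valued 'temperatures'

clf = LogisticRegression(max_iter=1000).fit(X, y)  # one class per distinct integer value
reg = LinearRegression().fit(X, y)
print(np.abs(clf.predict(X) - y).mean())           # MAE over predicted class labels
print(np.abs(reg.predict(X) - y).mean())           # regression MAE, typically lower here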
129000049/cell_13
[ "text_plain_output_1.png" ]
import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') wdf.columns wdf.shape wdf.isnull().any() wdf_num = wdf.loc[:, ['mintempC', 'tempC', 'HeatIndexC', 'pressure']] wdf_num.head()
code
129000049/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') wdf.columns wdf.shape wdf.describe()
code
129000049/cell_25
[ "text_plain_output_1.png" ]
train_y.shape train_y.head()
code
129000049/cell_4
[ "image_output_3.png", "image_output_2.png", "image_output_1.png" ]
import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') wdf.head(5)
code
129000049/cell_34
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression import numpy as np import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') train_X.shape train_y.shape model = LinearRegression() model.fit(train_X, train_y) prediction = model.predict(test_X) mae = np.mean(np.absolute(prediction - test_y)) print('Mean Absolute Error:', mae) variance_score = model.score(test_X, test_y) print('Variance score:', variance_score) prediction = np.round(prediction, 2) results = pd.DataFrame({'Actual': test_y, 'Prediction': prediction, 'Difference': test_y - prediction}) print(results)
code
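The hand-rolled MAE and the `model.score` call above have direct sklearn equivalents: `mean_absolute_error`, and `r2_score` (for regressors, `score` returns R-squared). A sketch on small arrays:

import numpy as np
from sklearn.metrics import mean_absolute_error, r2_score

actual = np.array([31.0, 29.0, 35.0, 40.0])
pred = np.array([30.2, 29.5, 34.1, 41.3])
print(mean_absolute_error(actual, pred))  # same as np.mean(np.absolute(pred - actual))
print(r2_score(actual, pred))             # the quantity LinearRegression.score reports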
129000049/cell_23
[ "text_plain_output_1.png" ]
train_y.shape
code
129000049/cell_44
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LinearRegression from sklearn.linear_model import LinearRegression from sklearn.linear_model import LogisticRegression import numpy as np import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') train_X.shape train_y.shape model = LinearRegression() model.fit(train_X, train_y) prediction = model.predict(test_X) mae = np.mean(np.absolute(prediction - test_y)) variance_score = model.score(test_X, test_y) prediction = np.round(prediction, 2) results = pd.DataFrame({'Actual': test_y, 'Prediction': prediction, 'Difference': test_y - prediction}) model = LogisticRegression() model.fit(train_X, train_y) prediction = model.predict(test_X) variance_score = model.score(test_X, test_y) print('Variance score:', variance_score)
code
129000049/cell_6
[ "text_plain_output_1.png" ]
import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') wdf.columns
code
129000049/cell_29
[ "text_html_output_1.png" ]
import matplotlib.pyplot as plt import matplotlib.pyplot as plt import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') wdf.columns wdf.shape wdf.isnull().any() wdf_num = wdf.loc[:, ['mintempC', 'tempC', 'HeatIndexC', 'pressure']] wdf_num.shape wdf_num.columns weth = wdf_num['2019':'2020'] plt.hexbin(weth.mintempC, weth.tempC, gridsize=20) plt.xlabel('Minimum Temperature') plt.ylabel('Temperature') plt.show() plt.hexbin(weth.HeatIndexC, weth.tempC, gridsize=20) plt.xlabel('Heat Index') plt.ylabel('Temperature') plt.show() plt.hexbin(weth.pressure, weth.tempC, gridsize=20) plt.xlabel('Pressure') plt.ylabel('Temperature') plt.show()
code
129000049/cell_11
[ "text_html_output_1.png" ]
import pandas as pd wdf = pd.read_csv('/kaggle/input/historical-weather-data-for-indian-cities/jaipur.csv', parse_dates=['date_time'], index_col='date_time') wdf.columns wdf.shape wdf.isnull().any()
code