Columns:
- path: string (13 to 17 characters)
- screenshot_names: sequence (1 to 873 items)
- code: string (0 to 40.4k characters)
- cell_type: string (1 class)
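Each row below pairs a notebook cell path with its rendered outputs (screenshot_names) and source (code). As a minimal sketch of how a dump with this schema might be loaded and sliced with pandas; the file name cells.jsonl and the line-delimited JSON layout are assumptions, not something the schema above guarantees:

import pandas as pd

# Assumed storage layout: one JSON object per line with the four columns above.
df = pd.read_json('cells.jsonl', lines=True)  # hypothetical file name

# Sanity-check the schema: path, screenshot_names, code, cell_type.
print(df.dtypes)

# Example: collect every cell of one notebook (the path prefix is the notebook id).
notebook = df[df['path'].str.startswith('129016727/')]
for _, row in notebook.iterrows():
    print(row['path'], len(row['code']), row['screenshot_names'])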
129016727/cell_1
[ "text_plain_output_1.png" ]
import os

import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
129016727/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd

dataset = pd.read_csv('/kaggle/input/phishing-website-dataset/dataset.csv')
dataset['Page_Rank'].value_counts()
code
129016727/cell_32
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

LR = LogisticRegression()
LR.fit(X_train, y_train)
y_pred = LR.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)

DT = DecisionTreeClassifier()
DT.fit(X_train, y_train)
y_pred = DT.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)

SVM = SVC()
SVM.fit(X_train, y_train)
y_pred = SVM.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)

RF = RandomForestClassifier()
RF.fit(X_train, y_train)
y_pred = RF.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy:', accuracy)
code
129016727/cell_28
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score

LR = LogisticRegression()
LR.fit(X_train, y_train)
y_pred = LR.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy:', accuracy)
code
129016727/cell_15
[ "text_plain_output_1.png" ]
import pandas as pd

dataset = pd.read_csv('/kaggle/input/phishing-website-dataset/dataset.csv')
dataset['double_slash_redirecting'].value_counts()
code
129016727/cell_17
[ "text_plain_output_1.png" ]
import pandas as pd

dataset = pd.read_csv('/kaggle/input/phishing-website-dataset/dataset.csv')
dataset['Prefix_Suffix'].value_counts()
code
129016727/cell_31
[ "text_plain_output_1.png" ]
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from sklearn.tree import DecisionTreeClassifier

LR = LogisticRegression()
LR.fit(X_train, y_train)
y_pred = LR.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)

DT = DecisionTreeClassifier()
DT.fit(X_train, y_train)
y_pred = DT.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)

SVM = SVC()
SVM.fit(X_train, y_train)
y_pred = SVM.predict(X_test)
accuracy = accuracy_score(y_test, y_pred)
print('Accuracy:', accuracy)
code
129016727/cell_27
[ "text_html_output_1.png" ]
print(X_train.shape)
print(X_test.shape)
code
105200156/cell_13
[ "text_plain_output_1.png" ]
import re

import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021

def join_texts(v):
    return re.sub(' +', ' ', ' '.join(v.discourse_text).replace('\n', ' ').replace('\x07', ' '))

df_fb2021_agg = df_fb2021.groupby('id').apply(lambda v: join_texts(v)).to_frame('text')
df_fb2021_agg
df_fb2021_agg.sample().text.item()
df_fb3 = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv')
df_fb3.sample().full_text.item()
common_ids = set(df_fb2021_agg.index) & set(df_fb3.text_id)
len(common_ids)
len(common_ids) / len(df_fb3)
code
105200156/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021
df_fb3 = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv')
df_fb3.sample().full_text.item()
code
105200156/cell_4
[ "text_plain_output_1.png" ]
import re

import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021

def join_texts(v):
    return re.sub(' +', ' ', ' '.join(v.discourse_text).replace('\n', ' ').replace('\x07', ' '))

df_fb2021_agg = df_fb2021.groupby('id').apply(lambda v: join_texts(v)).to_frame('text')
df_fb2021_agg
code
105200156/cell_6
[ "text_plain_output_1.png" ]
import re

import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021

def join_texts(v):
    return re.sub(' +', ' ', ' '.join(v.discourse_text).replace('\n', ' ').replace('\x07', ' '))

df_fb2021_agg = df_fb2021.groupby('id').apply(lambda v: join_texts(v)).to_frame('text')
df_fb2021_agg
df_fb2021_agg.sample().text.item()
df_fb2021_agg.text.str.len().plot.hist(bins=100)
code
105200156/cell_18
[ "text_plain_output_1.png", "image_output_1.png" ]
import re

import numpy as np
import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021

def join_texts(v):
    return re.sub(' +', ' ', ' '.join(v.discourse_text).replace('\n', ' ').replace('\x07', ' '))

df_fb2021_agg = df_fb2021.groupby('id').apply(lambda v: join_texts(v)).to_frame('text')
df_fb2021_agg
df_fb2021_agg.sample().text.item()
df_fb3 = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv')
df_fb3.sample().full_text.item()
common_ids = set(df_fb2021_agg.index) & set(df_fb3.text_id)
len(common_ids)
df_fb3.query('text_id == "009F4E9310CB"').full_text.item()
df_fb2021_agg.loc['009F4E9310CB'].item()
all_fb3 = ' '.join(df_fb3.full_text)
all_fb2021 = ' '.join(df_fb2021_agg.text)
print(np.mean([r.full_text in all_fb2021 for i, r in df_fb3.sample(1000).iterrows()]), ' fb3 strings are in fb2021 dataset')
code
105200156/cell_8
[ "text_plain_output_1.png" ]
import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021
df_fb3 = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv')
df_fb3
code
105200156/cell_15
[ "text_html_output_1.png" ]
import re

import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021

def join_texts(v):
    return re.sub(' +', ' ', ' '.join(v.discourse_text).replace('\n', ' ').replace('\x07', ' '))

df_fb2021_agg = df_fb2021.groupby('id').apply(lambda v: join_texts(v)).to_frame('text')
df_fb2021_agg
df_fb2021_agg.sample().text.item()
df_fb3 = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv')
df_fb3.sample().full_text.item()
common_ids = set(df_fb2021_agg.index) & set(df_fb3.text_id)
len(common_ids)
df_fb2021_agg.loc['009F4E9310CB'].item()
code
105200156/cell_16
[ "text_plain_output_1.png" ]
import re

import numpy as np
import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021

def join_texts(v):
    return re.sub(' +', ' ', ' '.join(v.discourse_text).replace('\n', ' ').replace('\x07', ' '))

df_fb2021_agg = df_fb2021.groupby('id').apply(lambda v: join_texts(v)).to_frame('text')
df_fb2021_agg
df_fb2021_agg.sample().text.item()
df_fb3 = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv')
df_fb3.sample().full_text.item()
common_ids = set(df_fb2021_agg.index) & set(df_fb3.text_id)
len(common_ids)
df_fb3.query('text_id == "009F4E9310CB"').full_text.item()
all_fb3 = ' '.join(df_fb3.full_text)
print(np.mean([r.discourse_text in all_fb3 for i, r in df_fb2021.sample(1000).iterrows()]), ' fb2021 strings are in fb3 dataset')
code
105200156/cell_3
[ "text_plain_output_1.png" ]
import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021
code
105200156/cell_14
[ "text_plain_output_1.png", "image_output_1.png" ]
import re

import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021

def join_texts(v):
    return re.sub(' +', ' ', ' '.join(v.discourse_text).replace('\n', ' ').replace('\x07', ' '))

df_fb2021_agg = df_fb2021.groupby('id').apply(lambda v: join_texts(v)).to_frame('text')
df_fb2021_agg
df_fb2021_agg.sample().text.item()
df_fb3 = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv')
df_fb3.sample().full_text.item()
common_ids = set(df_fb2021_agg.index) & set(df_fb3.text_id)
len(common_ids)
df_fb3.query('text_id == "009F4E9310CB"').full_text.item()
code
105200156/cell_10
[ "text_html_output_1.png" ]
import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021
df_fb3 = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv')
df_fb3.sample().full_text.item()
df_fb3.full_text.str.len().plot.hist(bins=100)
code
105200156/cell_12
[ "text_html_output_1.png" ]
import re

import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021

def join_texts(v):
    return re.sub(' +', ' ', ' '.join(v.discourse_text).replace('\n', ' ').replace('\x07', ' '))

df_fb2021_agg = df_fb2021.groupby('id').apply(lambda v: join_texts(v)).to_frame('text')
df_fb2021_agg
df_fb2021_agg.sample().text.item()
df_fb3 = pd.read_csv('../input/feedback-prize-english-language-learning/train.csv')
df_fb3.sample().full_text.item()
common_ids = set(df_fb2021_agg.index) & set(df_fb3.text_id)
len(common_ids)
code
105200156/cell_5
[ "text_plain_output_1.png" ]
import re

import pandas as pd

df_fb2021 = pd.read_csv('../input/feedback-prize-2021/train.csv', dtype={'discourse_id': int})
df_fb2021['textlen'] = df_fb2021.discourse_text.str.len()
df_fb2021

def join_texts(v):
    return re.sub(' +', ' ', ' '.join(v.discourse_text).replace('\n', ' ').replace('\x07', ' '))

df_fb2021_agg = df_fb2021.groupby('id').apply(lambda v: join_texts(v)).to_frame('text')
df_fb2021_agg
df_fb2021_agg.sample().text.item()
code
90147420/cell_6
[ "text_plain_output_1.png" ]
from plotly.subplots import make_subplots
import pandas as pd
import plotly.graph_objects as go

df = pd.read_csv('../input/samsung-electronics-stock-historical-price/005930.KS.csv')
df
fig = make_subplots(specs=[[{'secondary_y': True}]])
fig.add_trace(go.Scatter(x=df.Date, y=df.Close, name='Close'))
fig.add_trace(go.Scatter(x=df.Date, y=df['Adj Close'], name='Adj Close'))
fig.update_layout(xaxis_title='Date', yaxis_title='Price')
fig.show()

def bolinger(data):
    # 20-day moving average with bands at +/- 2 rolling standard deviations
    data['MA20'] = data.Close.rolling(window=20).mean()
    data['std'] = data.Close.rolling(window=20).std()
    data['upper_20'] = data.MA20 + 2 * data['std']
    data['lower_20'] = data.MA20 - 2 * data['std']
    data.drop('std', axis=1, inplace=True)
    data.dropna(inplace=True)
    return data

df = bolinger(df)
df.ffill(inplace=True)
df.reset_index(drop=True, inplace=True)
df
code
90147420/cell_2
[ "text_plain_output_1.png" ]
import pandas as pd

df = pd.read_csv('../input/samsung-electronics-stock-historical-price/005930.KS.csv')
df
code
90147420/cell_11
[ "text_html_output_1.png" ]
from plotly.subplots import make_subplots
import math
import pandas as pd
import plotly.graph_objects as go

df = pd.read_csv('../input/samsung-electronics-stock-historical-price/005930.KS.csv')
df
fig = make_subplots(specs=[[{'secondary_y': True}]])
fig.add_trace(go.Scatter(x=df.Date, y=df.Close, name='Close'))
fig.add_trace(go.Scatter(x=df.Date, y=df['Adj Close'], name='Adj Close'))
fig.update_layout(xaxis_title='Date', yaxis_title='Price')
fig.show()

def bolinger(data):
    data['MA20'] = data.Close.rolling(window=20).mean()
    data['std'] = data.Close.rolling(window=20).std()
    data['upper_20'] = data.MA20 + 2 * data['std']
    data['lower_20'] = data.MA20 - 2 * data['std']
    data.drop('std', axis=1, inplace=True)
    data.dropna(inplace=True)
    return data

df = bolinger(df)
df.ffill(inplace=True)
df.reset_index(drop=True, inplace=True)
df
fig = make_subplots(specs=[[{'secondary_y': True}]])
fig.add_trace(go.Scatter(x=df.Date, y=df.Close, name='Close'))
fig.add_trace(go.Scatter(x=df.Date, y=df.upper_20, name='Upper 20'))
fig.add_trace(go.Scatter(x=df.Date, y=df.lower_20, name='Lower 20'))
fig.update_layout(title='ploting bolinger band', xaxis_title='Date', yaxis_title='Price')
fig.show()
total_balance = 1000000

def backtesting(df, total_balance):
    buy_price = []
    buy_date = []
    sell_price = []
    sell_date = []
    long_earn = []
    num_bought = 0
    df_balance = []
    df_date = []
    balance_date = []
    trade_num = 0
    buy_num = -1
    sell_num = -1
    trade_num = -1
    position = 'none'
    for i in range(len(df.Close)):
        if total_balance <= 0:
            break
        elif position == 'none':
            # buy when the close touches the lower Bollinger band
            if df.Close[i] <= df['lower_20'][i] and abs(df.Close[i] - df['lower_20'][i]) <= 1000:
                position = 'long'
                buy_price.append(df.Close[i])
                buy_date.append(df.Date[i])
                if trade_num >= 0:
                    total_balance = df_balance[trade_num]
                num_bought = 0
                num_count = math.floor(total_balance / df.Close[i])
                num_bought += num_count
                remain = total_balance - df.Close[i] * num_count
                df_balance.append(total_balance)
                df_date.append(df.Date[i])
                trade_num += 1
                buy_num += 1
        elif position == 'long':
            # sell when the close touches the upper Bollinger band
            if df.Close[i] >= df['upper_20'][i] and abs(df.Close[i] - df['upper_20'][i]) <= 1000:
                position = 'none'
                sell_price.append(df.Close[i])
                sell_date.append(df.Date[i])
                total_balance = remain
                earn = df.Close[i] - buy_price[buy_num]
                total_earn = earn * num_bought + buy_price[buy_num] * num_bought
                total_balance += total_earn
                df_balance.append(total_balance)
                df_date.append(df.Date[i])
                sell_num += 1
                trade_num += 1
        # force-close an open position on the last row
        if i % (len(df) - 1) == 0:
            if position == 'long':
                sell_price.append(df.Close[i])
                sell_date.append(df.Date[i])
                total_balance = remain
                earn = df.Close[i] - buy_price[buy_num]
                total_earn = earn * num_bought + buy_price[buy_num] * num_bought
                total_balance += total_earn
                df_balance.append(total_balance)
                df_date.append(df.Date[i])
    balance = pd.DataFrame({'Date': df_date, 'balance': df_balance})
    final_return_ratio = (balance.iloc[-1][1] / 1000000 - 1) * 100
    return balance

df_balance = backtesting(df, total_balance)
code
90147420/cell_7
[ "text_html_output_1.png" ]
from plotly.subplots import make_subplots
import pandas as pd
import plotly.graph_objects as go

df = pd.read_csv('../input/samsung-electronics-stock-historical-price/005930.KS.csv')
df
fig = make_subplots(specs=[[{'secondary_y': True}]])
fig.add_trace(go.Scatter(x=df.Date, y=df.Close, name='Close'))
fig.add_trace(go.Scatter(x=df.Date, y=df['Adj Close'], name='Adj Close'))
fig.update_layout(xaxis_title='Date', yaxis_title='Price')
fig.show()

def bolinger(data):
    data['MA20'] = data.Close.rolling(window=20).mean()
    data['std'] = data.Close.rolling(window=20).std()
    data['upper_20'] = data.MA20 + 2 * data['std']
    data['lower_20'] = data.MA20 - 2 * data['std']
    data.drop('std', axis=1, inplace=True)
    data.dropna(inplace=True)
    return data

df = bolinger(df)
df.ffill(inplace=True)
df.reset_index(drop=True, inplace=True)
df
fig = make_subplots(specs=[[{'secondary_y': True}]])
fig.add_trace(go.Scatter(x=df.Date, y=df.Close, name='Close'))
fig.add_trace(go.Scatter(x=df.Date, y=df.upper_20, name='Upper 20'))
fig.add_trace(go.Scatter(x=df.Date, y=df.lower_20, name='Lower 20'))
fig.update_layout(title='ploting bolinger band', xaxis_title='Date', yaxis_title='Price')
fig.show()
code
90147420/cell_3
[ "text_html_output_1.png" ]
from plotly.subplots import make_subplots
import pandas as pd
import plotly.graph_objects as go

df = pd.read_csv('../input/samsung-electronics-stock-historical-price/005930.KS.csv')
df
fig = make_subplots(specs=[[{'secondary_y': True}]])
fig.add_trace(go.Scatter(x=df.Date, y=df.Close, name='Close'))
fig.add_trace(go.Scatter(x=df.Date, y=df['Adj Close'], name='Adj Close'))
fig.update_layout(xaxis_title='Date', yaxis_title='Price')
fig.show()
code
90147420/cell_12
[ "text_html_output_1.png" ]
from plotly.subplots import make_subplots
import math
import pandas as pd
import plotly.graph_objects as go

df = pd.read_csv('../input/samsung-electronics-stock-historical-price/005930.KS.csv')
df
fig = make_subplots(specs=[[{'secondary_y': True}]])
fig.add_trace(go.Scatter(x=df.Date, y=df.Close, name='Close'))
fig.add_trace(go.Scatter(x=df.Date, y=df['Adj Close'], name='Adj Close'))
fig.update_layout(xaxis_title='Date', yaxis_title='Price')
fig.show()

def bolinger(data):
    data['MA20'] = data.Close.rolling(window=20).mean()
    data['std'] = data.Close.rolling(window=20).std()
    data['upper_20'] = data.MA20 + 2 * data['std']
    data['lower_20'] = data.MA20 - 2 * data['std']
    data.drop('std', axis=1, inplace=True)
    data.dropna(inplace=True)
    return data

df = bolinger(df)
df.ffill(inplace=True)
df.reset_index(drop=True, inplace=True)
df
fig = make_subplots(specs=[[{'secondary_y': True}]])
fig.add_trace(go.Scatter(x=df.Date, y=df.Close, name='Close'))
fig.add_trace(go.Scatter(x=df.Date, y=df.upper_20, name='Upper 20'))
fig.add_trace(go.Scatter(x=df.Date, y=df.lower_20, name='Lower 20'))
fig.update_layout(title='ploting bolinger band', xaxis_title='Date', yaxis_title='Price')
fig.show()
total_balance = 1000000

def backtesting(df, total_balance):
    buy_price = []
    buy_date = []
    sell_price = []
    sell_date = []
    long_earn = []
    num_bought = 0
    df_balance = []
    df_date = []
    balance_date = []
    trade_num = 0
    buy_num = -1
    sell_num = -1
    trade_num = -1
    position = 'none'
    for i in range(len(df.Close)):
        if total_balance <= 0:
            break
        elif position == 'none':
            # buy when the close touches the lower Bollinger band
            if df.Close[i] <= df['lower_20'][i] and abs(df.Close[i] - df['lower_20'][i]) <= 1000:
                position = 'long'
                buy_price.append(df.Close[i])
                buy_date.append(df.Date[i])
                if trade_num >= 0:
                    total_balance = df_balance[trade_num]
                num_bought = 0
                num_count = math.floor(total_balance / df.Close[i])
                num_bought += num_count
                remain = total_balance - df.Close[i] * num_count
                df_balance.append(total_balance)
                df_date.append(df.Date[i])
                trade_num += 1
                buy_num += 1
        elif position == 'long':
            # sell when the close touches the upper Bollinger band
            if df.Close[i] >= df['upper_20'][i] and abs(df.Close[i] - df['upper_20'][i]) <= 1000:
                position = 'none'
                sell_price.append(df.Close[i])
                sell_date.append(df.Date[i])
                total_balance = remain
                earn = df.Close[i] - buy_price[buy_num]
                total_earn = earn * num_bought + buy_price[buy_num] * num_bought
                total_balance += total_earn
                df_balance.append(total_balance)
                df_date.append(df.Date[i])
                sell_num += 1
                trade_num += 1
        # force-close an open position on the last row
        if i % (len(df) - 1) == 0:
            if position == 'long':
                sell_price.append(df.Close[i])
                sell_date.append(df.Date[i])
                total_balance = remain
                earn = df.Close[i] - buy_price[buy_num]
                total_earn = earn * num_bought + buy_price[buy_num] * num_bought
                total_balance += total_earn
                df_balance.append(total_balance)
                df_date.append(df.Date[i])
    balance = pd.DataFrame({'Date': df_date, 'balance': df_balance})
    final_return_ratio = (balance.iloc[-1][1] / 1000000 - 1) * 100
    return balance

df_balance = backtesting(df, total_balance)
fig = make_subplots()
fig.add_trace(go.Scatter(x=df_balance.Date, y=df_balance.balance, name='your Balance'))
fig.update_layout(xaxis_title='Date', yaxis_title='your Balance')
fig.show()
code
90147420/cell_5
[ "text_html_output_1.png" ]
from plotly.subplots import make_subplots
import pandas as pd
import plotly.graph_objects as go

df = pd.read_csv('../input/samsung-electronics-stock-historical-price/005930.KS.csv')
df
fig = make_subplots(specs=[[{'secondary_y': True}]])
fig.add_trace(go.Scatter(x=df.Date, y=df.Close, name='Close'))
fig.add_trace(go.Scatter(x=df.Date, y=df['Adj Close'], name='Adj Close'))
fig.update_layout(xaxis_title='Date', yaxis_title='Price')
fig.show()
665 % (len(df) - 1)
code
2014493/cell_9
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.Age = all_data.Age.fillna(all_data.Age.median())
all_data.Fare = all_data.Fare.fillna(all_data.Fare.median())
all_data.Embarked = all_data.Embarked.fillna(all_data.Embarked.mode()[0])
all_data = all_data.drop(['Name'], axis=1)
all_data = all_data.drop(['Ticket'], axis=1)
all_data = all_data.drop(['Cabin'], axis=1)
all_data = pd.get_dummies(all_data)
train_cleared = all_data[:train.shape[0]]
X_train, X_test, y_train, y_test = train_test_split(train_cleared, train.Survived, random_state=0)
X_val = all_data[train.shape[0]:]
tree = DecisionTreeClassifier(random_state=0)
tree.fit(X_train, y_train)
print('Train score: {:.3f}'.format(tree.score(X_train, y_train)))
print('Test score: {:.3f}'.format(tree.score(X_test, y_test)))
decision_tree_predicts = tree.predict(X_val)
code
2014493/cell_4
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.info()
code
2014493/cell_6
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.Age = all_data.Age.fillna(all_data.Age.median())
all_data.Fare = all_data.Fare.fillna(all_data.Fare.median())
all_data.Embarked = all_data.Embarked.fillna(all_data.Embarked.mode()[0])
all_data = all_data.drop(['Name'], axis=1)
all_data = all_data.drop(['Ticket'], axis=1)
all_data = all_data.drop(['Cabin'], axis=1)
all_data.info()
code
2014493/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.Age = all_data.Age.fillna(all_data.Age.median())
all_data.Fare = all_data.Fare.fillna(all_data.Fare.median())
all_data.Embarked = all_data.Embarked.fillna(all_data.Embarked.mode()[0])
all_data = all_data.drop(['Name'], axis=1)
all_data = all_data.drop(['Ticket'], axis=1)
all_data = all_data.drop(['Cabin'], axis=1)
all_data = pd.get_dummies(all_data)
all_data.head()
code
2014493/cell_8
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.Age = all_data.Age.fillna(all_data.Age.median())
all_data.Fare = all_data.Fare.fillna(all_data.Fare.median())
all_data.Embarked = all_data.Embarked.fillna(all_data.Embarked.mode()[0])
all_data = all_data.drop(['Name'], axis=1)
all_data = all_data.drop(['Ticket'], axis=1)
all_data = all_data.drop(['Cabin'], axis=1)
all_data = pd.get_dummies(all_data)
train_cleared = all_data[:train.shape[0]]
train_cleared.info()
X_train, X_test, y_train, y_test = train_test_split(train_cleared, train.Survived, random_state=0)
X_train.info()
X_val = all_data[train.shape[0]:]
X_val.info()
code
2014493/cell_10
[ "text_plain_output_1.png" ]
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from subprocess import check_output
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.Age = all_data.Age.fillna(all_data.Age.median())
all_data.Fare = all_data.Fare.fillna(all_data.Fare.median())
all_data.Embarked = all_data.Embarked.fillna(all_data.Embarked.mode()[0])
all_data = all_data.drop(['Name'], axis=1)
all_data = all_data.drop(['Ticket'], axis=1)
all_data = all_data.drop(['Cabin'], axis=1)
all_data = pd.get_dummies(all_data)
train_cleared = all_data[:train.shape[0]]
X_train, X_test, y_train, y_test = train_test_split(train_cleared, train.Survived, random_state=0)
X_val = all_data[train.shape[0]:]
tree = DecisionTreeClassifier(random_state=0)
tree.fit(X_train, y_train)
decision_tree_predicts = tree.predict(X_val)
result = pd.DataFrame({'PassengerId': test.PassengerId, 'Survived': decision_tree_predicts})
result.to_csv('DecisionTree.csv', index=False)
result.info()
code
2014493/cell_5
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

train = pd.read_csv('../input/train.csv')
test = pd.read_csv('../input/test.csv')
all_data = pd.concat((train.loc[:, 'Pclass':'Embarked'], test.loc[:, 'Pclass':'Embarked']))
all_data.Age = all_data.Age.fillna(all_data.Age.median())
all_data.Fare = all_data.Fare.fillna(all_data.Fare.median())
all_data.Embarked = all_data.Embarked.fillna(all_data.Embarked.mode()[0])
all_data.info()
code
34120753/cell_13
[ "text_html_output_1.png" ]
import os
import pandas as pd

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
train_df.sample(n=1)['text']
train_df.dtypes
test_df.dtypes
f"The 3 sentiments are '{', '.join(train_df.sentiment.unique().tolist())}'"
code
34120753/cell_9
[ "image_output_1.png" ]
import os
import pandas as pd

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
train_df.sample(n=1)['text']
train_df.dtypes
code
34120753/cell_25
[ "text_plain_output_1.png" ]
from nltk import tokenize
import os
import pandas as pd
import string

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
train_df.sample(n=1)['text']
train_df.dtypes
test_df.dtypes
f"The 3 sentiments are '{', '.join(train_df.sentiment.unique().tolist())}'"
train_df.dropna(inplace=True)
train_df.columns
full_df = pd.concat([train_df, test_df])
full_df.loc[:, 'text_lower'] = full_df.text.str.lower()

def count_punctuations(text):
    # tally every punctuation character that appears in one tweet
    text_punctuations = []
    punctuation_list = list(string.punctuation)
    for token in tokenize.word_tokenize(text):
        if not token.isalpha():
            if len(token) > 1:
                sub_tokens = list(token)
                for sub_token in sub_tokens:
                    if sub_token in punctuation_list:
                        text_punctuations.append(sub_token)
            else:
                text_punctuations.append(token)
    return pd.Series(text_punctuations).value_counts()

count_punctuation_df = full_df.text.apply(count_punctuations)
code
34120753/cell_11
[ "text_plain_output_1.png" ]
import os
import pandas as pd

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
test_df.dtypes
code
34120753/cell_19
[ "text_plain_output_1.png" ]
import os
import pandas as pd

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
train_df.sample(n=1)['text']
train_df.dtypes
test_df.dtypes
f"The 3 sentiments are '{', '.join(train_df.sentiment.unique().tolist())}'"
train_df.dropna(inplace=True)
train_df.columns
code
34120753/cell_1
[ "text_plain_output_2.png", "text_plain_output_1.png" ]
!pip install --upgrade pandas-profiling
!pip install --upgrade hypertools
!pip install --upgrade pandas
code
34120753/cell_7
[ "application_vnd.jupyter.stderr_output_1.png" ]
import os
import pandas as pd

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
train_df.head()
code
34120753/cell_8
[ "image_output_1.png" ]
import os
import pandas as pd

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
train_df.sample(n=1)['text']
code
34120753/cell_15
[ "text_plain_output_1.png" ]
import os
import pandas as pd

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
train_df.sample(n=1)['text']
train_df.dtypes
test_df.dtypes
f"The 3 sentiments are '{', '.join(train_df.sentiment.unique().tolist())}'"
print(f"Train NaN's:\n{train_df.isna().sum()}")
code
34120753/cell_16
[ "text_plain_output_1.png" ]
import os
import pandas as pd

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
train_df.sample(n=1)['text']
train_df.dtypes
test_df.dtypes
print(f"Test NaN's:\n{test_df.isna().sum()}")
code
34120753/cell_22
[ "text_plain_output_1.png" ]
import os
import pandas as pd

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
train_df.sample(n=1)['text']
train_df.dtypes
test_df.dtypes
f"The 3 sentiments are '{', '.join(train_df.sentiment.unique().tolist())}'"
train_df.dropna(inplace=True)
train_df.columns
full_df = pd.concat([train_df, test_df])
full_df.head()
code
34120753/cell_10
[ "text_html_output_1.png" ]
import os
import pandas as pd

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
test_df.head()
code
34120753/cell_27
[ "text_plain_output_1.png" ]
from nltk import tokenize
import matplotlib.pyplot as plt
import os
import pandas as pd
import string

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
train_df.sample(n=1)['text']
train_df.dtypes
test_df.dtypes
f"The 3 sentiments are '{', '.join(train_df.sentiment.unique().tolist())}'"
train_df.dropna(inplace=True)
train_df.columns
full_df = pd.concat([train_df, test_df])
full_df.loc[:, 'text_lower'] = full_df.text.str.lower()

def count_punctuations(text):
    # tally every punctuation character that appears in one tweet
    text_punctuations = []
    punctuation_list = list(string.punctuation)
    for token in tokenize.word_tokenize(text):
        if not token.isalpha():
            if len(token) > 1:
                sub_tokens = list(token)
                for sub_token in sub_tokens:
                    if sub_token in punctuation_list:
                        text_punctuations.append(sub_token)
            else:
                text_punctuations.append(token)
    return pd.Series(text_punctuations).value_counts()

count_punctuation_df = full_df.text.apply(count_punctuations)
count_punctuation_df.fillna(0, inplace=True)
count_punctuation_df.loc[:, 'sentiment'] = full_df.sentiment
grouped_df = count_punctuation_df.groupby('sentiment').sum()
groups = ['negative', 'neutral', 'positive']
colors = ['red', 'silver', 'green']
fig = plt.figure(figsize=(15, 8))
for i, (group, color) in enumerate(zip(groups, colors), start=1):
    ax = fig.add_subplot(1, 3, i)
    grouped_df.loc[group].sort_values(ascending=False).head(10).plot(kind='bar', color=color, title=f'Top 10 punctuation for {group} sentiment', ax=ax, rot=0)
fig.tight_layout()
code
34120753/cell_12
[ "text_plain_output_1.png" ]
import os
import pandas as pd

base_path = '/kaggle'
if os.path.exists(base_path):
    input_path = os.path.join(base_path, 'input', 'tweet-sentiment-extraction')
    output_path = os.path.join(base_path, 'working')
else:
    base_path = 'data'
    input_path = base_path
    output_path = os.path.join(base_path, 'submissions')
train_file = os.path.join(input_path, 'train.csv')
test_file = os.path.join(input_path, 'test.csv')
train_df = pd.read_csv(train_file)
test_df = pd.read_csv(test_file)
train_df.sample(n=1)['text']
train_df.dtypes
test_df.dtypes
print(f'Train size {train_df.shape[0]}')
print(f'Text size {test_df.shape[0]}')
code
50228570/cell_9
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_digits
from sklearn.metrics import accuracy_score
import numpy as np

digits = load_digits()
X = digits.data
y = digits.target
p = 0.75
idx = int(p * X.shape[0]) + 1
X_train, X_test = np.split(X, [idx])
y_train, y_test = np.split(y, [idx])

def euclidian_metric(x, y):
    return np.sqrt(np.sum((x - y) ** 2))

# 1-nearest-neighbour prediction: each test sample takes the label of
# the closest training sample under the Euclidean metric
y_pred_knn = []
for test_value in X_test:
    ind_min_metric = 0
    min_metric = euclidian_metric(test_value, X_train[0])
    for index, train_value in enumerate(X_train):
        metric = euclidian_metric(test_value, train_value)
        if metric < min_metric:
            min_metric = metric
            ind_min_metric = index
    y_pred_knn.append(y_train[ind_min_metric])
knn_err_rate = 1 - accuracy_score(y_test, y_pred_knn)
print('1nn classifier error: ' + str(knn_err_rate))
code
50228570/cell_4
[ "text_plain_output_1.png" ]
from sklearn.datasets import load_digits

digits = load_digits()
X = digits.data
y = digits.target
print(digits.DESCR)
code
50228570/cell_11
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier

rf_clf = RandomForestClassifier(n_estimators=1000)
rf_clf.fit(X_train, y_train)
code
50228570/cell_12
[ "text_plain_output_1.png" ]
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score

rf_clf = RandomForestClassifier(n_estimators=1000)
rf_clf.fit(X_train, y_train)
y_pred_rf = rf_clf.predict(X_test)
rf_err_rate = 1 - accuracy_score(y_test, y_pred_rf)
print('Random forest classifier error: ' + str(rf_err_rate))
code
32072376/cell_9
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.columns
youtube_df.shape[0]
code
32072376/cell_20
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.columns
youtube_df.shape[0]
youtube = youtube_df[['video_id', 'title', 'channel_title', 'category_id', 'views', 'likes', 'dislikes']]
avg_views = round(youtube['views'].mean())
avg_views
condition = youtube[youtube.views >= avg_views].shape[0]
round(condition / youtube.shape[0] * 100)
youtube['sentiment'] = round(youtube['dislikes'] / (youtube['likes'] + youtube['dislikes']) * 100)
avg_senti = youtube['sentiment'].mean()
avg_senti
code
32072376/cell_29
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.columns
youtube_df.shape[0]
youtube = youtube_df[['video_id', 'title', 'channel_title', 'category_id', 'views', 'likes', 'dislikes']]
avg_views = round(youtube['views'].mean())
avg_views
condition = youtube[youtube.views >= avg_views].shape[0]
round(condition / youtube.shape[0] * 100)
# for the sake of being concise, we've directly modified the matplotlib object returned by the .plot() method
# you could plot these two scatter plots separately and still see the relationship!
ax1 = youtube.plot(kind='scatter', x='views', y='likes', color='g')  # likes are in green
ax2 = youtube.plot(kind='scatter', x='views', y='dislikes', color='r', ax=ax1)  # dislikes are in red
ax2.set_ylabel('viewer interactions (likes/dislikes)')
youtube.groupby('title')['views', 'sentiment'].mean().sort_values('views', ascending=False).head()
total_views = youtube.groupby('category_id')['views'].sum().sort_values(ascending=False).head(10)
total_views.plot.barh()
code
32072376/cell_2
[ "text_plain_output_1.png" ]
import os

import json
import numpy as np
import pandas as pd

for dirname, _, filenames in os.walk('/kaggle/input'):
    for filename in filenames:
        print(os.path.join(dirname, filename))
code
32072376/cell_7
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.columns
code
32072376/cell_18
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.columns
youtube_df.shape[0]
youtube = youtube_df[['video_id', 'title', 'channel_title', 'category_id', 'views', 'likes', 'dislikes']]
avg_views = round(youtube['views'].mean())
avg_views
condition = youtube[youtube.views >= avg_views].shape[0]
round(condition / youtube.shape[0] * 100)
code
32072376/cell_16
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.columns
youtube_df.shape[0]
youtube = youtube_df[['video_id', 'title', 'channel_title', 'category_id', 'views', 'likes', 'dislikes']]
avg_views = round(youtube['views'].mean())
avg_views
code
32072376/cell_35
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.columns
youtube_df.shape[0]
youtube = youtube_df[['video_id', 'title', 'channel_title', 'category_id', 'views', 'likes', 'dislikes']]
avg_views = round(youtube['views'].mean())
avg_views
condition = youtube[youtube.views >= avg_views].shape[0]
round(condition / youtube.shape[0] * 100)
# for the sake of being concise, we've directly modified the matplotlib object returned by the .plot() method
# you could plot these two scatter plots separately and still see the relationship!
ax1 = youtube.plot(kind='scatter', x='views', y='likes', color='g')  # likes are in green
ax2 = youtube.plot(kind='scatter', x='views', y='dislikes', color='r', ax=ax1)  # dislikes are in red
ax2.set_ylabel('viewer interactions (likes/dislikes)')
youtube.groupby('title')['views', 'sentiment'].mean().sort_values('views', ascending=False).head()
total_views = youtube.groupby('category_id')['views'].sum().sort_values(ascending=False).head(10)
total_views.plot.barh()
average_views = youtube.groupby('category_id')['views'].mean().sort_values(ascending=False).head(10)
average_views.plot.barh()
youtube.groupby('channel_title')['title'].size().sort_values(ascending=False).head(10)
code
32072376/cell_31
[ "text_html_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.columns
youtube_df.shape[0]
youtube = youtube_df[['video_id', 'title', 'channel_title', 'category_id', 'views', 'likes', 'dislikes']]
avg_views = round(youtube['views'].mean())
avg_views
condition = youtube[youtube.views >= avg_views].shape[0]
round(condition / youtube.shape[0] * 100)
# for the sake of being concise, we've directly modified the matplotlib object returned by the .plot() method
# you could plot these two scatter plots separately and still see the relationship!
ax1 = youtube.plot(kind='scatter', x='views', y='likes', color='g')  # likes are in green
ax2 = youtube.plot(kind='scatter', x='views', y='dislikes', color='r', ax=ax1)  # dislikes are in red
ax2.set_ylabel('viewer interactions (likes/dislikes)')
youtube.groupby('title')['views', 'sentiment'].mean().sort_values('views', ascending=False).head()
total_views = youtube.groupby('category_id')['views'].sum().sort_values(ascending=False).head(10)
total_views.plot.barh()
average_views = youtube.groupby('category_id')['views'].mean().sort_values(ascending=False).head(10)
average_views.plot.barh()
code
32072376/cell_24
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.columns
youtube_df.shape[0]
youtube = youtube_df[['video_id', 'title', 'channel_title', 'category_id', 'views', 'likes', 'dislikes']]
avg_views = round(youtube['views'].mean())
avg_views
condition = youtube[youtube.views >= avg_views].shape[0]
round(condition / youtube.shape[0] * 100)
# for the sake of being concise, we've directly modified the matplotlib object returned by the .plot() method
# you could plot these two scatter plots separately and still see the relationship!
ax1 = youtube.plot(kind='scatter', x='views', y='likes', color='g')  # likes are in green
ax2 = youtube.plot(kind='scatter', x='views', y='dislikes', color='r', ax=ax1)  # dislikes are in red
ax2.set_ylabel('viewer interactions (likes/dislikes)')
youtube.groupby('title')['views', 'sentiment'].mean().sort_values('views', ascending=False).head()
code
32072376/cell_22
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.columns
youtube_df.shape[0]
youtube = youtube_df[['video_id', 'title', 'channel_title', 'category_id', 'views', 'likes', 'dislikes']]
avg_views = round(youtube['views'].mean())
avg_views
condition = youtube[youtube.views >= avg_views].shape[0]
round(condition / youtube.shape[0] * 100)
ax1 = youtube.plot(kind='scatter', x='views', y='likes', color='g')
ax2 = youtube.plot(kind='scatter', x='views', y='dislikes', color='r', ax=ax1)
ax2.set_ylabel('viewer interactions (likes/dislikes)')
code
32072376/cell_27
[ "text_plain_output_2.png", "application_vnd.jupyter.stderr_output_1.png" ]
# return the top 10 categories by trending video frequency using .value_counts()
youtube['category_id']\
    .value_counts()\
    .head(10)
code
32072376/cell_37
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.columns
youtube_df.shape[0]
youtube = youtube_df[['video_id', 'title', 'channel_title', 'category_id', 'views', 'likes', 'dislikes']]
avg_views = round(youtube['views'].mean())
avg_views
condition = youtube[youtube.views >= avg_views].shape[0]
round(condition / youtube.shape[0] * 100)
# for the sake of being concise, we've directly modified the matplotlib object returned by the .plot() method
# you could plot these two scatter plots separately and still see the relationship!
ax1 = youtube.plot(kind='scatter', x='views', y='likes', color='g')  # likes are in green
ax2 = youtube.plot(kind='scatter', x='views', y='dislikes', color='r', ax=ax1)  # dislikes are in red
ax2.set_ylabel('viewer interactions (likes/dislikes)')
youtube.groupby('title')['views', 'sentiment'].mean().sort_values('views', ascending=False).head()
total_views = youtube.groupby('category_id')['views'].sum().sort_values(ascending=False).head(10)
total_views.plot.barh()
average_views = youtube.groupby('category_id')['views'].mean().sort_values(ascending=False).head(10)
average_views.plot.barh()
youtube.groupby('channel_title')['title'].size().sort_values(ascending=False).head(10)
youtube.groupby('channel_title')['views'].mean().sort_values(ascending=False).head(10)
code
32072376/cell_12
[ "text_plain_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.columns
youtube_df.shape[0]
youtube = youtube_df[['video_id', 'title', 'channel_title', 'category_id', 'views', 'likes', 'dislikes']]
youtube.tail(3)
code
32072376/cell_5
[ "text_plain_output_1.png", "image_output_1.png" ]
import pandas as pd  # data processing, CSV file I/O (e.g. pd.read_csv)

youtube_df = pd.read_csv('/kaggle/input/youtube-new/USvideos.csv')
youtube_df.head()
code
122247715/cell_42
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic['embarked'].value_counts()
code
122247715/cell_63
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic.isnull().sum()
pd.get_dummies(titanic['who'])
pd.get_dummies(titanic['who'], drop_first=True)
pd.get_dummies(titanic['class'], drop_first=True)
pd.get_dummies(titanic['embarked'], drop_first=True)
titanic = pd.concat([titanic, pd.get_dummies(titanic['who'], drop_first=True)], axis=1)
titanic
code
122247715/cell_9
[ "text_html_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.info()
code
122247715/cell_57
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic.isnull().sum()
pd.get_dummies(titanic['who'])
code
122247715/cell_56
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic.isnull().sum()
titanic['who'].value_counts()
code
122247715/cell_34
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic['who'].value_counts()
code
122247715/cell_23
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic['embarked'].value_counts()
code
122247715/cell_30
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
code
122247715/cell_39
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic['pclass'].value_counts()
code
122247715/cell_26
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic['embark_town'].mode()[0]
code
122247715/cell_65
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic.isnull().sum()
pd.get_dummies(titanic['who'])
pd.get_dummies(titanic['who'], drop_first=True)
pd.get_dummies(titanic['class'], drop_first=True)
pd.get_dummies(titanic['embarked'], drop_first=True)
titanic = pd.concat([titanic, pd.get_dummies(titanic['who'], drop_first=True)], axis=1)
titanic = pd.concat([titanic, pd.get_dummies(titanic['class'], drop_first=True)], axis=1)
titanic['man'].dtype
code
122247715/cell_41
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic['sibsp'].value_counts()
code
122247715/cell_54
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic.isnull().sum()
code
122247715/cell_11
[ "text_html_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
code
122247715/cell_60
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic.isnull().sum()
pd.get_dummies(titanic['who'])
pd.get_dummies(titanic['who'], drop_first=True)
pd.get_dummies(titanic['class'], drop_first=True)
pd.get_dummies(titanic['embarked'], drop_first=True)
code
122247715/cell_19
[ "text_plain_output_1.png", "image_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic['age'].median()
code
122247715/cell_45
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic['survived'].value_counts()
code
122247715/cell_18
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic['age'].mean()
code
122247715/cell_32
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic.head()
code
122247715/cell_51
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
x = ['class', 'age', 'sibsp', 'parch', 'fare', 'embarked', 'who', 'alone']
y = ['survived']
titanic[x].info()
code
122247715/cell_59
[ "text_html_output_1.png" ]
import pandas as pd
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic.isnull().sum()
pd.get_dummies(titanic['who'])
pd.get_dummies(titanic['who'], drop_first=True)
pd.get_dummies(titanic['class'], drop_first=True)
code
122247715/cell_58
[ "text_plain_output_1.png" ]
import pandas as pd
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic.isnull().sum()
pd.get_dummies(titanic['who'])
pd.get_dummies(titanic['who'], drop_first=True)
code
122247715/cell_28
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
code
122247715/cell_8
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.head()
code
122247715/cell_15
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
code
122247715/cell_38
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic['class'].value_counts()
code
122247715/cell_17
[ "text_plain_output_1.png" ]
import matplotlib.pyplot as plt
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
plt.hist(titanic['age'])
code
122247715/cell_35
[ "text_html_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic['sex'].value_counts()
code
122247715/cell_43
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic['embark_town'].value_counts()
code
122247715/cell_46
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic.isnull().sum()
titanic.drop('deck', axis=1, inplace=True)
titanic.isnull().sum()
titanic['alive'].value_counts()
code
122247715/cell_24
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
titanic.isnull().sum() / titanic.shape[0]
titanic['embarked'].mode()[0]
code
122247715/cell_14
[ "text_plain_output_1.png" ]
import seaborn as sns

titanic = sns.load_dataset('titanic')
titanic.isnull().sum()
titanic.shape[0]
code